author    Dave Airlie <airlied@redhat.com>    2016-08-25 12:36:36 +1000
committer Dave Airlie <airlied@redhat.com>    2016-08-25 12:36:36 +1000
commit    51d6120792ab5f46d6f5f7f37b65d05cc1afc019 (patch)
tree      48a1c5fb08e3a794d1e2a9a88fbffe85a9b34869 /include
parent    78acdd4a7e5a5de56c4ac1e10390a98b7c605ed6 (diff)
parent    351243897b15aba02ad15317724d616aeaf00c7d (diff)
Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm-next
drm-intel-next-2016-08-22:
- bugfixes and cleanups for rcu-protected requests (Chris)
- atomic modeset fixes for gpu reset on pre-g4x (Maarten & Ville)
- guc submission improvements (Dave Gordon)
- panel power sequence cleanup (Imre)
- better use of stolen and unmappable ggtt (Chris), plus prep work to make that happen
- rework of framebuffer offsets, prep for multi-plane framebuffers (Ville)
- fully partial ggtt vmaps, including fenced ones (Chris)
- move lots more of the gem tracking from the object to the vma (Chris)
- tune the command parser (Chris)
- allow fbc without fences on recent platforms (Chris)
- fbc frontbuffer tracking fixes (Chris)
- fast prefaulting using io-mapping.h pgprot caching (Chris)

* 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel: (141 commits)
  io-mapping: Fixup for different names of writecombine
  io-mapping.h: s/PAGE_KERNEL_IO/PAGE_KERNEL/
  drm/i915: Update DRIVER_DATE to 20160822
  drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass
  drm/i915: Embed the io-mapping struct inside drm_i915_private
  io-mapping: Always create a struct to hold metadata about the io-mapping
  drm/i915/fbc: Allow on unfenced surfaces, for recent gen
  drm/i915/fbc: Don't set an illegal fence if unfenced
  drm/i915: Flush delayed fence releases after reset
  drm/i915: Reattach comment, complete type specification
  drm/i915/cmdparser: Accelerate copies from WC memory
  drm/i915/cmdparser: Use binary search for faster register lookup
  drm/i915/cmdparser: Check for SKIP descriptors first
  drm/i915/cmdparser: Compare against the previous command descriptor
  drm/i915/cmdparser: Improve hash function
  drm/i915/cmdparser: Only cache the dst vmap
  drm/i915/cmdparser: Use cached vmappings
  drm/i915/cmdparser: Add the TIMESTAMP register for the other engines
  drm/i915/cmdparser: Make initialisation failure non-fatal
  drm/i915: Stop discarding GTT cache-domain on unbind vma
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/drm/i915_drm.h        2
-rw-r--r--  include/linux/io-mapping.h   98
-rw-r--r--  include/uapi/drm/i915_drm.h  16
3 files changed, 79 insertions, 37 deletions
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b1755f8..4e1b274 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -93,6 +93,6 @@ extern bool i915_gpu_turbo_disable(void);
#define I845_TSEG_SIZE_1M (3 << 1)
#define INTEL_BSM 0x5c
-#define INTEL_BSM_MASK (0xFFFF << 20)
+#define INTEL_BSM_MASK (-(1u << 20))
#endif /* _I915_DRM_H_ */
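
The INTEL_BSM_MASK change above only affects how the mask is written: -(1u << 20) keeps bit 20 and everything above it (0xFFF00000 as a 32-bit value). A minimal, hypothetical sketch of how such a mask is applied when reading the Base of Stolen Memory dword from PCI config space; the function and variable names are invented for illustration and are not part of this patch:

/* Hypothetical sketch: read the BSM dword from PCI config space and
 * mask off the low bits to recover the stolen-memory base address. */
#include <linux/pci.h>
#include <drm/i915_drm.h>

static u32 example_stolen_base(struct pci_dev *pdev)
{
	u32 bsm;

	pci_read_config_dword(pdev, INTEL_BSM, &bsm);
	return bsm & INTEL_BSM_MASK;	/* -(1u << 20) == 0xFFF00000 */
}
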
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 645ad06..58df02b 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -31,16 +31,16 @@
* See Documentation/io-mapping.txt
*/
-#ifdef CONFIG_HAVE_ATOMIC_IOMAP
-
-#include <asm/iomap.h>
-
struct io_mapping {
resource_size_t base;
unsigned long size;
pgprot_t prot;
+ void __iomem *iomem;
};
+#ifdef CONFIG_HAVE_ATOMIC_IOMAP
+
+#include <asm/iomap.h>
/*
* For small address space machines, mapping large objects
* into the kernel virtual space isn't practical. Where
@@ -49,34 +49,25 @@ struct io_mapping {
*/
static inline struct io_mapping *
-io_mapping_create_wc(resource_size_t base, unsigned long size)
+io_mapping_init_wc(struct io_mapping *iomap,
+ resource_size_t base,
+ unsigned long size)
{
- struct io_mapping *iomap;
pgprot_t prot;
- iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
- if (!iomap)
- goto out_err;
-
if (iomap_create_wc(base, size, &prot))
- goto out_free;
+ return NULL;
iomap->base = base;
iomap->size = size;
iomap->prot = prot;
return iomap;
-
-out_free:
- kfree(iomap);
-out_err:
- return NULL;
}
static inline void
-io_mapping_free(struct io_mapping *mapping)
+io_mapping_fini(struct io_mapping *mapping)
{
iomap_free(mapping->base, mapping->size);
- kfree(mapping);
}
/* Atomic map/unmap */
@@ -121,21 +112,46 @@ io_mapping_unmap(void __iomem *vaddr)
#else
#include <linux/uaccess.h>
-
-/* this struct isn't actually defined anywhere */
-struct io_mapping;
+#include <asm/pgtable.h>
/* Create the io_mapping object*/
static inline struct io_mapping *
-io_mapping_create_wc(resource_size_t base, unsigned long size)
+io_mapping_init_wc(struct io_mapping *iomap,
+ resource_size_t base,
+ unsigned long size)
{
- return (struct io_mapping __force *) ioremap_wc(base, size);
+ iomap->base = base;
+ iomap->size = size;
+ iomap->iomem = ioremap_wc(base, size);
+#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
+ iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
+#elif defined(pgprot_writecombine)
+ iomap->prot = pgprot_writecombine(PAGE_KERNEL);
+#else
+ iomap->prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+
+ return iomap;
}
static inline void
-io_mapping_free(struct io_mapping *mapping)
+io_mapping_fini(struct io_mapping *mapping)
+{
+ iounmap(mapping->iomem);
+}
+
+/* Non-atomic map/unmap */
+static inline void __iomem *
+io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ unsigned long size)
+{
+ return mapping->iomem + offset;
+}
+
+static inline void
+io_mapping_unmap(void __iomem *vaddr)
{
- iounmap((void __force __iomem *) mapping);
}
/* Atomic map/unmap */
@@ -145,30 +161,42 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
{
preempt_disable();
pagefault_disable();
- return ((char __force __iomem *) mapping) + offset;
+ return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
+ io_mapping_unmap(vaddr);
pagefault_enable();
preempt_enable();
}
-/* Non-atomic map/unmap */
-static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping,
- unsigned long offset,
- unsigned long size)
+#endif /* HAVE_ATOMIC_IOMAP */
+
+static inline struct io_mapping *
+io_mapping_create_wc(resource_size_t base,
+ unsigned long size)
{
- return ((char __force __iomem *) mapping) + offset;
+ struct io_mapping *iomap;
+
+ iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
+ if (!iomap)
+ return NULL;
+
+ if (!io_mapping_init_wc(iomap, base, size)) {
+ kfree(iomap);
+ return NULL;
+ }
+
+ return iomap;
}
static inline void
-io_mapping_unmap(void __iomem *vaddr)
+io_mapping_free(struct io_mapping *iomap)
{
+ io_mapping_fini(iomap);
+ kfree(iomap);
}
-#endif /* HAVE_ATOMIC_IOMAP */
-
#endif /* _LINUX_IO_MAPPING_H */
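
Taken together, the io-mapping.h changes split allocation from initialisation: io_mapping_init_wc() and io_mapping_fini() operate on a caller-owned struct io_mapping (so it can be embedded in a larger structure, as the i915 patches in this pull do for drm_i915_private), while io_mapping_create_wc() and io_mapping_free() become common wrappers that kmalloc/kfree the struct and defer to the new pair. A minimal, hypothetical driver sketch of the embedded usage; all example_* names are invented:

/* Hypothetical driver sketch, not part of this patch: embed the
 * io_mapping in a device-private structure and use the new
 * init/fini entry points plus the non-atomic map/unmap helpers. */
#include <linux/io-mapping.h>
#include <linux/io.h>

struct example_private {
	struct io_mapping mappable;	/* embedded, no separate kmalloc */
};

static int example_setup(struct example_private *priv,
			 resource_size_t base, unsigned long size)
{
	if (!io_mapping_init_wc(&priv->mappable, base, size))
		return -EIO;
	return 0;
}

static void example_poke(struct example_private *priv,
			 unsigned long offset, u32 value)
{
	void __iomem *vaddr;

	vaddr = io_mapping_map_wc(&priv->mappable, offset, PAGE_SIZE);
	writel(value, vaddr);
	io_mapping_unmap(vaddr);
}

static void example_teardown(struct example_private *priv)
{
	io_mapping_fini(&priv->mappable);
}
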
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 452629d..5501fe8 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -855,7 +855,16 @@ struct drm_i915_gem_busy {
* having flushed any pending activity), and a non-zero return that
* the object is still in-flight on the GPU. (The GPU has not yet
* signaled completion for all pending requests that reference the
- * object.)
+ * object.) An object is guaranteed to become idle eventually (so
+ * long as no new GPU commands are executed upon it). Due to the
+ * asynchronous nature of the hardware, an object reported
+ * as busy may become idle before the ioctl is completed.
+ *
+ * Furthermore, if the object is busy, which engine is busy is only
+ * provided as a guide. There are race conditions which prevent the
+ * report of which engines are busy from being always accurate.
+ * However, the converse is not true. If the object is idle, the
+ * result of the ioctl, that all engines are idle, is accurate.
*
* The returned dword is split into two fields to indicate both
* the engines on which the object is being read, and the
@@ -878,6 +887,11 @@ struct drm_i915_gem_busy {
* execution engines, e.g. multiple media engines, which are
* mapped to the same identifier in the EXECBUFFER2 ioctl and
* so are not separately reported for busyness.
+ *
+ * Caveat emptor:
+ * Only the boolean result of this query is reliable; that is whether
+ * the object is idle or busy. The report of which engines are busy
+ * should be only used as a heuristic.
*/
__u32 busy;
};
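
The new wording above amounts to a simple contract for userspace: only the zero/non-zero result of the busy ioctl is reliable, and the per-engine bits should be treated as a hint. A hypothetical userspace sketch along those lines; the header path and the gem_bo_busy() helper are assumptions for illustration, not part of this patch:

/* Hypothetical userspace sketch: ask whether a GEM object is still busy
 * and trust only the boolean answer, per the documentation above. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* header path depends on the libdrm install */

static int gem_bo_busy(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy;

	memset(&busy, 0, sizeof(busy));
	busy.handle = handle;
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
		return -1;		/* ioctl failed */

	/* Engine bits in busy.busy are only a heuristic; the boolean is not. */
	return busy.busy != 0;
}

Real code would typically issue this through libdrm's drmIoctl(), which restarts the call on EINTR.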