author    Ben Widawsky <benjamin.widawsky@intel.com>    2013-10-16 09:18:21 -0700
committer Daniel Vetter <daniel.vetter@ffwll.ch>        2013-10-18 15:40:21 +0200
commit    b35b380ed46bb01726bec1795e6443e625306757 (patch)
tree      f2b4868b97dc59e05699066ad06046f7e9186cbd
parent    cd66407810869709e0d6764409137d48c6812cfc (diff)
drm/i915: Make PTE valid encoding optional
We need this to work around a corruption when the boot kernel image
loads the hibernated kernel image from swap on Haswell systems -
somehow not everything is properly shut off. This is just the prep
work; the next patch will implement the actual workaround.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Add a commit message suitable for -fixes and add cc: stable]
Cc: stable@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h      |  3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  | 35
2 files changed, 22 insertions(+), 16 deletions(-)
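Every call site converted by this patch passes valid = true, so behaviour
is unchanged for now; the flag only matters once the follow-up workaround
lands. A minimal sketch of the new signature's semantics (addr stands for
any DMA address here, purely for illustration):

/* valid = true keeps today's behaviour: bit 0 (GEN6_PTE_VALID) set. */
gen6_gtt_pte_t live = vm->pte_encode(addr, I915_CACHE_LLC, true);

/* valid = false encodes the same address and cache bits but leaves
 * bit 0 clear, so the GPU treats the entry as not present. */
gen6_gtt_pte_t dead = vm->pte_encode(addr, I915_CACHE_LLC, false);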
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 35874b3..3979a81 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -497,7 +497,8 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level);
+ enum i915_cache_level level,
+ bool valid); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries);
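pte_encode is a per-platform hook: each GEN installs its own encoder
behind this single three-argument signature. A sketch of the wiring,
using the encoder names from this patch; the exact selection logic is
an assumption about the probe code, which is not part of this diff:

/* Assumed probe-time wiring (selection conditions are illustrative): */
if (IS_HASWELL(dev))
	dev_priv->gtt.base.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev))
	dev_priv->gtt.base.pte_encode = byt_pte_encode;
else
	dev_priv->gtt.base.pte_encode = snb_pte_encode;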
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 212f6d8..32aa69d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -58,9 +58,10 @@
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ bool valid)
{
- gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
}
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ bool valid)
{
- gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ bool valid)
{
- gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
/* Mark the page as writeable. Other platforms don't have a
@@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
}
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ bool valid)
{
- gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
@@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
}
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
+ enum i915_cache_level level,
+ bool valid)
{
- gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
switch (level) {
@@ -245,7 +250,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -282,7 +287,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
dma_addr_t page_addr;
page_addr = sg_page_iter_dma_address(&sg_iter);
- pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
+ pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
act_pt++;
@@ -536,7 +541,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
+ iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
i++;
}
@@ -548,7 +553,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) !=
- vm->pte_encode(addr, level));
+ vm->pte_encode(addr, level, true));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -573,7 +578,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
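With every caller above passing valid = true, this patch is pure
plumbing. For context, a hedged sketch of what the follow-up could look
like; the function name and body below are assumptions modelled on
gen6_ggtt_clear_range above, not code from the actual workaround patch:

/* Hypothetical follow-up (assumed shape, not part of this patch):
 * scrub a GGTT range with invalid PTEs so the boot kernel cannot
 * dereference stale mappings while loading the hibernation image. */
static void gen6_ggtt_invalidate_range(struct i915_address_space *vm,
				       unsigned int first_entry,
				       unsigned int num_entries)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t scratch_pte;
	gen6_gtt_pte_t __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	unsigned int i;

	/* valid = false: same scratch address, but GEN6_PTE_VALID stays
	 * clear, so the hardware treats each entry as unmapped. */
	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, false);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);	/* posting read: flush PTE writes before use */
}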