Diffstat:
-rw-r--r--  sys/x86/iommu/intel_ctx.c      13
-rw-r--r--  sys/x86/iommu/intel_dmar.h      5
-rw-r--r--  sys/x86/iommu/intel_idpgtbl.c  21
-rw-r--r--  sys/x86/iommu/intel_utils.c    36
4 files changed, 54 insertions(+), 21 deletions(-)
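
What changed, in brief: cache maintenance for DMAR translation structures moves out of dmar_unmap_pgtbl(), which used to write back the whole page when a non-coherent unit's table was unmapped, and into new per-entry helpers (dmar_flush_root_to_ram(), dmar_flush_ctx_to_ram(), dmar_flush_pte_to_ram()) called right after each store. A rough userspace sketch of the two strategies; the clflush idiom and the CACHE_LINE/PGSZ values are illustrative assumptions, while the kernel itself uses pmap_invalidate_cache_pages()/pmap_invalidate_cache_range():

#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>          /* _mm_clflush, _mm_mfence (SSE2) */

#define CACHE_LINE 64
#define PGSZ       4096

/* Before: dmar_unmap_pgtbl(sf, coherent) wrote back the entire page
 * backing the table on every unmap (pmap_invalidate_cache_pages). */
static void flush_page(void *page)
{
    for (size_t off = 0; off < PGSZ; off += CACHE_LINE)
        _mm_clflush((char *)page + off);
    _mm_mfence();               /* order flushes before later stores */
}

/* After: only the cache line(s) holding the entry just written are
 * flushed, immediately after the store (pmap_invalidate_cache_range). */
static void flush_entry(void *entry, size_t sz)
{
    uintptr_t p = (uintptr_t)entry & ~(uintptr_t)(CACHE_LINE - 1);
    for (; p < (uintptr_t)entry + sz; p += CACHE_LINE)
        _mm_clflush((void *)p);
    _mm_mfence();
}
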
diff --git a/sys/x86/iommu/intel_ctx.c b/sys/x86/iommu/intel_ctx.c
index 0b3adeb..97f4bc3 100644
--- a/sys/x86/iommu/intel_ctx.c
+++ b/sys/x86/iommu/intel_ctx.c
@@ -96,7 +96,8 @@ dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
re += bus;
dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
VM_PAGE_TO_PHYS(ctxm)));
- dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
+ dmar_flush_root_to_ram(dmar, re);
+ dmar_unmap_pgtbl(sf);
TD_PINNED_ASSERT;
}
@@ -153,6 +154,7 @@ ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp)
(DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
DMAR_CTX1_P);
}
+ dmar_flush_ctx_to_ram(unit, ctxp);
}
static int
@@ -358,7 +360,7 @@ dmar_get_ctx(struct dmar_unit *dmar, device_t dev, int bus, int slot, int func,
ctx->domain = alloc_unrl(dmar->domids);
if (ctx->domain == -1) {
DMAR_UNLOCK(dmar);
- dmar_unmap_pgtbl(sf, true);
+ dmar_unmap_pgtbl(sf);
dmar_ctx_dtr(ctx, true, true);
TD_PINNED_ASSERT;
return (NULL);
@@ -383,7 +385,7 @@ dmar_get_ctx(struct dmar_unit *dmar, device_t dev, int bus, int slot, int func,
} else {
dmar_ctx_dtr(ctx1, true, true);
}
- dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
+ dmar_unmap_pgtbl(sf);
}
ctx->refs++;
if ((ctx->flags & DMAR_CTX_RMRR) != 0)
@@ -474,7 +476,7 @@ dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
if (ctx->refs > 1) {
ctx->refs--;
DMAR_UNLOCK(dmar);
- dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
+ dmar_unmap_pgtbl(sf);
TD_PINNED_ASSERT;
return;
}
@@ -490,6 +492,7 @@ dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
*/
dmar_pte_clear(&ctxp->ctx1);
ctxp->ctx2 = 0;
+ dmar_flush_ctx_to_ram(dmar, ctxp);
dmar_inv_ctx_glob(dmar);
if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
if (dmar->qi_enabled)
@@ -507,7 +510,7 @@ dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
taskqueue_drain(dmar->delayed_taskqueue, &ctx->unload_task);
KASSERT(TAILQ_EMPTY(&ctx->unload_entries),
("unfinished unloads %p", ctx));
- dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(dmar));
+ dmar_unmap_pgtbl(sf);
free_unr(dmar->domids, ctx->domain);
dmar_ctx_dtr(ctx, true, true);
TD_PINNED_ASSERT;
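
Taken together, the intel_ctx.c hunks pin down an ordering: a root or context entry is pushed to RAM immediately after being stored or cleared, before the unit's cached copies are invalidated, so a non-coherent DMAR cannot re-read a stale entry from memory. A stubbed sketch of the teardown order in dmar_free_ctx_locked(); the stub functions are hypothetical stand-ins, with the real kernel calls named in the comments:

#include <stdint.h>

typedef struct { uint64_t ctx1, ctx2; } ctx_entry;      /* stand-in type */

static void flush_ctx_to_ram(ctx_entry *e) { (void)e; /* clflush unless coherent */ }
static void inv_ctx_glob(void)   { /* invalidate context cache */ }
static void inv_iotlb_glob(void) { /* invalidate IOTLB */ }

/* Same order as dmar_free_ctx_locked(): clear the entry, make the
 * clear visible in RAM, then drop the IOMMU's cached translations. */
static void free_ctx_entry(ctx_entry *e)
{
    e->ctx1 = 0;             /* dmar_pte_clear(&ctxp->ctx1) */
    e->ctx2 = 0;
    flush_ctx_to_ram(e);     /* dmar_flush_ctx_to_ram(dmar, ctxp) */
    inv_ctx_glob();          /* dmar_inv_ctx_glob(dmar) */
    inv_iotlb_glob();        /* dmar_inv_iotlb_glob(dmar), if ECAP.DI is set */
}
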
diff --git a/sys/x86/iommu/intel_dmar.h b/sys/x86/iommu/intel_dmar.h
index 0b68024..ae06149 100644
--- a/sys/x86/iommu/intel_dmar.h
+++ b/sys/x86/iommu/intel_dmar.h
@@ -230,11 +230,14 @@ struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
struct sf_buf **sf);
-void dmar_unmap_pgtbl(struct sf_buf *sf, bool coherent);
+void dmar_unmap_pgtbl(struct sf_buf *sf);
int dmar_load_root_entry_ptr(struct dmar_unit *unit);
int dmar_inv_ctx_glob(struct dmar_unit *unit);
int dmar_inv_iotlb_glob(struct dmar_unit *unit);
int dmar_flush_write_bufs(struct dmar_unit *unit);
+void dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst);
+void dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst);
+void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
int dmar_enable_translation(struct dmar_unit *unit);
int dmar_disable_translation(struct dmar_unit *unit);
bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
diff --git a/sys/x86/iommu/intel_idpgtbl.c b/sys/x86/iommu/intel_idpgtbl.c
index b1a8c8f..8257974 100644
--- a/sys/x86/iommu/intel_idpgtbl.c
+++ b/sys/x86/iommu/intel_idpgtbl.c
@@ -146,7 +146,7 @@ ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
}
}
/* ctx_get_idmap_pgtbl flushes CPU cache if needed. */
- dmar_unmap_pgtbl(sf, true);
+ dmar_unmap_pgtbl(sf);
VM_OBJECT_WLOCK(tbl->pgtbl_obj);
}
@@ -361,7 +361,7 @@ ctx_pgtbl_map_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags,
pte = (dmar_pte_t *)sf_buf_kva(*sf);
} else {
if (*sf != NULL)
- dmar_unmap_pgtbl(*sf, DMAR_IS_COHERENT(ctx->dmar));
+ dmar_unmap_pgtbl(*sf);
*idxp = idx;
retry:
pte = dmar_map_pgtbl(ctx->pgtbl_obj, idx, flags, sf);
@@ -397,9 +397,10 @@ retry:
}
dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
VM_PAGE_TO_PHYS(m));
+ dmar_flush_pte_to_ram(ctx->dmar, ptep);
sf_buf_page(sfp)->wire_count += 1;
m->wire_count--;
- dmar_unmap_pgtbl(sfp, DMAR_IS_COHERENT(ctx->dmar));
+ dmar_unmap_pgtbl(sfp);
/* Only executed once. */
goto retry;
}
@@ -467,20 +468,19 @@ ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
if (pte == NULL) {
KASSERT((flags & DMAR_PGF_WAITOK) == 0,
("failed waitable pte alloc %p", ctx));
- if (sf != NULL) {
- dmar_unmap_pgtbl(sf,
- DMAR_IS_COHERENT(ctx->dmar));
- }
+ if (sf != NULL)
+ dmar_unmap_pgtbl(sf);
ctx_unmap_buf_locked(ctx, base1, base - base1, flags);
TD_PINNED_ASSERT;
return (ENOMEM);
}
dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
(superpage ? DMAR_PTE_SP : 0));
+ dmar_flush_pte_to_ram(ctx->dmar, pte);
sf_buf_page(sf)->wire_count += 1;
}
if (sf != NULL)
- dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(ctx->dmar));
+ dmar_unmap_pgtbl(sf);
TD_PINNED_ASSERT;
return (0);
}
@@ -567,9 +567,10 @@ ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl,
vm_page_t m;
dmar_pte_clear(&pte->pte);
+ dmar_flush_pte_to_ram(ctx->dmar, pte);
m = sf_buf_page(*sf);
if (free_sf) {
- dmar_unmap_pgtbl(*sf, DMAR_IS_COHERENT(ctx->dmar));
+ dmar_unmap_pgtbl(*sf);
*sf = NULL;
}
m->wire_count--;
@@ -651,7 +652,7 @@ ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
(uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
}
if (sf != NULL)
- dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(ctx->dmar));
+ dmar_unmap_pgtbl(sf);
/*
* See 11.1 Write Buffer Flushing for an explanation why RWBF
* can be ignored there.
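
In intel_idpgtbl.c the pattern is uniform: every dmar_pte_store() or dmar_pte_clear() is now immediately followed by dmar_flush_pte_to_ram() while the mapping is still in hand, and dmar_unmap_pgtbl() no longer needs a coherency flag. A hypothetical combined helper illustrating the pairing (not a kernel interface; a single aligned 8-byte PTE never straddles a cache line, so one clflush covers it):

#include <stdbool.h>
#include <stdint.h>
#include <emmintrin.h>          /* _mm_clflush, _mm_mfence (SSE2) */

/* Hypothetical store-and-flush helper: keeps the store and the
 * write-back together so a caller cannot forget the flush on a
 * non-coherent unit. */
static inline void pte_store_and_flush(bool coherent, uint64_t *pte,
    uint64_t val)
{
    *pte = val;                           /* dmar_pte_store() */
    if (!coherent) {
        _mm_clflush((const void *)pte);   /* dmar_flush_pte_to_ram() */
        _mm_mfence();
    }
}
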
diff --git a/sys/x86/iommu/intel_utils.c b/sys/x86/iommu/intel_utils.c
index d81ec04..ca9fe03 100644
--- a/sys/x86/iommu/intel_utils.c
+++ b/sys/x86/iommu/intel_utils.c
@@ -351,20 +351,46 @@ dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
}
void
-dmar_unmap_pgtbl(struct sf_buf *sf, bool coherent)
+dmar_unmap_pgtbl(struct sf_buf *sf)
{
- vm_page_t m;
- m = sf_buf_page(sf);
sf_buf_free(sf);
sched_unpin();
+}
+
+static void
+dmar_flush_transl_to_ram(struct dmar_unit *unit, void *dst, size_t sz)
+{
+
+ if (DMAR_IS_COHERENT(unit))
+ return;
/*
* If DMAR does not snoop paging structures accesses, flush
* CPU cache to memory.
*/
- if (!coherent)
- pmap_invalidate_cache_pages(&m, 1);
+ pmap_invalidate_cache_range((uintptr_t)dst, (uintptr_t)dst + sz,
+ TRUE);
+}
+
+void
+dmar_flush_pte_to_ram(struct dmar_unit *unit, dmar_pte_t *dst)
+{
+
+ dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
+}
+
+void
+dmar_flush_ctx_to_ram(struct dmar_unit *unit, dmar_ctx_entry_t *dst)
+{
+
+ dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
+}
+
+void
+dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst)
+{
+
+ dmar_flush_transl_to_ram(unit, dst, sizeof(*dst));
}
/*
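
The coherency decision now lives in exactly one place: dmar_flush_transl_to_ram() returns early when DMAR_IS_COHERENT() reports that the unit snoops CPU caches during page walks (the VT-d ECAP.C bit), and otherwise writes back just the range covering the entry. The TRUE (force) argument to pmap_invalidate_cache_range() requests the write-back even on CPUs that advertise self-snoop, since self-snoop covers the CPU's own accesses but not reads performed by the remapping hardware. The check itself is a one-bit test, roughly as below (sketch; DMAR_ECAP_C is bit 0 of the extended capability register per the VT-d spec):

#include <stdbool.h>
#include <stdint.h>

#define DMAR_ECAP_C (1ULL << 0)   /* VT-d ECAP bit 0: coherent page walks */

/* Rough equivalent of FreeBSD's DMAR_IS_COHERENT(unit), which tests
 * the ECAP value cached in the unit's hw_ecap field. */
static bool dmar_is_coherent(uint64_t hw_ecap)
{
    return ((hw_ecap & DMAR_ECAP_C) != 0);
}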