author		kib <kib@FreeBSD.org>	2013-11-01 17:38:52 +0000
committer	kib <kib@FreeBSD.org>	2013-11-01 17:38:52 +0000
commit		f1a1b2ea9b8098bf85f36266353951f64ddb24cd (patch)
tree		9ac7d666a6c2a948a053d0e6763dcca76710b5d6 /sys/x86/iommu/intel_idpgtbl.c
parent		41ccfbfc303fd659a7b7ef58546e269376a6a55d (diff)
Add support for queued invalidation.

Right now, the semaphore write is scheduled after each batch, which is
not optimal and must be tuned.

Discussed with:	alc
Tested by:	pho
MFC after:	1 month
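The batching described in the log message can be pictured with a small
user-space model. This is an illustrative sketch only, not the committed
kernel code: the descriptor layout, ring size, and every name in it
(qi_entry, qi_emit, qi_submit_batch, qi_sem) are invented for the example.
What it demonstrates is the idea the message refers to: invalidation
requests are appended to a queue as descriptors, and each batch is
terminated by a wait descriptor whose completion the hardware signals by
storing a sequence number to a semaphore in memory.

#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of queued invalidation.  All names and layouts are
 * hypothetical; real VT-d descriptors are consumed by the IOMMU
 * hardware, which this model stands in for.
 */
struct qi_entry {
	uint64_t lo;	/* descriptor type and parameters */
	uint64_t hi;	/* invalidation address or semaphore pointer */
};

#define	QI_SIZE		32
#define	QI_TYPE_IOTLB	0x2	/* IOTLB invalidation request */
#define	QI_TYPE_WAIT	0x5	/* wait: hardware stores to a semaphore */

static struct qi_entry qi_ring[QI_SIZE];
static unsigned qi_tail;
static volatile uint32_t qi_sem;	/* written "by hardware" */

static void
qi_emit(uint64_t lo, uint64_t hi)
{
	qi_ring[qi_tail].lo = lo;
	qi_ring[qi_tail].hi = hi;
	qi_tail = (qi_tail + 1) % QI_SIZE;
}

/* Queue one batch of IOTLB invalidations plus the trailing wait. */
static void
qi_submit_batch(const uint64_t *pages, int n, uint32_t seq)
{
	int i;

	for (i = 0; i < n; i++)
		qi_emit(QI_TYPE_IOTLB, pages[i]);
	/*
	 * The "semaphore write scheduled after each batch": hardware
	 * stores seq to qi_sem once all earlier descriptors have
	 * completed, and the driver spins or sleeps until it sees it.
	 */
	qi_emit(QI_TYPE_WAIT, (uint64_t)(uintptr_t)&qi_sem);
	qi_sem = seq;		/* stand-in for the hardware's store */
}

int
main(void)
{
	uint64_t pages[3] = { 0x1000, 0x2000, 0x3000 };

	qi_submit_batch(pages, 3, 1);
	printf("batch retired, semaphore now %u\n", (unsigned)qi_sem);
	return (0);
}

Emitting one wait descriptor per batch, as modeled here, costs one stall
per batch; the tuning the author anticipates would amortize that wait
over larger amounts of queued work.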
Diffstat (limited to 'sys/x86/iommu/intel_idpgtbl.c')
-rw-r--r--	sys/x86/iommu/intel_idpgtbl.c	77
1 file changed, 29 insertions(+), 48 deletions(-)
diff --git a/sys/x86/iommu/intel_idpgtbl.c b/sys/x86/iommu/intel_idpgtbl.c
index a1773aa..f62b1c1 100644
--- a/sys/x86/iommu/intel_idpgtbl.c
+++ b/sys/x86/iommu/intel_idpgtbl.c
@@ -67,8 +67,6 @@ __FBSDID("$FreeBSD$");
static int ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
dmar_gaddr_t size, int flags);
-static void ctx_flush_iotlb(struct dmar_ctx *ctx, dmar_gaddr_t base,
- dmar_gaddr_t size, int flags);
/*
* The cache of the identity mapping page tables for the DMARs. Using
@@ -412,7 +410,6 @@ static int
ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
vm_page_t *ma, uint64_t pflags, int flags)
{
- struct dmar_unit *unit;
dmar_pte_t *pte;
struct sf_buf *sf;
dmar_gaddr_t pg_sz, base1, size1;
@@ -482,17 +479,6 @@ ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
}
if (sf != NULL)
dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(ctx->dmar));
- DMAR_CTX_PGUNLOCK(ctx);
- unit = ctx->dmar;
- if ((unit->hw_cap & DMAR_CAP_CM) != 0)
- ctx_flush_iotlb(ctx, base1, size1, flags);
- else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
- /* See 11.1 Write Buffer Flushing. */
- DMAR_LOCK(unit);
- dmar_flush_write_bufs(unit);
- DMAR_UNLOCK(unit);
- }
-
TD_PINNED_ASSERT;
return (0);
}
@@ -501,6 +487,10 @@ int
ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
vm_page_t *ma, uint64_t pflags, int flags)
{
+ struct dmar_unit *unit;
+ int error;
+
+ unit = ctx->dmar;
KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0,
("modifying idmap pagetable ctx %p", ctx));
@@ -527,17 +517,30 @@ ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
DMAR_PTE_TM)) == 0,
("invalid pte flags %jx", (uintmax_t)pflags));
KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
- (ctx->dmar->hw_ecap & DMAR_ECAP_SC) != 0,
+ (unit->hw_ecap & DMAR_ECAP_SC) != 0,
("PTE_SNP for dmar without snoop control %p %jx",
ctx, (uintmax_t)pflags));
KASSERT((pflags & DMAR_PTE_TM) == 0 ||
- (ctx->dmar->hw_ecap & DMAR_ECAP_DI) != 0,
+ (unit->hw_ecap & DMAR_ECAP_DI) != 0,
("PTE_TM for dmar without DIOTLB %p %jx",
ctx, (uintmax_t)pflags));
KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));
DMAR_CTX_PGLOCK(ctx);
- return (ctx_map_buf_locked(ctx, base, size, ma, pflags, flags));
+ error = ctx_map_buf_locked(ctx, base, size, ma, pflags, flags);
+ DMAR_CTX_PGUNLOCK(ctx);
+ if (error != 0)
+ return (error);
+
+ if ((unit->hw_cap & DMAR_CAP_CM) != 0)
+ ctx_flush_iotlb_sync(ctx, base, size);
+ else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
+ /* See 11.1 Write Buffer Flushing. */
+ DMAR_LOCK(unit);
+ dmar_flush_write_bufs(unit);
+ DMAR_UNLOCK(unit);
+ }
+ return (0);
}
static void ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base,
@@ -646,8 +649,6 @@ ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
}
if (sf != NULL)
dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(ctx->dmar));
- DMAR_CTX_PGUNLOCK(ctx);
- ctx_flush_iotlb(ctx, base1, size1, flags);
/*
* See 11.1 Write Buffer Flushing for an explanation why RWBF
* can be ignored there.
@@ -661,9 +662,12 @@ int
ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
int flags)
{
+ int error;
DMAR_CTX_PGLOCK(ctx);
- return (ctx_unmap_buf_locked(ctx, base, size, flags));
+ error = ctx_unmap_buf_locked(ctx, base, size, flags);
+ DMAR_CTX_PGUNLOCK(ctx);
+ return (error);
}
int
@@ -730,13 +734,8 @@ ctx_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
return (iotlbr);
}
-/*
- * flags is only intended for PGF_WAITOK, to disallow queued
- * invalidation.
- */
-static void
-ctx_flush_iotlb(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
- int flags)
+void
+ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size)
{
struct dmar_unit *unit;
dmar_gaddr_t isize;
@@ -744,20 +743,8 @@ ctx_flush_iotlb(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
int am, iro;
unit = ctx->dmar;
-#if 0
- if ((unit->hw_ecap & DMAR_ECAP_QI) != 0 &&
- (flags & DMAR_PGF_WAITOK) != 0) {
- /*
- * XXXKIB: There, a queued invalidation interface
- * could be used. But since queued and registered
- * interfaces cannot be used simultaneously, and we
- * must use sleep-less (i.e. register) interface when
- * DMAR_PGF_WAITOK is not specified, only register
- * interface is suitable.
- */
- return;
- }
-#endif
+ KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
+ unit->unit));
iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
DMAR_LOCK(unit);
if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
@@ -769,13 +756,7 @@ ctx_flush_iotlb(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
(uintmax_t)iotlbr));
} else {
for (; size > 0; base += isize, size -= isize) {
- for (am = DMAR_CAP_MAMV(unit->hw_cap);; am--) {
- isize = 1ULL << (am + DMAR_PAGE_SHIFT);
- if ((base & (isize - 1)) == 0 && size >= isize)
- break;
- if (am == 0)
- break;
- }
+ am = calc_am(unit, base, size, &isize);
dmar_write8(unit, iro, base | am);
iotlbr = ctx_wait_iotlb_flush(unit,
DMAR_IOTLB_IIRG_PAGE | DMAR_IOTLB_DID(ctx->domain),
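The last hunk replaces an open-coded address-mask search with a calc_am()
helper. A plausible reconstruction of that helper, derived directly from
the loop the commit removes, is sketched below as a self-contained
program; the type and macro stand-ins (dmar_gaddr_t, DMAR_PAGE_SHIFT,
DMAR_CAP_MAMV) mimic the kernel definitions, and the real FreeBSD helper
may differ in detail.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel types and macros used by the sketch. */
typedef uint64_t dmar_gaddr_t;
#define	DMAR_PAGE_SHIFT		12
#define	DMAR_CAP_MAMV(cap)	((int)(((cap) >> 48) & 0x3f))

struct dmar_unit {
	uint64_t hw_cap;
};

/*
 * Reconstruction of calc_am() from the loop the commit removes:
 * choose the largest address-mask exponent am, bounded by the
 * unit's MAMV capability, for which a 2^(am + DMAR_PAGE_SHIFT)
 * byte region is naturally aligned at base and fits within size.
 */
static int
calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
    dmar_gaddr_t *isizep)
{
	dmar_gaddr_t isize;
	int am;

	for (am = DMAR_CAP_MAMV(unit->hw_cap);; am--) {
		isize = 1ULL << (am + DMAR_PAGE_SHIFT);
		if ((base & (isize - 1)) == 0 && size >= isize)
			break;
		if (am == 0)
			break;
	}
	*isizep = isize;
	return (am);
}

int
main(void)
{
	struct dmar_unit unit = { .hw_cap = (uint64_t)9 << 48 }; /* MAMV 9 */
	dmar_gaddr_t isize;
	int am;

	am = calc_am(&unit, 0x200000, 0x200000, &isize);
	printf("am=%d isize=%ju\n", am, (uintmax_t)isize);
	return (0);
}

The invariant is that base | am can only be written to the IOTLB
invalidate register when the 2^(am + DMAR_PAGE_SHIFT) byte region is
naturally aligned, which is why the loop steps am down until both the
alignment and the remaining size fit.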