author:    kib <kib@FreeBSD.org>    2015-06-26 07:01:29 +0000
committer: kib <kib@FreeBSD.org>    2015-06-26 07:01:29 +0000
commit:    6b3dcf6ce0a202b704c8fb3d03951cc2cb909c67 (patch)
tree:      dfe9131ecf9660d6f20cddc79cc1ac3c97e40d8c /sys/x86
parent:    bd7f5ebf0a3fb96ae6d9bcf47516400eb19cd01e (diff)
Split the DMAR unit domains and contexts.

Domains carry the address space and related data structures. Contexts attach
request initiators to domains. There is still a 1:1 correspondence between
contexts and domains on a running system, since only busdma currently
allocates them, via dmar_get_ctx_for_dev().

A large part of the change is a formal rename of ctx to domain, but the patch
also reworks context allocation and freeing to allow for independent domain
creation. The helper dmar_move_ctx_to_domain() is introduced for future use,
to reassign a request initiator from one domain to another. The hard issue
not yet resolved for the context move is proper handling (or reservation) of
the RMRR entries in the destination domain, as required by the ACPI DMAR
table for the moved context.

Tested by:	pho
Sponsored by:	The FreeBSD Foundation
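To make the new ownership model concrete before the diff: the sketch below is a
minimal, self-contained userspace model, not kernel code. The struct layouts,
the model_* names and the main() driver are illustrative assumptions; the real
definitions and the dmar_ctx_link()/dmar_ctx_unlink() helpers they imitate are
in intel_dmar.h and intel_ctx.c below. It shows the invariant the patch asserts
throughout: every linked context holds one reference on its domain, so
domain->refs never drops below domain->ctx_cnt.

/*
 * Minimal userspace model of the domain/context split (illustration only;
 * the model_* names and this main() are hypothetical, the real structures
 * live in sys/x86/iommu/intel_dmar.h).
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/queue.h>

struct model_domain;

struct model_ctx {
	uint16_t rid;				/* PCI requester id */
	struct model_domain *domain;		/* owning domain */
	LIST_ENTRY(model_ctx) link;		/* member of domain->contexts */
};

struct model_domain {
	int id;					/* DID written into ctx entries */
	unsigned int ctx_cnt;			/* number of linked contexts */
	unsigned int refs;			/* refs, including one per ctx */
	LIST_HEAD(, model_ctx) contexts;
};

static void
model_ctx_link(struct model_domain *dom, struct model_ctx *ctx)
{
	/* Like dmar_ctx_link(): a linked context holds one domain ref. */
	assert(dom->refs >= dom->ctx_cnt);
	ctx->domain = dom;
	dom->refs++;
	dom->ctx_cnt++;
	LIST_INSERT_HEAD(&dom->contexts, ctx, link);
}

static void
model_ctx_unlink(struct model_ctx *ctx)
{
	struct model_domain *dom = ctx->domain;

	/* Like dmar_ctx_unlink(): drop the per-context domain ref. */
	assert(dom->refs > 0 && dom->ctx_cnt > 0);
	dom->refs--;
	dom->ctx_cnt--;
	LIST_REMOVE(ctx, link);
	ctx->domain = NULL;
}

int
main(void)
{
	struct model_domain dom = { .id = 1, .refs = 1 };	/* creator's ref */
	struct model_ctx ctx = { .rid = 0x0100 };

	LIST_INIT(&dom.contexts);
	model_ctx_link(&dom, &ctx);
	printf("linked:   refs %u ctx_cnt %u\n", dom.refs, dom.ctx_cnt);
	model_ctx_unlink(&ctx);
	printf("unlinked: refs %u ctx_cnt %u\n", dom.refs, dom.ctx_cnt);
	return (0);
}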
Diffstat (limited to 'sys/x86')
-rw-r--r--   sys/x86/iommu/busdma_dmar.c     49
-rw-r--r--   sys/x86/iommu/intel_ctx.c      514
-rw-r--r--   sys/x86/iommu/intel_dmar.h     173
-rw-r--r--   sys/x86/iommu/intel_drv.c      111
-rw-r--r--   sys/x86/iommu/intel_gas.c      284
-rw-r--r--   sys/x86/iommu/intel_idpgtbl.c  275
-rw-r--r--   sys/x86/iommu/intel_qi.c         8
-rw-r--r--   sys/x86/iommu/intel_utils.c     48
8 files changed, 839 insertions, 623 deletions
diff --git a/sys/x86/iommu/busdma_dmar.c b/sys/x86/iommu/busdma_dmar.c
index 3ef8107..434ae82 100644
--- a/sys/x86/iommu/busdma_dmar.c
+++ b/sys/x86/iommu/busdma_dmar.c
@@ -225,7 +225,7 @@ dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr)
disabled = dmar_bus_dma_is_dev_disabled(pci_get_domain(requester),
pci_get_bus(requester), pci_get_slot(requester),
pci_get_function(requester));
- ctx = dmar_get_ctx(dmar, requester, rid, disabled, rmrr);
+ ctx = dmar_get_ctx_for_dev(dmar, requester, rid, disabled, rmrr);
if (ctx == NULL)
return (NULL);
if (disabled) {
@@ -371,16 +371,18 @@ dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
{
struct bus_dma_tag_dmar *tag;
struct bus_dmamap_dmar *map;
+ struct dmar_domain *domain;
tag = (struct bus_dma_tag_dmar *)dmat;
map = (struct bus_dmamap_dmar *)map1;
if (map != NULL) {
- DMAR_CTX_LOCK(tag->ctx);
+ domain = tag->ctx->domain;
+ DMAR_DOMAIN_LOCK(domain);
if (!TAILQ_EMPTY(&map->map_entries)) {
- DMAR_CTX_UNLOCK(tag->ctx);
+ DMAR_DOMAIN_UNLOCK(domain);
return (EBUSY);
}
- DMAR_CTX_UNLOCK(tag->ctx);
+ DMAR_DOMAIN_UNLOCK(domain);
free(map, M_DMAR_DMAMAP);
}
tag->map_count--;
@@ -457,6 +459,7 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
struct dmar_map_entries_tailq *unroll_list)
{
struct dmar_ctx *ctx;
+ struct dmar_domain *domain;
struct dmar_map_entry *entry;
dmar_gaddr_t size;
bus_size_t buflen1;
@@ -466,6 +469,7 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
if (segs == NULL)
segs = tag->segments;
ctx = tag->ctx;
+ domain = ctx->domain;
seg = *segp;
error = 0;
idx = 0;
@@ -487,7 +491,7 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
if (seg + 1 < tag->common.nsegments)
gas_flags |= DMAR_GM_CANSPLIT;
- error = dmar_gas_map(ctx, &tag->common, size, offset,
+ error = dmar_gas_map(domain, &tag->common, size, offset,
DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
gas_flags, ma + idx, &entry);
if (error != 0)
@@ -534,10 +538,10 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
(uintmax_t)entry->start, (uintmax_t)entry->end,
(uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz));
- DMAR_CTX_LOCK(ctx);
+ DMAR_DOMAIN_LOCK(domain);
TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
entry->flags |= DMAR_MAP_ENTRY_MAP;
- DMAR_CTX_UNLOCK(ctx);
+ DMAR_DOMAIN_UNLOCK(domain);
TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link);
segs[seg].ds_addr = entry->start + offset;
@@ -559,11 +563,13 @@ dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
int flags, bus_dma_segment_t *segs, int *segp)
{
struct dmar_ctx *ctx;
+ struct dmar_domain *domain;
struct dmar_map_entry *entry, *entry1;
struct dmar_map_entries_tailq unroll_list;
int error;
ctx = tag->ctx;
+ domain = ctx->domain;
atomic_add_long(&ctx->loads, 1);
TAILQ_INIT(&unroll_list);
@@ -575,7 +581,7 @@ dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
* partial buffer load, so unfortunately we have to
* revert all work done.
*/
- DMAR_CTX_LOCK(ctx);
+ DMAR_DOMAIN_LOCK(domain);
TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link,
entry1) {
/*
@@ -586,19 +592,19 @@ dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
*/
TAILQ_REMOVE(&map->map_entries, entry, dmamap_link);
TAILQ_REMOVE(&unroll_list, entry, unroll_link);
- TAILQ_INSERT_TAIL(&ctx->unload_entries, entry,
+ TAILQ_INSERT_TAIL(&domain->unload_entries, entry,
dmamap_link);
}
- DMAR_CTX_UNLOCK(ctx);
- taskqueue_enqueue(ctx->dmar->delayed_taskqueue,
- &ctx->unload_task);
+ DMAR_DOMAIN_UNLOCK(domain);
+ taskqueue_enqueue(domain->dmar->delayed_taskqueue,
+ &domain->unload_task);
}
if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 &&
!map->cansleep)
error = EINPROGRESS;
if (error == EINPROGRESS)
- dmar_bus_schedule_dmamap(ctx->dmar, map);
+ dmar_bus_schedule_dmamap(domain->dmar, map);
return (error);
}
@@ -764,6 +770,7 @@ dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
struct bus_dma_tag_dmar *tag;
struct bus_dmamap_dmar *map;
struct dmar_ctx *ctx;
+ struct dmar_domain *domain;
#if defined(__amd64__)
struct dmar_map_entries_tailq entries;
#endif
@@ -771,20 +778,22 @@ dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
tag = (struct bus_dma_tag_dmar *)dmat;
map = (struct bus_dmamap_dmar *)map1;
ctx = tag->ctx;
+ domain = ctx->domain;
atomic_add_long(&ctx->unloads, 1);
#if defined(__i386__)
- DMAR_CTX_LOCK(ctx);
- TAILQ_CONCAT(&ctx->unload_entries, &map->map_entries, dmamap_link);
- DMAR_CTX_UNLOCK(ctx);
- taskqueue_enqueue(ctx->dmar->delayed_taskqueue, &ctx->unload_task);
+ DMAR_DOMAIN_LOCK(domain);
+ TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link);
+ DMAR_DOMAIN_UNLOCK(domain);
+ taskqueue_enqueue(domain->dmar->delayed_taskqueue,
+ &domain->unload_task);
#else /* defined(__amd64__) */
TAILQ_INIT(&entries);
- DMAR_CTX_LOCK(ctx);
+ DMAR_DOMAIN_LOCK(domain);
TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
- DMAR_CTX_UNLOCK(ctx);
+ DMAR_DOMAIN_UNLOCK(domain);
THREAD_NO_SLEEPING();
- dmar_ctx_unload(ctx, &entries, false);
+ dmar_domain_unload(domain, &entries, false);
THREAD_SLEEPING_OK();
KASSERT(TAILQ_EMPTY(&entries), ("lazy dmar_ctx_unload %p", ctx));
#endif
diff --git a/sys/x86/iommu/intel_ctx.c b/sys/x86/iommu/intel_ctx.c
index a18adcf..402612b 100644
--- a/sys/x86/iommu/intel_ctx.c
+++ b/sys/x86/iommu/intel_ctx.c
@@ -68,8 +68,13 @@ __FBSDID("$FreeBSD$");
#include <dev/pci/pcivar.h>
static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context");
+static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain");
-static void dmar_ctx_unload_task(void *arg, int pending);
+static void dmar_domain_unload_task(void *arg, int pending);
+static void dmar_unref_domain_locked(struct dmar_unit *dmar,
+ struct dmar_domain *domain);
+static void dmar_domain_destroy(struct dmar_domain *domain);
+static void dmar_ctx_dtr(struct dmar_ctx *ctx);
static void
dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
@@ -108,8 +113,8 @@ dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
{
dmar_ctx_entry_t *ctxp;
- ctxp = dmar_map_pgtbl(ctx->dmar->ctx_obj, 1 + PCI_RID2BUS(ctx->rid),
- DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
+ ctxp = dmar_map_pgtbl(ctx->domain->dmar->ctx_obj, 1 +
+ PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
ctxp += ctx->rid & 0xff;
return (ctxp);
}
@@ -119,7 +124,7 @@ ctx_tag_init(struct dmar_ctx *ctx, device_t dev)
{
bus_addr_t maxaddr;
- maxaddr = MIN(ctx->end, BUS_SPACE_MAXADDR);
+ maxaddr = MIN(ctx->domain->end, BUS_SPACE_MAXADDR);
ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
ctx->ctx_tag.common.boundary = PCI_DMA_BOUNDARY;
@@ -130,33 +135,42 @@ ctx_tag_init(struct dmar_ctx *ctx, device_t dev)
ctx->ctx_tag.common.maxsegsz = maxaddr;
ctx->ctx_tag.ctx = ctx;
ctx->ctx_tag.owner = dev;
- /* XXXKIB initialize tag further */
}
static void
-ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp)
+ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move)
{
struct dmar_unit *unit;
+ struct dmar_domain *domain;
vm_page_t ctx_root;
- unit = ctx->dmar;
- KASSERT(ctxp->ctx1 == 0 && ctxp->ctx2 == 0,
+ domain = ctx->domain;
+ unit = domain->dmar;
+ KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0),
("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
unit->unit, pci_get_bus(ctx->ctx_tag.owner),
pci_get_slot(ctx->ctx_tag.owner),
pci_get_function(ctx->ctx_tag.owner),
- ctxp->ctx1,
- ctxp->ctx2));
- ctxp->ctx2 = DMAR_CTX2_DID(ctx->domain);
- ctxp->ctx2 |= ctx->awlvl;
- if ((ctx->flags & DMAR_CTX_IDMAP) != 0 &&
+ ctxp->ctx1, ctxp->ctx2));
+ /*
+ * For update due to move, the store is not atomic. It is
+ * possible that DMAR read upper doubleword, while low
+ * doubleword is not yet updated. The domain id is stored in
+ * the upper doubleword, while the table pointer in the lower.
+ *
+ * There is no good solution, for the same reason it is wrong
+ * to clear P bit in the ctx entry for update.
+ */
+ dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) |
+ domain->awlvl);
+ if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0 &&
(unit->hw_ecap & DMAR_ECAP_PT) != 0) {
- KASSERT(ctx->pgtbl_obj == NULL,
+ KASSERT(domain->pgtbl_obj == NULL,
("ctx %p non-null pgtbl_obj", ctx));
- dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
+ dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P);
} else {
- ctx_root = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_NOALLOC);
- dmar_pte_store(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
+ ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_NOALLOC);
+ dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR |
(DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) |
DMAR_CTX1_P);
}
@@ -164,7 +178,31 @@ ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp)
}
static int
-ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
+dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force)
+{
+ int error;
+
+ /*
+ * If dmar declares Caching Mode as Set, follow 11.5 "Caching
+ * Mode Consideration" and do the (global) invalidation of the
+ * negative TLB entries.
+ */
+ if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force)
+ return (0);
+ if (dmar->qi_enabled) {
+ dmar_qi_invalidate_ctx_glob_locked(dmar);
+ if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)
+ dmar_qi_invalidate_iotlb_glob_locked(dmar);
+ return (0);
+ }
+ error = dmar_inv_ctx_glob(dmar);
+ if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force))
+ error = dmar_inv_iotlb_glob(dmar);
+ return (error);
+}
+
+static int
+domain_init_rmrr(struct dmar_domain *domain, device_t dev)
{
struct dmar_map_entries_tailq rmrr_entries;
struct dmar_map_entry *entry, *entry1;
@@ -175,7 +213,7 @@ ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
error = 0;
TAILQ_INIT(&rmrr_entries);
- dmar_ctx_parse_rmrr(ctx, dev, &rmrr_entries);
+ dmar_dev_parse_rmrr(domain, dev, &rmrr_entries);
TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) {
/*
* VT-d specification requires that the start of an
@@ -195,7 +233,7 @@ ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
if (bootverbose) {
device_printf(dev, "BIOS bug: dmar%d RMRR "
"region (%jx, %jx) corrected\n",
- ctx->dmar->unit, start, end);
+ domain->dmar->unit, start, end);
}
entry->end += DMAR_PAGE_SIZE * 0x20;
}
@@ -205,8 +243,9 @@ ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
VM_MEMATTR_DEFAULT);
}
- error1 = dmar_gas_map_region(ctx, entry, DMAR_MAP_ENTRY_READ |
- DMAR_MAP_ENTRY_WRITE, DMAR_GM_CANWAIT, ma);
+ error1 = dmar_gas_map_region(domain, entry,
+ DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
+ DMAR_GM_CANWAIT, ma);
/*
* Non-failed RMRR entries are owned by context rb
* tree. Get rid of the failed entry, but do not stop
@@ -214,18 +253,19 @@ ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
* loaded and removed on the context destruction.
*/
if (error1 == 0 && entry->end != entry->start) {
- DMAR_LOCK(ctx->dmar);
- ctx->flags |= DMAR_CTX_RMRR;
- DMAR_UNLOCK(ctx->dmar);
+ DMAR_LOCK(domain->dmar);
+ domain->refs++; /* XXXKIB prevent free */
+ domain->flags |= DMAR_DOMAIN_RMRR;
+ DMAR_UNLOCK(domain->dmar);
} else {
if (error1 != 0) {
device_printf(dev,
"dmar%d failed to map RMRR region (%jx, %jx) %d\n",
- ctx->dmar->unit, start, end, error1);
+ domain->dmar->unit, start, end, error1);
error = error1;
}
TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
- dmar_gas_free_entry(ctx, entry);
+ dmar_gas_free_entry(domain, entry);
}
for (i = 0; i < size; i++)
vm_page_putfake(ma[i]);
@@ -234,47 +274,144 @@ ctx_init_rmrr(struct dmar_ctx *ctx, device_t dev)
return (error);
}
+static struct dmar_domain *
+dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
+{
+ struct dmar_domain *domain;
+ int error, id, mgaw;
+
+ id = alloc_unr(dmar->domids);
+ if (id == -1)
+ return (NULL);
+ domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO);
+ domain->domain = id;
+ LIST_INIT(&domain->contexts);
+ RB_INIT(&domain->rb_root);
+ TAILQ_INIT(&domain->unload_entries);
+ TASK_INIT(&domain->unload_task, 0, dmar_domain_unload_task, domain);
+ mtx_init(&domain->lock, "dmardom", NULL, MTX_DEF);
+ domain->dmar = dmar;
+
+ /*
+ * For now, use the maximal usable physical address of the
+ * installed memory to calculate the mgaw on id_mapped domain.
+ * It is useful for the identity mapping, and less so for the
+ * virtualized bus address space.
+ */
+ domain->end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR;
+ mgaw = dmar_maxaddr2mgaw(dmar, domain->end, !id_mapped);
+ error = domain_set_agaw(domain, mgaw);
+ if (error != 0)
+ goto fail;
+ if (!id_mapped)
+ /* Use all supported address space for remapping. */
+ domain->end = 1ULL << (domain->agaw - 1);
+
+ dmar_gas_init_domain(domain);
+
+ if (id_mapped) {
+ if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
+ domain->pgtbl_obj = domain_get_idmap_pgtbl(domain,
+ domain->end);
+ }
+ domain->flags |= DMAR_DOMAIN_IDMAP;
+ } else {
+ error = domain_alloc_pgtbl(domain);
+ if (error != 0)
+ goto fail;
+ /* Disable local apic region access */
+ error = dmar_gas_reserve_region(domain, 0xfee00000,
+ 0xfeefffff + 1);
+ if (error != 0)
+ goto fail;
+ }
+ return (domain);
+
+fail:
+ dmar_domain_destroy(domain);
+ return (NULL);
+}
+
static struct dmar_ctx *
-dmar_get_ctx_alloc(struct dmar_unit *dmar, uint16_t rid)
+dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid)
{
struct dmar_ctx *ctx;
ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
- RB_INIT(&ctx->rb_root);
- TAILQ_INIT(&ctx->unload_entries);
- TASK_INIT(&ctx->unload_task, 0, dmar_ctx_unload_task, ctx);
- mtx_init(&ctx->lock, "dmarctx", NULL, MTX_DEF);
- ctx->dmar = dmar;
+ ctx->domain = domain;
ctx->rid = rid;
+ ctx->refs = 1;
return (ctx);
}
static void
-dmar_ctx_dtr(struct dmar_ctx *ctx, bool gas_inited, bool pgtbl_inited)
+dmar_ctx_link(struct dmar_ctx *ctx)
{
+ struct dmar_domain *domain;
+
+ domain = ctx->domain;
+ DMAR_ASSERT_LOCKED(domain->dmar);
+ KASSERT(domain->refs >= domain->ctx_cnt,
+ ("dom %p ref underflow %d %d", domain, domain->refs,
+ domain->ctx_cnt));
+ domain->refs++;
+ domain->ctx_cnt++;
+ LIST_INSERT_HEAD(&domain->contexts, ctx, link);
+}
+
+static void
+dmar_ctx_unlink(struct dmar_ctx *ctx)
+{
+ struct dmar_domain *domain;
+
+ domain = ctx->domain;
+ DMAR_ASSERT_LOCKED(domain->dmar);
+ KASSERT(domain->refs > 0,
+ ("domain %p ctx dtr refs %d", domain, domain->refs));
+ KASSERT(domain->ctx_cnt >= domain->refs,
+ ("domain %p ctx dtr refs %d ctx_cnt %d", domain,
+ domain->refs, domain->ctx_cnt));
+ domain->refs--;
+ domain->ctx_cnt--;
+ LIST_REMOVE(ctx, link);
+}
- if (gas_inited) {
- DMAR_CTX_LOCK(ctx);
- dmar_gas_fini_ctx(ctx);
- DMAR_CTX_UNLOCK(ctx);
+static void
+dmar_domain_destroy(struct dmar_domain *domain)
+{
+
+ KASSERT(TAILQ_EMPTY(&domain->unload_entries),
+ ("unfinished unloads %p", domain));
+ KASSERT(LIST_EMPTY(&domain->contexts),
+ ("destroying dom %p with contexts", domain));
+ KASSERT(domain->ctx_cnt == 0,
+ ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
+ KASSERT(domain->refs == 0,
+ ("destroying dom %p with refs %d", domain, domain->refs));
+ if ((domain->flags & DMAR_DOMAIN_GAS_INITED) != 0) {
+ DMAR_DOMAIN_LOCK(domain);
+ dmar_gas_fini_domain(domain);
+ DMAR_DOMAIN_UNLOCK(domain);
}
- if (pgtbl_inited) {
- if (ctx->pgtbl_obj != NULL)
- DMAR_CTX_PGLOCK(ctx);
- ctx_free_pgtbl(ctx);
+ if ((domain->flags & DMAR_DOMAIN_PGTBL_INITED) != 0) {
+ if (domain->pgtbl_obj != NULL)
+ DMAR_DOMAIN_PGLOCK(domain);
+ domain_free_pgtbl(domain);
}
- mtx_destroy(&ctx->lock);
- free(ctx, M_DMAR_CTX);
+ mtx_destroy(&domain->lock);
+ free_unr(domain->dmar->domids, domain->domain);
+ free(domain, M_DMAR_DOMAIN);
}
struct dmar_ctx *
-dmar_get_ctx(struct dmar_unit *dmar, device_t dev, uint16_t rid, bool id_mapped,
- bool rmrr_init)
+dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid,
+ bool id_mapped, bool rmrr_init)
{
+ struct dmar_domain *domain, *domain1;
struct dmar_ctx *ctx, *ctx1;
dmar_ctx_entry_t *ctxp;
struct sf_buf *sf;
- int bus, slot, func, error, mgaw;
+ int bus, slot, func, error;
bool enable;
bus = pci_get_bus(dev);
@@ -292,67 +429,18 @@ dmar_get_ctx(struct dmar_unit *dmar, device_t dev, uint16_t rid, bool id_mapped,
*/
DMAR_UNLOCK(dmar);
dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid));
- ctx1 = dmar_get_ctx_alloc(dmar, rid);
-
- if (id_mapped) {
- /*
- * For now, use the maximal usable physical
- * address of the installed memory to
- * calculate the mgaw. It is useful for the
- * identity mapping, and less so for the
- * virtualized bus address space.
- */
- ctx1->end = ptoa(Maxmem);
- mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, false);
- error = ctx_set_agaw(ctx1, mgaw);
- if (error != 0) {
- dmar_ctx_dtr(ctx1, false, false);
- TD_PINNED_ASSERT;
- return (NULL);
- }
- } else {
- ctx1->end = BUS_SPACE_MAXADDR;
- mgaw = dmar_maxaddr2mgaw(dmar, ctx1->end, true);
- error = ctx_set_agaw(ctx1, mgaw);
- if (error != 0) {
- dmar_ctx_dtr(ctx1, false, false);
- TD_PINNED_ASSERT;
- return (NULL);
- }
- /* Use all supported address space for remapping. */
- ctx1->end = 1ULL << (ctx1->agaw - 1);
+ domain1 = dmar_domain_alloc(dmar, id_mapped);
+ if (domain1 == NULL) {
+ TD_PINNED_ASSERT;
+ return (NULL);
}
-
-
- dmar_gas_init_ctx(ctx1);
- if (id_mapped) {
- if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
- ctx1->pgtbl_obj = ctx_get_idmap_pgtbl(ctx1,
- ctx1->end);
- }
- ctx1->flags |= DMAR_CTX_IDMAP;
- } else {
- error = ctx_alloc_pgtbl(ctx1);
- if (error != 0) {
- dmar_ctx_dtr(ctx1, true, false);
- TD_PINNED_ASSERT;
- return (NULL);
- }
- /* Disable local apic region access */
- error = dmar_gas_reserve_region(ctx1, 0xfee00000,
- 0xfeefffff + 1);
- if (error != 0) {
- dmar_ctx_dtr(ctx1, true, true);
- TD_PINNED_ASSERT;
- return (NULL);
- }
- error = ctx_init_rmrr(ctx1, dev);
- if (error != 0) {
- dmar_ctx_dtr(ctx1, true, true);
- TD_PINNED_ASSERT;
- return (NULL);
- }
+ error = domain_init_rmrr(domain1, dev);
+ if (error != 0) {
+ dmar_domain_destroy(domain1);
+ TD_PINNED_ASSERT;
+ return (NULL);
}
+ ctx1 = dmar_ctx_alloc(domain1, rid);
ctxp = dmar_map_ctx_entry(ctx1, &sf);
DMAR_LOCK(dmar);
@@ -362,16 +450,10 @@ dmar_get_ctx(struct dmar_unit *dmar, device_t dev, uint16_t rid, bool id_mapped,
*/
ctx = dmar_find_ctx_locked(dmar, rid);
if (ctx == NULL) {
+ domain = domain1;
ctx = ctx1;
+ dmar_ctx_link(ctx);
ctx->ctx_tag.owner = dev;
- ctx->domain = alloc_unrl(dmar->domids);
- if (ctx->domain == -1) {
- DMAR_UNLOCK(dmar);
- dmar_unmap_pgtbl(sf);
- dmar_ctx_dtr(ctx, true, true);
- TD_PINNED_ASSERT;
- return (NULL);
- }
ctx_tag_init(ctx, dev);
/*
@@ -379,46 +461,33 @@ dmar_get_ctx(struct dmar_unit *dmar, device_t dev, uint16_t rid, bool id_mapped,
* DMAR unit. Enable the translation after
* everything is set up.
*/
- if (LIST_EMPTY(&dmar->contexts))
+ if (LIST_EMPTY(&dmar->domains))
enable = true;
- LIST_INSERT_HEAD(&dmar->contexts, ctx, link);
- ctx_id_entry_init(ctx, ctxp);
+ LIST_INSERT_HEAD(&dmar->domains, domain, link);
+ ctx_id_entry_init(ctx, ctxp, false);
device_printf(dev,
"dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d "
"agaw %d %s-mapped\n",
dmar->unit, dmar->segment, bus, slot,
- func, rid, ctx->domain, ctx->mgaw, ctx->agaw,
- id_mapped ? "id" : "re");
+ func, rid, domain->domain, domain->mgaw,
+ domain->agaw, id_mapped ? "id" : "re");
} else {
- dmar_ctx_dtr(ctx1, true, true);
+ /* Nothing needs to be done to destroy ctx1. */
+ dmar_domain_destroy(domain1);
+ domain = ctx->domain;
+ ctx->refs++; /* tag referenced us */
}
dmar_unmap_pgtbl(sf);
+ } else {
+ domain = ctx->domain;
+ ctx->refs++; /* tag referenced us */
}
- ctx->refs++;
- if ((ctx->flags & DMAR_CTX_RMRR) != 0)
- ctx->refs++; /* XXXKIB */
- /*
- * If dmar declares Caching Mode as Set, follow 11.5 "Caching
- * Mode Consideration" and do the (global) invalidation of the
- * negative TLB entries.
- */
- if ((dmar->hw_cap & DMAR_CAP_CM) != 0 || enable) {
- if (dmar->qi_enabled) {
- dmar_qi_invalidate_ctx_glob_locked(dmar);
- if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0)
- dmar_qi_invalidate_iotlb_glob_locked(dmar);
- } else {
- error = dmar_inv_ctx_glob(dmar);
- if (error == 0 &&
- (dmar->hw_ecap & DMAR_ECAP_DI) != 0)
- error = dmar_inv_iotlb_glob(dmar);
- if (error != 0) {
- dmar_free_ctx_locked(dmar, ctx);
- TD_PINNED_ASSERT;
- return (NULL);
- }
- }
+ error = dmar_flush_for_ctx_entry(dmar, enable);
+ if (error != 0) {
+ dmar_free_ctx_locked(dmar, ctx);
+ TD_PINNED_ASSERT;
+ return (NULL);
}
/*
@@ -439,11 +508,74 @@ dmar_get_ctx(struct dmar_unit *dmar, device_t dev, uint16_t rid, bool id_mapped,
return (ctx);
}
+int
+dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
+{
+ struct dmar_unit *dmar;
+ struct dmar_domain *old_domain;
+ dmar_ctx_entry_t *ctxp;
+ struct sf_buf *sf;
+ int error;
+
+ dmar = domain->dmar;
+ old_domain = ctx->domain;
+ if (domain == old_domain)
+ return (0);
+ KASSERT(old_domain->dmar == dmar,
+ ("domain %p %u moving between dmars %u %u", domain,
+ domain->domain, old_domain->dmar->unit, domain->dmar->unit));
+ TD_PREP_PINNED_ASSERT;
+
+ ctxp = dmar_map_ctx_entry(ctx, &sf);
+ DMAR_LOCK(dmar);
+ dmar_ctx_unlink(ctx);
+ ctx->domain = domain;
+ dmar_ctx_link(ctx);
+ ctx_id_entry_init(ctx, ctxp, true);
+ dmar_unmap_pgtbl(sf);
+ error = dmar_flush_for_ctx_entry(dmar, true);
+ /* If flush failed, rolling back would not work as well. */
+ printf("dmar%d rid %x domain %d->%d %s-mapped\n",
+ dmar->unit, ctx->rid, old_domain->domain, domain->domain,
+ (domain->flags & DMAR_DOMAIN_IDMAP) != 0 ? "id" : "re");
+ dmar_unref_domain_locked(dmar, old_domain);
+ TD_PINNED_ASSERT;
+ return (error);
+}
+
+static void
+dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
+{
+
+ DMAR_ASSERT_LOCKED(dmar);
+ KASSERT(domain->refs >= 1,
+ ("dmar %d domain %p refs %u", dmar->unit, domain, domain->refs));
+ KASSERT(domain->refs > domain->ctx_cnt,
+ ("dmar %d domain %p refs %d ctx_cnt %d", dmar->unit, domain,
+ domain->refs, domain->ctx_cnt));
+
+ if (domain->refs > 1) {
+ domain->refs--;
+ DMAR_UNLOCK(dmar);
+ return;
+ }
+
+ KASSERT((domain->flags & DMAR_DOMAIN_RMRR) == 0,
+ ("lost ref on RMRR domain %p", domain));
+
+ LIST_REMOVE(domain, link);
+ DMAR_UNLOCK(dmar);
+
+ taskqueue_drain(dmar->delayed_taskqueue, &domain->unload_task);
+ dmar_domain_destroy(domain);
+}
+
void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
struct sf_buf *sf;
dmar_ctx_entry_t *ctxp;
+ struct dmar_domain *domain;
DMAR_ASSERT_LOCKED(dmar);
KASSERT(ctx->refs >= 1,
@@ -459,8 +591,6 @@ dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
return;
}
- KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
- ("lost ref on RMRR ctx %p", ctx));
KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
("lost ref on disabled ctx %p", ctx));
@@ -488,8 +618,6 @@ dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
return;
}
- KASSERT((ctx->flags & DMAR_CTX_RMRR) == 0,
- ("lost ref on RMRR ctx %p", ctx));
KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
("lost ref on disabled ctx %p", ctx));
@@ -507,19 +635,11 @@ dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
else
dmar_inv_iotlb_glob(dmar);
}
- LIST_REMOVE(ctx, link);
- DMAR_UNLOCK(dmar);
-
- /*
- * The rest of the destruction is invisible for other users of
- * the dmar unit.
- */
- taskqueue_drain(dmar->delayed_taskqueue, &ctx->unload_task);
- KASSERT(TAILQ_EMPTY(&ctx->unload_entries),
- ("unfinished unloads %p", ctx));
dmar_unmap_pgtbl(sf);
- free_unr(dmar->domids, ctx->domain);
- dmar_ctx_dtr(ctx, true, true);
+ domain = ctx->domain;
+ dmar_ctx_unlink(ctx);
+ free(ctx, M_DMAR_CTX);
+ dmar_unref_domain_locked(dmar, domain);
TD_PINNED_ASSERT;
}
@@ -528,86 +648,92 @@ dmar_free_ctx(struct dmar_ctx *ctx)
{
struct dmar_unit *dmar;
- dmar = ctx->dmar;
+ dmar = ctx->domain->dmar;
DMAR_LOCK(dmar);
dmar_free_ctx_locked(dmar, ctx);
}
+/*
+ * Returns with the domain locked.
+ */
struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
{
+ struct dmar_domain *domain;
struct dmar_ctx *ctx;
DMAR_ASSERT_LOCKED(dmar);
- LIST_FOREACH(ctx, &dmar->contexts, link) {
- if (ctx->rid == rid)
- return (ctx);
+ LIST_FOREACH(domain, &dmar->domains, link) {
+ LIST_FOREACH(ctx, &domain->contexts, link) {
+ if (ctx->rid == rid)
+ return (ctx);
+ }
}
return (NULL);
}
void
-dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free)
+dmar_domain_free_entry(struct dmar_map_entry *entry, bool free)
{
- struct dmar_ctx *ctx;
+ struct dmar_domain *domain;
- ctx = entry->ctx;
- DMAR_CTX_LOCK(ctx);
+ domain = entry->domain;
+ DMAR_DOMAIN_LOCK(domain);
if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
- dmar_gas_free_region(ctx, entry);
+ dmar_gas_free_region(domain, entry);
else
- dmar_gas_free_space(ctx, entry);
- DMAR_CTX_UNLOCK(ctx);
+ dmar_gas_free_space(domain, entry);
+ DMAR_DOMAIN_UNLOCK(domain);
if (free)
- dmar_gas_free_entry(ctx, entry);
+ dmar_gas_free_entry(domain, entry);
else
entry->flags = 0;
}
void
-dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free)
+dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free)
{
struct dmar_unit *unit;
- unit = entry->ctx->dmar;
+ unit = entry->domain->dmar;
if (unit->qi_enabled) {
DMAR_LOCK(unit);
- dmar_qi_invalidate_locked(entry->ctx, entry->start,
+ dmar_qi_invalidate_locked(entry->domain, entry->start,
entry->end - entry->start, &entry->gseq);
if (!free)
entry->flags |= DMAR_MAP_ENTRY_QI_NF;
TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
DMAR_UNLOCK(unit);
} else {
- ctx_flush_iotlb_sync(entry->ctx, entry->start, entry->end -
- entry->start);
- dmar_ctx_free_entry(entry, free);
+ domain_flush_iotlb_sync(entry->domain, entry->start,
+ entry->end - entry->start);
+ dmar_domain_free_entry(entry, free);
}
}
void
-dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries,
- bool cansleep)
+dmar_domain_unload(struct dmar_domain *domain,
+ struct dmar_map_entries_tailq *entries, bool cansleep)
{
struct dmar_unit *unit;
struct dmar_map_entry *entry, *entry1;
struct dmar_qi_genseq gseq;
int error;
- unit = ctx->dmar;
+ unit = domain->dmar;
TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
- ("not mapped entry %p %p", ctx, entry));
- error = ctx_unmap_buf(ctx, entry->start, entry->end -
+ ("not mapped entry %p %p", domain, entry));
+ error = domain_unmap_buf(domain, entry->start, entry->end -
entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
- KASSERT(error == 0, ("unmap %p error %d", ctx, error));
+ KASSERT(error == 0, ("unmap %p error %d", domain, error));
if (!unit->qi_enabled) {
- ctx_flush_iotlb_sync(ctx, entry->start,
+ domain_flush_iotlb_sync(domain, entry->start,
entry->end - entry->start);
TAILQ_REMOVE(entries, entry, dmamap_link);
- dmar_ctx_free_entry(entry, true);
+ dmar_domain_free_entry(entry, true);
}
}
if (TAILQ_EMPTY(entries))
@@ -618,7 +744,7 @@ dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries,
TAILQ_FOREACH(entry, entries, dmamap_link) {
entry->gseq.gen = 0;
entry->gseq.seq = 0;
- dmar_qi_invalidate_locked(ctx, entry->start, entry->end -
+ dmar_qi_invalidate_locked(domain, entry->start, entry->end -
entry->start, TAILQ_NEXT(entry, dmamap_link) == NULL ?
&gseq : NULL);
}
@@ -631,21 +757,21 @@ dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries,
}
static void
-dmar_ctx_unload_task(void *arg, int pending)
+dmar_domain_unload_task(void *arg, int pending)
{
- struct dmar_ctx *ctx;
+ struct dmar_domain *domain;
struct dmar_map_entries_tailq entries;
- ctx = arg;
+ domain = arg;
TAILQ_INIT(&entries);
for (;;) {
- DMAR_CTX_LOCK(ctx);
- TAILQ_SWAP(&ctx->unload_entries, &entries, dmar_map_entry,
+ DMAR_DOMAIN_LOCK(domain);
+ TAILQ_SWAP(&domain->unload_entries, &entries, dmar_map_entry,
dmamap_link);
- DMAR_CTX_UNLOCK(ctx);
+ DMAR_DOMAIN_UNLOCK(domain);
if (TAILQ_EMPTY(&entries))
break;
- dmar_ctx_unload(ctx, &entries, true);
+ dmar_domain_unload(domain, &entries, true);
}
}
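A note on the new dmar_move_ctx_to_domain() added above: this commit introduces
it without an in-tree caller. The fragment below is a hypothetical sketch of how
a future consumer might use it, assuming kernel context and the usual
intel_dmar.h includes; example_move_device() and its error handling are invented
for illustration, only dmar_instantiate_ctx() and dmar_move_ctx_to_domain() are
real.

/*
 * Hypothetical future caller (not part of this commit): move the
 * translation of `dev' into `new_domain'.  Per the commit message, RMRR
 * entries needed by the device are not yet re-established in the
 * destination domain, so this would only be safe for devices without
 * RMRR requirements.
 */
static int
example_move_device(struct dmar_unit *dmar, device_t dev,
    struct dmar_domain *new_domain)
{
	struct dmar_ctx *ctx;

	/* Find or create the device context; its DMA tag keeps a ref. */
	ctx = dmar_instantiate_ctx(dmar, dev, false);
	if (ctx == NULL)
		return (ENXIO);
	/* Relink the request initiator and reprogram its context entry. */
	return (dmar_move_ctx_to_domain(new_domain, ctx));
}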
diff --git a/sys/x86/iommu/intel_dmar.h b/sys/x86/iommu/intel_dmar.h
index 401ff2f..d1945a1 100644
--- a/sys/x86/iommu/intel_dmar.h
+++ b/sys/x86/iommu/intel_dmar.h
@@ -50,10 +50,10 @@ struct dmar_map_entry {
current R/B tree node */
u_int flags;
TAILQ_ENTRY(dmar_map_entry) dmamap_link; /* Link for dmamap entries */
- RB_ENTRY(dmar_map_entry) rb_entry; /* Links for ctx entries */
+ RB_ENTRY(dmar_map_entry) rb_entry; /* Links for domain entries */
TAILQ_ENTRY(dmar_map_entry) unroll_link; /* Link for unroll after
dmamap_load failure */
- struct dmar_ctx *ctx;
+ struct dmar_domain *domain;
struct dmar_qi_genseq gseq;
};
@@ -73,51 +73,84 @@ RB_PROTOTYPE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
#define DMAR_MAP_ENTRY_SNOOP 0x4000 /* Snoop */
#define DMAR_MAP_ENTRY_TM 0x8000 /* Transient */
+/*
+ * Locking annotations:
+ * (u) - Protected by dmar unit lock
+ * (d) - Protected by domain lock
+ * (c) - Immutable after initialization
+ */
+
+/*
+ * The domain abstraction. Most non-constant members of the domain
+ * are locked by the owning dmar unit lock, not by the domain lock.
+ * Most important, dmar lock protects the contexts list.
+ *
+ * The domain lock protects the address map for the domain, and list
+ * of unload entries delayed.
+ *
+ * Page tables pages and pages content is protected by the vm object
+ * lock pgtbl_obj, which contains the page tables pages.
+ */
+struct dmar_domain {
+ int domain; /* (c) DID, written in context entry */
+ int mgaw; /* (c) Real max address width */
+ int agaw; /* (c) Adjusted guest address width */
+ int pglvl; /* (c) The pagelevel */
+ int awlvl; /* (c) The pagelevel as the bitmask,
+ to set in context entry */
+ dmar_gaddr_t end; /* (c) Highest address + 1 in
+ the guest AS */
+ u_int ctx_cnt; /* (u) Number of contexts owned */
+ u_int refs; /* (u) Refs, including ctx */
+ struct dmar_unit *dmar; /* (c) */
+ struct mtx lock; /* (c) */
+ LIST_ENTRY(dmar_domain) link; /* (u) Member in the dmar list */
+ LIST_HEAD(, dmar_ctx) contexts; /* (u) */
+ vm_object_t pgtbl_obj; /* (c) Page table pages */
+ u_int flags; /* (u) */
+ u_int entries_cnt; /* (d) */
+ struct dmar_gas_entries_tree rb_root; /* (d) */
+ struct dmar_map_entries_tailq unload_entries; /* (d) Entries to
+ unload */
+ struct dmar_map_entry *first_place, *last_place; /* (d) */
+ struct task unload_task; /* (c) */
+};
+
struct dmar_ctx {
- uint16_t rid; /* pci RID */
- int domain; /* DID */
- int mgaw; /* Real max address width */
- int agaw; /* Adjusted guest address width */
- int pglvl; /* The pagelevel */
- int awlvl; /* The pagelevel as the bitmask, to set in
- context entry */
- dmar_gaddr_t end;/* Highest address + 1 in the guest AS */
- u_int refs; /* References to the context, from tags */
- struct dmar_unit *dmar;
- struct bus_dma_tag_dmar ctx_tag; /* Root tag */
- struct mtx lock;
- LIST_ENTRY(dmar_ctx) link; /* Member in the dmar list */
- vm_object_t pgtbl_obj; /* Page table pages */
- u_int flags; /* Protected by dmar lock */
+ struct bus_dma_tag_dmar ctx_tag; /* (c) Root tag */
+ uint16_t rid; /* (c) pci RID */
uint64_t last_fault_rec[2]; /* Last fault reported */
- u_int entries_cnt;
- u_long loads;
- u_long unloads;
- struct dmar_gas_entries_tree rb_root;
- struct dmar_map_entries_tailq unload_entries; /* Entries to unload */
- struct dmar_map_entry *first_place, *last_place;
- struct task unload_task;
+ struct dmar_domain *domain; /* (c) */
+ LIST_ENTRY(dmar_ctx) link; /* (u) Member in the domain list */
+ u_int refs; /* (u) References from tags */
+ u_int flags; /* (u) */
+ u_long loads; /* atomic updates, for stat only */
+ u_long unloads; /* same */
};
+#define DMAR_DOMAIN_GAS_INITED 0x0001
+#define DMAR_DOMAIN_PGTBL_INITED 0x0002
+#define DMAR_DOMAIN_IDMAP 0x0010 /* Domain uses identity
+ page table */
+#define DMAR_DOMAIN_RMRR 0x0020 /* Domain contains RMRR entry,
+ cannot be turned off */
+
/* struct dmar_ctx flags */
#define DMAR_CTX_FAULTED 0x0001 /* Fault was reported,
last_fault_rec is valid */
-#define DMAR_CTX_IDMAP 0x0002 /* Context uses identity page table */
-#define DMAR_CTX_RMRR 0x0004 /* Context contains RMRR entry,
- cannot be turned off */
-#define DMAR_CTX_DISABLED 0x0008 /* Device is disabled, the
+#define DMAR_CTX_DISABLED 0x0002 /* Device is disabled, the
ephemeral reference is kept
to prevent context destruction */
-#define DMAR_CTX_PGLOCK(ctx) VM_OBJECT_WLOCK((ctx)->pgtbl_obj)
-#define DMAR_CTX_PGTRYLOCK(ctx) VM_OBJECT_TRYWLOCK((ctx)->pgtbl_obj)
-#define DMAR_CTX_PGUNLOCK(ctx) VM_OBJECT_WUNLOCK((ctx)->pgtbl_obj)
-#define DMAR_CTX_ASSERT_PGLOCKED(ctx) \
- VM_OBJECT_ASSERT_WLOCKED((ctx)->pgtbl_obj)
+#define DMAR_DOMAIN_PGLOCK(dom) VM_OBJECT_WLOCK((dom)->pgtbl_obj)
+#define DMAR_DOMAIN_PGTRYLOCK(dom) VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
+#define DMAR_DOMAIN_PGUNLOCK(dom) VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
+#define DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
+ VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)
-#define DMAR_CTX_LOCK(ctx) mtx_lock(&(ctx)->lock)
-#define DMAR_CTX_UNLOCK(ctx) mtx_unlock(&(ctx)->lock)
-#define DMAR_CTX_ASSERT_LOCKED(ctx) mtx_assert(&(ctx)->lock, MA_OWNED)
+#define DMAR_DOMAIN_LOCK(dom) mtx_lock(&(dom)->lock)
+#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
+#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
struct dmar_msi_data {
int irq;
@@ -157,7 +190,7 @@ struct dmar_unit {
/* Data for being a dmar */
struct mtx lock;
- LIST_HEAD(, dmar_ctx) contexts;
+ LIST_HEAD(, dmar_domain) domains;
struct unrhdr *domids;
vm_object_t ctx_obj;
u_int barrier_flags;
@@ -228,13 +261,13 @@ struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);
u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
-int ctx_set_agaw(struct dmar_ctx *ctx, int mgaw);
-int dmar_maxaddr2mgaw(struct dmar_unit* unit, dmar_gaddr_t maxaddr,
+int domain_set_agaw(struct dmar_domain *domain, int mgaw);
+int dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr,
bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
-int ctx_is_sp_lvl(struct dmar_ctx *ctx, int lvl);
+int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
-dmar_gaddr_t ctx_page_size(struct dmar_ctx *ctx, int lvl);
+dmar_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
dmar_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
@@ -268,55 +301,61 @@ void dmar_enable_qi_intr(struct dmar_unit *unit);
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
-void dmar_qi_invalidate_locked(struct dmar_ctx *ctx, dmar_gaddr_t start,
+void dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t size, struct dmar_qi_genseq *pseq);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);
-vm_object_t ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr);
+vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
+ dmar_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
-int ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
- vm_page_t *ma, uint64_t pflags, int flags);
-int ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
- int flags);
-void ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base,
+int domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base,
+ dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
+int domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
+ dmar_gaddr_t size, int flags);
+void domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size);
-int ctx_alloc_pgtbl(struct dmar_ctx *ctx);
-void ctx_free_pgtbl(struct dmar_ctx *ctx);
+int domain_alloc_pgtbl(struct dmar_domain *domain);
+void domain_free_pgtbl(struct dmar_domain *domain);
struct dmar_ctx *dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev,
bool rmrr);
-struct dmar_ctx *dmar_get_ctx(struct dmar_unit *dmar, device_t dev,
+struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
uint16_t rid, bool id_mapped, bool rmrr_init);
+int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
void dmar_free_ctx(struct dmar_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
-void dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free);
-void dmar_ctx_unload(struct dmar_ctx *ctx,
+void dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free);
+void dmar_domain_unload(struct dmar_domain *domain,
struct dmar_map_entries_tailq *entries, bool cansleep);
-void dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free);
+void dmar_domain_free_entry(struct dmar_map_entry *entry, bool free);
int dmar_init_busdma(struct dmar_unit *unit);
void dmar_fini_busdma(struct dmar_unit *unit);
device_t dmar_get_requester(device_t dev, uint16_t *rid);
-void dmar_gas_init_ctx(struct dmar_ctx *ctx);
-void dmar_gas_fini_ctx(struct dmar_ctx *ctx);
-struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_ctx *ctx, u_int flags);
-void dmar_gas_free_entry(struct dmar_ctx *ctx, struct dmar_map_entry *entry);
-void dmar_gas_free_space(struct dmar_ctx *ctx, struct dmar_map_entry *entry);
-int dmar_gas_map(struct dmar_ctx *ctx, const struct bus_dma_tag_common *common,
- dmar_gaddr_t size, int offset, u_int eflags, u_int flags, vm_page_t *ma,
- struct dmar_map_entry **res);
-void dmar_gas_free_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry);
-int dmar_gas_map_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry,
- u_int eflags, u_int flags, vm_page_t *ma);
-int dmar_gas_reserve_region(struct dmar_ctx *ctx, dmar_gaddr_t start,
+void dmar_gas_init_domain(struct dmar_domain *domain);
+void dmar_gas_fini_domain(struct dmar_domain *domain);
+struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_domain *domain,
+ u_int flags);
+void dmar_gas_free_entry(struct dmar_domain *domain,
+ struct dmar_map_entry *entry);
+void dmar_gas_free_space(struct dmar_domain *domain,
+ struct dmar_map_entry *entry);
+int dmar_gas_map(struct dmar_domain *domain,
+ const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset,
+ u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res);
+void dmar_gas_free_region(struct dmar_domain *domain,
+ struct dmar_map_entry *entry);
+int dmar_gas_map_region(struct dmar_domain *domain,
+ struct dmar_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
+int dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t end);
-void dmar_ctx_parse_rmrr(struct dmar_ctx *ctx, device_t dev,
+void dmar_dev_parse_rmrr(struct dmar_domain *domain, device_t dev,
struct dmar_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar);
diff --git a/sys/x86/iommu/intel_drv.c b/sys/x86/iommu/intel_drv.c
index c239579..47588af 100644
--- a/sys/x86/iommu/intel_drv.c
+++ b/sys/x86/iommu/intel_drv.c
@@ -459,6 +459,7 @@ dmar_attach(device_t dev)
mtx_init(&unit->lock, "dmarhw", NULL, MTX_DEF);
unit->domids = new_unrhdr(0, dmar_nd2mask(DMAR_CAP_ND(unit->hw_cap)),
&unit->lock);
+ LIST_INIT(&unit->domains);
/*
* 9.2 "Context Entry":
@@ -842,7 +843,7 @@ dmar_find_ioapic(u_int apic_id, uint16_t *rid)
}
struct rmrr_iter_args {
- struct dmar_ctx *ctx;
+ struct dmar_domain *domain;
device_t dev;
int dev_domain;
int dev_busno;
@@ -887,7 +888,8 @@ dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
if (match == 1) {
if (dmar_match_verbose)
printf("matched\n");
- entry = dmar_gas_alloc_entry(ria->ctx, DMAR_PGF_WAITOK);
+ entry = dmar_gas_alloc_entry(ria->domain,
+ DMAR_PGF_WAITOK);
entry->start = resmem->BaseAddress;
/* The RMRR entry end address is inclusive. */
entry->end = resmem->EndAddress;
@@ -902,7 +904,7 @@ dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
}
void
-dmar_ctx_parse_rmrr(struct dmar_ctx *ctx, device_t dev,
+dmar_dev_parse_rmrr(struct dmar_domain *domain, device_t dev,
struct dmar_map_entries_tailq *rmrr_entries)
{
struct rmrr_iter_args ria;
@@ -918,7 +920,7 @@ dmar_ctx_parse_rmrr(struct dmar_ctx *ctx, device_t dev,
dev_path);
}
- ria.ctx = ctx;
+ ria.domain = domain;
ria.dev = dev;
ria.dev_path = dev_path;
ria.rmrr_entries = rmrr_entries;
@@ -1038,7 +1040,7 @@ dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar)
printf("dmar%d: instantiating RMRR contexts\n", dmar->unit);
dmar_iterate_tbl(dmar_inst_rmrr_iter, &iria);
DMAR_LOCK(dmar);
- if (!LIST_EMPTY(&dmar->contexts)) {
+ if (!LIST_EMPTY(&dmar->domains)) {
KASSERT((dmar->hw_gcmd & DMAR_GCMD_TE) == 0,
("dmar%d: RMRR not handled but translation is already enabled",
dmar->unit));
@@ -1053,7 +1055,7 @@ dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar)
#include <ddb/db_lex.h>
static void
-dmar_print_ctx_entry(const struct dmar_map_entry *entry)
+dmar_print_domain_entry(const struct dmar_map_entry *entry)
{
struct dmar_map_entry *l, *r;
@@ -1077,43 +1079,59 @@ dmar_print_ctx_entry(const struct dmar_map_entry *entry)
}
static void
-dmar_print_ctx(struct dmar_ctx *ctx, bool show_mappings)
+dmar_print_ctx(struct dmar_ctx *ctx)
{
- struct dmar_map_entry *entry;
db_printf(
- " @%p pci%d:%d:%d dom %d mgaw %d agaw %d pglvl %d end %jx\n"
- " refs %d flags %x pgobj %p map_ents %u loads %lu unloads %lu\n",
+ " @%p pci%d:%d:%d refs %d flags %x loads %lu unloads %lu\n",
ctx, pci_get_bus(ctx->ctx_tag.owner),
pci_get_slot(ctx->ctx_tag.owner),
- pci_get_function(ctx->ctx_tag.owner), ctx->domain, ctx->mgaw,
- ctx->agaw, ctx->pglvl, (uintmax_t)ctx->end, ctx->refs,
- ctx->flags, ctx->pgtbl_obj, ctx->entries_cnt, ctx->loads,
- ctx->unloads);
+ pci_get_function(ctx->ctx_tag.owner), ctx->refs, ctx->flags,
+ ctx->loads, ctx->unloads);
+}
+
+static void
+dmar_print_domain(struct dmar_domain *domain, bool show_mappings)
+{
+ struct dmar_map_entry *entry;
+ struct dmar_ctx *ctx;
+
+ db_printf(
+ " @%p dom %d mgaw %d agaw %d pglvl %d end %jx refs %d\n"
+ " ctx_cnt %d flags %x pgobj %p map_ents %u\n",
+ domain, domain->domain, domain->mgaw, domain->agaw, domain->pglvl,
+ (uintmax_t)domain->end, domain->refs, domain->ctx_cnt,
+ domain->flags, domain->pgtbl_obj, domain->entries_cnt);
+ if (!LIST_EMPTY(&domain->contexts)) {
+ db_printf(" Contexts:\n");
+ LIST_FOREACH(ctx, &domain->contexts, link)
+ dmar_print_ctx(ctx);
+ }
if (!show_mappings)
return;
db_printf(" mapped:\n");
- RB_FOREACH(entry, dmar_gas_entries_tree, &ctx->rb_root) {
- dmar_print_ctx_entry(entry);
+ RB_FOREACH(entry, dmar_gas_entries_tree, &domain->rb_root) {
+ dmar_print_domain_entry(entry);
if (db_pager_quit)
break;
}
if (db_pager_quit)
return;
db_printf(" unloading:\n");
- TAILQ_FOREACH(entry, &ctx->unload_entries, dmamap_link) {
- dmar_print_ctx_entry(entry);
+ TAILQ_FOREACH(entry, &domain->unload_entries, dmamap_link) {
+ dmar_print_domain_entry(entry);
if (db_pager_quit)
break;
}
}
-DB_FUNC(dmar_ctx, db_dmar_print_ctx, db_show_table, CS_OWN, NULL)
+DB_FUNC(dmar_domain, db_dmar_print_domain, db_show_table, CS_OWN, NULL)
{
struct dmar_unit *unit;
+ struct dmar_domain *domain;
struct dmar_ctx *ctx;
bool show_mappings, valid;
- int domain, bus, device, function, i, t;
+ int pci_domain, bus, device, function, i, t;
db_expr_t radix;
valid = false;
@@ -1134,7 +1152,7 @@ DB_FUNC(dmar_ctx, db_dmar_print_ctx, db_show_table, CS_OWN, NULL)
show_mappings = false;
}
if (t == tNUMBER) {
- domain = db_tok_number;
+ pci_domain = db_tok_number;
t = db_read_token();
if (t == tNUMBER) {
bus = db_tok_number;
@@ -1152,19 +1170,24 @@ DB_FUNC(dmar_ctx, db_dmar_print_ctx, db_show_table, CS_OWN, NULL)
db_radix = radix;
db_skip_to_eol();
if (!valid) {
- db_printf("usage: show dmar_ctx [/m] "
+ db_printf("usage: show dmar_domain [/m] "
"<domain> <bus> <device> <func>\n");
return;
}
for (i = 0; i < dmar_devcnt; i++) {
unit = device_get_softc(dmar_devs[i]);
- LIST_FOREACH(ctx, &unit->contexts, link) {
- if (domain == unit->segment &&
- bus == pci_get_bus(ctx->ctx_tag.owner) &&
- device == pci_get_slot(ctx->ctx_tag.owner) &&
- function == pci_get_function(ctx->ctx_tag.owner)) {
- dmar_print_ctx(ctx, show_mappings);
- goto out;
+ LIST_FOREACH(domain, &unit->domains, link) {
+ LIST_FOREACH(ctx, &domain->contexts, link) {
+ if (pci_domain == unit->segment &&
+ bus == pci_get_bus(ctx->ctx_tag.owner) &&
+ device ==
+ pci_get_slot(ctx->ctx_tag.owner) &&
+ function ==
+ pci_get_function(ctx->ctx_tag.owner)) {
+ dmar_print_domain(domain,
+ show_mappings);
+ goto out;
+ }
}
}
}
@@ -1172,10 +1195,10 @@ out:;
}
static void
-dmar_print_one(int idx, bool show_ctxs, bool show_mappings)
+dmar_print_one(int idx, bool show_domains, bool show_mappings)
{
struct dmar_unit *unit;
- struct dmar_ctx *ctx;
+ struct dmar_domain *domain;
int i, frir;
unit = device_get_softc(dmar_devs[idx]);
@@ -1187,6 +1210,10 @@ dmar_print_one(int idx, bool show_ctxs, bool show_mappings)
dmar_read4(unit, DMAR_GSTS_REG),
dmar_read4(unit, DMAR_FSTS_REG),
dmar_read4(unit, DMAR_FECTL_REG));
+ if (unit->ir_enabled) {
+ db_printf("ir is enabled; IRT @%p phys 0x%jx maxcnt %d\n",
+ unit->irt, (uintmax_t)unit->irt_phys, unit->irte_cnt);
+ }
db_printf("fed 0x%x fea 0x%x feua 0x%x\n",
dmar_read4(unit, DMAR_FEDATA_REG),
dmar_read4(unit, DMAR_FEADDR_REG),
@@ -1225,10 +1252,10 @@ dmar_print_one(int idx, bool show_ctxs, bool show_mappings)
db_printf("qi is disabled\n");
}
}
- if (show_ctxs) {
- db_printf("contexts:\n");
- LIST_FOREACH(ctx, &unit->contexts, link) {
- dmar_print_ctx(ctx, show_mappings);
+ if (show_domains) {
+ db_printf("domains:\n");
+ LIST_FOREACH(domain, &unit->domains, link) {
+ dmar_print_domain(domain, show_mappings);
if (db_pager_quit)
break;
}
@@ -1237,27 +1264,27 @@ dmar_print_one(int idx, bool show_ctxs, bool show_mappings)
DB_SHOW_COMMAND(dmar, db_dmar_print)
{
- bool show_ctxs, show_mappings;
+ bool show_domains, show_mappings;
- show_ctxs = strchr(modif, 'c') != NULL;
+ show_domains = strchr(modif, 'd') != NULL;
show_mappings = strchr(modif, 'm') != NULL;
if (!have_addr) {
- db_printf("usage: show dmar [/c] [/m] index\n");
+ db_printf("usage: show dmar [/d] [/m] index\n");
return;
}
- dmar_print_one((int)addr, show_ctxs, show_mappings);
+ dmar_print_one((int)addr, show_domains, show_mappings);
}
DB_SHOW_ALL_COMMAND(dmars, db_show_all_dmars)
{
int i;
- bool show_ctxs, show_mappings;
+ bool show_domains, show_mappings;
- show_ctxs = strchr(modif, 'c') != NULL;
+ show_domains = strchr(modif, 'd') != NULL;
show_mappings = strchr(modif, 'm') != NULL;
for (i = 0; i < dmar_devcnt; i++) {
- dmar_print_one(i, show_ctxs, show_mappings);
+ dmar_print_one(i, show_domains, show_mappings);
if (db_pager_quit)
break;
}
diff --git a/sys/x86/iommu/intel_gas.c b/sys/x86/iommu/intel_gas.c
index 5a7a730..8b18b4c 100644
--- a/sys/x86/iommu/intel_gas.c
+++ b/sys/x86/iommu/intel_gas.c
@@ -84,7 +84,7 @@ intel_gas_init(void)
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);
struct dmar_map_entry *
-dmar_gas_alloc_entry(struct dmar_ctx *ctx, u_int flags)
+dmar_gas_alloc_entry(struct dmar_domain *domain, u_int flags)
{
struct dmar_map_entry *res;
@@ -94,20 +94,20 @@ dmar_gas_alloc_entry(struct dmar_ctx *ctx, u_int flags)
res = uma_zalloc(dmar_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
if (res != NULL) {
- res->ctx = ctx;
- atomic_add_int(&ctx->entries_cnt, 1);
+ res->domain = domain;
+ atomic_add_int(&domain->entries_cnt, 1);
}
return (res);
}
void
-dmar_gas_free_entry(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
+dmar_gas_free_entry(struct dmar_domain *domain, struct dmar_map_entry *entry)
{
- KASSERT(ctx == entry->ctx,
- ("mismatched free ctx %p entry %p entry->ctx %p", ctx,
- entry, entry->ctx));
- atomic_subtract_int(&ctx->entries_cnt, 1);
+ KASSERT(domain == entry->domain,
+ ("mismatched free domain %p entry %p entry->domain %p", domain,
+ entry, entry->domain));
+ atomic_subtract_int(&domain->entries_cnt, 1);
uma_zfree(dmar_map_entry_zone, entry);
}
@@ -158,30 +158,30 @@ RB_GENERATE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
dmar_gas_cmp_entries);
static void
-dmar_gas_fix_free(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
+dmar_gas_fix_free(struct dmar_domain *domain, struct dmar_map_entry *entry)
{
struct dmar_map_entry *next;
- next = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry);
- entry->free_after = (next != NULL ? next->start : ctx->end) -
+ next = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
+ entry->free_after = (next != NULL ? next->start : domain->end) -
entry->end;
dmar_gas_augment_entry(entry);
}
#ifdef INVARIANTS
static void
-dmar_gas_check_free(struct dmar_ctx *ctx)
+dmar_gas_check_free(struct dmar_domain *domain)
{
struct dmar_map_entry *entry, *next, *l, *r;
dmar_gaddr_t v;
- RB_FOREACH(entry, dmar_gas_entries_tree, &ctx->rb_root) {
- KASSERT(ctx == entry->ctx,
- ("mismatched free ctx %p entry %p entry->ctx %p", ctx,
- entry, entry->ctx));
- next = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry);
+ RB_FOREACH(entry, dmar_gas_entries_tree, &domain->rb_root) {
+ KASSERT(domain == entry->domain,
+ ("mismatched free domain %p entry %p entry->domain %p",
+ domain, entry, entry->domain));
+ next = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
if (next == NULL) {
- MPASS(entry->free_after == ctx->end - entry->end);
+ MPASS(entry->free_after == domain->end - entry->end);
} else {
MPASS(entry->free_after = next->start - entry->end);
MPASS(entry->end <= next->start);
@@ -206,93 +206,95 @@ dmar_gas_check_free(struct dmar_ctx *ctx)
#endif
static bool
-dmar_gas_rb_insert(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
+dmar_gas_rb_insert(struct dmar_domain *domain, struct dmar_map_entry *entry)
{
struct dmar_map_entry *prev, *found;
- found = RB_INSERT(dmar_gas_entries_tree, &ctx->rb_root, entry);
- dmar_gas_fix_free(ctx, entry);
- prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry);
+ found = RB_INSERT(dmar_gas_entries_tree, &domain->rb_root, entry);
+ dmar_gas_fix_free(domain, entry);
+ prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
if (prev != NULL)
- dmar_gas_fix_free(ctx, prev);
+ dmar_gas_fix_free(domain, prev);
return (found == NULL);
}
static void
-dmar_gas_rb_remove(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
+dmar_gas_rb_remove(struct dmar_domain *domain, struct dmar_map_entry *entry)
{
struct dmar_map_entry *prev;
- prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry);
- RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry);
+ prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
+ RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
if (prev != NULL)
- dmar_gas_fix_free(ctx, prev);
+ dmar_gas_fix_free(domain, prev);
}
void
-dmar_gas_init_ctx(struct dmar_ctx *ctx)
+dmar_gas_init_domain(struct dmar_domain *domain)
{
struct dmar_map_entry *begin, *end;
- begin = dmar_gas_alloc_entry(ctx, DMAR_PGF_WAITOK);
- end = dmar_gas_alloc_entry(ctx, DMAR_PGF_WAITOK);
+ begin = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
+ end = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
- DMAR_CTX_LOCK(ctx);
- KASSERT(ctx->entries_cnt == 2, ("dirty ctx %p", ctx));
- KASSERT(RB_EMPTY(&ctx->rb_root), ("non-empty entries %p", ctx));
+ DMAR_DOMAIN_LOCK(domain);
+ KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
+ KASSERT(RB_EMPTY(&domain->rb_root), ("non-empty entries %p", domain));
begin->start = 0;
begin->end = DMAR_PAGE_SIZE;
- begin->free_after = ctx->end - begin->end;
+ begin->free_after = domain->end - begin->end;
begin->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
- dmar_gas_rb_insert(ctx, begin);
+ dmar_gas_rb_insert(domain, begin);
- end->start = ctx->end;
- end->end = ctx->end;
+ end->start = domain->end;
+ end->end = domain->end;
end->free_after = 0;
end->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
- dmar_gas_rb_insert(ctx, end);
+ dmar_gas_rb_insert(domain, end);
- ctx->first_place = begin;
- ctx->last_place = end;
- DMAR_CTX_UNLOCK(ctx);
+ domain->first_place = begin;
+ domain->last_place = end;
+ domain->flags |= DMAR_DOMAIN_GAS_INITED;
+ DMAR_DOMAIN_UNLOCK(domain);
}
void
-dmar_gas_fini_ctx(struct dmar_ctx *ctx)
+dmar_gas_fini_domain(struct dmar_domain *domain)
{
struct dmar_map_entry *entry, *entry1;
- DMAR_CTX_ASSERT_LOCKED(ctx);
- KASSERT(ctx->entries_cnt == 2, ("ctx still in use %p", ctx));
+ DMAR_DOMAIN_ASSERT_LOCKED(domain);
+ KASSERT(domain->entries_cnt == 2, ("domain still in use %p", domain));
- entry = RB_MIN(dmar_gas_entries_tree, &ctx->rb_root);
- KASSERT(entry->start == 0, ("start entry start %p", ctx));
- KASSERT(entry->end == DMAR_PAGE_SIZE, ("start entry end %p", ctx));
+ entry = RB_MIN(dmar_gas_entries_tree, &domain->rb_root);
+ KASSERT(entry->start == 0, ("start entry start %p", domain));
+ KASSERT(entry->end == DMAR_PAGE_SIZE, ("start entry end %p", domain));
KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
- ("start entry flags %p", ctx));
- RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry);
- dmar_gas_free_entry(ctx, entry);
-
- entry = RB_MAX(dmar_gas_entries_tree, &ctx->rb_root);
- KASSERT(entry->start == ctx->end, ("end entry start %p", ctx));
- KASSERT(entry->end == ctx->end, ("end entry end %p", ctx));
- KASSERT(entry->free_after == 0, ("end entry free_after%p", ctx));
+ ("start entry flags %p", domain));
+ RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
+ dmar_gas_free_entry(domain, entry);
+
+ entry = RB_MAX(dmar_gas_entries_tree, &domain->rb_root);
+ KASSERT(entry->start == domain->end, ("end entry start %p", domain));
+ KASSERT(entry->end == domain->end, ("end entry end %p", domain));
+ KASSERT(entry->free_after == 0, ("end entry free_after %p", domain));
KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
- ("end entry flags %p", ctx));
- RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry);
- dmar_gas_free_entry(ctx, entry);
+ ("end entry flags %p", domain));
+ RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
+ dmar_gas_free_entry(domain, entry);
- RB_FOREACH_SAFE(entry, dmar_gas_entries_tree, &ctx->rb_root, entry1) {
+ RB_FOREACH_SAFE(entry, dmar_gas_entries_tree, &domain->rb_root,
+ entry1) {
KASSERT((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0,
- ("non-RMRR entry left %p", ctx));
- RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry);
- dmar_gas_free_entry(ctx, entry);
+ ("non-RMRR entry left %p", domain));
+ RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
+ dmar_gas_free_entry(domain, entry);
}
}
struct dmar_gas_match_args {
- struct dmar_ctx *ctx;
+ struct dmar_domain *domain;
dmar_gaddr_t size;
int offset;
const struct bus_dma_tag_common *common;
@@ -371,12 +373,12 @@ dmar_gas_match_insert(struct dmar_gas_match_args *a,
*/
a->entry->end = a->entry->start + a->size;
- next = RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root, prev);
+ next = RB_NEXT(dmar_gas_entries_tree, &a->domain->rb_root, prev);
KASSERT(next->start >= a->entry->end &&
next->start - a->entry->start >= a->size &&
prev->end <= a->entry->end,
("dmar_gas_match_insert hole failed %p prev (%jx, %jx) "
- "free_after %jx next (%jx, %jx) entry (%jx, %jx)", a->ctx,
+ "free_after %jx next (%jx, %jx) entry (%jx, %jx)", a->domain,
(uintmax_t)prev->start, (uintmax_t)prev->end,
(uintmax_t)prev->free_after,
(uintmax_t)next->start, (uintmax_t)next->end,
@@ -385,19 +387,19 @@ dmar_gas_match_insert(struct dmar_gas_match_args *a,
prev->free_after = a->entry->start - prev->end;
a->entry->free_after = next->start - a->entry->end;
- found = dmar_gas_rb_insert(a->ctx, a->entry);
+ found = dmar_gas_rb_insert(a->domain, a->entry);
KASSERT(found, ("found dup %p start %jx size %jx",
- a->ctx, (uintmax_t)a->entry->start, (uintmax_t)a->size));
+ a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size));
a->entry->flags = DMAR_MAP_ENTRY_MAP;
- KASSERT(RB_PREV(dmar_gas_entries_tree, &a->ctx->rb_root,
+ KASSERT(RB_PREV(dmar_gas_entries_tree, &a->domain->rb_root,
a->entry) == prev,
("entry %p prev %p inserted prev %p", a->entry, prev,
- RB_PREV(dmar_gas_entries_tree, &a->ctx->rb_root, a->entry)));
- KASSERT(RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root,
+ RB_PREV(dmar_gas_entries_tree, &a->domain->rb_root, a->entry)));
+ KASSERT(RB_NEXT(dmar_gas_entries_tree, &a->domain->rb_root,
a->entry) == next,
("entry %p next %p inserted next %p", a->entry, next,
- RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root, a->entry)));
+ RB_NEXT(dmar_gas_entries_tree, &a->domain->rb_root, a->entry)));
}
static int
@@ -434,11 +436,12 @@ dmar_gas_uppermatch(struct dmar_gas_match_args *a)
struct dmar_map_entry *next, *prev, find_entry;
find_entry.start = a->common->highaddr;
- next = RB_NFIND(dmar_gas_entries_tree, &a->ctx->rb_root, &find_entry);
+ next = RB_NFIND(dmar_gas_entries_tree, &a->domain->rb_root,
+ &find_entry);
if (next == NULL)
return (ENOMEM);
- prev = RB_PREV(dmar_gas_entries_tree, &a->ctx->rb_root, next);
- KASSERT(prev != NULL, ("no prev %p %jx", a->ctx,
+ prev = RB_PREV(dmar_gas_entries_tree, &a->domain->rb_root, next);
+ KASSERT(prev != NULL, ("no prev %p %jx", a->domain,
(uintmax_t)find_entry.start));
for (;;) {
a->entry->start = prev->start + DMAR_PAGE_SIZE;
@@ -446,7 +449,7 @@ dmar_gas_uppermatch(struct dmar_gas_match_args *a)
a->entry->start = a->common->highaddr;
a->entry->start = roundup2(a->entry->start,
a->common->alignment);
- if (dmar_gas_match_one(a, prev, a->ctx->end)) {
+ if (dmar_gas_match_one(a, prev, a->domain->end)) {
dmar_gas_match_insert(a, prev);
return (0);
}
@@ -459,27 +462,28 @@ dmar_gas_uppermatch(struct dmar_gas_match_args *a)
* non-optimal way.
*/
prev = next;
- next = RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root, prev);
- KASSERT(next != NULL, ("no next %p %jx", a->ctx,
+ next = RB_NEXT(dmar_gas_entries_tree, &a->domain->rb_root,
+ prev);
+ KASSERT(next != NULL, ("no next %p %jx", a->domain,
(uintmax_t)find_entry.start));
- if (next->end >= a->ctx->end)
+ if (next->end >= a->domain->end)
return (ENOMEM);
}
}
static int
-dmar_gas_find_space(struct dmar_ctx *ctx,
+dmar_gas_find_space(struct dmar_domain *domain,
const struct bus_dma_tag_common *common, dmar_gaddr_t size,
int offset, u_int flags, struct dmar_map_entry *entry)
{
struct dmar_gas_match_args a;
int error;
- DMAR_CTX_ASSERT_LOCKED(ctx);
- KASSERT(entry->flags == 0, ("dirty entry %p %p", ctx, entry));
+ DMAR_DOMAIN_ASSERT_LOCKED(domain);
+ KASSERT(entry->flags == 0, ("dirty entry %p %p", domain, entry));
KASSERT((size & DMAR_PAGE_MASK) == 0, ("size %jx", (uintmax_t)size));
- a.ctx = ctx;
+ a.domain = domain;
a.size = size;
a.offset = offset;
a.common = common;
@@ -488,14 +492,14 @@ dmar_gas_find_space(struct dmar_ctx *ctx,
/* Handle lower region. */
if (common->lowaddr > 0) {
- error = dmar_gas_lowermatch(&a, RB_ROOT(&ctx->rb_root));
+ error = dmar_gas_lowermatch(&a, RB_ROOT(&domain->rb_root));
if (error == 0)
return (0);
KASSERT(error == ENOMEM,
("error %d from dmar_gas_lowermatch", error));
}
/* Handle upper region. */
- if (common->highaddr >= ctx->end)
+ if (common->highaddr >= domain->end)
return (ENOMEM);
error = dmar_gas_uppermatch(&a);
KASSERT(error == ENOMEM,
@@ -504,26 +508,26 @@ dmar_gas_find_space(struct dmar_ctx *ctx,
}
static int
-dmar_gas_alloc_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry,
+dmar_gas_alloc_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
u_int flags)
{
struct dmar_map_entry *next, *prev;
bool found;
- DMAR_CTX_ASSERT_LOCKED(ctx);
+ DMAR_DOMAIN_ASSERT_LOCKED(domain);
if ((entry->start & DMAR_PAGE_MASK) != 0 ||
(entry->end & DMAR_PAGE_MASK) != 0)
return (EINVAL);
if (entry->start >= entry->end)
return (EINVAL);
- if (entry->end >= ctx->end)
+ if (entry->end >= domain->end)
return (EINVAL);
- next = RB_NFIND(dmar_gas_entries_tree, &ctx->rb_root, entry);
- KASSERT(next != NULL, ("next must be non-null %p %jx", ctx,
+ next = RB_NFIND(dmar_gas_entries_tree, &domain->rb_root, entry);
+ KASSERT(next != NULL, ("next must be non-null %p %jx", domain,
(uintmax_t)entry->start));
- prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, next);
+ prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, next);
/* prev could be NULL */
/*
@@ -551,23 +555,23 @@ dmar_gas_alloc_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry,
if (prev != NULL && prev->end > entry->start) {
/* This assumes that prev is the placeholder entry. */
- dmar_gas_rb_remove(ctx, prev);
+ dmar_gas_rb_remove(domain, prev);
prev = NULL;
}
if (next != NULL && next->start < entry->end) {
- dmar_gas_rb_remove(ctx, next);
+ dmar_gas_rb_remove(domain, next);
next = NULL;
}
- found = dmar_gas_rb_insert(ctx, entry);
+ found = dmar_gas_rb_insert(domain, entry);
KASSERT(found, ("found RMRR dup %p start %jx end %jx",
- ctx, (uintmax_t)entry->start, (uintmax_t)entry->end));
+ domain, (uintmax_t)entry->start, (uintmax_t)entry->end));
entry->flags = DMAR_MAP_ENTRY_RMRR;
#ifdef INVARIANTS
struct dmar_map_entry *ip, *in;
- ip = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry);
- in = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry);
+ ip = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
+ in = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
KASSERT(prev == NULL || ip == prev,
("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)",
entry, entry->start, entry->end, prev,
@@ -584,47 +588,47 @@ dmar_gas_alloc_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry,
}
void
-dmar_gas_free_space(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
+dmar_gas_free_space(struct dmar_domain *domain, struct dmar_map_entry *entry)
{
- DMAR_CTX_ASSERT_LOCKED(ctx);
+ DMAR_DOMAIN_ASSERT_LOCKED(domain);
KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_MAP,
- ("permanent entry %p %p", ctx, entry));
+ ("permanent entry %p %p", domain, entry));
- dmar_gas_rb_remove(ctx, entry);
+ dmar_gas_rb_remove(domain, entry);
entry->flags &= ~DMAR_MAP_ENTRY_MAP;
#ifdef INVARIANTS
if (dmar_check_free)
- dmar_gas_check_free(ctx);
+ dmar_gas_check_free(domain);
#endif
}
void
-dmar_gas_free_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
+dmar_gas_free_region(struct dmar_domain *domain, struct dmar_map_entry *entry)
{
struct dmar_map_entry *next, *prev;
- DMAR_CTX_ASSERT_LOCKED(ctx);
+ DMAR_DOMAIN_ASSERT_LOCKED(domain);
KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_RMRR,
- ("non-RMRR entry %p %p", ctx, entry));
+ ("non-RMRR entry %p %p", domain, entry));
- prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry);
- next = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry);
- dmar_gas_rb_remove(ctx, entry);
+ prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
+ next = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
+ dmar_gas_rb_remove(domain, entry);
entry->flags &= ~DMAR_MAP_ENTRY_RMRR;
if (prev == NULL)
- dmar_gas_rb_insert(ctx, ctx->first_place);
+ dmar_gas_rb_insert(domain, domain->first_place);
if (next == NULL)
- dmar_gas_rb_insert(ctx, ctx->last_place);
+ dmar_gas_rb_insert(domain, domain->last_place);
}
int
-dmar_gas_map(struct dmar_ctx *ctx, const struct bus_dma_tag_common *common,
- dmar_gaddr_t size, int offset, u_int eflags, u_int flags, vm_page_t *ma,
- struct dmar_map_entry **res)
+dmar_gas_map(struct dmar_domain *domain,
+ const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset,
+ u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res)
{
struct dmar_map_entry *entry;
int error;
@@ -632,70 +636,72 @@ dmar_gas_map(struct dmar_ctx *ctx, const struct bus_dma_tag_common *common,
KASSERT((flags & ~(DMAR_GM_CANWAIT | DMAR_GM_CANSPLIT)) == 0,
("invalid flags 0x%x", flags));
- entry = dmar_gas_alloc_entry(ctx, (flags & DMAR_GM_CANWAIT) != 0 ?
+ entry = dmar_gas_alloc_entry(domain, (flags & DMAR_GM_CANWAIT) != 0 ?
DMAR_PGF_WAITOK : 0);
if (entry == NULL)
return (ENOMEM);
- DMAR_CTX_LOCK(ctx);
- error = dmar_gas_find_space(ctx, common, size, offset, flags, entry);
+ DMAR_DOMAIN_LOCK(domain);
+ error = dmar_gas_find_space(domain, common, size, offset, flags,
+ entry);
if (error == ENOMEM) {
- DMAR_CTX_UNLOCK(ctx);
- dmar_gas_free_entry(ctx, entry);
+ DMAR_DOMAIN_UNLOCK(domain);
+ dmar_gas_free_entry(domain, entry);
return (error);
}
#ifdef INVARIANTS
if (dmar_check_free)
- dmar_gas_check_free(ctx);
+ dmar_gas_check_free(domain);
#endif
KASSERT(error == 0,
("unexpected error %d from dmar_gas_find_entry", error));
- KASSERT(entry->end < ctx->end, ("allocated GPA %jx, max GPA %jx",
- (uintmax_t)entry->end, (uintmax_t)ctx->end));
+ KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
+ (uintmax_t)entry->end, (uintmax_t)domain->end));
entry->flags |= eflags;
- DMAR_CTX_UNLOCK(ctx);
+ DMAR_DOMAIN_UNLOCK(domain);
- error = ctx_map_buf(ctx, entry->start, entry->end - entry->start, ma,
+ error = domain_map_buf(domain, entry->start, entry->end - entry->start,
+ ma,
((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
(flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (error == ENOMEM) {
- dmar_ctx_unload_entry(entry, true);
+ dmar_domain_unload_entry(entry, true);
return (error);
}
KASSERT(error == 0,
- ("unexpected error %d from ctx_map_buf", error));
+ ("unexpected error %d from domain_map_buf", error));
*res = entry;
return (0);
}
int
-dmar_gas_map_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry,
+dmar_gas_map_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
u_int eflags, u_int flags, vm_page_t *ma)
{
dmar_gaddr_t start;
int error;
- KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", ctx,
+ KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
entry, entry->flags));
KASSERT((flags & ~(DMAR_GM_CANWAIT)) == 0,
("invalid flags 0x%x", flags));
start = entry->start;
- DMAR_CTX_LOCK(ctx);
- error = dmar_gas_alloc_region(ctx, entry, flags);
+ DMAR_DOMAIN_LOCK(domain);
+ error = dmar_gas_alloc_region(domain, entry, flags);
if (error != 0) {
- DMAR_CTX_UNLOCK(ctx);
+ DMAR_DOMAIN_UNLOCK(domain);
return (error);
}
entry->flags |= eflags;
- DMAR_CTX_UNLOCK(ctx);
+ DMAR_DOMAIN_UNLOCK(domain);
if (entry->end == entry->start)
return (0);
- error = ctx_map_buf(ctx, entry->start, entry->end - entry->start,
+ error = domain_map_buf(domain, entry->start, entry->end - entry->start,
ma + OFF_TO_IDX(start - entry->start),
((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
@@ -703,31 +709,31 @@ dmar_gas_map_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry,
((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
(flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (error == ENOMEM) {
- dmar_ctx_unload_entry(entry, false);
+ dmar_domain_unload_entry(entry, false);
return (error);
}
KASSERT(error == 0,
- ("unexpected error %d from ctx_map_buf", error));
+ ("unexpected error %d from domain_map_buf", error));
return (0);
}
int
-dmar_gas_reserve_region(struct dmar_ctx *ctx, dmar_gaddr_t start,
+dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t end)
{
struct dmar_map_entry *entry;
int error;
- entry = dmar_gas_alloc_entry(ctx, DMAR_PGF_WAITOK);
+ entry = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
entry->start = start;
entry->end = end;
- DMAR_CTX_LOCK(ctx);
- error = dmar_gas_alloc_region(ctx, entry, DMAR_GM_CANWAIT);
+ DMAR_DOMAIN_LOCK(domain);
+ error = dmar_gas_alloc_region(domain, entry, DMAR_GM_CANWAIT);
if (error == 0)
entry->flags |= DMAR_MAP_ENTRY_UNMAPPED;
- DMAR_CTX_UNLOCK(ctx);
+ DMAR_DOMAIN_UNLOCK(domain);
if (error != 0)
- dmar_gas_free_entry(ctx, entry);
+ dmar_gas_free_entry(domain, entry);
return (error);
}
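As a hedged illustration of the reworked intel_gas.c interface above (not part of the commit): a minimal consumer sketch that maps a run of pages into a domain's guest address space and tears the mapping down again. The wrapper example_gas_roundtrip() and its parameters are hypothetical; dmar_gas_map(), dmar_domain_unload_entry() and the flag values are taken from the hunks above, and the same includes as busdma_dmar.c are assumed.

static int
example_gas_roundtrip(struct dmar_domain *domain,
    const struct bus_dma_tag_common *common, vm_page_t *ma, int npages)
{
	struct dmar_map_entry *entry;
	int error;

	/* Allocate guest address space and populate the page tables. */
	error = dmar_gas_map(domain, common,
	    (dmar_gaddr_t)npages << DMAR_PAGE_SHIFT, 0,
	    DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
	    DMAR_GM_CANWAIT, ma, &entry);
	if (error != 0)
		return (error);

	/* Tear the mapping down; true also frees the map entry. */
	dmar_domain_unload_entry(entry, true);
	return (0);
}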
diff --git a/sys/x86/iommu/intel_idpgtbl.c b/sys/x86/iommu/intel_idpgtbl.c
index b9492dd..e105b9b 100644
--- a/sys/x86/iommu/intel_idpgtbl.c
+++ b/sys/x86/iommu/intel_idpgtbl.c
@@ -66,8 +66,8 @@ __FBSDID("$FreeBSD$");
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>
-static int ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
- dmar_gaddr_t size, int flags);
+static int domain_unmap_buf_locked(struct dmar_domain *domain,
+ dmar_gaddr_t base, dmar_gaddr_t size, int flags);
/*
* The cache of the identity mapping page tables for the DMARs. Using
@@ -105,7 +105,7 @@ static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl",
* mapped by the page table page.
*/
static void
-ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
+domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
dmar_gaddr_t addr)
{
vm_page_t m1;
@@ -124,7 +124,7 @@ ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
pg_sz = pglvl_page_size(tbl->pglvl, lvl);
if (lvl != tbl->leaf) {
for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz)
- ctx_idmap_nextlvl(tbl, lvl + 1, base + i, f);
+ domain_idmap_nextlvl(tbl, lvl + 1, base + i, f);
}
VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, DMAR_PGF_WAITOK, &sf);
@@ -146,7 +146,7 @@ ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W;
}
}
- /* ctx_get_idmap_pgtbl flushes CPU cache if needed. */
+ /* domain_get_idmap_pgtbl flushes CPU cache if needed. */
dmar_unmap_pgtbl(sf);
VM_OBJECT_WLOCK(tbl->pgtbl_obj);
}
@@ -160,7 +160,7 @@ ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
* maxaddr is typically mapped.
*/
vm_object_t
-ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr)
+domain_get_idmap_pgtbl(struct dmar_domain *domain, dmar_gaddr_t maxaddr)
{
struct dmar_unit *unit;
struct idpgtbl *tbl;
@@ -173,8 +173,8 @@ ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr)
/*
* First, determine where to stop the paging structures.
*/
- for (i = 0; i < ctx->pglvl; i++) {
- if (i == ctx->pglvl - 1 || ctx_is_sp_lvl(ctx, i)) {
+ for (i = 0; i < domain->pglvl; i++) {
+ if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) {
leaf = i;
break;
}
@@ -191,12 +191,12 @@ ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr)
sx_slock(&idpgtbl_lock);
LIST_FOREACH(tbl, &idpgtbls, link) {
if (tbl->maxaddr >= maxaddr &&
- dmar_pglvl_supported(ctx->dmar, tbl->pglvl) &&
+ dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
tbl->leaf == leaf) {
res = tbl->pgtbl_obj;
vm_object_reference(res);
sx_sunlock(&idpgtbl_lock);
- ctx->pglvl = tbl->pglvl; /* XXXKIB ? */
+ domain->pglvl = tbl->pglvl; /* XXXKIB ? */
goto end;
}
}
@@ -210,12 +210,12 @@ ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr)
sx_xlock(&idpgtbl_lock);
LIST_FOREACH(tbl, &idpgtbls, link) {
if (tbl->maxaddr >= maxaddr &&
- dmar_pglvl_supported(ctx->dmar, tbl->pglvl) &&
+ dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
tbl->leaf == leaf) {
res = tbl->pgtbl_obj;
vm_object_reference(res);
sx_xunlock(&idpgtbl_lock);
- ctx->pglvl = tbl->pglvl; /* XXXKIB ? */
+ domain->pglvl = tbl->pglvl; /* XXXKIB ? */
return (res);
}
}
@@ -224,13 +224,13 @@ ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr)
* Still not found, create new page table.
*/
tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK);
- tbl->pglvl = ctx->pglvl;
+ tbl->pglvl = domain->pglvl;
tbl->leaf = leaf;
tbl->maxaddr = maxaddr;
tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);
VM_OBJECT_WLOCK(tbl->pgtbl_obj);
- ctx_idmap_nextlvl(tbl, 0, 0, 0);
+ domain_idmap_nextlvl(tbl, 0, 0, 0);
VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
LIST_INSERT_HEAD(&idpgtbls, tbl, link);
res = tbl->pgtbl_obj;
@@ -251,7 +251,7 @@ end:
* If DMAR cannot look into the chipset write buffer, flush it
* as well.
*/
- unit = ctx->dmar;
+ unit = domain->dmar;
if (!DMAR_IS_COHERENT(unit)) {
VM_OBJECT_WLOCK(res);
for (m = vm_page_lookup(res, 0); m != NULL;
@@ -320,10 +320,11 @@ put_idmap_pgtbl(vm_object_t obj)
* the level lvl.
*/
static int
-ctx_pgtbl_pte_off(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl)
+domain_pgtbl_pte_off(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
{
- base >>= DMAR_PAGE_SHIFT + (ctx->pglvl - lvl - 1) * DMAR_NPTEPGSHIFT;
+ base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
+ DMAR_NPTEPGSHIFT;
return (base & DMAR_PTEMASK);
}
@@ -333,31 +334,34 @@ ctx_pgtbl_pte_off(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl)
* lvl.
*/
static vm_pindex_t
-ctx_pgtbl_get_pindex(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl)
+domain_pgtbl_get_pindex(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
{
vm_pindex_t idx, pidx;
int i;
- KASSERT(lvl >= 0 && lvl < ctx->pglvl, ("wrong lvl %p %d", ctx, lvl));
+ KASSERT(lvl >= 0 && lvl < domain->pglvl,
+ ("wrong lvl %p %d", domain, lvl));
- for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx)
- idx = ctx_pgtbl_pte_off(ctx, base, i) + pidx * DMAR_NPTEPG + 1;
+ for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx) {
+ idx = domain_pgtbl_pte_off(domain, base, i) +
+ pidx * DMAR_NPTEPG + 1;
+ }
return (idx);
}
static dmar_pte_t *
-ctx_pgtbl_map_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags,
- vm_pindex_t *idxp, struct sf_buf **sf)
+domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
+ int flags, vm_pindex_t *idxp, struct sf_buf **sf)
{
vm_page_t m;
struct sf_buf *sfp;
dmar_pte_t *pte, *ptep;
vm_pindex_t idx, idx1;
- DMAR_CTX_ASSERT_PGLOCKED(ctx);
+ DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
KASSERT((flags & DMAR_PGF_OBJL) != 0, ("lost PGF_OBJL"));
- idx = ctx_pgtbl_get_pindex(ctx, base, lvl);
+ idx = domain_pgtbl_get_pindex(domain, base, lvl);
if (*sf != NULL && idx == *idxp) {
pte = (dmar_pte_t *)sf_buf_kva(*sf);
} else {
@@ -365,14 +369,15 @@ ctx_pgtbl_map_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags,
dmar_unmap_pgtbl(*sf);
*idxp = idx;
retry:
- pte = dmar_map_pgtbl(ctx->pgtbl_obj, idx, flags, sf);
+ pte = dmar_map_pgtbl(domain->pgtbl_obj, idx, flags, sf);
if (pte == NULL) {
- KASSERT(lvl > 0, ("lost root page table page %p", ctx));
+ KASSERT(lvl > 0,
+ ("lost root page table page %p", domain));
/*
* Page table page does not exist, allocate
* it and create the pte in the upper level.
*/
- m = dmar_pgalloc(ctx->pgtbl_obj, idx, flags |
+ m = dmar_pgalloc(domain->pgtbl_obj, idx, flags |
DMAR_PGF_ZERO);
if (m == NULL)
return (NULL);
@@ -380,25 +385,26 @@ retry:
/*
* Prevent potential free while pgtbl_obj is
* unlocked in the recursive call to
- * ctx_pgtbl_map_pte(), if other thread did
+ * domain_pgtbl_map_pte(), if other thread did
* pte write and clean while the lock is
* dropped.
*/
m->wire_count++;
sfp = NULL;
- ptep = ctx_pgtbl_map_pte(ctx, base, lvl - 1, flags,
- &idx1, &sfp);
+ ptep = domain_pgtbl_map_pte(domain, base, lvl - 1,
+ flags, &idx1, &sfp);
if (ptep == NULL) {
KASSERT(m->pindex != 0,
- ("loosing root page %p", ctx));
+ ("loosing root page %p", domain));
m->wire_count--;
- dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags);
+ dmar_pgfree(domain->pgtbl_obj, m->pindex,
+ flags);
return (NULL);
}
dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
VM_PAGE_TO_PHYS(m));
- dmar_flush_pte_to_ram(ctx->dmar, ptep);
+ dmar_flush_pte_to_ram(domain->dmar, ptep);
sf_buf_page(sfp)->wire_count += 1;
m->wire_count--;
dmar_unmap_pgtbl(sfp);
@@ -406,13 +412,13 @@ retry:
goto retry;
}
}
- pte += ctx_pgtbl_pte_off(ctx, base, lvl);
+ pte += domain_pgtbl_pte_off(domain, base, lvl);
return (pte);
}
static int
-ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
- vm_page_t *ma, uint64_t pflags, int flags)
+domain_map_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
+ dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
{
dmar_pte_t *pte;
struct sf_buf *sf;
@@ -421,7 +427,7 @@ ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
int lvl;
bool superpage;
- DMAR_CTX_ASSERT_PGLOCKED(ctx);
+ DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
base1 = base;
size1 = size;
@@ -431,15 +437,15 @@ ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz,
pi += run_sz) {
for (lvl = 0, c = 0, superpage = false;; lvl++) {
- pg_sz = ctx_page_size(ctx, lvl);
+ pg_sz = domain_page_size(domain, lvl);
run_sz = pg_sz >> DMAR_PAGE_SHIFT;
- if (lvl == ctx->pglvl - 1)
+ if (lvl == domain->pglvl - 1)
break;
/*
* Check if the current base suitable for the
* superpage mapping. First, verify the level.
*/
- if (!ctx_is_sp_lvl(ctx, lvl))
+ if (!domain_is_sp_lvl(domain, lvl))
continue;
/*
* Next, look at the size of the mapping and
@@ -463,22 +469,23 @@ ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
}
}
KASSERT(size >= pg_sz,
- ("mapping loop overflow %p %jx %jx %jx", ctx,
+ ("mapping loop overflow %p %jx %jx %jx", domain,
(uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl));
- pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf);
+ pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
if (pte == NULL) {
KASSERT((flags & DMAR_PGF_WAITOK) == 0,
- ("failed waitable pte alloc %p", ctx));
+ ("failed waitable pte alloc %p", domain));
if (sf != NULL)
dmar_unmap_pgtbl(sf);
- ctx_unmap_buf_locked(ctx, base1, base - base1, flags);
+ domain_unmap_buf_locked(domain, base1, base - base1,
+ flags);
TD_PINNED_ASSERT;
return (ENOMEM);
}
dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
(superpage ? DMAR_PTE_SP : 0));
- dmar_flush_pte_to_ram(ctx->dmar, pte);
+ dmar_flush_pte_to_ram(domain->dmar, pte);
sf_buf_page(sf)->wire_count += 1;
}
if (sf != NULL)
@@ -488,32 +495,32 @@ ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
}
int
-ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
+domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size,
vm_page_t *ma, uint64_t pflags, int flags)
{
struct dmar_unit *unit;
int error;
- unit = ctx->dmar;
+ unit = domain->dmar;
- KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0,
- ("modifying idmap pagetable ctx %p", ctx));
+ KASSERT((domain->flags & DMAR_DOMAIN_IDMAP) == 0,
+ ("modifying idmap pagetable domain %p", domain));
KASSERT((base & DMAR_PAGE_MASK) == 0,
- ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base,
+ ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
(uintmax_t)size));
KASSERT((size & DMAR_PAGE_MASK) == 0,
- ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base,
+ ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
(uintmax_t)size));
- KASSERT(size > 0, ("zero size %p %jx %jx", ctx, (uintmax_t)base,
+ KASSERT(size > 0, ("zero size %p %jx %jx", domain, (uintmax_t)base,
(uintmax_t)size));
- KASSERT(base < (1ULL << ctx->agaw),
- ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
- (uintmax_t)size, ctx->agaw));
- KASSERT(base + size < (1ULL << ctx->agaw),
- ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
- (uintmax_t)size, ctx->agaw));
+ KASSERT(base < (1ULL << domain->agaw),
+ ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
+ (uintmax_t)size, domain->agaw));
+ KASSERT(base + size < (1ULL << domain->agaw),
+ ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
+ (uintmax_t)size, domain->agaw));
KASSERT(base + size > base,
- ("size overflow %p %jx %jx", ctx, (uintmax_t)base,
+ ("size overflow %p %jx %jx", domain, (uintmax_t)base,
(uintmax_t)size));
KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0,
("neither read nor write %jx", (uintmax_t)pflags));
@@ -523,21 +530,21 @@ ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
(unit->hw_ecap & DMAR_ECAP_SC) != 0,
("PTE_SNP for dmar without snoop control %p %jx",
- ctx, (uintmax_t)pflags));
+ domain, (uintmax_t)pflags));
KASSERT((pflags & DMAR_PTE_TM) == 0 ||
(unit->hw_ecap & DMAR_ECAP_DI) != 0,
("PTE_TM for dmar without DIOTLB %p %jx",
- ctx, (uintmax_t)pflags));
+ domain, (uintmax_t)pflags));
KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));
- DMAR_CTX_PGLOCK(ctx);
- error = ctx_map_buf_locked(ctx, base, size, ma, pflags, flags);
- DMAR_CTX_PGUNLOCK(ctx);
+ DMAR_DOMAIN_PGLOCK(domain);
+ error = domain_map_buf_locked(domain, base, size, ma, pflags, flags);
+ DMAR_DOMAIN_PGUNLOCK(domain);
if (error != 0)
return (error);
if ((unit->hw_cap & DMAR_CAP_CM) != 0)
- ctx_flush_iotlb_sync(ctx, base, size);
+ domain_flush_iotlb_sync(domain, base, size);
else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
/* See 11.1 Write Buffer Flushing. */
DMAR_LOCK(unit);
@@ -547,29 +554,31 @@ ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
return (0);
}
-static void ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base,
- int lvl, int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_fs);
+static void domain_unmap_clear_pte(struct dmar_domain *domain,
+ dmar_gaddr_t base, int lvl, int flags, dmar_pte_t *pte,
+ struct sf_buf **sf, bool free_fs);
static void
-ctx_free_pgtbl_pde(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl, int flags)
+domain_free_pgtbl_pde(struct dmar_domain *domain, dmar_gaddr_t base,
+ int lvl, int flags)
{
struct sf_buf *sf;
dmar_pte_t *pde;
vm_pindex_t idx;
sf = NULL;
- pde = ctx_pgtbl_map_pte(ctx, base, lvl, flags, &idx, &sf);
- ctx_unmap_clear_pte(ctx, base, lvl, flags, pde, &sf, true);
+ pde = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
+ domain_unmap_clear_pte(domain, base, lvl, flags, pde, &sf, true);
}
static void
-ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl,
+domain_unmap_clear_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf)
{
vm_page_t m;
dmar_pte_clear(&pte->pte);
- dmar_flush_pte_to_ram(ctx->dmar, pte);
+ dmar_flush_pte_to_ram(domain->dmar, pte);
m = sf_buf_page(*sf);
if (free_sf) {
dmar_unmap_pgtbl(*sf);
@@ -579,20 +588,20 @@ ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base, int lvl,
if (m->wire_count != 0)
return;
KASSERT(lvl != 0,
- ("lost reference (lvl) on root pg ctx %p base %jx lvl %d",
- ctx, (uintmax_t)base, lvl));
+ ("lost reference (lvl) on root pg domain %p base %jx lvl %d",
+ domain, (uintmax_t)base, lvl));
KASSERT(m->pindex != 0,
- ("lost reference (idx) on root pg ctx %p base %jx lvl %d",
- ctx, (uintmax_t)base, lvl));
- dmar_pgfree(ctx->pgtbl_obj, m->pindex, flags);
- ctx_free_pgtbl_pde(ctx, base, lvl - 1, flags);
+ ("lost reference (idx) on root pg domain %p base %jx lvl %d",
+ domain, (uintmax_t)base, lvl));
+ dmar_pgfree(domain->pgtbl_obj, m->pindex, flags);
+ domain_free_pgtbl_pde(domain, base, lvl - 1, flags);
}
/*
* Assumes that the unmap is never partial.
*/
static int
-ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
+domain_unmap_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, int flags)
{
dmar_pte_t *pte;
@@ -601,26 +610,26 @@ ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
dmar_gaddr_t pg_sz;
int lvl;
- DMAR_CTX_ASSERT_PGLOCKED(ctx);
+ DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
if (size == 0)
return (0);
- KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0,
- ("modifying idmap pagetable ctx %p", ctx));
+ KASSERT((domain->flags & DMAR_DOMAIN_IDMAP) == 0,
+ ("modifying idmap pagetable domain %p", domain));
KASSERT((base & DMAR_PAGE_MASK) == 0,
- ("non-aligned base %p %jx %jx", ctx, (uintmax_t)base,
+ ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
(uintmax_t)size));
KASSERT((size & DMAR_PAGE_MASK) == 0,
- ("non-aligned size %p %jx %jx", ctx, (uintmax_t)base,
+ ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
(uintmax_t)size));
- KASSERT(base < (1ULL << ctx->agaw),
- ("base too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
- (uintmax_t)size, ctx->agaw));
- KASSERT(base + size < (1ULL << ctx->agaw),
- ("end too high %p %jx %jx agaw %d", ctx, (uintmax_t)base,
- (uintmax_t)size, ctx->agaw));
+ KASSERT(base < (1ULL << domain->agaw),
+ ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
+ (uintmax_t)size, domain->agaw));
+ KASSERT(base + size < (1ULL << domain->agaw),
+ ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
+ (uintmax_t)size, domain->agaw));
KASSERT(base + size > base,
- ("size overflow %p %jx %jx", ctx, (uintmax_t)base,
+ ("size overflow %p %jx %jx", domain, (uintmax_t)base,
(uintmax_t)size));
KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));
@@ -629,26 +638,27 @@ ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
TD_PREP_PINNED_ASSERT;
for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) {
- for (lvl = 0; lvl < ctx->pglvl; lvl++) {
- if (lvl != ctx->pglvl - 1 && !ctx_is_sp_lvl(ctx, lvl))
+ for (lvl = 0; lvl < domain->pglvl; lvl++) {
+ if (lvl != domain->pglvl - 1 &&
+ !domain_is_sp_lvl(domain, lvl))
continue;
- pg_sz = ctx_page_size(ctx, lvl);
+ pg_sz = domain_page_size(domain, lvl);
if (pg_sz > size)
continue;
- pte = ctx_pgtbl_map_pte(ctx, base, lvl, flags,
+ pte = domain_pgtbl_map_pte(domain, base, lvl, flags,
&idx, &sf);
KASSERT(pte != NULL,
("sleeping or page missed %p %jx %d 0x%x",
- ctx, (uintmax_t)base, lvl, flags));
+ domain, (uintmax_t)base, lvl, flags));
if ((pte->pte & DMAR_PTE_SP) != 0 ||
- lvl == ctx->pglvl - 1) {
- ctx_unmap_clear_pte(ctx, base, lvl, flags,
- pte, &sf, false);
+ lvl == domain->pglvl - 1) {
+ domain_unmap_clear_pte(domain, base, lvl,
+ flags, pte, &sf, false);
break;
}
}
KASSERT(size >= pg_sz,
- ("unmapping loop overflow %p %jx %jx %jx", ctx,
+ ("unmapping loop overflow %p %jx %jx %jx", domain,
(uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
}
if (sf != NULL)
@@ -663,54 +673,58 @@ ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
}
int
-ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
- int flags)
+domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
+ dmar_gaddr_t size, int flags)
{
int error;
- DMAR_CTX_PGLOCK(ctx);
- error = ctx_unmap_buf_locked(ctx, base, size, flags);
- DMAR_CTX_PGUNLOCK(ctx);
+ DMAR_DOMAIN_PGLOCK(domain);
+ error = domain_unmap_buf_locked(domain, base, size, flags);
+ DMAR_DOMAIN_PGUNLOCK(domain);
return (error);
}
int
-ctx_alloc_pgtbl(struct dmar_ctx *ctx)
+domain_alloc_pgtbl(struct dmar_domain *domain)
{
vm_page_t m;
- KASSERT(ctx->pgtbl_obj == NULL, ("already initialized %p", ctx));
+ KASSERT(domain->pgtbl_obj == NULL,
+ ("already initialized %p", domain));
- ctx->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
- IDX_TO_OFF(pglvl_max_pages(ctx->pglvl)), 0, 0, NULL);
- DMAR_CTX_PGLOCK(ctx);
- m = dmar_pgalloc(ctx->pgtbl_obj, 0, DMAR_PGF_WAITOK |
+ domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
+ IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL);
+ DMAR_DOMAIN_PGLOCK(domain);
+ m = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_WAITOK |
DMAR_PGF_ZERO | DMAR_PGF_OBJL);
/* No implicit free of the top level page table page. */
m->wire_count = 1;
- DMAR_CTX_PGUNLOCK(ctx);
+ DMAR_DOMAIN_PGUNLOCK(domain);
+ DMAR_LOCK(domain->dmar);
+ domain->flags |= DMAR_DOMAIN_PGTBL_INITED;
+ DMAR_UNLOCK(domain->dmar);
return (0);
}
void
-ctx_free_pgtbl(struct dmar_ctx *ctx)
+domain_free_pgtbl(struct dmar_domain *domain)
{
vm_object_t obj;
vm_page_t m;
- obj = ctx->pgtbl_obj;
+ obj = domain->pgtbl_obj;
if (obj == NULL) {
- KASSERT((ctx->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
- (ctx->flags & DMAR_CTX_IDMAP) != 0,
- ("lost pagetable object ctx %p", ctx));
+ KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
+ (domain->flags & DMAR_DOMAIN_IDMAP) != 0,
+ ("lost pagetable object domain %p", domain));
return;
}
- DMAR_CTX_ASSERT_PGLOCKED(ctx);
- ctx->pgtbl_obj = NULL;
+ DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
+ domain->pgtbl_obj = NULL;
- if ((ctx->flags & DMAR_CTX_IDMAP) != 0) {
+ if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0) {
put_idmap_pgtbl(obj);
- ctx->flags &= ~DMAR_CTX_IDMAP;
+ domain->flags &= ~DMAR_DOMAIN_IDMAP;
return;
}
@@ -723,7 +737,7 @@ ctx_free_pgtbl(struct dmar_ctx *ctx)
}
static inline uint64_t
-ctx_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
+domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
{
uint64_t iotlbr;
@@ -739,21 +753,22 @@ ctx_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
}
void
-ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size)
+domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
+ dmar_gaddr_t size)
{
struct dmar_unit *unit;
dmar_gaddr_t isize;
uint64_t iotlbr;
int am, iro;
- unit = ctx->dmar;
+ unit = domain->dmar;
KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
unit->unit));
iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
DMAR_LOCK(unit);
if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
- iotlbr = ctx_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
- DMAR_IOTLB_DID(ctx->domain), iro);
+ iotlbr = domain_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
+ DMAR_IOTLB_DID(domain->domain), iro);
KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
DMAR_IOTLB_IAIG_INVLD,
("dmar%d: invalidation failed %jx", unit->unit,
@@ -762,9 +777,9 @@ ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size)
for (; size > 0; base += isize, size -= isize) {
am = calc_am(unit, base, size, &isize);
dmar_write8(unit, iro, base | am);
- iotlbr = ctx_wait_iotlb_flush(unit,
- DMAR_IOTLB_IIRG_PAGE | DMAR_IOTLB_DID(ctx->domain),
- iro);
+ iotlbr = domain_wait_iotlb_flush(unit,
+ DMAR_IOTLB_IIRG_PAGE |
+ DMAR_IOTLB_DID(domain->domain), iro);
KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
DMAR_IOTLB_IAIG_INVLD,
("dmar%d: PSI invalidation failed "
diff --git a/sys/x86/iommu/intel_qi.c b/sys/x86/iommu/intel_qi.c
index 293e2be..bdcdff2 100644
--- a/sys/x86/iommu/intel_qi.c
+++ b/sys/x86/iommu/intel_qi.c
@@ -213,14 +213,14 @@ dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq,
}
void
-dmar_qi_invalidate_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
+dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, struct dmar_qi_genseq *pseq)
{
struct dmar_unit *unit;
dmar_gaddr_t isize;
int am;
- unit = ctx->dmar;
+ unit = domain->dmar;
DMAR_ASSERT_LOCKED(unit);
for (; size > 0; base += isize, size -= isize) {
am = calc_am(unit, base, size, &isize);
@@ -228,7 +228,7 @@ dmar_qi_invalidate_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV |
DMAR_IQ_DESCR_IOTLB_PAGE | DMAR_IQ_DESCR_IOTLB_DW |
DMAR_IQ_DESCR_IOTLB_DR |
- DMAR_IQ_DESCR_IOTLB_DID(ctx->domain),
+ DMAR_IQ_DESCR_IOTLB_DID(domain->domain),
base | am);
}
if (pseq != NULL) {
@@ -348,7 +348,7 @@ dmar_qi_task(void *arg, int pending __unused)
break;
TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link);
DMAR_UNLOCK(unit);
- dmar_ctx_free_entry(entry, (entry->flags &
+ dmar_domain_free_entry(entry, (entry->flags &
DMAR_MAP_ENTRY_QI_NF) == 0);
DMAR_LOCK(unit);
}
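A hedged illustration of the dmar_qi_invalidate_locked() change above (not part of the commit): queueing an IOTLB invalidation for a range of a domain. example_qi_flush() is hypothetical; DMAR_LOCK()/DMAR_UNLOCK() and the domain->dmar back pointer appear in the hunks above, and passing NULL for the generation sequence means the caller does not wait for completion.

static void
example_qi_flush(struct dmar_domain *domain, dmar_gaddr_t base,
    dmar_gaddr_t size)
{
	struct dmar_unit *unit;

	unit = domain->dmar;
	DMAR_LOCK(unit);
	dmar_qi_invalidate_locked(domain, base, size, NULL);
	DMAR_UNLOCK(unit);
}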
diff --git a/sys/x86/iommu/intel_utils.c b/sys/x86/iommu/intel_utils.c
index f696f9d..1519af8 100644
--- a/sys/x86/iommu/intel_utils.c
+++ b/sys/x86/iommu/intel_utils.c
@@ -100,14 +100,13 @@ static const struct sagaw_bits_tag {
{.agaw = 64, .cap = DMAR_CAP_SAGAW_6LVL, .awlvl = DMAR_CTX2_AW_6LVL,
.pglvl = 6}
};
-#define SIZEOF_SAGAW_BITS (sizeof(sagaw_bits) / sizeof(sagaw_bits[0]))
bool
dmar_pglvl_supported(struct dmar_unit *unit, int pglvl)
{
int i;
- for (i = 0; i < SIZEOF_SAGAW_BITS; i++) {
+ for (i = 0; i < nitems(sagaw_bits); i++) {
if (sagaw_bits[i].pglvl != pglvl)
continue;
if ((DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) != 0)
@@ -117,26 +116,23 @@ dmar_pglvl_supported(struct dmar_unit *unit, int pglvl)
}
int
-ctx_set_agaw(struct dmar_ctx *ctx, int mgaw)
+domain_set_agaw(struct dmar_domain *domain, int mgaw)
{
int sagaw, i;
- ctx->mgaw = mgaw;
- sagaw = DMAR_CAP_SAGAW(ctx->dmar->hw_cap);
- for (i = 0; i < SIZEOF_SAGAW_BITS; i++) {
+ domain->mgaw = mgaw;
+ sagaw = DMAR_CAP_SAGAW(domain->dmar->hw_cap);
+ for (i = 0; i < nitems(sagaw_bits); i++) {
if (sagaw_bits[i].agaw >= mgaw) {
- ctx->agaw = sagaw_bits[i].agaw;
- ctx->pglvl = sagaw_bits[i].pglvl;
- ctx->awlvl = sagaw_bits[i].awlvl;
+ domain->agaw = sagaw_bits[i].agaw;
+ domain->pglvl = sagaw_bits[i].pglvl;
+ domain->awlvl = sagaw_bits[i].awlvl;
return (0);
}
}
- device_printf(ctx->dmar->dev,
- "context request mgaw %d for pci%d:%d:%d:%d, "
- "no agaw found, sagaw %x\n", mgaw, ctx->dmar->segment,
- pci_get_bus(ctx->ctx_tag.owner),
- pci_get_slot(ctx->ctx_tag.owner),
- pci_get_function(ctx->ctx_tag.owner), sagaw);
+ device_printf(domain->dmar->dev,
+ "context request mgaw %d: no agaw found, sagaw %x\n",
+ mgaw, sagaw);
return (EINVAL);
}
@@ -152,18 +148,18 @@ dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr, bool allow_less)
{
int i;
- for (i = 0; i < SIZEOF_SAGAW_BITS; i++) {
+ for (i = 0; i < nitems(sagaw_bits); i++) {
if ((1ULL << sagaw_bits[i].agaw) >= maxaddr &&
(DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap) != 0)
break;
}
- if (allow_less && i == SIZEOF_SAGAW_BITS) {
+ if (allow_less && i == nitems(sagaw_bits)) {
do {
i--;
} while ((DMAR_CAP_SAGAW(unit->hw_cap) & sagaw_bits[i].cap)
== 0);
}
- if (i < SIZEOF_SAGAW_BITS)
+ if (i < nitems(sagaw_bits))
return (sagaw_bits[i].agaw);
KASSERT(0, ("no mgaw for maxaddr %jx allow_less %d",
(uintmax_t) maxaddr, allow_less));
@@ -192,7 +188,7 @@ pglvl_max_pages(int pglvl)
* the context ctx.
*/
int
-ctx_is_sp_lvl(struct dmar_ctx *ctx, int lvl)
+domain_is_sp_lvl(struct dmar_domain *domain, int lvl)
{
int alvl, cap_sps;
static const int sagaw_sp[] = {
@@ -202,10 +198,9 @@ ctx_is_sp_lvl(struct dmar_ctx *ctx, int lvl)
DMAR_CAP_SPS_1T
};
- alvl = ctx->pglvl - lvl - 1;
- cap_sps = DMAR_CAP_SPS(ctx->dmar->hw_cap);
- return (alvl < sizeof(sagaw_sp) / sizeof(sagaw_sp[0]) &&
- (sagaw_sp[alvl] & cap_sps) != 0);
+ alvl = domain->pglvl - lvl - 1;
+ cap_sps = DMAR_CAP_SPS(domain->dmar->hw_cap);
+ return (alvl < nitems(sagaw_sp) && (sagaw_sp[alvl] & cap_sps) != 0);
}
dmar_gaddr_t
@@ -224,16 +219,15 @@ pglvl_page_size(int total_pglvl, int lvl)
KASSERT(lvl >= 0 && lvl < total_pglvl,
("total %d lvl %d", total_pglvl, lvl));
rlvl = total_pglvl - lvl - 1;
- KASSERT(rlvl < sizeof(pg_sz) / sizeof(pg_sz[0]),
- ("sizeof pg_sz lvl %d", lvl));
+ KASSERT(rlvl < nitems(pg_sz), ("sizeof pg_sz lvl %d", lvl));
return (pg_sz[rlvl]);
}
dmar_gaddr_t
-ctx_page_size(struct dmar_ctx *ctx, int lvl)
+domain_page_size(struct dmar_domain *domain, int lvl)
{
- return (pglvl_page_size(ctx->pglvl, lvl));
+ return (pglvl_page_size(domain->pglvl, lvl));
}
int