Diffstat (limited to 'sys/x86/iommu/busdma_dmar.c')
-rw-r--r-- | sys/x86/iommu/busdma_dmar.c | 49
1 file changed, 29 insertions, 20 deletions
diff --git a/sys/x86/iommu/busdma_dmar.c b/sys/x86/iommu/busdma_dmar.c
index 3ef8107..434ae82 100644
--- a/sys/x86/iommu/busdma_dmar.c
+++ b/sys/x86/iommu/busdma_dmar.c
@@ -225,7 +225,7 @@ dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr)
 	disabled = dmar_bus_dma_is_dev_disabled(pci_get_domain(requester),
 	    pci_get_bus(requester), pci_get_slot(requester),
 	    pci_get_function(requester));
-	ctx = dmar_get_ctx(dmar, requester, rid, disabled, rmrr);
+	ctx = dmar_get_ctx_for_dev(dmar, requester, rid, disabled, rmrr);
 	if (ctx == NULL)
 		return (NULL);
 	if (disabled) {
@@ -371,16 +371,18 @@ dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
 {
 	struct bus_dma_tag_dmar *tag;
 	struct bus_dmamap_dmar *map;
+	struct dmar_domain *domain;
 
 	tag = (struct bus_dma_tag_dmar *)dmat;
 	map = (struct bus_dmamap_dmar *)map1;
 	if (map != NULL) {
-		DMAR_CTX_LOCK(tag->ctx);
+		domain = tag->ctx->domain;
+		DMAR_DOMAIN_LOCK(domain);
 		if (!TAILQ_EMPTY(&map->map_entries)) {
-			DMAR_CTX_UNLOCK(tag->ctx);
+			DMAR_DOMAIN_UNLOCK(domain);
 			return (EBUSY);
 		}
-		DMAR_CTX_UNLOCK(tag->ctx);
+		DMAR_DOMAIN_UNLOCK(domain);
 		free(map, M_DMAR_DMAMAP);
 	}
 	tag->map_count--;
@@ -457,6 +459,7 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
     struct dmar_map_entries_tailq *unroll_list)
 {
 	struct dmar_ctx *ctx;
+	struct dmar_domain *domain;
 	struct dmar_map_entry *entry;
 	dmar_gaddr_t size;
 	bus_size_t buflen1;
@@ -466,6 +469,7 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
 	if (segs == NULL)
 		segs = tag->segments;
 	ctx = tag->ctx;
+	domain = ctx->domain;
 	seg = *segp;
 	error = 0;
 	idx = 0;
@@ -487,7 +491,7 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
 		if (seg + 1 < tag->common.nsegments)
 			gas_flags |= DMAR_GM_CANSPLIT;
 
-		error = dmar_gas_map(ctx, &tag->common, size, offset,
+		error = dmar_gas_map(domain, &tag->common, size, offset,
 		    DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
 		    gas_flags, ma + idx, &entry);
 		if (error != 0)
@@ -534,10 +538,10 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
 		    (uintmax_t)entry->start, (uintmax_t)entry->end,
 		    (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz));
 
-		DMAR_CTX_LOCK(ctx);
+		DMAR_DOMAIN_LOCK(domain);
 		TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
 		entry->flags |= DMAR_MAP_ENTRY_MAP;
-		DMAR_CTX_UNLOCK(ctx);
+		DMAR_DOMAIN_UNLOCK(domain);
 		TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link);
 
 		segs[seg].ds_addr = entry->start + offset;
@@ -559,11 +563,13 @@ dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
     int flags, bus_dma_segment_t *segs, int *segp)
 {
 	struct dmar_ctx *ctx;
+	struct dmar_domain *domain;
 	struct dmar_map_entry *entry, *entry1;
 	struct dmar_map_entries_tailq unroll_list;
 	int error;
 
 	ctx = tag->ctx;
+	domain = ctx->domain;
 	atomic_add_long(&ctx->loads, 1);
 
 	TAILQ_INIT(&unroll_list);
@@ -575,7 +581,7 @@ dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
 		 * partial buffer load, so unfortunately we have to
 		 * revert all work done.
 		 */
-		DMAR_CTX_LOCK(ctx);
+		DMAR_DOMAIN_LOCK(domain);
 		TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link,
 		    entry1) {
 			/*
@@ -586,19 +592,19 @@ dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
 			 */
 			TAILQ_REMOVE(&map->map_entries, entry, dmamap_link);
 			TAILQ_REMOVE(&unroll_list, entry, unroll_link);
-			TAILQ_INSERT_TAIL(&ctx->unload_entries, entry,
+			TAILQ_INSERT_TAIL(&domain->unload_entries, entry,
 			    dmamap_link);
 		}
-		DMAR_CTX_UNLOCK(ctx);
-		taskqueue_enqueue(ctx->dmar->delayed_taskqueue,
-		    &ctx->unload_task);
+		DMAR_DOMAIN_UNLOCK(domain);
+		taskqueue_enqueue(domain->dmar->delayed_taskqueue,
+		    &domain->unload_task);
 	}
 
 	if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 &&
 	    !map->cansleep)
 		error = EINPROGRESS;
 	if (error == EINPROGRESS)
-		dmar_bus_schedule_dmamap(ctx->dmar, map);
+		dmar_bus_schedule_dmamap(domain->dmar, map);
 	return (error);
 }
 
@@ -764,6 +770,7 @@ dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
 	struct bus_dma_tag_dmar *tag;
 	struct bus_dmamap_dmar *map;
 	struct dmar_ctx *ctx;
+	struct dmar_domain *domain;
 #if defined(__amd64__)
 	struct dmar_map_entries_tailq entries;
 #endif
@@ -771,20 +778,22 @@ dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
 	tag = (struct bus_dma_tag_dmar *)dmat;
 	map = (struct bus_dmamap_dmar *)map1;
 	ctx = tag->ctx;
+	domain = ctx->domain;
 	atomic_add_long(&ctx->unloads, 1);
 
 #if defined(__i386__)
-	DMAR_CTX_LOCK(ctx);
-	TAILQ_CONCAT(&ctx->unload_entries, &map->map_entries, dmamap_link);
-	DMAR_CTX_UNLOCK(ctx);
-	taskqueue_enqueue(ctx->dmar->delayed_taskqueue, &ctx->unload_task);
+	DMAR_DOMAIN_LOCK(domain);
+	TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link);
+	DMAR_DOMAIN_UNLOCK(domain);
+	taskqueue_enqueue(domain->dmar->delayed_taskqueue,
+	    &domain->unload_task);
#else /* defined(__amd64__) */
 	TAILQ_INIT(&entries);
-	DMAR_CTX_LOCK(ctx);
+	DMAR_DOMAIN_LOCK(domain);
 	TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
-	DMAR_CTX_UNLOCK(ctx);
+	DMAR_DOMAIN_UNLOCK(domain);
 	THREAD_NO_SLEEPING();
-	dmar_ctx_unload(ctx, &entries, false);
+	dmar_domain_unload(domain, &entries, false);
 	THREAD_SLEEPING_OK();
 	KASSERT(TAILQ_EMPTY(&entries), ("lazy dmar_ctx_unload %p", ctx));
 #endif
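The change is mechanical throughout: every caller that previously locked and queued work on the dmar_ctx now dereferences ctx->domain and operates on the dmar_domain instead. As a rough illustration of the resulting pattern, not code from this commit, the sketch below uses only identifiers visible in the diff (DMAR_DOMAIN_LOCK/UNLOCK, domain->unload_entries, domain->unload_task, domain->dmar); the function name is hypothetical, and the struct definitions and kernel headers from sys/x86/iommu are assumed rather than shown.

/*
 * Sketch only (not part of the commit): deferred-unload queueing after
 * the ctx -> domain split.  Struct layouts and the taskqueue plumbing
 * come from the sys/x86/iommu headers and are assumed here.
 */
static void
sketch_queue_deferred_unload(struct bus_dma_tag_dmar *tag,
    struct bus_dmamap_dmar *map)
{
	struct dmar_ctx *ctx;
	struct dmar_domain *domain;

	ctx = tag->ctx;
	domain = ctx->domain;	/* the domain is now the shared object */

	/* Map entries and the lock protecting them moved to the domain. */
	DMAR_DOMAIN_LOCK(domain);
	TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link);
	DMAR_DOMAIN_UNLOCK(domain);

	/* The delayed unmap task is likewise driven per-domain. */
	taskqueue_enqueue(domain->dmar->delayed_taskqueue,
	    &domain->unload_task);
}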