author | tmm <tmm@FreeBSD.org> | 2003-07-10 23:27:35 +0000 |
---|---|---|
committer | tmm <tmm@FreeBSD.org> | 2003-07-10 23:27:35 +0000 |
commit | 5b603201cc2ae0b710ed68f289001529568a6e1b | (patch) |
tree | 11a0e2331c5f4e82b7579e43d973f2f65cedbc72 | /sys/sparc64/include |
parent | 6b0bee64fce19f60a859b714ffe17282bcd705e9 | (diff) |
Lock down the IOMMU bus_dma implementation to make it safe to use
without Giant held.
A quick outline of the locking strategy:
Since all IOMMUs are synchronized, there is a single lock, iommu_mtx,
which protects the hardware registers (where needed) and the global and
per-IOMMU software states. As soon as the IOMMUs are divorced, each struct
iommu_state will have its own mutex (and the remaining global state
will be moved into the struct).
The dvma rman has its own internal mutex; the TSB slots may only be
accessed by the owner of the corresponding resource, so neither needs
extra protection.
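To make the division of labor concrete, here is a minimal user-space sketch of the idea (not the kernel code): a single mutex standing in for iommu_mtx serializes register access and shared software state, while a TSB slot owned by exactly one resource is written without that lock. All names (iommu_model, tsb_enter_model, and so on) are invented for illustration.

```c
/* Illustrative user-space model only; all names are invented. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define TSB_SLOTS 8

static pthread_mutex_t iommu_mtx_model = PTHREAD_MUTEX_INITIALIZER;

struct iommu_model {
        uint64_t flush_count;    /* shared software state: needs the lock */
        uint64_t tsb[TSB_SLOTS]; /* slot i belongs to the owner of resource i */
};

/* Shared register/software state is only touched with the lock held. */
static void
iommu_flush_model(struct iommu_model *im)
{
        pthread_mutex_lock(&iommu_mtx_model);
        im->flush_count++;       /* stands in for a hardware register write */
        pthread_mutex_unlock(&iommu_mtx_model);
}

/* A TSB slot is only ever written by the owner of its resource. */
static void
tsb_enter_model(struct iommu_model *im, int slot, uint64_t entry)
{
        im->tsb[slot] = entry;   /* no extra locking required */
}

int
main(void)
{
        struct iommu_model im = { 0 };

        tsb_enter_model(&im, 0, 0xdeadbeefULL); /* done by the owner of slot 0 */
        iommu_flush_model(&im);
        printf("flushes issued: %llu\n", (unsigned long long)im.flush_count);
        return (0);
}
```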
Since there is a second access path to maps via LRU queues, the consumer-
provided locking is not sufficient; each map that is on a queue is therefore
additionally protected by iommu_mtx (in part; one member may still only be
accessed by the map owner). Any map on a queue may be accessed, removed from
the queue, or repositioned in it from any context as long as the lock is
held; only the owner may insert a map.
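As a rough illustration of these queue rules, the following user-space sketch uses TAILQ from <sys/queue.h> and a pthread mutex in place of iommu_mtx; the struct and function names are made up and only model the discipline described above (owner-only insertion, lock-held removal and repositioning).

```c
#include <sys/queue.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct map_model {
        TAILQ_ENTRY(map_model) dm_maplruq; /* queue linkage ("q") */
        bool dm_onq;                       /* on-queue flag ("q") */
        int dm_private;                    /* owner-only member ("p") */
};

static TAILQ_HEAD(, map_model) maplruq = TAILQ_HEAD_INITIALIZER(maplruq);
static pthread_mutex_t queue_mtx = PTHREAD_MUTEX_INITIALIZER; /* stands in for iommu_mtx */

/* Only the owner of the map may insert it into the queue. */
static void
map_insert(struct map_model *m)
{
        pthread_mutex_lock(&queue_mtx);
        if (!m->dm_onq) {
                TAILQ_INSERT_TAIL(&maplruq, m, dm_maplruq);
                m->dm_onq = true;
        }
        pthread_mutex_unlock(&queue_mtx);
}

/* Any context holding the lock may remove or reposition a queued map. */
static void
map_requeue(struct map_model *m)
{
        pthread_mutex_lock(&queue_mtx);
        if (m->dm_onq) {
                TAILQ_REMOVE(&maplruq, m, dm_maplruq);
                TAILQ_INSERT_TAIL(&maplruq, m, dm_maplruq);
        }
        pthread_mutex_unlock(&queue_mtx);
}

int
main(void)
{
        struct map_model a = { .dm_onq = false }, b = { .dm_onq = false };

        map_insert(&a);   /* done by the owner of a */
        map_insert(&b);   /* done by the owner of b */
        map_requeue(&a);  /* may be done by anyone holding the lock */
        printf("front of queue: %s\n", TAILQ_FIRST(&maplruq) == &b ? "b" : "a");
        return (0);
}
```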
To reduce lock contention, some bus_dma functions temporarily remove the map
from the queue (on behalf of the map owner) for some operations and reinsert
it when they are done. Shorter operations, and operations which are not done
on behalf of the map owner, are covered entirely by the lock.
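Continuing the sketch above (reusing its map_model, maplruq, and queue_mtx declarations), the contention-avoidance pattern might look roughly like this; map_long_operation() is an illustrative name, not a function from the diff.

```c
/*
 * Temporarily take the map off the queue on behalf of its owner, drop the
 * lock for the long-running part, then reinsert it if it was queued.
 */
static void
map_long_operation(struct map_model *m)
{
        bool was_onq;

        pthread_mutex_lock(&queue_mtx);
        was_onq = m->dm_onq;
        if (was_onq) {
                TAILQ_REMOVE(&maplruq, m, dm_maplruq);
                m->dm_onq = false;
        }
        pthread_mutex_unlock(&queue_mtx);

        /*
         * Long-running work on the map happens here without the queue lock
         * held, so other threads are not blocked on it.
         */

        if (was_onq) {
                pthread_mutex_lock(&queue_mtx);
                TAILQ_INSERT_TAIL(&maplruq, m, dm_maplruq);
                m->dm_onq = true;
                pthread_mutex_unlock(&queue_mtx);
        }
}
```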
To facilitate the locking, reorganize the streaming buffer handling; while
at it, fix an old oversight which caused the streaming buffer to always be
flushed, regardless of whether streaming was enabled in the TSB entry. The
streaming buffer remains disabled for now, since a number of drivers still
lack critical bus_dmamap_sync() calls.
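A hypothetical illustration of the flush oversight: the sync path should test the streaming bit of the TSB entry and flush only when it is set. The bit position and all names below are invented; only the control flow mirrors the fix described above.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TSB_ENTRY_STREAM (1ULL << 62)   /* illustrative bit position only */

static bool
entry_is_streaming(uint64_t tsb_entry)
{
        return ((tsb_entry & TSB_ENTRY_STREAM) != 0);
}

static void
sync_entry(uint64_t tsb_entry)
{
        /* Before the fix, the flush was issued unconditionally. */
        if (entry_is_streaming(tsb_entry))
                printf("flushing streaming buffer\n");
        else
                printf("no streaming buffer flush needed\n");
}

int
main(void)
{
        sync_entry(TSB_ENTRY_STREAM | 0x1000);  /* streaming mapping */
        sync_entry(0x2000);                     /* non-streaming mapping */
        return (0);
}
```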
Additional testing by: jake
Diffstat (limited to 'sys/sparc64/include')
-rw-r--r-- | sys/sparc64/include/bus.h | 15 |
-rw-r--r-- | sys/sparc64/include/bus_private.h | 24 |
-rw-r--r-- | sys/sparc64/include/iommuvar.h | 51 |
3 files changed, 55 insertions, 35 deletions
diff --git a/sys/sparc64/include/bus.h b/sys/sparc64/include/bus.h
index 4e0c2b4..f97c13b 100644
--- a/sys/sparc64/include/bus.h
+++ b/sys/sparc64/include/bus.h
@@ -900,23 +900,14 @@ memsetw(void *d, int val, size_t size)
 #define BUS_DMA_NOWAIT 0x001 /* not safe to sleep */
 #define BUS_DMA_ALLOCNOW 0x002 /* perform resource allocation now */
 #define BUS_DMA_COHERENT 0x004 /* hint: map memory in a coherent way */
-#define BUS_DMA_NOWRITE 0x008
 #define BUS_DMA_BUS1 0x010
 #define BUS_DMA_BUS2 0x020
 #define BUS_DMA_BUS3 0x040
 #define BUS_DMA_BUS4 0x080
-/*
- * The following flags are from NetBSD, but are not implemented for all
- * architetures, and should therefore not be used in MI code.
- * Some have different values than under NetBSD.
- */
-#define BUS_DMA_STREAMING 0x100 /* hint: sequential, unidirectional */
-#define BUS_DMA_READ 0x200 /* mapping is device -> memory only */
-#define BUS_DMA_WRITE 0x400 /* mapping is memory -> device only */
-#define BUS_DMA_NOCACHE BUS_DMA_BUS1
-/* Don't bother with alignment */
-#define BUS_DMA_DVMA BUS_DMA_BUS2
+/* The following two flags are non-standard. */
+#define BUS_DMA_NOWRITE 0x100
+#define BUS_DMA_NOCACHE 0x200
 
 /* Forwards needed by prototypes below. */
 struct mbuf;
diff --git a/sys/sparc64/include/bus_private.h b/sys/sparc64/include/bus_private.h
index 71c8b76..55f6e53 100644
--- a/sys/sparc64/include/bus_private.h
+++ b/sys/sparc64/include/bus_private.h
@@ -54,13 +54,29 @@ struct bus_dmamap_res {
         SLIST_ENTRY(bus_dmamap_res) dr_link;
 };
 
+/*
+ * Callers of the bus_dma interfaces must always protect their tags and maps
+ * appropriately against concurrent access. However, when a map is on a LRU
+ * queue, there is a second access path to it; for this case, the locking rules
+ * are given in the parenthesized comments below:
+ *      q - locked by the mutex protecting the queue.
+ *      p - private to the owner of the map, no access through the queue.
+ *      * - comment refers to pointer target.
+ * Only the owner of the map is allowed to insert the map into a queue. Removal
+ * and repositioning (i.e. temporal removal and reinsertion) is allowed to all
+ * if the queue lock is held.
+ */
 struct bus_dmamap {
-        TAILQ_ENTRY(bus_dmamap) dm_maplruq;
-        SLIST_HEAD(, bus_dmamap_res) dm_reslist;
-        int dm_onq;
-        int dm_loaded;
+        TAILQ_ENTRY(bus_dmamap) dm_maplruq;      /* (q) */
+        SLIST_HEAD(, bus_dmamap_res) dm_reslist; /* (q, *q) */
+        int dm_onq;                              /* (q) */
+        int dm_flags;                            /* (p) */
 };
 
+/* Flag values. */
+#define DMF_LOADED   1  /* Map is loaded */
+#define DMF_COHERENT 2  /* Coherent mapping requested */
+
 int sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp);
 void sparc64_dma_free_map(bus_dma_tag_t dmat, bus_dmamap_t map);
diff --git a/sys/sparc64/include/iommuvar.h b/sys/sparc64/include/iommuvar.h
index 6640573..2bdf55d 100644
--- a/sys/sparc64/include/iommuvar.h
+++ b/sys/sparc64/include/iommuvar.h
@@ -40,45 +40,58 @@
 #define trunc_io_page(x) trunc_page(x)
 
 /*
- * per-IOMMU state
+ * Per-IOMMU state. The parenthesized comments indicate the locking strategy:
+ * i - protected by iommu_mtx.
+ * r - read-only after initialization.
+ * * - comment refers to pointer target / target hardware registers
+ *     (for bus_addr_t).
+ * iommu_map_lruq is also locked by iommu_mtx. Elements of iommu_tsb may only
+ * be accessed from functions operating on the map owning the corresponding
+ * resource, so the locking the user is required to do to protect the map is
+ * sufficient.
+ * As soon as the TSBs are divorced, these will be moved into struct
+ * iommu_state, and each state struct will get its own lock.
+ * iommu_dvma_rman needs to be moved there too, but has its own internal lock.
  */
 struct iommu_state {
-        int is_tsbsize;                  /* 0 = 8K, ... */
-        u_int64_t is_dvmabase;
-        int64_t is_cr;                   /* IOMMU control register value */
+        int is_tsbsize;                  /* (r) 0 = 8K, ... */
+        u_int64_t is_dvmabase;           /* (r) */
+        int64_t is_cr;                   /* (r) Control reg value */
 
-        vm_paddr_t is_flushpa[2];
-        volatile int64_t *is_flushva[2];
+        vm_paddr_t is_flushpa[2];        /* (r) */
+        volatile int64_t *is_flushva[2]; /* (r, *i) */
         /*
+         * (i)
          * When a flush is completed, 64 bytes will be stored at the given
          * location, the first double word being 1, to indicate completion.
          * The lower 6 address bits are ignored, so the addresses need to be
          * suitably aligned; over-allocate a large enough margin to be able
          * to adjust it.
          * Two such buffers are needed.
-         * Needs to be volatile or egcs optimizes away loads.
          */
         volatile char is_flush[STRBUF_FLUSHSYNC_NBYTES * 3 - 1];
 
         /* copies of our parents state, to allow us to be self contained */
-        bus_space_tag_t is_bustag;       /* our bus tag */
-        bus_space_handle_t is_bushandle;
-        bus_addr_t is_iommu;             /* IOMMU registers */
-        bus_addr_t is_sb[2];             /* streaming buffer */
-        bus_addr_t is_dtag;              /* tag diagnostics access */
-        bus_addr_t is_ddram;             /* data ram diag. access */
-        bus_addr_t is_dqueue;            /* LRU queue diag. access */
-        bus_addr_t is_dva;               /* VA diag. register */
-        bus_addr_t is_dtcmp;             /* tag compare diag. access */
+        bus_space_tag_t is_bustag;       /* (r) Our bus tag */
+        bus_space_handle_t is_bushandle; /* (r) */
+        bus_addr_t is_iommu;             /* (r, *i) IOMMU registers */
+        bus_addr_t is_sb[2];             /* (r, *i) Streaming buffer */
+        /* Tag diagnostics access */
+        bus_addr_t is_dtag;              /* (r, *r) */
+        /* Data RAM diagnostic access */
+        bus_addr_t is_ddram;             /* (r, *r) */
+        /* LRU queue diag. access */
+        bus_addr_t is_dqueue;            /* (r, *r) */
+        /* Virtual address diagnostics register */
+        bus_addr_t is_dva;               /* (r, *r) */
+        /* Tag compare diagnostics access */
+        bus_addr_t is_dtcmp;             /* (r, *r) */
 
-        STAILQ_ENTRY(iommu_state) is_link;
+        STAILQ_ENTRY(iommu_state) is_link; /* (r) */
 };
 
 /* interfaces for PCI/SBUS code */
 void iommu_init(char *, struct iommu_state *, int, u_int32_t, int);
 void iommu_reset(struct iommu_state *);
-void iommu_enter(struct iommu_state *, vm_offset_t, vm_paddr_t, int);
-void iommu_remove(struct iommu_state *, vm_offset_t, size_t);
 void iommu_decode_fault(struct iommu_state *, vm_offset_t);
 
 extern struct bus_dma_methods iommu_dma_methods;
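The bus_private.h hunk above replaces the separate dm_loaded field with a dm_flags bit mask and the DMF_LOADED/DMF_COHERENT values. A minimal user-space sketch of how such flag bits are typically set, tested, and cleared (struct dmamap_model is a stand-in, not the kernel's struct bus_dmamap):

```c
#include <stdio.h>

/* Flag values, as introduced by the diff above. */
#define DMF_LOADED   1  /* Map is loaded */
#define DMF_COHERENT 2  /* Coherent mapping requested */

struct dmamap_model {           /* stand-in for struct bus_dmamap */
        int dm_flags;
};

int
main(void)
{
        struct dmamap_model m = { .dm_flags = 0 };

        m.dm_flags |= DMF_COHERENT;     /* a coherent mapping was requested */
        m.dm_flags |= DMF_LOADED;       /* mark the map as loaded */

        if (m.dm_flags & DMF_LOADED)
                printf("map is loaded%s\n",
                    (m.dm_flags & DMF_COHERENT) ? " (coherent)" : "");

        m.dm_flags &= ~DMF_LOADED;      /* unload the map */
        return (0);
}
```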