diff options
author | tmm <tmm@FreeBSD.org> | 2003-01-21 18:22:26 +0000 |
---|---|---|
committer | tmm <tmm@FreeBSD.org> | 2003-01-21 18:22:26 +0000 |
commit | c5298f4e6cc794e4370a71027577dc4ebfc4ac5e (patch) | |
tree | 7329e75f2c4a6881852d249a411ecd62deb32a00 /sys/sparc64 | |
parent | cb27222f4795837aa939e012105958acc32c3d82 (diff) | |
download | FreeBSD-src-c5298f4e6cc794e4370a71027577dc4ebfc4ac5e.zip FreeBSD-src-c5298f4e6cc794e4370a71027577dc4ebfc4ac5e.tar.gz |
Fixes for a number of problems in the IOMMU code:
1.) Fix an off-by-one in the DVMA space handling, which would make it
possible to allocate one page beyond the end of the DVMA area.
This page was aliased to the first page. Apparently, this bug was
responsible for the trashed nvram/eeprom some people were reporting,
in conjunction with a number of unfortunate coincidences.
2.) Fix broken boundary and lowaddr calculations.
3.) Fix a memory leak on an error path.
4.) Update an outdated comment to reflect the introduction of IOMMU_MAX_PRE,
make the usage of IOMMU_MAX_PRE more consistent and KASSERT that the
preallocation size is not 0.
5.) Fix a case where an error return was lost.
6.) When signalling an error to the caller by invoking the callback, do
not use a segment pointer of NULL for compatibility with existing
drivers.
Also, increase the maximum segment number to 64; it is rather arbitrary,
with the exception of the stack space consumed by the segment
array.
Special thanks go to Harti Brandt <brandt@fokus.fraunhofer.de> for
spotting 4 and 5, and testing many iterations of patches.
Pointy hats to: tmm
Diffstat (limited to 'sys/sparc64')
-rw-r--r-- | sys/sparc64/include/bus_private.h | 7 | ||||
-rw-r--r-- | sys/sparc64/sparc64/iommu.c | 32 |
2 files changed, 23 insertions, 16 deletions
diff --git a/sys/sparc64/include/bus_private.h b/sys/sparc64/include/bus_private.h index 4b68ffd..6c3a86e 100644 --- a/sys/sparc64/include/bus_private.h +++ b/sys/sparc64/include/bus_private.h @@ -33,7 +33,12 @@ #include <sys/queue.h> -#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1) +/* + * This is more or less arbitrary, except for the stack space consumed by + * the segments array. Choose more than ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1), + * since in practice we could be map pages more than once. + */ +#define BUS_DMAMAP_NSEGS 64 struct bus_dmamap_res { struct resource *dr_res; diff --git a/sys/sparc64/sparc64/iommu.c b/sys/sparc64/sparc64/iommu.c index c72415e..ff707d3 100644 --- a/sys/sparc64/sparc64/iommu.c +++ b/sys/sparc64/sparc64/iommu.c @@ -288,6 +288,7 @@ iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase, struct iommu_state *first; vm_size_t size; vm_offset_t offs; + u_int64_t end; int i; /* @@ -317,13 +318,13 @@ iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase, * First IOMMU to be registered; set up resource mamangement * and allocate TSB memory. */ + end = is->is_dvmabase + (size << (IO_PAGE_SHIFT - IOTTE_SHIFT)); iommu_dvma_rman.rm_type = RMAN_ARRAY; iommu_dvma_rman.rm_descr = "DVMA Memory"; if (rman_init(&iommu_dvma_rman) != 0 || rman_manage_region(&iommu_dvma_rman, (is->is_dvmabase >> IO_PAGE_SHIFT) + resvpg, - (is->is_dvmabase + (size << - (IO_PAGE_SHIFT - IOTTE_SHIFT))) >> IO_PAGE_SHIFT) != 0) + (end >> IO_PAGE_SHIFT) - 1) != 0) panic("iommu_init: can't initialize dvma rman"); /* * Allocate memory for I/O page tables. 
They need to be @@ -517,7 +518,7 @@ iommu_dvma_valloc(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map, { struct resource *res; struct bus_dmamap_res *bdr; - bus_size_t align, bound, sgsize; + bus_size_t align, sgsize; if ((bdr = malloc(sizeof(*bdr), M_IOMMU, M_NOWAIT)) == NULL) return (EAGAIN); @@ -531,12 +532,14 @@ iommu_dvma_valloc(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map, sgsize = round_io_page(size) >> IO_PAGE_SHIFT; if (t->dt_boundary > 0 && t->dt_boundary < IO_PAGE_SIZE) panic("iommu_dvmamap_load: illegal boundary specified"); - bound = ulmax(t->dt_boundary >> IO_PAGE_SHIFT, 1); res = rman_reserve_resource_bound(&iommu_dvma_rman, 0L, - t->dt_lowaddr, sgsize, bound >> IO_PAGE_SHIFT, + t->dt_lowaddr >> IO_PAGE_SHIFT, sgsize, + t->dt_boundary >> IO_PAGE_SHIFT, RF_ACTIVE | rman_make_alignment_flags(align), NULL); - if (res == NULL) + if (res == NULL) { + free(bdr, M_IOMMU); return (ENOMEM); + } bdr->dr_res = res; bdr->dr_used = 0; @@ -719,7 +722,7 @@ iommu_dvmamap_create(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is, * is possible to have multiple discontiguous segments in a single map, * which is handled by allocating additional resources, instead of * increasing the size, to avoid fragmentation. - * Clamp preallocation to BUS_SPACE_MAXSIZE. In some situations we can + * Clamp preallocation to IOMMU_MAX_PRE. In some situations we can * handle more; that case is handled by reallocating at map load time. 
*/ totsz = ulmin(IOMMU_SIZE_ROUNDUP(dt->dt_maxsize), IOMMU_MAX_PRE); @@ -732,7 +735,10 @@ iommu_dvmamap_create(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is, */ maxpre = imin(dt->dt_nsegments, IOMMU_MAX_PRE_SEG); presz = dt->dt_maxsize / maxpre; - for (i = 0; i < maxpre && totsz < IOMMU_MAX_PRE; i++) { + KASSERT(presz != 0, ("iommu_dvmamap_create: bogus preallocation size " + ", nsegments = %d, maxpre = %d, maxsize = %lu", dt->dt_nsegments, + maxpre, dt->dt_maxsize)); + for (i = 1; i < maxpre && totsz < IOMMU_MAX_PRE; i++) { currsz = round_io_page(ulmin(presz, IOMMU_MAX_PRE - totsz)); error = iommu_dvma_valloc(dt, is, *mapp, currsz); if (error != 0) @@ -783,7 +789,6 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is, return (error); sgcnt = *segp; - error = 0; firstpg = 1; for (; buflen > 0; ) { /* @@ -812,10 +817,8 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is, } sgcnt++; if (sgcnt >= dt->dt_nsegments || - sgcnt >= BUS_DMAMAP_NSEGS) { - error = EFBIG; - break; - } + sgcnt >= BUS_DMAMAP_NSEGS) + return (EFBIG); /* * No extra alignment here - the common practice in the * busdma code seems to be that only the first segment @@ -833,7 +836,6 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is, } *segp = sgcnt; return (0); - } int @@ -860,7 +862,7 @@ iommu_dvmamap_load(bus_dma_tag_t pt, bus_dma_tag_t dt, struct iommu_state *is, if (error != 0) { iommu_dvmamap_vunload(is, map); - (*cb)(cba, NULL, 0, error); + (*cb)(cba, sgs, 0, error); } else { /* Move the map to the end of the LRU queue. */ iommu_map_insq(map); |