Diffstat (limited to 'sys/sparc64')
-rw-r--r--  sys/sparc64/include/iommureg.h    5
-rw-r--r--  sys/sparc64/include/iommuvar.h   29
-rw-r--r--  sys/sparc64/pci/psycho.c         19
-rw-r--r--  sys/sparc64/pci/psychoreg.h       4
-rw-r--r--  sys/sparc64/sbus/sbus.c          16
-rw-r--r--  sys/sparc64/sbus/sbusreg.h        4
-rw-r--r--  sys/sparc64/sparc64/iommu.c     161
7 files changed, 95 insertions, 143 deletions
diff --git a/sys/sparc64/include/iommureg.h b/sys/sparc64/include/iommureg.h
index 65d59ce..0218cfa 100644
--- a/sys/sparc64/include/iommureg.h
+++ b/sys/sparc64/include/iommureg.h
@@ -64,8 +64,7 @@
#define STRBUF_EN 0x0000000000000001UL
#define STRBUF_D 0x0000000000000002UL
-#define IOMMU_BITS 34
-#define IOMMU_MAXADDR ((1UL << IOMMU_BITS) - 1)
+#define IOMMU_MAXADDR(bits) ((1UL << (bits)) - 1)
/*
* control register bits
@@ -121,7 +120,7 @@
/* Accesses to same bus segment? */
#define IOTTE_LOCAL 0x0800000000000000UL
/* Let's assume this is correct */
-#define IOTTE_PAMASK 0x000001ffffffe000UL
+#define IOTTE_PAMASK 0x000007ffffffe000UL
/* Accesses to cacheable space */
#define IOTTE_C 0x0000000000000010UL
/* Writeable */
diff --git a/sys/sparc64/include/iommuvar.h b/sys/sparc64/include/iommuvar.h
index 9c290c0..792a823 100644
--- a/sys/sparc64/include/iommuvar.h
+++ b/sys/sparc64/include/iommuvar.h
@@ -40,20 +40,31 @@
#define trunc_io_page(x) trunc_page(x)
/*
+ * LRU queue handling for lazy resource allocation
+ */
+TAILQ_HEAD(iommu_maplruq_head, bus_dmamap);
+
+/*
* Per-IOMMU state. The parenthesized comments indicate the locking strategy:
- * i - protected by iommu_mtx.
+ * i - protected by is_mtx.
* r - read-only after initialization.
* * - comment refers to pointer target / target hardware registers
* (for bus_addr_t).
- * iommu_map_lruq is also locked by iommu_mtx. Elements of iommu_tsb may only
- * be accessed from functions operating on the map owning the corresponding
- * resource, so the locking the user is required to do to protect the map is
- * sufficient. As soon as the TSBs are divorced, these will be moved into struct
- * iommu_state, and each state struct will get its own lock.
- * iommu_dvma_rman needs to be moved there too, but has its own internal lock.
+ * is_maplruq is also locked by is_mtx. Elements of is_tsb may only be
+ * accessed from functions operating on the map owning the corresponding
+ * resource, so the locking the user is required to do to protect the
+ * map is sufficient.
+ * The dm_reslist of each map is locked by is_mtx as well.
+ * is_dvma_rman has its own internal lock.
*/
struct iommu_state {
+ struct mtx is_mtx;
+ struct rman is_dvma_rman; /* DVMA space rman */
+ struct iommu_maplruq_head is_maplruq; /* (i) LRU queue */
+ vm_paddr_t is_ptsb; /* (r) TSB physical address */
+ u_int64_t *is_tsb; /* (*i) TSB virtual address */
int is_tsbsize; /* (r) 0 = 8K, ... */
+ u_int64_t is_pmaxaddr; /* (r) max. physical address */
u_int64_t is_dvmabase; /* (r) */
int64_t is_cr; /* (r) Control reg value */
@@ -85,11 +96,9 @@ struct iommu_state {
bus_addr_t is_dva; /* (r, *r) */
/* Tag compare diagnostics access */
bus_addr_t is_dtcmp; /* (r, *r) */
-
- STAILQ_ENTRY(iommu_state) is_link; /* (r) */
};
-/* interfaces for PCI/SBUS code */
+/* interfaces for PCI/SBus code */
void iommu_init(char *, struct iommu_state *, int, u_int32_t, int);
void iommu_reset(struct iommu_state *);
void iommu_decode_fault(struct iommu_state *, vm_offset_t);
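Fields marked "(i)" above may only be touched with is_mtx held. A minimal sketch of that discipline, mirroring iommu_map_insq() from the iommu.c hunk below (the real function asserts that its caller already holds the lock instead of taking it itself):

/* Sketch only: requeue a map at the tail of its IOMMU's LRU queue. */
static void
example_map_requeue(struct iommu_state *is, bus_dmamap_t map)
{

	mtx_lock(&is->is_mtx);		/* IS_LOCK(is) in iommu.c */
	if (map->dm_onq)		/* unlink first if already queued */
		TAILQ_REMOVE(&is->is_maplruq, map, dm_maplruq);
	TAILQ_INSERT_TAIL(&is->is_maplruq, map, dm_maplruq);
	map->dm_onq = 1;
	mtx_unlock(&is->is_mtx);	/* IS_UNLOCK(is) */
}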
diff --git a/sys/sparc64/pci/psycho.c b/sys/sparc64/pci/psycho.c
index 6ecc6c8..e409670 100644
--- a/sys/sparc64/pci/psycho.c
+++ b/sys/sparc64/pci/psycho.c
@@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/reboot.h>
+#include <sys/rman.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_pci.h>
@@ -66,8 +67,6 @@ __FBSDID("$FreeBSD$");
#include <machine/resource.h>
#include <machine/ver.h>
-#include <sys/rman.h>
-
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
@@ -557,9 +556,15 @@ psycho_attach(device_t dev)
* For the moment, 32KB should be more than enough.
*/
sc->sc_is = malloc(sizeof(struct iommu_state), M_DEVBUF,
- M_NOWAIT);
+ M_NOWAIT | M_ZERO);
if (sc->sc_is == NULL)
panic("%s: malloc iommu_state failed", __func__);
+ if (sc->sc_mode == PSYCHO_MODE_SABRE)
+ sc->sc_is->is_pmaxaddr =
+ IOMMU_MAXADDR(SABRE_IOMMU_BITS);
+ else
+ sc->sc_is->is_pmaxaddr =
+ IOMMU_MAXADDR(PSYCHO_IOMMU_BITS);
sc->sc_is->is_sb[0] = 0;
sc->sc_is->is_sb[1] = 0;
if (OF_getproplen(node, "no-streaming-cache") < 0)
@@ -577,9 +582,9 @@ psycho_attach(device_t dev)
sc->sc_pci_memt = psycho_alloc_bus_tag(sc, PCI_MEMORY_BUS_SPACE);
sc->sc_pci_iot = psycho_alloc_bus_tag(sc, PCI_IO_BUS_SPACE);
sc->sc_pci_cfgt = psycho_alloc_bus_tag(sc, PCI_CONFIG_BUS_SPACE);
- if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, IOMMU_MAXADDR, ~0,
- NULL, NULL, IOMMU_MAXADDR, 0xff, 0xffffffff, 0, NULL, NULL,
- &sc->sc_pci_dmat) != 0)
+ if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
+ sc->sc_is->is_pmaxaddr, ~0, NULL, NULL, sc->sc_is->is_pmaxaddr,
+ 0xff, 0xffffffff, 0, NULL, NULL, &sc->sc_pci_dmat) != 0)
panic("%s: bus_dma_tag_create failed", __func__);
/* Customize the tag. */
sc->sc_pci_dmat->dt_cookie = sc->sc_is;
@@ -1075,7 +1080,7 @@ psycho_setup_intr(device_t dev, device_t child, struct resource *ires,
* XXX installing the workaround for an affected device and the
* actual workaround in psycho_intr_stub() should be moved to
* psycho(4)-specific bus_dma_tag_create() and bus_dmamap_sync()
- * methods, respectively, once we make use of BUS_GET_DMA_TAG(),
+ * methods, respectively, once DMA tag creation is newbus'ified,
* so the workaround isn't only applied for interrupt handlers
* but also for polling(4) callbacks.
*/
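The Sabre/Psycho selection added above condenses to a single expression; a hypothetical helper (not part of the patch) to show the intent:

/*
 * Hypothetical helper: pick the physical address limit by bridge
 * flavor.  The Sabre (US-IIi) variant has a 34-bit IOMMU, while full
 * Psycho bridges translate to 41-bit physical addresses.
 */
static uint64_t
psycho_pmaxaddr(int mode)
{

	return (IOMMU_MAXADDR(mode == PSYCHO_MODE_SABRE ?
	    SABRE_IOMMU_BITS : PSYCHO_IOMMU_BITS));
}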
diff --git a/sys/sparc64/pci/psychoreg.h b/sys/sparc64/pci/psychoreg.h
index 1bf300f..b15e3d9 100644
--- a/sys/sparc64/pci/psychoreg.h
+++ b/sys/sparc64/pci/psychoreg.h
@@ -299,4 +299,8 @@
#define PCSR_SECBUS 0x40 /* Secondary bus number register */
#define PCSR_SUBBUS 0x41 /* Subordinate bus number register */
+/* Width of the physical addresses the IOMMU translates to */
+#define PSYCHO_IOMMU_BITS 41
+#define SABRE_IOMMU_BITS 34
+
#endif /* !_SPARC64_PCI_PSYCHOREG_H_ */
diff --git a/sys/sparc64/sbus/sbus.c b/sys/sparc64/sbus/sbus.c
index c78468f..2f241d3 100644
--- a/sys/sparc64/sbus/sbus.c
+++ b/sys/sparc64/sbus/sbus.c
@@ -112,21 +112,20 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
+#include <sys/queue.h>
#include <sys/reboot.h>
+#include <sys/rman.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>
#include <machine/bus.h>
+#include <machine/bus_common.h>
#include <machine/bus_private.h>
#include <machine/iommureg.h>
-#include <machine/bus_common.h>
-#include <machine/resource.h>
-
-#include <sys/rman.h>
-
#include <machine/iommuvar.h>
+#include <machine/resource.h>
#include <sparc64/sbus/ofw_sbus.h>
#include <sparc64/sbus/sbusreg.h>
@@ -380,6 +379,7 @@ sbus_attach(device_t dev)
/* initialise the IOMMU */
/* punch in our copies */
+ sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(SBUS_IOMMU_BITS);
sc->sc_is.is_bustag = rman_get_bustag(sc->sc_sysio_res);
sc->sc_is.is_bushandle = rman_get_bushandle(sc->sc_sysio_res);
sc->sc_is.is_iommu = SBR_IOMMU;
@@ -405,9 +405,9 @@ sbus_attach(device_t dev)
iommu_init(name, &sc->sc_is, 3, -1, 1);
/* Create the DMA tag. */
- if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, IOMMU_MAXADDR, ~0,
- NULL, NULL, IOMMU_MAXADDR, 0xff, 0xffffffff, 0, NULL, NULL,
- &sc->sc_cdmatag) != 0)
+ if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
+ sc->sc_is.is_pmaxaddr, ~0, NULL, NULL, sc->sc_is.is_pmaxaddr,
+ 0xff, 0xffffffff, 0, NULL, NULL, &sc->sc_cdmatag) != 0)
panic("%s: bus_dma_tag_create failed", __func__);
/* Customize the tag. */
sc->sc_cdmatag->dt_cookie = &sc->sc_is;
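sbus_attach() passes tsbsize = 3 to iommu_init(). Assuming 8K I/O pages and 8-byte TTEs (IO_PAGE_SHIFT = 13 and IOTTE_SHIFT = 3, consistent with the "0 = 8K" comment on is_tsbsize and the size << (IO_PAGE_SHIFT - IOTTE_SHIFT) span formula in iommu.c), that yields 64MB of DVMA space; a back-of-the-envelope check:

#include <stdio.h>

#define	IO_PAGE_SHIFT	13		/* 8K I/O pages (assumed) */
#define	IOTTE_SHIFT	3		/* 8-byte TTEs (assumed) */

int
main(void)
{
	int tsbsize = 3;			/* as passed by sbus_attach() */
	unsigned long size = 8192UL << tsbsize;	/* TSB bytes: 0 = 8K, ... */

	/* Each 8-byte TTE maps one 8K I/O page. */
	printf("TSB %lu KB = %lu TTEs -> %lu MB DVMA\n", size >> 10,
	    size >> IOTTE_SHIFT,
	    (size << (IO_PAGE_SHIFT - IOTTE_SHIFT)) >> 20);
	return (0);
}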
diff --git a/sys/sparc64/sbus/sbusreg.h b/sys/sparc64/sbus/sbusreg.h
index 8b47d67..4dddce1 100644
--- a/sys/sparc64/sbus/sbusreg.h
+++ b/sys/sparc64/sbus/sbusreg.h
@@ -136,5 +136,7 @@
#define SBR_OBIO_DIAG 0x4808 /* OBIO and misc int state diag reg */
#define SBR_STRBUF_DIAG 0x5000 /* Streaming buffer diag regs */
-#endif /* _SPARC64_SBUS_SBUSREG_H_ */
+/* Width of the physical addresses the IOMMU translates to */
+#define SBUS_IOMMU_BITS 41
+#endif /* _SPARC64_SBUS_SBUSREG_H_ */
diff --git a/sys/sparc64/sparc64/iommu.c b/sys/sparc64/sparc64/iommu.c
index cc973f6..1237a56 100644
--- a/sys/sparc64/sparc64/iommu.c
+++ b/sys/sparc64/sparc64/iommu.c
@@ -103,14 +103,8 @@ __FBSDID("$FreeBSD$");
/*
* UltraSPARC IOMMU support; used by both the PCI and SBus code.
- * Currently, the IOTSBs are synchronized, because determining the bus the map
- * is to be loaded for is not possible with the current busdma code.
- * The code is structured so that the IOMMUs can be easily divorced when that
- * is fixed.
*
* TODO:
- * - As soon as there is a newbus way to get a parent dma tag, divorce the
- * IOTSBs.
* - Support sub-page boundaries.
* - Fix alignment handling for small allocations (the possible page offset
* of malloc()ed memory is not handled at all). Revise interaction of
@@ -127,7 +121,6 @@ __FBSDID("$FreeBSD$");
* becomes available.
* - Use the streaming cache unless BUS_DMA_COHERENT is specified; do not
* flush the streaming cache when coherent mappings are synced.
- * - Add bounce buffers to support machines with more than 16GB of RAM.
*/
#include "opt_iommu.h"
@@ -173,33 +166,7 @@ static void iommu_diag(struct iommu_state *, vm_offset_t va);
#endif
/*
- * Protects iommu_maplruq, dm_reslist of all maps on the queue and all
- * iommu states as long as the TSBs are synchronized.
- */
-struct mtx iommu_mtx;
-
-/*
- * The following 4 variables need to be moved to the per-IOMMU state once
- * the IOTSBs are divorced.
- * LRU queue handling for lazy resource allocation.
- */
-static TAILQ_HEAD(iommu_maplruq_head, bus_dmamap) iommu_maplruq =
- TAILQ_HEAD_INITIALIZER(iommu_maplruq);
-
-/* DVMA space rman. */
-static struct rman iommu_dvma_rman;
-
-/* Virtual and physical address of the TSB. */
-static u_int64_t *iommu_tsb;
-static vm_offset_t iommu_ptsb;
-
-/* List of all IOMMUs. */
-static STAILQ_HEAD(, iommu_state) iommu_insts =
- STAILQ_HEAD_INITIALIZER(iommu_insts);
-
-/*
- * Helpers. Some of these take unused iommu states as parameters, to ease the
- * transition to divorced TSBs.
+ * Helpers
*/
#define IOMMU_READ8(is, reg, off) \
bus_space_read_8((is)->is_bustag, (is)->is_bushandle, \
@@ -219,9 +186,9 @@ static STAILQ_HEAD(, iommu_state) iommu_insts =
(round_io_page(sz) + IO_PAGE_SIZE)
#define IOMMU_SET_TTE(is, va, tte) \
- (iommu_tsb[IOTSBSLOT(va)] = (tte))
+ ((is)->is_tsb[IOTSBSLOT(va)] = (tte))
#define IOMMU_GET_TTE(is, va) \
- iommu_tsb[IOTSBSLOT(va)]
+ (is)->is_tsb[IOTSBSLOT(va)]
/* Resource helpers */
#define IOMMU_RES_START(res) \
@@ -236,24 +203,17 @@ static STAILQ_HEAD(, iommu_state) iommu_insts =
#define BDR_END(r) IOMMU_RES_END((r)->dr_res)
#define BDR_SIZE(r) IOMMU_RES_SIZE((r)->dr_res)
-/* Locking macros. */
-#define IS_LOCK(is) mtx_lock(&iommu_mtx)
-#define IS_LOCK_ASSERT(is) mtx_assert(&iommu_mtx, MA_OWNED)
-#define IS_UNLOCK(is) mtx_unlock(&iommu_mtx)
-
+/* Locking macros */
+#define IS_LOCK(is) mtx_lock(&(is)->is_mtx)
+#define IS_LOCK_ASSERT(is) mtx_assert(&(is)->is_mtx, MA_OWNED)
+#define IS_UNLOCK(is) mtx_unlock(&(is)->is_mtx)
/* Flush a page from the TLB. No locking required, since this is atomic. */
static __inline void
iommu_tlb_flush(struct iommu_state *is, bus_addr_t va)
{
- struct iommu_state *it;
- /*
- * Since the TSB is shared for now, the TLBs of all IOMMUs
- * need to be flushed.
- */
- STAILQ_FOREACH(it, &iommu_insts, is_link)
- IOMMU_WRITE8(it, is_iommu, IMR_FLUSH, va);
+ IOMMU_WRITE8(is, is_iommu, IMR_FLUSH, va);
}
/*
@@ -265,10 +225,9 @@ iommu_strbuf_flushpg(struct iommu_state *is, bus_addr_t va)
{
int i;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < 2; i++)
if (is->is_sb[i] != 0)
IOMMU_WRITE8(is, is_sb[i], ISR_PGFLUSH, va);
- }
}
/*
@@ -279,29 +238,17 @@ iommu_strbuf_flushpg(struct iommu_state *is, bus_addr_t va)
static __inline void
iommu_strbuf_flush(struct iommu_state *is, bus_addr_t va)
{
- struct iommu_state *it;
- /*
- * Need to flush the streaming buffers of all IOMMUs, we cannot
- * determine which one was used for the transaction.
- */
- STAILQ_FOREACH(it, &iommu_insts, is_link)
- iommu_strbuf_flushpg(it, va);
+ iommu_strbuf_flushpg(is, va);
}
/* Synchronize all outstanding flush operations. */
static __inline void
iommu_strbuf_sync(struct iommu_state *is)
{
- struct iommu_state *it;
IS_LOCK_ASSERT(is);
- /*
- * Need to sync the streaming buffers of all IOMMUs, we cannot
- * determine which one was used for the transaction.
- */
- STAILQ_FOREACH(it, &iommu_insts, is_link)
- iommu_strbuf_flush_sync(it);
+ iommu_strbuf_flush_sync(is);
}
/* LRU queue handling for lazy resource allocation. */
@@ -312,8 +259,8 @@ iommu_map_insq(struct iommu_state *is, bus_dmamap_t map)
IS_LOCK_ASSERT(is);
if (!SLIST_EMPTY(&map->dm_reslist)) {
if (map->dm_onq)
- TAILQ_REMOVE(&iommu_maplruq, map, dm_maplruq);
- TAILQ_INSERT_TAIL(&iommu_maplruq, map, dm_maplruq);
+ TAILQ_REMOVE(&is->is_maplruq, map, dm_maplruq);
+ TAILQ_INSERT_TAIL(&is->is_maplruq, map, dm_maplruq);
map->dm_onq = 1;
}
}
@@ -324,7 +271,7 @@ iommu_map_remq(struct iommu_state *is, bus_dmamap_t map)
IS_LOCK_ASSERT(is);
if (map->dm_onq)
- TAILQ_REMOVE(&iommu_maplruq, map, dm_maplruq);
+ TAILQ_REMOVE(&is->is_maplruq, map, dm_maplruq);
map->dm_onq = 0;
}
@@ -339,7 +286,6 @@ void
iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase,
int resvpg)
{
- struct iommu_state *first;
vm_size_t size;
vm_offset_t offs;
u_int64_t end;
@@ -367,43 +313,30 @@ iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase,
is->is_dvmabase, is->is_dvmabase +
(size << (IO_PAGE_SHIFT - IOTTE_SHIFT)) - 1);
- if (STAILQ_EMPTY(&iommu_insts)) {
- /*
- * First IOMMU to be registered; set up resource mamangement
- * and allocate TSB memory.
- */
- mtx_init(&iommu_mtx, "iommu", NULL, MTX_DEF);
- end = is->is_dvmabase + (size << (IO_PAGE_SHIFT - IOTTE_SHIFT));
- iommu_dvma_rman.rm_type = RMAN_ARRAY;
- iommu_dvma_rman.rm_descr = "DVMA Memory";
- if (rman_init(&iommu_dvma_rman) != 0 ||
- rman_manage_region(&iommu_dvma_rman,
- (is->is_dvmabase >> IO_PAGE_SHIFT) + resvpg,
- (end >> IO_PAGE_SHIFT) - 1) != 0)
- panic("%s: could not initialize DVMA rman", __func__);
- /*
- * Allocate memory for I/O page tables. They need to be
- * physically contiguous.
- */
- iommu_tsb = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, ~0UL,
- PAGE_SIZE, 0);
- if (iommu_tsb == 0)
- panic("%s: contigmalloc failed", __func__);
- iommu_ptsb = pmap_kextract((vm_offset_t)iommu_tsb);
- bzero(iommu_tsb, size);
- } else {
- /*
- * Not the first IOMMU; just check that the parameters match
- * those of the first one.
- */
- first = STAILQ_FIRST(&iommu_insts);
- if (is->is_tsbsize != first->is_tsbsize ||
- is->is_dvmabase != first->is_dvmabase) {
- panic("%s: secondary IOMMU state does not "
- "match primary", __func__);
- }
- }
- STAILQ_INSERT_TAIL(&iommu_insts, is, is_link);
+ /*
+ * Set up resource management.
+ */
+ mtx_init(&is->is_mtx, "iommu", NULL, MTX_DEF);
+ end = is->is_dvmabase + (size << (IO_PAGE_SHIFT - IOTTE_SHIFT));
+ is->is_dvma_rman.rm_type = RMAN_ARRAY;
+ is->is_dvma_rman.rm_descr = "DVMA Memory";
+ if (rman_init(&is->is_dvma_rman) != 0 ||
+ rman_manage_region(&is->is_dvma_rman,
+ (is->is_dvmabase >> IO_PAGE_SHIFT) + resvpg,
+ (end >> IO_PAGE_SHIFT) - 1) != 0)
+ panic("%s: could not initialize DVMA rman", __func__);
+ TAILQ_INIT(&is->is_maplruq);
+
+ /*
+ * Allocate memory for I/O page tables. They need to be
+ * physically contiguous.
+ */
+ is->is_tsb = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, ~0UL,
+ PAGE_SIZE, 0);
+ if (is->is_tsb == NULL)
+ panic("%s: contigmalloc failed", __func__);
+ is->is_ptsb = pmap_kextract((vm_offset_t)is->is_tsb);
+ bzero(is->is_tsb, size);
/*
* Initialize streaming buffer, if it is there.
@@ -437,7 +370,7 @@ iommu_reset(struct iommu_state *is)
{
int i;
- IOMMU_WRITE8(is, is_iommu, IMR_TSB, iommu_ptsb);
+ IOMMU_WRITE8(is, is_iommu, IMR_TSB, is->is_ptsb);
/* Enable IOMMU in diagnostic mode */
IOMMU_WRITE8(is, is_iommu, IMR_CTL, is->is_cr | IOMMUCR_DE);
@@ -465,7 +398,7 @@ iommu_enter(struct iommu_state *is, vm_offset_t va, vm_paddr_t pa,
KASSERT(va >= is->is_dvmabase,
("iommu_enter: va %#lx not in DVMA space", va));
- KASSERT(pa <= IOMMU_MAXADDR,
+ KASSERT(pa <= is->is_pmaxaddr,
("iommu_enter: XXX: physical address too large (%#lx)", pa));
tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE),
@@ -519,8 +452,8 @@ iommu_decode_fault(struct iommu_state *is, vm_offset_t phys)
bus_addr_t va;
long idx;
- idx = phys - iommu_ptsb;
- if (phys < iommu_ptsb ||
+ idx = phys - is->is_ptsb;
+ if (phys < is->is_ptsb ||
idx > (PAGE_SIZE << is->is_tsbsize))
return;
va = is->is_dvmabase +
@@ -630,7 +563,7 @@ iommu_dvma_valloc(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
sgsize = round_io_page(size) >> IO_PAGE_SHIFT;
if (t->dt_boundary > 0 && t->dt_boundary < IO_PAGE_SIZE)
panic("%s: illegal boundary specified", __func__);
- res = rman_reserve_resource_bound(&iommu_dvma_rman, 0L,
+ res = rman_reserve_resource_bound(&is->is_dvma_rman, 0L,
t->dt_lowaddr >> IO_PAGE_SHIFT, sgsize,
t->dt_boundary >> IO_PAGE_SHIFT,
RF_ACTIVE | rman_make_alignment_flags(align), NULL);
@@ -759,9 +692,9 @@ iommu_dvma_vallocseg(bus_dma_tag_t dt, struct iommu_state *is, bus_dmamap_t map,
*/
IS_LOCK(is);
freed = 0;
- last = TAILQ_LAST(&iommu_maplruq, iommu_maplruq_head);
+ last = TAILQ_LAST(&is->is_maplruq, iommu_maplruq_head);
do {
- tm = TAILQ_FIRST(&iommu_maplruq);
+ tm = TAILQ_FIRST(&is->is_maplruq);
complete = tm == last;
if (tm == NULL)
break;
@@ -789,8 +722,8 @@ iommu_dvmamem_alloc(bus_dma_tag_t dt, void **vaddr, int flags,
int error, mflags;
/*
- * XXX: This will break for 32 bit transfers on machines with more than
- * 16G (1 << 34 bytes) of memory.
+ * XXX: This will break for 32 bit transfers on machines with more
+ * than is->is_pmaxaddr memory.
*/
if ((error = sparc64_dma_alloc_map(dt, mapp)) != 0)
return (error);
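The reclamation loop patched in iommu_dvma_vallocseg() above now walks a per-IOMMU LRU queue. The hunk elides the resource-freeing middle of the loop, so the body below is illustrative only; a minimal sketch of the overall shape:

/* Sketch: evict cached DVMA resources, least recently used maps first. */
static int
example_reclaim(struct iommu_state *is)
{
	bus_dmamap_t last, tm;
	int complete, freed;

	mtx_lock(&is->is_mtx);
	freed = 0;
	last = TAILQ_LAST(&is->is_maplruq, iommu_maplruq_head);
	do {
		tm = TAILQ_FIRST(&is->is_maplruq);
		complete = (tm == last);
		if (tm == NULL)
			break;
		/* Illustrative: unlink tm and release its dm_reslist here. */
		TAILQ_REMOVE(&is->is_maplruq, tm, dm_maplruq);
		tm->dm_onq = 0;
		freed++;
	} while (!complete);
	mtx_unlock(&is->is_mtx);
	return (freed);
}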