author    | David Gibson <david@gibson.dropbear.id.au> | 2012-04-04 10:15:58 +1000
committer | Gerd Hoffmann <kraxel@redhat.com>          | 2012-04-26 12:21:16 +0200
commit    | 59a70ccd3be29d2c5034cf7f22de0cf4e9156432 (patch)
tree      | aaa475e0700b4be9cfcf3218e6eee3d42e0e4a95
parent    | 0b377169b18d702d980d526578d8515900ca6eb1 (diff)
usb-xhci: Use PCI DMA helper functions
Shortly before 1.0, we added helper functions / wrappers for doing PCI DMA
from individual devices. This makes what's going on clearer and means that
when we add IOMMU support somewhere in the future, only the general PCI
code will have to change, not every device that uses PCI DMA.
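
For reference, the wrappers in question take the PCIDevice performing the access as an extra argument. Below is a minimal sketch of their shape, assuming they simply forward to cpu_physical_memory_*() until IOMMU support exists; the actual definitions live in hw/pci.h and may differ in detail. Routing every access through the owning device is what lets a future IOMMU hook in at a single place.

    /*
     * Simplified sketch of the PCI DMA helpers (not the exact hw/pci.h code).
     * Passing the PCIDevice that performs the access is the point: when IOMMU
     * support is added, the address translation can happen here, per device,
     * without touching callers such as usb-xhci.
     */
    static inline int pci_dma_read(PCIDevice *dev, dma_addr_t addr,
                                   void *buf, dma_addr_t len)
    {
        /* No IOMMU yet, so this is still a plain guest-physical access. */
        cpu_physical_memory_read(addr, buf, len);
        return 0;
    }

    static inline int pci_dma_write(PCIDevice *dev, dma_addr_t addr,
                                    const void *buf, dma_addr_t len)
    {
        cpu_physical_memory_write(addr, buf, len);
        return 0;
    }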
However, usb-xhci is not using these wrappers, despite being a PCI-only
device. This patch remedies the situation, using the PCI DMA functions
instead of direct calls to cpu_physical_memory_{read,write}(). Likewise,
the address parameters used for DMA are changed from target_phys_addr_t
to dma_addr_t.
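
At each call site the conversion follows the same pattern; here is a condensed before/after excerpt, based on the event-ring writer (xhci_write_event) in the diff below:

    /* Before: raw guest-physical access, address typed as target_phys_addr_t. */
    target_phys_addr_t addr = xhci->er_start + TRB_SIZE * xhci->er_ep_idx;
    cpu_physical_memory_write(addr, (uint8_t *) &ev_trb, TRB_SIZE);

    /* After: the same write is attributed to the xHCI PCI device and typed as
     * dma_addr_t; debug format strings switch from TARGET_FMT_plx to
     * DMA_ADDR_FMT to match. */
    dma_addr_t addr = xhci->er_start + TRB_SIZE * xhci->er_ep_idx;
    pci_dma_write(&xhci->pci_dev, addr, &ev_trb, TRB_SIZE);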
[ kraxel: removed #ifdefs ]
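
The note above refers to xhci_addr64() and xhci_mask64(): rather than selecting code with #if TARGET_PHYS_ADDR_BITS > 32, the applied patch branches on sizeof(dma_addr_t) and lets the compiler discard the dead branch. The 32-bit shift is written as two 16-bit shifts so the expression compiles cleanly even when dma_addr_t is a 32-bit type. As they appear in the patch:

    static inline dma_addr_t xhci_addr64(uint32_t low, uint32_t high)
    {
        if (sizeof(dma_addr_t) == 4) {
            return low;
        } else {
            /* Two 16-bit shifts: a single << 32 would trigger a shift-count
             * warning (and be undefined if evaluated) when dma_addr_t is
             * only 32 bits wide, even though that branch is never taken. */
            return low | (((dma_addr_t)high << 16) << 16);
        }
    }

    static inline dma_addr_t xhci_mask64(uint64_t addr)
    {
        if (sizeof(dma_addr_t) == 4) {
            return addr & 0xffffffff;
        } else {
            return addr;
        }
    }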
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
-rw-r--r-- | hw/usb/hcd-xhci.c | 190
1 file changed, 92 insertions(+), 98 deletions(-)
diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c index 73b0c7f..fd13bba 100644 --- a/hw/usb/hcd-xhci.c +++ b/hw/usb/hcd-xhci.c @@ -22,7 +22,6 @@ #include "qemu-timer.h" #include "hw/usb.h" #include "hw/pci.h" -#include "hw/qdev-addr.h" #include "hw/msi.h" //#define DEBUG_XHCI @@ -140,7 +139,7 @@ typedef struct XHCITRB { uint64_t parameter; uint32_t status; uint32_t control; - target_phys_addr_t addr; + dma_addr_t addr; bool ccs; } XHCITRB; @@ -291,8 +290,8 @@ typedef enum EPType { } EPType; typedef struct XHCIRing { - target_phys_addr_t base; - target_phys_addr_t dequeue; + dma_addr_t base; + dma_addr_t dequeue; bool ccs; } XHCIRing; @@ -345,7 +344,7 @@ typedef struct XHCIEPContext { unsigned int next_bg; XHCITransfer bg_transfers[BG_XFERS]; EPType type; - target_phys_addr_t pctx; + dma_addr_t pctx; unsigned int max_psize; bool has_bg; uint32_t state; @@ -353,7 +352,7 @@ typedef struct XHCIEPContext { typedef struct XHCISlot { bool enabled; - target_phys_addr_t ctx; + dma_addr_t ctx; unsigned int port; unsigned int devaddr; XHCIEPContext * eps[31]; @@ -402,7 +401,7 @@ struct XHCIState { uint32_t erdp_low; uint32_t erdp_high; - target_phys_addr_t er_start; + dma_addr_t er_start; uint32_t er_size; bool er_pcs; unsigned int er_ep_idx; @@ -479,22 +478,22 @@ static const char *trb_name(XHCITRB *trb) static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, unsigned int epid); -static inline target_phys_addr_t xhci_addr64(uint32_t low, uint32_t high) +static inline dma_addr_t xhci_addr64(uint32_t low, uint32_t high) { -#if TARGET_PHYS_ADDR_BITS > 32 - return low | ((target_phys_addr_t)high << 32); -#else - return low; -#endif + if (sizeof(dma_addr_t) == 4) { + return low; + } else { + return low | (((dma_addr_t)high << 16) << 16); + } } -static inline target_phys_addr_t xhci_mask64(uint64_t addr) +static inline dma_addr_t xhci_mask64(uint64_t addr) { -#if TARGET_PHYS_ADDR_BITS > 32 - return addr; -#else - return addr & 0xffffffff; -#endif + if (sizeof(dma_addr_t) == 4) { + return addr & 0xffffffff; + } else { + return addr; + } } static void xhci_irq_update(XHCIState *xhci) @@ -532,7 +531,7 @@ static void xhci_die(XHCIState *xhci) static void xhci_write_event(XHCIState *xhci, XHCIEvent *event) { XHCITRB ev_trb; - target_phys_addr_t addr; + dma_addr_t addr; ev_trb.parameter = cpu_to_le64(event->ptr); ev_trb.status = cpu_to_le32(event->length | (event->ccode << 24)); @@ -548,7 +547,7 @@ static void xhci_write_event(XHCIState *xhci, XHCIEvent *event) trb_name(&ev_trb)); addr = xhci->er_start + TRB_SIZE*xhci->er_ep_idx; - cpu_physical_memory_write(addr, (uint8_t *) &ev_trb, TRB_SIZE); + pci_dma_write(&xhci->pci_dev, addr, &ev_trb, TRB_SIZE); xhci->er_ep_idx++; if (xhci->er_ep_idx >= xhci->er_size) { @@ -559,7 +558,7 @@ static void xhci_write_event(XHCIState *xhci, XHCIEvent *event) static void xhci_events_update(XHCIState *xhci) { - target_phys_addr_t erdp; + dma_addr_t erdp; unsigned int dp_idx; bool do_irq = 0; @@ -570,8 +569,8 @@ static void xhci_events_update(XHCIState *xhci) erdp = xhci_addr64(xhci->erdp_low, xhci->erdp_high); if (erdp < xhci->er_start || erdp >= (xhci->er_start + TRB_SIZE*xhci->er_size)) { - fprintf(stderr, "xhci: ERDP out of bounds: "TARGET_FMT_plx"\n", erdp); - fprintf(stderr, "xhci: ER at "TARGET_FMT_plx" len %d\n", + fprintf(stderr, "xhci: ERDP out of bounds: "DMA_ADDR_FMT"\n", erdp); + fprintf(stderr, "xhci: ER at "DMA_ADDR_FMT" len %d\n", xhci->er_start, xhci->er_size); xhci_die(xhci); return; @@ -630,7 +629,7 @@ static void xhci_events_update(XHCIState *xhci) 
static void xhci_event(XHCIState *xhci, XHCIEvent *event) { - target_phys_addr_t erdp; + dma_addr_t erdp; unsigned int dp_idx; if (xhci->er_full) { @@ -649,8 +648,8 @@ static void xhci_event(XHCIState *xhci, XHCIEvent *event) erdp = xhci_addr64(xhci->erdp_low, xhci->erdp_high); if (erdp < xhci->er_start || erdp >= (xhci->er_start + TRB_SIZE*xhci->er_size)) { - fprintf(stderr, "xhci: ERDP out of bounds: "TARGET_FMT_plx"\n", erdp); - fprintf(stderr, "xhci: ER at "TARGET_FMT_plx" len %d\n", + fprintf(stderr, "xhci: ERDP out of bounds: "DMA_ADDR_FMT"\n", erdp); + fprintf(stderr, "xhci: ER at "DMA_ADDR_FMT" len %d\n", xhci->er_start, xhci->er_size); xhci_die(xhci); return; @@ -686,7 +685,7 @@ static void xhci_event(XHCIState *xhci, XHCIEvent *event) } static void xhci_ring_init(XHCIState *xhci, XHCIRing *ring, - target_phys_addr_t base) + dma_addr_t base) { ring->base = base; ring->dequeue = base; @@ -694,18 +693,18 @@ static void xhci_ring_init(XHCIState *xhci, XHCIRing *ring, } static TRBType xhci_ring_fetch(XHCIState *xhci, XHCIRing *ring, XHCITRB *trb, - target_phys_addr_t *addr) + dma_addr_t *addr) { while (1) { TRBType type; - cpu_physical_memory_read(ring->dequeue, (uint8_t *) trb, TRB_SIZE); + pci_dma_read(&xhci->pci_dev, ring->dequeue, trb, TRB_SIZE); trb->addr = ring->dequeue; trb->ccs = ring->ccs; le64_to_cpus(&trb->parameter); le32_to_cpus(&trb->status); le32_to_cpus(&trb->control); - DPRINTF("xhci: TRB fetched [" TARGET_FMT_plx "]: " + DPRINTF("xhci: TRB fetched [" DMA_ADDR_FMT "]: " "%016" PRIx64 " %08x %08x %s\n", ring->dequeue, trb->parameter, trb->status, trb->control, trb_name(trb)); @@ -735,19 +734,19 @@ static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring) { XHCITRB trb; int length = 0; - target_phys_addr_t dequeue = ring->dequeue; + dma_addr_t dequeue = ring->dequeue; bool ccs = ring->ccs; /* hack to bundle together the two/three TDs that make a setup transfer */ bool control_td_set = 0; while (1) { TRBType type; - cpu_physical_memory_read(dequeue, (uint8_t *) &trb, TRB_SIZE); + pci_dma_read(&xhci->pci_dev, dequeue, &trb, TRB_SIZE); le64_to_cpus(&trb.parameter); le32_to_cpus(&trb.status); le32_to_cpus(&trb.control); - DPRINTF("xhci: TRB peeked [" TARGET_FMT_plx "]: " + DPRINTF("xhci: TRB peeked [" DMA_ADDR_FMT "]: " "%016" PRIx64 " %08x %08x\n", dequeue, trb.parameter, trb.status, trb.control); @@ -790,8 +789,8 @@ static void xhci_er_reset(XHCIState *xhci) xhci_die(xhci); return; } - target_phys_addr_t erstba = xhci_addr64(xhci->erstba_low, xhci->erstba_high); - cpu_physical_memory_read(erstba, (uint8_t *) &seg, sizeof(seg)); + dma_addr_t erstba = xhci_addr64(xhci->erstba_low, xhci->erstba_high); + pci_dma_read(&xhci->pci_dev, erstba, &seg, sizeof(seg)); le32_to_cpus(&seg.addr_low); le32_to_cpus(&seg.addr_high); le32_to_cpus(&seg.size); @@ -807,7 +806,7 @@ static void xhci_er_reset(XHCIState *xhci) xhci->er_pcs = 1; xhci->er_full = 0; - DPRINTF("xhci: event ring:" TARGET_FMT_plx " [%d]\n", + DPRINTF("xhci: event ring:" DMA_ADDR_FMT " [%d]\n", xhci->er_start, xhci->er_size); } @@ -833,24 +832,24 @@ static void xhci_set_ep_state(XHCIState *xhci, XHCIEPContext *epctx, return; } - cpu_physical_memory_read(epctx->pctx, (uint8_t *) ctx, sizeof(ctx)); + pci_dma_read(&xhci->pci_dev, epctx->pctx, ctx, sizeof(ctx)); ctx[0] &= ~EP_STATE_MASK; ctx[0] |= state; ctx[2] = epctx->ring.dequeue | epctx->ring.ccs; ctx[3] = (epctx->ring.dequeue >> 16) >> 16; - DPRINTF("xhci: set epctx: " TARGET_FMT_plx " state=%d dequeue=%08x%08x\n", + DPRINTF("xhci: set epctx: " 
DMA_ADDR_FMT " state=%d dequeue=%08x%08x\n", epctx->pctx, state, ctx[3], ctx[2]); - cpu_physical_memory_write(epctx->pctx, (uint8_t *) ctx, sizeof(ctx)); + pci_dma_write(&xhci->pci_dev, epctx->pctx, ctx, sizeof(ctx)); epctx->state = state; } static TRBCCode xhci_enable_ep(XHCIState *xhci, unsigned int slotid, - unsigned int epid, target_phys_addr_t pctx, + unsigned int epid, dma_addr_t pctx, uint32_t *ctx) { XHCISlot *slot; XHCIEPContext *epctx; - target_phys_addr_t dequeue; + dma_addr_t dequeue; int i; assert(slotid >= 1 && slotid <= MAXSLOTS); @@ -1087,7 +1086,7 @@ static TRBCCode xhci_set_ep_dequeue(XHCIState *xhci, unsigned int slotid, { XHCISlot *slot; XHCIEPContext *epctx; - target_phys_addr_t dequeue; + dma_addr_t dequeue; assert(slotid >= 1 && slotid <= MAXSLOTS); @@ -1142,7 +1141,7 @@ static int xhci_xfer_data(XHCITransfer *xfer, uint8_t *data, for (i = 0; i < xfer->trb_count; i++) { XHCITRB *trb = &xfer->trbs[i]; - target_phys_addr_t addr; + dma_addr_t addr; unsigned int chunk = 0; switch (TRB_TYPE(*trb)) { @@ -1173,11 +1172,11 @@ static int xhci_xfer_data(XHCITransfer *xfer, uint8_t *data, memcpy(data, &idata, chunk); } else { DPRINTF("xhci_xfer_data: r/w(%d) %d bytes at " - TARGET_FMT_plx "\n", in_xfer, chunk, addr); + DMA_ADDR_FMT "\n", in_xfer, chunk, addr); if (in_xfer) { - cpu_physical_memory_write(addr, data, chunk); + pci_dma_write(&xhci->pci_dev, addr, data, chunk); } else { - cpu_physical_memory_read(addr, data, chunk); + pci_dma_read(&xhci->pci_dev, addr, data, chunk); } #ifdef DEBUG_DATA unsigned int count = chunk; @@ -1240,7 +1239,7 @@ static void xhci_stall_ep(XHCITransfer *xfer) epctx->ring.ccs = xfer->trbs[0].ccs; xhci_set_ep_state(xhci, epctx, EP_HALTED); DPRINTF("xhci: stalled slot %d ep %d\n", xfer->slotid, xfer->epid); - DPRINTF("xhci: will continue at "TARGET_FMT_plx"\n", epctx->ring.dequeue); + DPRINTF("xhci: will continue at "DMA_ADDR_FMT"\n", epctx->ring.dequeue); } static int xhci_submit(XHCIState *xhci, XHCITransfer *xfer, @@ -1802,7 +1801,7 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid, { XHCISlot *slot; USBDevice *dev; - target_phys_addr_t ictx, octx, dcbaap; + dma_addr_t ictx, octx, dcbaap; uint64_t poctx; uint32_t ictl_ctx[2]; uint32_t slot_ctx[4]; @@ -1815,15 +1814,14 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid, DPRINTF("xhci_address_slot(%d)\n", slotid); dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high); - cpu_physical_memory_read(dcbaap + 8*slotid, - (uint8_t *) &poctx, sizeof(poctx)); + pci_dma_read(&xhci->pci_dev, dcbaap + 8*slotid, &poctx, sizeof(poctx)); ictx = xhci_mask64(pictx); octx = xhci_mask64(le64_to_cpu(poctx)); - DPRINTF("xhci: input context at "TARGET_FMT_plx"\n", ictx); - DPRINTF("xhci: output context at "TARGET_FMT_plx"\n", octx); + DPRINTF("xhci: input context at "DMA_ADDR_FMT"\n", ictx); + DPRINTF("xhci: output context at "DMA_ADDR_FMT"\n", octx); - cpu_physical_memory_read(ictx, (uint8_t *) ictl_ctx, sizeof(ictl_ctx)); + pci_dma_read(&xhci->pci_dev, ictx, ictl_ctx, sizeof(ictl_ctx)); if (ictl_ctx[0] != 0x0 || ictl_ctx[1] != 0x3) { fprintf(stderr, "xhci: invalid input context control %08x %08x\n", @@ -1831,8 +1829,8 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid, return CC_TRB_ERROR; } - cpu_physical_memory_read(ictx+32, (uint8_t *) slot_ctx, sizeof(slot_ctx)); - cpu_physical_memory_read(ictx+64, (uint8_t *) ep0_ctx, sizeof(ep0_ctx)); + pci_dma_read(&xhci->pci_dev, ictx+32, slot_ctx, sizeof(slot_ctx)); + pci_dma_read(&xhci->pci_dev, 
ictx+64, ep0_ctx, sizeof(ep0_ctx)); DPRINTF("xhci: input slot context: %08x %08x %08x %08x\n", slot_ctx[0], slot_ctx[1], slot_ctx[2], slot_ctx[3]); @@ -1881,8 +1879,8 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid, DPRINTF("xhci: output ep0 context: %08x %08x %08x %08x %08x\n", ep0_ctx[0], ep0_ctx[1], ep0_ctx[2], ep0_ctx[3], ep0_ctx[4]); - cpu_physical_memory_write(octx, (uint8_t *) slot_ctx, sizeof(slot_ctx)); - cpu_physical_memory_write(octx+32, (uint8_t *) ep0_ctx, sizeof(ep0_ctx)); + pci_dma_write(&xhci->pci_dev, octx, slot_ctx, sizeof(slot_ctx)); + pci_dma_write(&xhci->pci_dev, octx+32, ep0_ctx, sizeof(ep0_ctx)); return res; } @@ -1891,7 +1889,7 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid, static TRBCCode xhci_configure_slot(XHCIState *xhci, unsigned int slotid, uint64_t pictx, bool dc) { - target_phys_addr_t ictx, octx; + dma_addr_t ictx, octx; uint32_t ictl_ctx[2]; uint32_t slot_ctx[4]; uint32_t islot_ctx[4]; @@ -1905,8 +1903,8 @@ static TRBCCode xhci_configure_slot(XHCIState *xhci, unsigned int slotid, ictx = xhci_mask64(pictx); octx = xhci->slots[slotid-1].ctx; - DPRINTF("xhci: input context at "TARGET_FMT_plx"\n", ictx); - DPRINTF("xhci: output context at "TARGET_FMT_plx"\n", octx); + DPRINTF("xhci: input context at "DMA_ADDR_FMT"\n", ictx); + DPRINTF("xhci: output context at "DMA_ADDR_FMT"\n", octx); if (dc) { for (i = 2; i <= 31; i++) { @@ -1915,17 +1913,17 @@ static TRBCCode xhci_configure_slot(XHCIState *xhci, unsigned int slotid, } } - cpu_physical_memory_read(octx, (uint8_t *) slot_ctx, sizeof(slot_ctx)); + pci_dma_read(&xhci->pci_dev, octx, slot_ctx, sizeof(slot_ctx)); slot_ctx[3] &= ~(SLOT_STATE_MASK << SLOT_STATE_SHIFT); slot_ctx[3] |= SLOT_ADDRESSED << SLOT_STATE_SHIFT; DPRINTF("xhci: output slot context: %08x %08x %08x %08x\n", slot_ctx[0], slot_ctx[1], slot_ctx[2], slot_ctx[3]); - cpu_physical_memory_write(octx, (uint8_t *) slot_ctx, sizeof(slot_ctx)); + pci_dma_write(&xhci->pci_dev, octx, slot_ctx, sizeof(slot_ctx)); return CC_SUCCESS; } - cpu_physical_memory_read(ictx, (uint8_t *) ictl_ctx, sizeof(ictl_ctx)); + pci_dma_read(&xhci->pci_dev, ictx, ictl_ctx, sizeof(ictl_ctx)); if ((ictl_ctx[0] & 0x3) != 0x0 || (ictl_ctx[1] & 0x3) != 0x1) { fprintf(stderr, "xhci: invalid input context control %08x %08x\n", @@ -1933,8 +1931,8 @@ static TRBCCode xhci_configure_slot(XHCIState *xhci, unsigned int slotid, return CC_TRB_ERROR; } - cpu_physical_memory_read(ictx+32, (uint8_t *) islot_ctx, sizeof(islot_ctx)); - cpu_physical_memory_read(octx, (uint8_t *) slot_ctx, sizeof(slot_ctx)); + pci_dma_read(&xhci->pci_dev, ictx+32, islot_ctx, sizeof(islot_ctx)); + pci_dma_read(&xhci->pci_dev, octx, slot_ctx, sizeof(slot_ctx)); if (SLOT_STATE(slot_ctx[3]) < SLOT_ADDRESSED) { fprintf(stderr, "xhci: invalid slot state %08x\n", slot_ctx[3]); @@ -1946,8 +1944,8 @@ static TRBCCode xhci_configure_slot(XHCIState *xhci, unsigned int slotid, xhci_disable_ep(xhci, slotid, i); } if (ictl_ctx[1] & (1<<i)) { - cpu_physical_memory_read(ictx+32+(32*i), - (uint8_t *) ep_ctx, sizeof(ep_ctx)); + pci_dma_read(&xhci->pci_dev, ictx+32+(32*i), ep_ctx, + sizeof(ep_ctx)); DPRINTF("xhci: input ep%d.%d context: %08x %08x %08x %08x %08x\n", i/2, i%2, ep_ctx[0], ep_ctx[1], ep_ctx[2], ep_ctx[3], ep_ctx[4]); @@ -1959,8 +1957,7 @@ static TRBCCode xhci_configure_slot(XHCIState *xhci, unsigned int slotid, DPRINTF("xhci: output ep%d.%d context: %08x %08x %08x %08x %08x\n", i/2, i%2, ep_ctx[0], ep_ctx[1], ep_ctx[2], ep_ctx[3], ep_ctx[4]); - 
cpu_physical_memory_write(octx+(32*i), - (uint8_t *) ep_ctx, sizeof(ep_ctx)); + pci_dma_write(&xhci->pci_dev, octx+(32*i), ep_ctx, sizeof(ep_ctx)); } } @@ -1972,7 +1969,7 @@ static TRBCCode xhci_configure_slot(XHCIState *xhci, unsigned int slotid, DPRINTF("xhci: output slot context: %08x %08x %08x %08x\n", slot_ctx[0], slot_ctx[1], slot_ctx[2], slot_ctx[3]); - cpu_physical_memory_write(octx, (uint8_t *) slot_ctx, sizeof(slot_ctx)); + pci_dma_write(&xhci->pci_dev, octx, slot_ctx, sizeof(slot_ctx)); return CC_SUCCESS; } @@ -1981,7 +1978,7 @@ static TRBCCode xhci_configure_slot(XHCIState *xhci, unsigned int slotid, static TRBCCode xhci_evaluate_slot(XHCIState *xhci, unsigned int slotid, uint64_t pictx) { - target_phys_addr_t ictx, octx; + dma_addr_t ictx, octx; uint32_t ictl_ctx[2]; uint32_t iep0_ctx[5]; uint32_t ep0_ctx[5]; @@ -1994,10 +1991,10 @@ static TRBCCode xhci_evaluate_slot(XHCIState *xhci, unsigned int slotid, ictx = xhci_mask64(pictx); octx = xhci->slots[slotid-1].ctx; - DPRINTF("xhci: input context at "TARGET_FMT_plx"\n", ictx); - DPRINTF("xhci: output context at "TARGET_FMT_plx"\n", octx); + DPRINTF("xhci: input context at "DMA_ADDR_FMT"\n", ictx); + DPRINTF("xhci: output context at "DMA_ADDR_FMT"\n", octx); - cpu_physical_memory_read(ictx, (uint8_t *) ictl_ctx, sizeof(ictl_ctx)); + pci_dma_read(&xhci->pci_dev, ictx, ictl_ctx, sizeof(ictl_ctx)); if (ictl_ctx[0] != 0x0 || ictl_ctx[1] & ~0x3) { fprintf(stderr, "xhci: invalid input context control %08x %08x\n", @@ -2006,13 +2003,12 @@ static TRBCCode xhci_evaluate_slot(XHCIState *xhci, unsigned int slotid, } if (ictl_ctx[1] & 0x1) { - cpu_physical_memory_read(ictx+32, - (uint8_t *) islot_ctx, sizeof(islot_ctx)); + pci_dma_read(&xhci->pci_dev, ictx+32, islot_ctx, sizeof(islot_ctx)); DPRINTF("xhci: input slot context: %08x %08x %08x %08x\n", islot_ctx[0], islot_ctx[1], islot_ctx[2], islot_ctx[3]); - cpu_physical_memory_read(octx, (uint8_t *) slot_ctx, sizeof(slot_ctx)); + pci_dma_read(&xhci->pci_dev, octx, slot_ctx, sizeof(slot_ctx)); slot_ctx[1] &= ~0xFFFF; /* max exit latency */ slot_ctx[1] |= islot_ctx[1] & 0xFFFF; @@ -2022,18 +2018,17 @@ static TRBCCode xhci_evaluate_slot(XHCIState *xhci, unsigned int slotid, DPRINTF("xhci: output slot context: %08x %08x %08x %08x\n", slot_ctx[0], slot_ctx[1], slot_ctx[2], slot_ctx[3]); - cpu_physical_memory_write(octx, (uint8_t *) slot_ctx, sizeof(slot_ctx)); + pci_dma_write(&xhci->pci_dev, octx, slot_ctx, sizeof(slot_ctx)); } if (ictl_ctx[1] & 0x2) { - cpu_physical_memory_read(ictx+64, - (uint8_t *) iep0_ctx, sizeof(iep0_ctx)); + pci_dma_read(&xhci->pci_dev, ictx+64, iep0_ctx, sizeof(iep0_ctx)); DPRINTF("xhci: input ep0 context: %08x %08x %08x %08x %08x\n", iep0_ctx[0], iep0_ctx[1], iep0_ctx[2], iep0_ctx[3], iep0_ctx[4]); - cpu_physical_memory_read(octx+32, (uint8_t *) ep0_ctx, sizeof(ep0_ctx)); + pci_dma_read(&xhci->pci_dev, octx+32, ep0_ctx, sizeof(ep0_ctx)); ep0_ctx[1] &= ~0xFFFF0000; /* max packet size*/ ep0_ctx[1] |= iep0_ctx[1] & 0xFFFF0000; @@ -2041,8 +2036,7 @@ static TRBCCode xhci_evaluate_slot(XHCIState *xhci, unsigned int slotid, DPRINTF("xhci: output ep0 context: %08x %08x %08x %08x %08x\n", ep0_ctx[0], ep0_ctx[1], ep0_ctx[2], ep0_ctx[3], ep0_ctx[4]); - cpu_physical_memory_write(octx+32, - (uint8_t *) ep0_ctx, sizeof(ep0_ctx)); + pci_dma_write(&xhci->pci_dev, octx+32, ep0_ctx, sizeof(ep0_ctx)); } return CC_SUCCESS; @@ -2051,7 +2045,7 @@ static TRBCCode xhci_evaluate_slot(XHCIState *xhci, unsigned int slotid, static TRBCCode xhci_reset_slot(XHCIState *xhci, unsigned int slotid) { 
uint32_t slot_ctx[4]; - target_phys_addr_t octx; + dma_addr_t octx; int i; assert(slotid >= 1 && slotid <= MAXSLOTS); @@ -2059,7 +2053,7 @@ static TRBCCode xhci_reset_slot(XHCIState *xhci, unsigned int slotid) octx = xhci->slots[slotid-1].ctx; - DPRINTF("xhci: output context at "TARGET_FMT_plx"\n", octx); + DPRINTF("xhci: output context at "DMA_ADDR_FMT"\n", octx); for (i = 2; i <= 31; i++) { if (xhci->slots[slotid-1].eps[i-1]) { @@ -2067,12 +2061,12 @@ static TRBCCode xhci_reset_slot(XHCIState *xhci, unsigned int slotid) } } - cpu_physical_memory_read(octx, (uint8_t *) slot_ctx, sizeof(slot_ctx)); + pci_dma_read(&xhci->pci_dev, octx, slot_ctx, sizeof(slot_ctx)); slot_ctx[3] &= ~(SLOT_STATE_MASK << SLOT_STATE_SHIFT); slot_ctx[3] |= SLOT_DEFAULT << SLOT_STATE_SHIFT; DPRINTF("xhci: output slot context: %08x %08x %08x %08x\n", slot_ctx[0], slot_ctx[1], slot_ctx[2], slot_ctx[3]); - cpu_physical_memory_write(octx, (uint8_t *) slot_ctx, sizeof(slot_ctx)); + pci_dma_write(&xhci->pci_dev, octx, slot_ctx, sizeof(slot_ctx)); return CC_SUCCESS; } @@ -2095,19 +2089,19 @@ static unsigned int xhci_get_slot(XHCIState *xhci, XHCIEvent *event, XHCITRB *tr static TRBCCode xhci_get_port_bandwidth(XHCIState *xhci, uint64_t pctx) { - target_phys_addr_t ctx; + dma_addr_t ctx; uint8_t bw_ctx[MAXPORTS+1]; DPRINTF("xhci_get_port_bandwidth()\n"); ctx = xhci_mask64(pctx); - DPRINTF("xhci: bandwidth context at "TARGET_FMT_plx"\n", ctx); + DPRINTF("xhci: bandwidth context at "DMA_ADDR_FMT"\n", ctx); /* TODO: actually implement real values here */ bw_ctx[0] = 0; memset(&bw_ctx[1], 80, MAXPORTS); /* 80% */ - cpu_physical_memory_write(ctx, bw_ctx, sizeof(bw_ctx)); + pci_dma_write(&xhci->pci_dev, ctx, bw_ctx, sizeof(bw_ctx)); return CC_SUCCESS; } @@ -2128,13 +2122,13 @@ static uint32_t xhci_nec_challenge(uint32_t hi, uint32_t lo) return ~val; } -static void xhci_via_challenge(uint64_t addr) +static void xhci_via_challenge(XHCIState *xhci, uint64_t addr) { uint32_t buf[8]; uint32_t obuf[8]; - target_phys_addr_t paddr = xhci_mask64(addr); + dma_addr_t paddr = xhci_mask64(addr); - cpu_physical_memory_read(paddr, (uint8_t *) &buf, 32); + pci_dma_read(&xhci->pci_dev, paddr, &buf, 32); memcpy(obuf, buf, sizeof(obuf)); @@ -2150,7 +2144,7 @@ static void xhci_via_challenge(uint64_t addr) obuf[7] = obuf[2] ^ obuf[3] ^ 0x65866593; } - cpu_physical_memory_write(paddr, (uint8_t *) &obuf, 32); + pci_dma_write(&xhci->pci_dev, paddr, &obuf, 32); } static void xhci_process_commands(XHCIState *xhci) @@ -2158,7 +2152,7 @@ static void xhci_process_commands(XHCIState *xhci) XHCITRB trb; TRBType type; XHCIEvent event = {ER_COMMAND_COMPLETE, CC_SUCCESS}; - target_phys_addr_t addr; + dma_addr_t addr; unsigned int i, slotid = 0; DPRINTF("xhci_process_commands()\n"); @@ -2247,7 +2241,7 @@ static void xhci_process_commands(XHCIState *xhci) event.ccode = xhci_get_port_bandwidth(xhci, trb.parameter); break; case CR_VENDOR_VIA_CHALLENGE_RESPONSE: - xhci_via_challenge(trb.parameter); + xhci_via_challenge(xhci, trb.parameter); break; case CR_VENDOR_NEC_FIRMWARE_REVISION: event.type = 48; /* NEC reply */ @@ -2537,7 +2531,7 @@ static void xhci_oper_write(XHCIState *xhci, uint32_t reg, uint32_t val) xhci_event(xhci, &event); DPRINTF("xhci: command ring stopped (CRCR=%08x)\n", xhci->crcr_low); } else { - target_phys_addr_t base = xhci_addr64(xhci->crcr_low & ~0x3f, val); + dma_addr_t base = xhci_addr64(xhci->crcr_low & ~0x3f, val); xhci_ring_init(xhci, &xhci->cmd_ring, base); } xhci->crcr_low &= ~(CRCR_CA | CRCR_CS); |