-rw-r--r--   sys/arm/ti/cpsw/if_cpsw.c    | 2324
-rw-r--r--   sys/arm/ti/cpsw/if_cpswreg.h |   24
-rw-r--r--   sys/arm/ti/cpsw/if_cpswvar.h |  139
3 files changed, 1586 insertions, 901 deletions
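The bulk of this change reworks how the driver manages the CPDMA descriptor rings: TX and RX now draw from a shared pool of slots, and both cpsw_rx_enqueue() and cpsw_tx_enqueue() append newly built descriptor chains to the tail of the active hardware queue. The helper below is a minimal sketch, not part of the commit (the function name and parameter names are invented for illustration); it shows the append pattern the diff uses in both places, including the EOQ check that restarts DMA when the hardware drained the old chain before the new one was linked in.

static void
cpsw_append_chain_sketch(struct cpsw_softc *sc, struct cpsw_queue *queue,
    struct cpsw_slot *last_old_slot, struct cpsw_slot *first_new_slot)
{
	if (last_old_slot == NULL) {
		/* Queue was idle: point the head descriptor pointer
		 * (HDP) at the first new descriptor to start DMA. */
		cpsw_write_hdp_slot(sc, queue, first_new_slot);
		return;
	}
	/* Link the old tail descriptor to the new chain. */
	cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
	/* If the hardware already finished the old chain, it set EOQ in
	 * the old tail and will never follow the link just written, so
	 * the HDP must be rewritten to restart DMA on the new chain. */
	if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ)
		cpsw_write_hdp_slot(sc, queue, first_new_slot);
}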
diff --git a/sys/arm/ti/cpsw/if_cpsw.c b/sys/arm/ti/cpsw/if_cpsw.c
index a47c131..93702df 100644
--- a/sys/arm/ti/cpsw/if_cpsw.c
+++ b/sys/arm/ti/cpsw/if_cpsw.c
@@ -25,8 +25,22 @@
  */
 
 /*
- * TI 3 Port Switch Ethernet (CPSW) Driver
- * Found in TI8148, AM335x SoCs
+ * TI Common Platform Ethernet Switch (CPSW) Driver
+ * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
+ *
+ * This controller is documented in the AM335x Technical Reference
+ * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
+ * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
+ *
+ * It is basically a single Ethernet port (port 0) wired internally to
+ * a 3-port store-and-forward switch connected to two independent
+ * "sliver" controllers (port 1 and port 2). You can operate the
+ * controller in a variety of different ways by suitably configuring
+ * the slivers and the Address Lookup Engine (ALE) that routes packets
+ * between the ports.
+ *
+ * This code was developed and tested on a BeagleBone with
+ * an AM335x SoC.
  */
 
 #include <sys/cdefs.h>
@@ -76,44 +90,82 @@ __FBSDID("$FreeBSD$");
 
 #include "miibus_if.h"
 
-static int cpsw_probe(device_t dev);
-static int cpsw_attach(device_t dev);
-static int cpsw_detach(device_t dev);
-static int cpsw_shutdown(device_t dev);
-static int cpsw_suspend(device_t dev);
-static int cpsw_resume(device_t dev);
-
-static int cpsw_miibus_readreg(device_t dev, int phy, int reg);
-static int cpsw_miibus_writereg(device_t dev, int phy, int reg, int value);
-
-static int cpsw_ifmedia_upd(struct ifnet *ifp);
-static void cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
-
-static void cpsw_init(void *arg);
-static void cpsw_init_locked(void *arg);
-static void cpsw_start(struct ifnet *ifp);
-static void cpsw_start_locked(struct ifnet *ifp);
-static void cpsw_stop_locked(struct cpsw_softc *sc);
-static int cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
-static int cpsw_init_slot_lists(struct cpsw_softc *sc);
-static void cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot);
-static void cpsw_fill_rx_queue_locked(struct cpsw_softc *sc);
-static void cpsw_tx_watchdog(struct cpsw_softc *sc);
-
-static void cpsw_intr_rx_thresh(void *arg);
+/* Device probe/attach/detach. */
+static int cpsw_probe(device_t);
+static void cpsw_init_slots(struct cpsw_softc *);
+static int cpsw_attach(device_t);
+static void cpsw_free_slot(struct cpsw_softc *, struct cpsw_slot *);
+static int cpsw_detach(device_t);
+
+/* Device Init/shutdown. */
+static void cpsw_init(void *);
+static void cpsw_init_locked(void *);
+static int cpsw_shutdown(device_t);
+static void cpsw_shutdown_locked(struct cpsw_softc *);
+
+/* Device Suspend/Resume. */
+static int cpsw_suspend(device_t);
+static int cpsw_resume(device_t);
+
+/* Ioctl. */
+static int cpsw_ioctl(struct ifnet *, u_long command, caddr_t data);
+
+static int cpsw_miibus_readreg(device_t, int phy, int reg);
+static int cpsw_miibus_writereg(device_t, int phy, int reg, int value);
+
+/* Send/Receive packets. */
 static void cpsw_intr_rx(void *arg);
-static void cpsw_intr_rx_locked(void *arg);
-static void cpsw_intr_tx(void *arg);
-static void cpsw_intr_tx_locked(void *arg);
-static void cpsw_intr_misc(void *arg);
-
-static void cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry);
-static void cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry);
-static int cpsw_ale_uc_entry_set(struct cpsw_softc *sc, uint8_t port, uint8_t *mac);
-static int cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac);
-#ifdef CPSW_DEBUG
-static void cpsw_ale_dump_table(struct cpsw_softc *sc);
-#endif
+static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
+static void cpsw_rx_enqueue(struct cpsw_softc *);
+static void cpsw_start(struct ifnet *);
+static void cpsw_tx_enqueue(struct cpsw_softc *);
+static int cpsw_tx_dequeue(struct cpsw_softc *);
+
+/* Misc interrupts and watchdog. */
+static void cpsw_intr_rx_thresh(void *);
+static void cpsw_intr_misc(void *);
+static void cpsw_tick(void *);
+static void cpsw_ifmedia_sts(struct ifnet *, struct ifmediareq *);
+static int cpsw_ifmedia_upd(struct ifnet *);
+static void cpsw_tx_watchdog(struct cpsw_softc *);
+
+/* ALE support */
+static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
+static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
+static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t portmap, uint8_t *mac);
+static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
+static void cpsw_ale_dump_table(struct cpsw_softc *);
+
+/* Statistics and sysctls. */
+static void cpsw_add_sysctls(struct cpsw_softc *);
+static void cpsw_stats_collect(struct cpsw_softc *);
+static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);
+
+/*
+ * Arbitrary limit on number of segments in an mbuf to be transmitted.
+ * Packets with more segments than this will be defragmented before
+ * they are queued.
+ */
+#define CPSW_TXFRAGS 8
+
+
+/*
+ * TODO: The CPSW subsystem (CPSW_SS) can drive two independent PHYs
+ * as separate Ethernet ports. To properly support this, we should
+ * break this into two separate devices: a CPSW_SS device that owns
+ * the interrupts and actually talks to the CPSW hardware, and a
+ * separate CPSW Ethernet child device for each Ethernet port. The RX
+ * interrupt, for example, would be part of CPSW_SS; it would receive
+ * a packet, note the input port, and then dispatch it to the child
+ * device's interface queue. Similarly for transmit.
+ *
+ * It's not clear to me whether the device tree should be restructured
+ * with a cpsw_ss node and two child nodes. That would allow specifying
+ * MAC addresses for each port, for example, but might be overkill.
+ *
+ * Unfortunately, I don't have hardware right now that supports two
+ * Ethernet ports via CPSW.
+ */ static device_method_t cpsw_methods[] = { /* Device interface */ @@ -137,7 +189,6 @@ static driver_t cpsw_driver = { static devclass_t cpsw_devclass; - DRIVER_MODULE(cpsw, simplebus, cpsw_driver, cpsw_devclass, 0, 0); DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(cpsw, ether, 1, 1, 1); @@ -152,40 +203,109 @@ static struct resource_spec res_spec[] = { { -1, 0 } }; -static struct { - driver_intr_t *handler; - char * description; -} cpsw_intrs[CPSW_INTR_COUNT + 1] = { - { cpsw_intr_rx_thresh, "CPSW RX threshold interrupt" }, - { cpsw_intr_rx, "CPSW RX interrupt" }, - { cpsw_intr_tx, "CPSW TX interrupt" }, - { cpsw_intr_misc, "CPSW misc interrupt" }, +/* Number of entries here must match size of stats + * array in struct cpsw_softc. */ +static struct cpsw_stat { + int reg; + char *oid; +} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { + {0x00, "GoodRxFrames"}, + {0x04, "BroadcastRxFrames"}, + {0x08, "MulticastRxFrames"}, + {0x0C, "PauseRxFrames"}, + {0x10, "RxCrcErrors"}, + {0x14, "RxAlignErrors"}, + {0x18, "OversizeRxFrames"}, + {0x1c, "RxJabbers"}, + {0x20, "ShortRxFrames"}, + {0x24, "RxFragments"}, + {0x30, "RxOctets"}, + {0x34, "GoodTxFrames"}, + {0x38, "BroadcastTxFrames"}, + {0x3c, "MulticastTxFrames"}, + {0x40, "PauseTxFrames"}, + {0x44, "DeferredTxFrames"}, + {0x48, "CollisionsTxFrames"}, + {0x4c, "SingleCollisionTxFrames"}, + {0x50, "MultipleCollisionTxFrames"}, + {0x54, "ExcessiveCollisions"}, + {0x58, "LateCollisions"}, + {0x5c, "TxUnderrun"}, + {0x60, "CarrierSenseErrors"}, + {0x64, "TxOctets"}, + {0x68, "RxTx64OctetFrames"}, + {0x6c, "RxTx65to127OctetFrames"}, + {0x70, "RxTx128to255OctetFrames"}, + {0x74, "RxTx256to511OctetFrames"}, + {0x78, "RxTx512to1024OctetFrames"}, + {0x7c, "RxTx1024upOctetFrames"}, + {0x80, "NetOctets"}, + {0x84, "RxStartOfFrameOverruns"}, + {0x88, "RxMiddleOfFrameOverruns"}, + {0x8c, "RxDmaOverruns"} }; -/* Locking macros */ +/* + * Basic debug support. + */ + +#define IF_DEBUG(sc) if (sc->cpsw_if_flags & IFF_DEBUG) + +static void +cpsw_debugf_head(const char *funcname) +{ + int t = (int)(time_second % (24 * 60 * 60)); + + printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); +} + +#include <machine/stdarg.h> +static void +cpsw_debugf(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + vprintf(fmt, ap); + va_end(ap); + printf("\n"); + +} + +#define CPSW_DEBUGF(a) do { \ + IF_DEBUG(sc) { \ + cpsw_debugf_head(__func__); \ + cpsw_debugf a; \ + } \ +} while (0) + + +/* + * Locking macros + */ #define CPSW_TX_LOCK(sc) do { \ - mtx_assert(&(sc)->rx_lock, MA_NOTOWNED); \ - mtx_lock(&(sc)->tx_lock); \ + mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ + mtx_lock(&(sc)->tx.lock); \ } while (0) -#define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx_lock) -#define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx_lock, MA_OWNED) +#define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) +#define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) #define CPSW_RX_LOCK(sc) do { \ - mtx_assert(&(sc)->tx_lock, MA_NOTOWNED); \ - mtx_lock(&(sc)->rx_lock); \ + mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ + mtx_lock(&(sc)->rx.lock); \ } while (0) -#define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx_lock) -#define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx_lock, MA_OWNED) +#define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) +#define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) #define CPSW_GLOBAL_LOCK(sc) do { \ - if ((mtx_owned(&(sc)->tx_lock) ? 1 : 0) != \ - (mtx_owned(&(sc)->rx_lock) ? 
1 : 0)) { \ + if ((mtx_owned(&(sc)->tx.lock) ? 1 : 0) != \ + (mtx_owned(&(sc)->rx.lock) ? 1 : 0)) { \ panic("cpsw deadlock possibility detection!"); \ } \ - mtx_lock(&(sc)->tx_lock); \ - mtx_lock(&(sc)->rx_lock); \ + mtx_lock(&(sc)->tx.lock); \ + mtx_lock(&(sc)->rx.lock); \ } while (0) #define CPSW_GLOBAL_UNLOCK(sc) do { \ @@ -198,35 +318,128 @@ static struct { CPSW_RX_LOCK_ASSERT(sc); \ } while (0) - -#include <machine/stdarg.h> +/* + * Read/Write macros + */ +#define cpsw_read_4(sc, reg) bus_read_4(sc->res[0], reg) +#define cpsw_write_4(sc, reg, val) bus_write_4(sc->res[0], reg, val) + +#define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) + +#define cpsw_cpdma_bd_paddr(sc, slot) \ + (slot->bd_offset + vtophys(rman_get_start(sc->res[0]))) +#define cpsw_cpdma_read_bd(sc, slot, val) \ + bus_read_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4) +#define cpsw_cpdma_write_bd(sc, slot, val) \ + bus_write_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4) +#define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ + cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) +#define cpsw_cpdma_read_bd_flags(sc, slot) \ + bus_read_2(sc->res[0], slot->bd_offset + 14) +#define cpsw_write_hdp_slot(sc, queue, slot) \ + cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) +#define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) +#define cpsw_read_cp(sc, queue) \ + cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) +#define cpsw_write_cp(sc, queue, val) \ + cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) +#define cpsw_write_cp_slot(sc, queue, slot) \ + cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot)) + +#if 0 +/* XXX temporary function versions for debugging. */ static void -cpsw_debugf_head(const char *funcname) +cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) { - int t = (int)(time_second % (24 * 60 * 60)); + uint32_t reg = queue->hdp_offset; + uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); + CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); + cpsw_write_4(sc, reg, v); +} - printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); +static void +cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) +{ + uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); + CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); + cpsw_write_cp(sc, queue, v); } +#endif +/* + * Expanded dump routines for verbose debugging. + */ static void -cpsw_debugf(const char *fmt, ...) 
+cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) { - va_list ap; + static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", + "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", + "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", + "Port0"}; + struct cpsw_cpdma_bd bd; + const char *sep; + int i; - va_start(ap, fmt); - vprintf(fmt, ap); - va_end(ap); + cpsw_cpdma_read_bd(sc, slot, &bd); + printf("BD Addr: 0x%08x Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next); + printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); + printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); + printf(" Flags: "); + sep = ""; + for (i = 0; i < 16; ++i) { + if (bd.flags & (1 << (15 - i))) { + printf("%s%s", sep, flags[i]); + sep = ","; + } + } printf("\n"); + if (slot->mbuf) { + printf(" Ether: %14D\n", + (char *)(slot->mbuf->m_hdr.mh_data), " "); + printf(" Packet: %16D\n", + (char *)(slot->mbuf->m_hdr.mh_data) + 14, " "); + } +} +#define CPSW_DUMP_SLOT(cs, slot) do { \ + IF_DEBUG(sc) { \ + cpsw_dump_slot(sc, slot); \ + } \ +} while (0) + + +static void +cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) +{ + struct cpsw_slot *slot; + int i = 0; + int others = 0; + + STAILQ_FOREACH(slot, q, next) { + if (i > 4) + ++others; + else + cpsw_dump_slot(sc, slot); + ++i; + } + if (others) + printf(" ... and %d more.\n", others); + printf("\n"); } -#define CPSW_DEBUGF(a) do { \ - if (sc->cpsw_if_flags & IFF_DEBUG) { \ - cpsw_debugf_head(__func__); \ - cpsw_debugf a; \ - } \ +#define CPSW_DUMP_QUEUE(sc, q) do { \ + IF_DEBUG(sc) { \ + cpsw_dump_queue(sc, q); \ + } \ } while (0) + +/* + * + * Device Probe, Attach, Detach. + * + */ + static int cpsw_probe(device_t dev) { @@ -238,18 +451,107 @@ cpsw_probe(device_t dev) return (BUS_PROBE_DEFAULT); } + +static void +cpsw_init_slots(struct cpsw_softc *sc) +{ + struct cpsw_slot *slot; + int i; + + STAILQ_INIT(&sc->avail); + + /* Put the slot descriptors onto the global avail list. */ + for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); i++) { + slot = &sc->_slots[i]; + slot->bd_offset = cpsw_cpdma_bd_offset(i); + STAILQ_INSERT_TAIL(&sc->avail, slot, next); + } +} + +/* + * bind an interrupt, add the relevant info to sc->interrupts + */ +static int +cpsw_attach_interrupt(struct cpsw_softc *sc, struct resource *res, driver_intr_t *handler, const char *description) +{ + void **pcookie; + int error; + + sc->interrupts[sc->interrupt_count].res = res; + sc->interrupts[sc->interrupt_count].description = description; + pcookie = &sc->interrupts[sc->interrupt_count].ih_cookie; + + error = bus_setup_intr(sc->dev, res, INTR_TYPE_NET | INTR_MPSAFE, + NULL, *handler, sc, pcookie); + if (error) + device_printf(sc->dev, + "could not setup %s\n", description); + else + ++sc->interrupt_count; + return (error); +} + +/* + * teardown everything in sc->interrupts. 
+ */ +static void +cpsw_detach_interrupts(struct cpsw_softc *sc) +{ + int error; + int i; + + for (i = 0; i < sizeof(sc->interrupts) / sizeof(sc->interrupts[0]); ++i) { + if (!sc->interrupts[i].ih_cookie) + continue; + error = bus_teardown_intr(sc->dev, + sc->interrupts[i].res, sc->interrupts[i].ih_cookie); + if (error) + device_printf(sc->dev, "could not release %s\n", + sc->interrupts[i].description); + sc->interrupts[i].ih_cookie = NULL; + } +} + +static int +cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) +{ + const int max_slots = sizeof(sc->_slots) / sizeof(sc->_slots[0]); + struct cpsw_slot *slot; + int i; + + if (requested < 0) + requested = max_slots; + + for (i = 0; i < requested; ++i) { + slot = STAILQ_FIRST(&sc->avail); + if (slot == NULL) + return (0); + if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { + if_printf(sc->ifp, "failed to create dmamap\n"); + return (ENOMEM); + } + STAILQ_REMOVE_HEAD(&sc->avail, next); + STAILQ_INSERT_TAIL(&queue->avail, slot, next); + ++queue->avail_queue_len; + ++queue->queue_slots; + } + return (0); +} + static int cpsw_attach(device_t dev) { + bus_dma_segment_t segs[1]; struct cpsw_softc *sc = device_get_softc(dev); struct mii_softc *miisc; struct ifnet *ifp; void *phy_sc; - int i, error, phy; + int error, phy, nsegs; uint32_t reg; CPSW_DEBUGF(("")); + getbinuptime(&sc->attach_uptime); sc->dev = dev; sc->node = ofw_bus_get_node(dev); @@ -259,10 +561,10 @@ cpsw_attach(device_t dev) return (ENXIO); } /* Initialize mutexes */ - mtx_init(&sc->tx_lock, device_get_nameunit(dev), - "cpsw TX lock", MTX_DEF); - mtx_init(&sc->rx_lock, device_get_nameunit(dev), - "cpsw RX lock", MTX_DEF); + mtx_init(&sc->tx.lock, device_get_nameunit(dev), + "cpsw TX lock", MTX_DEF); + mtx_init(&sc->rx.lock, device_get_nameunit(dev), + "cpsw RX lock", MTX_DEF); /* Allocate IO and IRQ resources */ error = bus_alloc_resources(dev, res_spec, sc->res); @@ -272,11 +574,11 @@ cpsw_attach(device_t dev) return (ENXIO); } - reg = cpsw_read_4(CPSW_SS_IDVER); - device_printf(dev, "Version %d.%d (%d)\n", (reg >> 8 & 0x7), + reg = cpsw_read_4(sc, CPSW_SS_IDVER); + device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7), reg & 0xFF, (reg >> 11) & 0x1F); - //cpsw_add_sysctls(sc); TODO + cpsw_add_sysctls(sc); /* Allocate a busdma tag and DMA safe memory for mbufs. */ error = bus_dma_tag_create( @@ -285,22 +587,14 @@ cpsw_attach(device_t dev) BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ - MCLBYTES, 1, /* maxsize, nsegments */ + MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->mbuf_dtag); /* dmatag */ if (error) { device_printf(dev, "bus_dma_tag_create failed\n"); cpsw_detach(dev); - return (ENOMEM); - } - - /* Initialize the tx_avail and rx_avail lists. */ - error = cpsw_init_slot_lists(sc); - if (error) { - device_printf(dev, "failed to allocate dmamaps\n"); - cpsw_detach(dev); - return (ENOMEM); + return (error); } /* Allocate network interface */ @@ -311,6 +605,16 @@ cpsw_attach(device_t dev) return (ENOMEM); } + /* Allocate the null mbuf and pre-sync it. 
*/ + sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); + memset(sc->null_mbuf->m_hdr.mh_data, 0, sc->null_mbuf->m_ext.ext_size); + bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap); + bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap, + sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT); + bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap, + BUS_DMASYNC_PREWRITE); + sc->null_mbuf_paddr = segs[0].ds_addr; + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST; @@ -321,11 +625,32 @@ cpsw_attach(device_t dev) ifp->if_start = cpsw_start; ifp->if_ioctl = cpsw_ioctl; - ifp->if_snd.ifq_drv_maxlen = CPSW_MAX_TX_BUFFERS - 1; + cpsw_init_slots(sc); + + /* Allocate slots to TX and RX queues. */ + STAILQ_INIT(&sc->rx.avail); + STAILQ_INIT(&sc->rx.active); + STAILQ_INIT(&sc->tx.avail); + STAILQ_INIT(&sc->tx.active); + // For now: 128 slots to TX, rest to RX. + // XXX TODO: start with 32/64 and grow dynamically based on demand. + if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) { + device_printf(dev, "failed to allocate dmamaps\n"); + cpsw_detach(dev); + return (ENOMEM); + } + device_printf(dev, "Initial queue size TX=%d RX=%d\n", + sc->tx.queue_slots, sc->rx.queue_slots); + + ifp->if_snd.ifq_drv_maxlen = sc->tx.queue_slots; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); + sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); + sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); + /* Get high part of MAC address from control module (mac_id0_hi) */ + /* TODO: Get MAC ID1 as well as MAC ID0. */ ti_scm_reg_read_4(0x634, ®); sc->mac_addr[0] = reg & 0xFF; sc->mac_addr[1] = (reg >> 8) & 0xFF; @@ -338,11 +663,14 @@ cpsw_attach(device_t dev) sc->mac_addr[5] = (reg >> 8) & 0xFF; ether_ifattach(ifp, sc->mac_addr); - callout_init(&sc->wd_callout, 0); + callout_init(&sc->watchdog.callout, 0); /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ - cpsw_write_4(MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF); + cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF); + + /* Clear ALE */ + cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 30); /* Attach PHY(s) */ error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd, @@ -358,25 +686,38 @@ cpsw_attach(device_t dev) miisc = LIST_FIRST(&sc->mii->mii_phys); /* Select PHY and enable interrupts */ - cpsw_write_4(MDIOUSERPHYSEL0, 1 << 6 | (miisc->mii_phy & 0x1F)); - - /* Attach interrupt handlers */ - for (i = 1; i <= CPSW_INTR_COUNT; ++i) { - error = bus_setup_intr(dev, sc->res[i], - INTR_TYPE_NET | INTR_MPSAFE, - NULL, *cpsw_intrs[i - 1].handler, - sc, &sc->ih_cookie[i - 1]); - if (error) { - device_printf(dev, "could not setup %s\n", - cpsw_intrs[i].description); - cpsw_detach(dev); - return (error); - } + cpsw_write_4(sc, MDIOUSERPHYSEL0, 1 << 6 | (miisc->mii_phy & 0x1F)); + + /* Note: We don't use sc->res[3] (TX interrupt) */ + if (cpsw_attach_interrupt(sc, sc->res[1], + cpsw_intr_rx_thresh, "CPSW RX threshold interrupt") || + cpsw_attach_interrupt(sc, sc->res[2], + cpsw_intr_rx, "CPSW RX interrupt") || + cpsw_attach_interrupt(sc, sc->res[4], + cpsw_intr_misc, "CPSW misc interrupt")) { + cpsw_detach(dev); + return (ENXIO); } return (0); } +static void +cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) +{ + int error; + + if (slot->dmamap) { + error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); + KASSERT(error == 0, ("Mapping still active")); + slot->dmamap = NULL; + 
} + if (slot->mbuf) { + m_freem(slot->mbuf); + slot->mbuf = NULL; + } +} + static int cpsw_detach(device_t dev) { @@ -389,31 +730,20 @@ cpsw_detach(device_t dev) if (device_is_attached(dev)) { ether_ifdetach(sc->ifp); CPSW_GLOBAL_LOCK(sc); - cpsw_stop_locked(sc); + cpsw_shutdown_locked(sc); CPSW_GLOBAL_UNLOCK(sc); - callout_drain(&sc->wd_callout); + callout_drain(&sc->watchdog.callout); } bus_generic_detach(dev); device_delete_child(dev, sc->miibus); /* Stop and release all interrupts */ - for (i = 0; i < CPSW_INTR_COUNT; ++i) { - if (!sc->ih_cookie[i]) - continue; - - error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]); - if (error) - device_printf(dev, "could not release %s\n", - cpsw_intrs[i + 1].description); - } + cpsw_detach_interrupts(sc); /* Free dmamaps and mbufs */ - for (i = 0; i < CPSW_MAX_TX_BUFFERS; i++) { - cpsw_free_slot(sc, &sc->_tx_slots[i]); - } - for (i = 0; i < CPSW_MAX_RX_BUFFERS; i++) { - cpsw_free_slot(sc, &sc->_rx_slots[i]); + for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); ++i) { + cpsw_free_slot(sc, &sc->_slots[i]); } /* Free DMA tag */ @@ -424,409 +754,323 @@ cpsw_detach(device_t dev) bus_release_resources(dev, res_spec, sc->res); /* Destroy mutexes */ - mtx_destroy(&sc->rx_lock); - mtx_destroy(&sc->tx_lock); + mtx_destroy(&sc->rx.lock); + mtx_destroy(&sc->tx.lock); return (0); } -static int -cpsw_suspend(device_t dev) +/* + * + * Init/Shutdown. + * + */ + +static void +cpsw_reset(struct cpsw_softc *sc) { - struct cpsw_softc *sc = device_get_softc(dev); + int i; - CPSW_DEBUGF(("")); - CPSW_GLOBAL_LOCK(sc); - cpsw_stop_locked(sc); - CPSW_GLOBAL_UNLOCK(sc); - return (0); -} + /* Reset RMII/RGMII wrapper. */ + cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); + while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) + ; -static int -cpsw_resume(device_t dev) -{ - /* XXX TODO XXX */ - device_printf(dev, "%s\n", __FUNCTION__); - return (0); + /* Disable TX and RX interrupts for all cores. */ + for (i = 0; i < 3; ++i) { + cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); + cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); + cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); + cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); + } + + /* Reset CPSW subsystem. */ + cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); + while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) + ; + + /* Reset Sliver port 1 and 2 */ + for (i = 0; i < 2; i++) { + /* Reset */ + cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); + while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) + ; + } + + /* Reset DMA controller. */ + cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1); + while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1) + ; + + /* Disable TX & RX DMA */ + cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0); + cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0); + + /* Clear all queues. 
*/ + for (i = 0; i < 8; i++) { + cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0); + cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0); + cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0); + cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0); + } + + /* Clear all interrupt Masks */ + cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); + cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); } -static int -cpsw_shutdown(device_t dev) +static void +cpsw_init(void *arg) { - struct cpsw_softc *sc = device_get_softc(dev); + struct cpsw_softc *sc = arg; CPSW_DEBUGF(("")); CPSW_GLOBAL_LOCK(sc); - cpsw_stop_locked(sc); + cpsw_init_locked(arg); CPSW_GLOBAL_UNLOCK(sc); - return (0); } -static int -cpsw_miibus_ready(struct cpsw_softc *sc) +static void +cpsw_init_locked(void *arg) { - uint32_t r, retries = CPSW_MIIBUS_RETRIES; + struct ifnet *ifp; + struct cpsw_softc *sc = arg; + struct cpsw_slot *slot; + uint32_t i; - while (--retries) { - r = cpsw_read_4(MDIOUSERACCESS0); - if ((r & 1 << 31) == 0) - return 1; - DELAY(CPSW_MIIBUS_DELAY); - } - return 0; -} + CPSW_DEBUGF(("")); + ifp = sc->ifp; + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) + return; -static int -cpsw_miibus_readreg(device_t dev, int phy, int reg) -{ - struct cpsw_softc *sc = device_get_softc(dev); - uint32_t cmd, r; + getbinuptime(&sc->init_uptime); - if (!cpsw_miibus_ready(sc)) { - device_printf(dev, "MDIO not ready to read\n"); - return 0; - } + /* Reset the controller. */ + cpsw_reset(sc); - /* Set GO, reg, phy */ - cmd = 1 << 31 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; - cpsw_write_4(MDIOUSERACCESS0, cmd); + /* Enable ALE */ + cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 31 | 1 << 4); - if (!cpsw_miibus_ready(sc)) { - device_printf(dev, "MDIO timed out during read\n"); - return 0; - } - - r = cpsw_read_4(MDIOUSERACCESS0); - if((r & 1 << 29) == 0) { - device_printf(dev, "Failed to read from PHY.\n"); - r = 0; + /* Init Sliver port 1 and 2 */ + for (i = 0; i < 2; i++) { + /* Set Slave Mapping */ + cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210); + cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i + 1), 0x33221100); + cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2); + /* Set MACCONTROL for ports 0,1: IFCTL_B(16), IFCTL_A(15), + GMII_EN(5), FULLDUPLEX(1) */ + /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ + /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. 
*/ + cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), 1 << 15 | 1 << 5 | 1); } - return (r & 0xFFFF); -} - -static int -cpsw_miibus_writereg(device_t dev, int phy, int reg, int value) -{ - struct cpsw_softc *sc = device_get_softc(dev); - uint32_t cmd; - if (!cpsw_miibus_ready(sc)) { - device_printf(dev, "MDIO not ready to write\n"); - return 0; - } + /* Set Host Port Mapping */ + cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); + cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); - /* Set GO, WRITE, reg, phy, and value */ - cmd = 3 << 30 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16 - | (value & 0xFFFF); - cpsw_write_4(MDIOUSERACCESS0, cmd); + /* Initialize ALE: all ports set to forwarding(3), initialize addrs */ + for (i = 0; i < 3; i++) + cpsw_write_4(sc, CPSW_ALE_PORTCTL(i), 3); + cpsw_ale_update_addresses(sc, 1); - if (!cpsw_miibus_ready(sc)) { - device_printf(dev, "MDIO timed out during write\n"); - return 0; - } + cpsw_write_4(sc, CPSW_SS_PTYPE, 0); - if((cpsw_read_4(MDIOUSERACCESS0) & (1 << 29)) == 0) - device_printf(dev, "Failed to write to PHY.\n"); + /* Enable statistics for ports 0, 1 and 2 */ + cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7); - return 0; -} + /* Experiment: Turn off flow control */ + /* This seems to fix the watchdog resets that have plagued + earlier versions of this driver; I'm not yet sure if there + are negative effects yet. */ + cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0); -static int -cpsw_init_slot_lists(struct cpsw_softc *sc) -{ - int i; + /* Make IP hdr aligned with 4 */ + cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2); - STAILQ_INIT(&sc->rx_active); - STAILQ_INIT(&sc->rx_avail); - STAILQ_INIT(&sc->tx_active); - STAILQ_INIT(&sc->tx_avail); - - /* Put the slot descriptors onto the avail lists. */ - for (i = 0; i < CPSW_MAX_TX_BUFFERS; i++) { - struct cpsw_slot *slot = &sc->_tx_slots[i]; - slot->index = i; - /* XXX TODO: Remove this from here; allocate dmamaps lazily - in the encap routine to reduce memory usage. */ - if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { - if_printf(sc->ifp, "failed to create dmamap for tx mbuf\n"); - return (ENOMEM); - } - STAILQ_INSERT_TAIL(&sc->tx_avail, slot, next); - } + /* Initialize RX Buffer Descriptors */ + cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0); - for (i = 0; i < CPSW_MAX_RX_BUFFERS; i++) { - struct cpsw_slot *slot = &sc->_rx_slots[i]; - slot->index = i; - if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { - if_printf(sc->ifp, "failed to create dmamap for rx mbuf\n"); - return (ENOMEM); - } - STAILQ_INSERT_TAIL(&sc->rx_avail, slot, next); - } + /* Enable TX & RX DMA */ + cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1); + cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1); - return (0); -} + /* Enable Interrupts for core 0 */ + cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF); + cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF); + cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x3F); -static void -cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) -{ - int error; + /* Enable host Error Interrupt */ + cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3); - if (slot->dmamap) { - error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); - KASSERT(error == 0, ("Mapping still active")); - slot->dmamap = NULL; - } - if (slot->mbuf) { - m_freem(slot->mbuf); - slot->mbuf = NULL; - } -} + /* Enable interrupts for RX Channel 0 */ + cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1); -/* - * Pad the packet to the minimum length for Ethernet. - * (CPSW hardware doesn't do this for us.) 
- */ -static int -cpsw_pad(struct mbuf *m) -{ - int padlen = ETHER_MIN_LEN - m->m_pkthdr.len; - struct mbuf *last, *n; + /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ + /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ + cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF); - if (padlen <= 0) - return (0); + /* Select MII in GMII_SEL, Internal Delay mode */ + //ti_scm_reg_write_4(0x650, 0); - /* If there's only the packet-header and we can pad there, use it. */ - if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && - M_TRAILINGSPACE(m) >= padlen) { - last = m; - } else { - /* - * Walk packet chain to find last mbuf. We will either - * pad there, or append a new mbuf and pad it. - */ - for (last = m; last->m_next != NULL; last = last->m_next) - ; - if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { - /* Allocate new empty mbuf, pad it. Compact later. */ - MGET(n, M_NOWAIT, MT_DATA); - if (n == NULL) - return (ENOBUFS); - n->m_len = 0; - last->m_next = n; - last = n; - } - } + /* Initialize active queues. */ + slot = STAILQ_FIRST(&sc->tx.active); + if (slot != NULL) + cpsw_write_hdp_slot(sc, &sc->tx, slot); + slot = STAILQ_FIRST(&sc->rx.active); + if (slot != NULL) + cpsw_write_hdp_slot(sc, &sc->rx, slot); + cpsw_rx_enqueue(sc); - /* Now zero the pad area. */ - memset(mtod(last, caddr_t) + last->m_len, 0, padlen); - last->m_len += padlen; - m->m_pkthdr.len += padlen; + /* Activate network interface */ + sc->rx.running = 1; + sc->tx.running = 1; + sc->watchdog.timer = 0; + callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc); + sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; + sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; - return (0); } -static void -cpsw_start(struct ifnet *ifp) +static int +cpsw_shutdown(device_t dev) { - struct cpsw_softc *sc = ifp->if_softc; + struct cpsw_softc *sc = device_get_softc(dev); - CPSW_TX_LOCK(sc); - cpsw_start_locked(ifp); - CPSW_TX_UNLOCK(sc); + CPSW_DEBUGF(("")); + CPSW_GLOBAL_LOCK(sc); + cpsw_shutdown_locked(sc); + CPSW_GLOBAL_UNLOCK(sc); + return (0); } static void -cpsw_start_locked(struct ifnet *ifp) +cpsw_rx_teardown_locked(struct cpsw_softc *sc) { - bus_dma_segment_t seg[1]; - struct cpsw_cpdma_bd bd; - struct cpsw_softc *sc = ifp->if_softc; - struct cpsw_queue newslots = STAILQ_HEAD_INITIALIZER(newslots); - struct cpsw_slot *slot, *prev_slot = NULL, *first_new_slot; - struct mbuf *m0, *mtmp; - int error, nsegs, enqueued = 0; - - CPSW_TX_LOCK_ASSERT(sc); - - if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != - IFF_DRV_RUNNING) - return; + struct mbuf *received, *next; + int i = 0; - /* Pull pending packets from IF queue and prep them for DMA. */ + CPSW_DEBUGF(("starting RX teardown")); + cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); for (;;) { - slot = STAILQ_FIRST(&sc->tx_avail); - if (slot == NULL) { - ifp->if_drv_flags |= IFF_DRV_OACTIVE; - break; + received = cpsw_rx_dequeue(sc); + CPSW_GLOBAL_UNLOCK(sc); + while (received != NULL) { + next = received->m_nextpkt; + received->m_nextpkt = NULL; + (*sc->ifp->if_input)(sc->ifp, received); + received = next; } - - IF_DEQUEUE(&ifp->if_snd, m0); - if (m0 == NULL) - break; - - if ((error = cpsw_pad(m0))) { - if_printf(ifp, - "%s: Dropping packet; could not pad\n", __func__); - m_freem(m0); - continue; + CPSW_GLOBAL_LOCK(sc); + if (!sc->rx.running) { + CPSW_DEBUGF(("finished RX teardown (%d retries)", i)); + return; } - - /* TODO: don't defragment here, queue each - packet fragment as a separate entry. 
*/ - mtmp = m_defrag(m0, M_NOWAIT); - if (mtmp) - m0 = mtmp; - - slot->mbuf = m0; - /* Create mapping in DMA memory */ - error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, - m0, seg, &nsegs, BUS_DMA_NOWAIT); - KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); - KASSERT(error == 0, ("DMA error (error=%d)", error)); - if (error != 0 || nsegs != 1) { - if_printf(ifp, - "%s: Can't load packet for DMA (nsegs=%d, error=%d), dropping packet\n", - __func__, nsegs, error); - bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); - m_freem(m0); - break; + if (++i > 10) { + if_printf(sc->ifp, "Unable to cleanly shutdown receiver\n"); + return; } - bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, - BUS_DMASYNC_PREWRITE); - - if (prev_slot != NULL) - cpsw_cpdma_write_txbd_next(prev_slot->index, - cpsw_cpdma_txbd_paddr(slot->index)); - bd.next = 0; - bd.bufptr = seg->ds_addr; - bd.bufoff = 0; - bd.buflen = seg->ds_len; - bd.pktlen = seg->ds_len; - bd.flags = 7 << 13; /* Set OWNERSHIP, SOP, EOP */ - cpsw_cpdma_write_txbd(slot->index, &bd); - ++enqueued; - - prev_slot = slot; - STAILQ_REMOVE_HEAD(&sc->tx_avail, next); - STAILQ_INSERT_TAIL(&newslots, slot, next); - BPF_MTAP(ifp, m0); + DELAY(10); } +} - if (STAILQ_EMPTY(&newslots)) - return; +static void +cpsw_tx_teardown_locked(struct cpsw_softc *sc) +{ + int i = 0; - /* Attach the list of new buffers to the hardware TX queue. */ - prev_slot = STAILQ_LAST(&sc->tx_active, cpsw_slot, next); - first_new_slot = STAILQ_FIRST(&newslots); - STAILQ_CONCAT(&sc->tx_active, &newslots); - if (prev_slot == NULL) { - /* Start the TX queue fresh. */ - cpsw_write_4(CPSW_CPDMA_TX_HDP(0), - cpsw_cpdma_txbd_paddr(first_new_slot->index)); - } else { - /* Add buffers to end of current queue. */ - cpsw_cpdma_write_txbd_next(prev_slot->index, - cpsw_cpdma_txbd_paddr(first_new_slot->index)); - /* If underrun, restart queue. */ - if (cpsw_cpdma_read_txbd_flags(prev_slot->index) & CPDMA_BD_EOQ) - cpsw_write_4(CPSW_CPDMA_TX_HDP(0), - cpsw_cpdma_txbd_paddr(first_new_slot->index)); - } - sc->tx_enqueues += enqueued; - sc->tx_queued += enqueued; - if (sc->tx_queued > sc->tx_max_queued) { - sc->tx_max_queued = sc->tx_queued; - CPSW_DEBUGF(("New TX high water mark %d", sc->tx_queued)); + CPSW_DEBUGF(("starting TX teardown")); + cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); + cpsw_tx_dequeue(sc); + while (sc->tx.running && ++i < 10) { + DELAY(10); + cpsw_tx_dequeue(sc); } + if (sc->tx.running) + if_printf(sc->ifp, "Unable to cleanly shutdown transmitter\n"); + CPSW_DEBUGF(("finished TX teardown (%d retries, %d idle buffers)", + i, sc->tx.active_queue_len)); } static void -cpsw_stop_locked(struct cpsw_softc *sc) +cpsw_shutdown_locked(struct cpsw_softc *sc) { struct ifnet *ifp; - int i; CPSW_DEBUGF(("")); - CPSW_GLOBAL_LOCK_ASSERT(sc); - ifp = sc->ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; /* Disable interface */ - ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); - - /* Stop tick engine */ - callout_stop(&sc->wd_callout); + ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + ifp->if_drv_flags |= IFF_DRV_OACTIVE; - /* Wait for hardware to clear pending ops. */ - CPSW_GLOBAL_UNLOCK(sc); - CPSW_DEBUGF(("starting RX and TX teardown")); - cpsw_write_4(CPSW_CPDMA_RX_TEARDOWN, 0); - cpsw_write_4(CPSW_CPDMA_TX_TEARDOWN, 0); - i = 0; - cpsw_intr_rx(sc); // Try clearing without delay. 
- cpsw_intr_tx(sc); - while (sc->rx_running || sc->tx_running) { - DELAY(10); - cpsw_intr_rx(sc); - cpsw_intr_tx(sc); - ++i; - } - CPSW_DEBUGF(("finished RX and TX teardown (%d tries)", i)); - CPSW_GLOBAL_LOCK(sc); + /* Stop ticker */ + callout_stop(&sc->watchdog.callout); - /* All slots are now available */ - STAILQ_CONCAT(&sc->rx_avail, &sc->rx_active); - STAILQ_CONCAT(&sc->tx_avail, &sc->tx_active); - CPSW_DEBUGF(("%d buffers dropped at TX reset", sc->tx_queued)); - sc->tx_queued = 0; + /* Tear down the RX/TX queues. */ + cpsw_rx_teardown_locked(sc); + cpsw_tx_teardown_locked(sc); - /* Reset writer */ - cpsw_write_4(CPSW_WR_SOFT_RESET, 1); - while (cpsw_read_4(CPSW_WR_SOFT_RESET) & 1) - ; + /* Capture stats before we reset controller. */ + cpsw_stats_collect(sc); - /* Reset SS */ - cpsw_write_4(CPSW_SS_SOFT_RESET, 1); - while (cpsw_read_4(CPSW_SS_SOFT_RESET) & 1) - ; + cpsw_reset(sc); +} - /* Reset Sliver port 1 and 2 */ - for (i = 0; i < 2; i++) { - /* Reset */ - cpsw_write_4(CPSW_SL_SOFT_RESET(i), 1); - while (cpsw_read_4(CPSW_SL_SOFT_RESET(i)) & 1) - ; - } +/* + * Suspend/Resume. + */ - /* Reset CPDMA */ - cpsw_write_4(CPSW_CPDMA_SOFT_RESET, 1); - while (cpsw_read_4(CPSW_CPDMA_SOFT_RESET) & 1) - ; +static int +cpsw_suspend(device_t dev) +{ + struct cpsw_softc *sc = device_get_softc(dev); - /* Disable TX & RX DMA */ - cpsw_write_4(CPSW_CPDMA_TX_CONTROL, 0); - cpsw_write_4(CPSW_CPDMA_RX_CONTROL, 0); + CPSW_DEBUGF(("")); + CPSW_GLOBAL_LOCK(sc); + cpsw_shutdown_locked(sc); + CPSW_GLOBAL_UNLOCK(sc); + return (0); +} - /* Disable TX and RX interrupts for all cores. */ - for (i = 0; i < 3; ++i) { - cpsw_write_4(CPSW_WR_C_TX_EN(i), 0x00); - cpsw_write_4(CPSW_WR_C_RX_EN(i), 0x00); - cpsw_write_4(CPSW_WR_C_MISC_EN(i), 0x00); - } +static int +cpsw_resume(device_t dev) +{ + struct cpsw_softc *sc = device_get_softc(dev); - /* Clear all interrupt Masks */ - cpsw_write_4(CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); - cpsw_write_4(CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); + CPSW_DEBUGF(("UNIMPLEMENTED")); + return (0); } +/* + * + * IOCTL + * + */ + static void cpsw_set_promisc(struct cpsw_softc *sc, int set) { + /* + * Enabling promiscuous mode requires two bits of work: First, + * ALE_BYPASS needs to be enabled. That disables the ALE + * forwarding logic and causes every packet to be sent to the + * host port. That makes us promiscuous wrt received packets. + * + * With ALE forwarding disabled, the transmitter needs to set + * an explicit output port on every packet to route it to the + * correct egress. This should be doable for systems such as + * BeagleBone where only one egress port is actually wired to + * a PHY. If you have both egress ports wired up, life gets a + * lot more interesting. + * + * Hmmm.... NetBSD driver uses ALE_BYPASS always and doesn't + * seem to set explicit egress ports. Does that mean they + * are always promiscuous? 
+ */ if (set) { printf("Promiscuous mode unimplemented\n"); } @@ -848,7 +1092,6 @@ cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data) int error; uint32_t changed; - CPSW_DEBUGF(("command=0x%lx", command)); error = 0; switch (command) { @@ -865,118 +1108,175 @@ cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data) cpsw_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI); } else { - CPSW_DEBUGF(("SIOCSIFFLAGS: UP but not RUNNING")); + CPSW_DEBUGF(("SIOCSIFFLAGS: UP but not RUNNING; starting up")); cpsw_init_locked(sc); } } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - CPSW_DEBUGF(("SIOCSIFFLAGS: not UP but RUNNING")); - cpsw_stop_locked(sc); + CPSW_DEBUGF(("SIOCSIFFLAGS: not UP but RUNNING; shutting down")); + cpsw_shutdown_locked(sc); } sc->cpsw_if_flags = ifp->if_flags; CPSW_GLOBAL_UNLOCK(sc); break; case SIOCADDMULTI: - CPSW_DEBUGF(("SIOCADDMULTI unimplemented")); + cpsw_ale_update_addresses(sc, 0); break; case SIOCDELMULTI: - CPSW_DEBUGF(("SIOCDELMULTI unimplemented")); + /* Ugh. DELMULTI doesn't provide the specific address + being removed, so the best we can do is remove + everything and rebuild it all. */ + cpsw_ale_update_addresses(sc, 1); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); break; default: - CPSW_DEBUGF(("ether ioctl")); error = ether_ioctl(ifp, command, data); } return (error); } -static void -cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) +/* + * + * MIIBUS + * + */ +static int +cpsw_miibus_ready(struct cpsw_softc *sc) { - struct cpsw_softc *sc = ifp->if_softc; - struct mii_data *mii; + uint32_t r, retries = CPSW_MIIBUS_RETRIES; - CPSW_DEBUGF(("")); - CPSW_TX_LOCK(sc); + while (--retries) { + r = cpsw_read_4(sc, MDIOUSERACCESS0); + if ((r & 1 << 31) == 0) + return 1; + DELAY(CPSW_MIIBUS_DELAY); + } + return 0; +} - mii = sc->mii; - mii_pollstat(mii); +static int +cpsw_miibus_readreg(device_t dev, int phy, int reg) +{ + struct cpsw_softc *sc = device_get_softc(dev); + uint32_t cmd, r; - ifmr->ifm_active = mii->mii_media_active; - ifmr->ifm_status = mii->mii_media_status; + if (!cpsw_miibus_ready(sc)) { + device_printf(dev, "MDIO not ready to read\n"); + return 0; + } - CPSW_TX_UNLOCK(sc); -} + /* Set GO, reg, phy */ + cmd = 1 << 31 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; + cpsw_write_4(sc, MDIOUSERACCESS0, cmd); + if (!cpsw_miibus_ready(sc)) { + device_printf(dev, "MDIO timed out during read\n"); + return 0; + } + + r = cpsw_read_4(sc, MDIOUSERACCESS0); + if((r & 1 << 29) == 0) { + device_printf(dev, "Failed to read from PHY.\n"); + r = 0; + } + return (r & 0xFFFF); +} static int -cpsw_ifmedia_upd(struct ifnet *ifp) +cpsw_miibus_writereg(device_t dev, int phy, int reg, int value) { - struct cpsw_softc *sc = ifp->if_softc; + struct cpsw_softc *sc = device_get_softc(dev); + uint32_t cmd; - CPSW_DEBUGF(("")); - if (ifp->if_flags & IFF_UP) { - CPSW_GLOBAL_LOCK(sc); - sc->cpsw_media_status = sc->mii->mii_media.ifm_media; - mii_mediachg(sc->mii); - cpsw_init_locked(sc); - CPSW_GLOBAL_UNLOCK(sc); + if (!cpsw_miibus_ready(sc)) { + device_printf(dev, "MDIO not ready to write\n"); + return 0; } - return (0); -} + /* Set GO, WRITE, reg, phy, and value */ + cmd = 3 << 30 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16 + | (value & 0xFFFF); + cpsw_write_4(sc, MDIOUSERACCESS0, cmd); -static void -cpsw_intr_rx_thresh(void *arg) -{ - struct cpsw_softc *sc = arg; - CPSW_DEBUGF(("")); + if (!cpsw_miibus_ready(sc)) { + device_printf(dev, "MDIO timed out during write\n"); + return 0; + } + + 
if((cpsw_read_4(sc, MDIOUSERACCESS0) & (1 << 29)) == 0) + device_printf(dev, "Failed to write to PHY.\n"); + + return 0; } +/* + * + * Transmit/Receive Packets. + * + */ + + static void cpsw_intr_rx(void *arg) { struct cpsw_softc *sc = arg; + struct mbuf *received, *next; CPSW_RX_LOCK(sc); - cpsw_intr_rx_locked(arg); - cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); + received = cpsw_rx_dequeue(sc); + cpsw_rx_enqueue(sc); + cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); CPSW_RX_UNLOCK(sc); + + while (received != NULL) { + next = received->m_nextpkt; + received->m_nextpkt = NULL; + (*sc->ifp->if_input)(sc->ifp, received); + received = next; + } } -static void -cpsw_intr_rx_locked(void *arg) +static struct mbuf * +cpsw_rx_dequeue(struct cpsw_softc *sc) { - struct cpsw_softc *sc = arg; struct cpsw_cpdma_bd bd; - struct cpsw_slot *slot, *last_slot = NULL; + struct cpsw_slot *slot; struct ifnet *ifp; + struct mbuf *mb_head, *mb_tail; + int removed = 0; ifp = sc->ifp; - if (!sc->rx_running) - return; + mb_head = mb_tail = NULL; /* Pull completed packets off hardware RX queue. */ - slot = STAILQ_FIRST(&sc->rx_active); - while (slot != NULL) { - cpsw_cpdma_read_rxbd(slot->index, &bd); + while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) { + cpsw_cpdma_read_bd(sc, slot, &bd); if (bd.flags & CPDMA_BD_OWNER) break; /* Still in use by hardware */ + CPSW_DEBUGF(("Removing received packet from RX queue")); + ++removed; + STAILQ_REMOVE_HEAD(&sc->rx.active, next); + STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next); + + bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); + if (bd.flags & CPDMA_BD_TDOWNCMPLT) { CPSW_DEBUGF(("RX teardown in progress")); - cpsw_write_4(CPSW_CPDMA_RX_CP(0), 0xfffffffc); - sc->rx_running = 0; - return; + m_freem(slot->mbuf); + slot->mbuf = NULL; + cpsw_write_cp(sc, &sc->rx, 0xfffffffc); + sc->rx.running = 0; + break; } - bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); + cpsw_write_cp_slot(sc, &sc->rx, slot); - /* Fill mbuf */ + /* Set up mbuf */ /* TODO: track SOP/EOP bits to assemble a full mbuf out of received fragments. */ slot->mbuf->m_hdr.mh_data += bd.bufoff; @@ -984,6 +1284,7 @@ cpsw_intr_rx_locked(void *arg) slot->mbuf->m_pkthdr.len = bd.pktlen - 4; slot->mbuf->m_flags |= M_PKTHDR; slot->mbuf->m_pkthdr.rcvif = ifp; + slot->mbuf->m_nextpkt = NULL; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { /* check for valid CRC by looking into pkt_err[5:4] */ @@ -994,64 +1295,60 @@ cpsw_intr_rx_locked(void *arg) } } - /* Handover packet */ - CPSW_RX_UNLOCK(sc); - (*ifp->if_input)(ifp, slot->mbuf); + /* Add mbuf to packet list to be returned. */ + if (mb_tail) { + mb_tail->m_nextpkt = slot->mbuf; + } else { + mb_head = slot->mbuf; + } + mb_tail = slot->mbuf; slot->mbuf = NULL; - CPSW_RX_LOCK(sc); - - last_slot = slot; - STAILQ_REMOVE_HEAD(&sc->rx_active, next); - STAILQ_INSERT_TAIL(&sc->rx_avail, slot, next); - slot = STAILQ_FIRST(&sc->rx_active); } - /* Tell hardware last slot we processed. */ - if (last_slot) - cpsw_write_4(CPSW_CPDMA_RX_CP(0), - cpsw_cpdma_rxbd_paddr(last_slot->index)); - - /* Repopulate hardware RX queue. 
*/ - cpsw_fill_rx_queue_locked(sc); + if (removed != 0) { + sc->rx.queue_removes += removed; + sc->rx.active_queue_len -= removed; + sc->rx.avail_queue_len += removed; + if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len) + sc->rx.max_avail_queue_len = sc->rx.avail_queue_len; + } + return (mb_head); } static void -cpsw_fill_rx_queue_locked(struct cpsw_softc *sc) +cpsw_rx_enqueue(struct cpsw_softc *sc) { bus_dma_segment_t seg[1]; - struct cpsw_queue tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue); struct cpsw_cpdma_bd bd; - struct cpsw_slot *slot, *prev_slot, *next_slot; - int error, nsegs; + struct ifnet *ifp = sc->ifp; + struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue); + struct cpsw_slot *slot, *prev_slot = NULL; + struct cpsw_slot *last_old_slot, *first_new_slot; + int error, nsegs, added = 0; - /* Try to allocate new mbufs. */ - STAILQ_FOREACH(slot, &sc->rx_avail, next) { - if (slot->mbuf != NULL) - continue; - slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); + /* Register new mbufs with hardware. */ + while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) { if (slot->mbuf == NULL) { - if_printf(sc->ifp, "Unable to fill RX queue\n"); - break; + slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); + if (slot->mbuf == NULL) { + if_printf(sc->ifp, "Unable to fill RX queue\n"); + break; + } + slot->mbuf->m_len = + slot->mbuf->m_pkthdr.len = + slot->mbuf->m_ext.ext_size; } - slot->mbuf->m_len = slot->mbuf->m_pkthdr.len = slot->mbuf->m_ext.ext_size; - } - - /* Register new mbufs with hardware. */ - prev_slot = NULL; - while (!STAILQ_EMPTY(&sc->rx_avail)) { - slot = STAILQ_FIRST(&sc->rx_avail); - if (slot->mbuf == NULL) - break; error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT); KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); KASSERT(error == 0, ("DMA error (error=%d)", error)); - if (nsegs != 1 || error) { - if_printf(sc->ifp, + if (error != 0 || nsegs != 1) { + if_printf(ifp, "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n", __func__, nsegs, error); + bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; break; @@ -1062,410 +1359,563 @@ cpsw_fill_rx_queue_locked(struct cpsw_softc *sc) /* Create and submit new rx descriptor*/ bd.next = 0; bd.bufptr = seg->ds_addr; - bd.buflen = MCLBYTES-1; - bd.bufoff = 2; /* make IP hdr aligned with 4 */ - bd.pktlen = 0; + bd.bufoff = 0; + bd.buflen = MCLBYTES - 1; + bd.pktlen = bd.buflen; bd.flags = CPDMA_BD_OWNER; - cpsw_cpdma_write_rxbd(slot->index, &bd); + cpsw_cpdma_write_bd(sc, slot, &bd); + ++added; - if (prev_slot) { - cpsw_cpdma_write_rxbd_next(prev_slot->index, - cpsw_cpdma_rxbd_paddr(slot->index)); - } + if (prev_slot != NULL) + cpsw_cpdma_write_bd_next(sc, prev_slot, slot); prev_slot = slot; - STAILQ_REMOVE_HEAD(&sc->rx_avail, next); + STAILQ_REMOVE_HEAD(&sc->rx.avail, next); + sc->rx.avail_queue_len--; STAILQ_INSERT_TAIL(&tmpqueue, slot, next); } + if (added == 0) + return; + + CPSW_DEBUGF(("Adding %d buffers to RX queue", added)); + /* Link new entries to hardware RX queue. */ - prev_slot = STAILQ_LAST(&sc->rx_active, cpsw_slot, next); - next_slot = STAILQ_FIRST(&tmpqueue); - if (next_slot == NULL) { + last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next); + first_new_slot = STAILQ_FIRST(&tmpqueue); + STAILQ_CONCAT(&sc->rx.active, &tmpqueue); + if (first_new_slot == NULL) { return; - } else if (prev_slot == NULL) { - /* Start a fresh RX queue. 
*/ - cpsw_write_4(CPSW_CPDMA_RX_HDP(0), - cpsw_cpdma_rxbd_paddr(next_slot->index)); + } else if (last_old_slot == NULL) { + /* Start a fresh queue. */ + cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); } else { /* Add buffers to end of current queue. */ - cpsw_cpdma_write_rxbd_next(prev_slot->index, - cpsw_cpdma_rxbd_paddr(next_slot->index)); + cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); /* If underrun, restart queue. */ - if (cpsw_cpdma_read_rxbd_flags(prev_slot->index) & CPDMA_BD_EOQ) { - cpsw_write_4(CPSW_CPDMA_RX_HDP(0), - cpsw_cpdma_rxbd_paddr(next_slot->index)); + if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) { + cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); } } - STAILQ_CONCAT(&sc->rx_active, &tmpqueue); + sc->rx.queue_adds += added; + sc->rx.active_queue_len += added; + if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) { + sc->rx.max_active_queue_len = sc->rx.active_queue_len; + } } static void -cpsw_intr_tx(void *arg) +cpsw_start(struct ifnet *ifp) { - struct cpsw_softc *sc = arg; + struct cpsw_softc *sc = ifp->if_softc; + CPSW_TX_LOCK(sc); - cpsw_intr_tx_locked(arg); - cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 2); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->tx.running) { + cpsw_tx_enqueue(sc); + cpsw_tx_dequeue(sc); + } CPSW_TX_UNLOCK(sc); } static void -cpsw_intr_tx_locked(void *arg) +cpsw_tx_enqueue(struct cpsw_softc *sc) { - struct cpsw_softc *sc = arg; - struct cpsw_slot *slot, *last_slot = NULL; - uint32_t flags, last_flags = 0, retires = 0; + bus_dma_segment_t segs[CPSW_TXFRAGS]; + struct cpsw_cpdma_bd bd; + struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue); + struct cpsw_slot *slot, *prev_slot = NULL; + struct cpsw_slot *last_old_slot, *first_new_slot; + struct mbuf *m0; + int error, nsegs, seg, added = 0, padlen; + + /* Pull pending packets from IF queue and prep them for DMA. */ + while ((slot = STAILQ_FIRST(&sc->tx.avail)) != NULL) { + IF_DEQUEUE(&sc->ifp->if_snd, m0); + if (m0 == NULL) + break; + + slot->mbuf = m0; + padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len; + if (padlen < 0) + padlen = 0; + + /* Create mapping in DMA memory */ + error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, + slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); + /* If the packet is too fragmented, try to simplify. */ + if (error == EFBIG || + (error == 0 && + nsegs + (padlen > 0 ? 1 : 0) > sc->tx.avail_queue_len)) { + bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); + if (padlen > 0) /* May as well add padding. */ + m_append(slot->mbuf, padlen, + sc->null_mbuf->m_hdr.mh_data); + m0 = m_defrag(slot->mbuf, M_NOWAIT); + if (m0 == NULL) { + if_printf(sc->ifp, + "Can't defragment packet; dropping\n"); + m_freem(slot->mbuf); + } else { + CPSW_DEBUGF(("Requeueing defragmented packet")); + IF_PREPEND(&sc->ifp->if_snd, m0); + } + slot->mbuf = NULL; + continue; + } + if (error != 0) { + if_printf(sc->ifp, + "%s: Can't setup DMA (error=%d), dropping packet\n", + __func__, error); + bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); + m_freem(slot->mbuf); + slot->mbuf = NULL; + break; + } + + bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, + BUS_DMASYNC_PREWRITE); + + + CPSW_DEBUGF(("Queueing TX packet: %d segments + %d pad bytes", + nsegs, padlen)); + + /* If there is only one segment, the for() loop + * gets skipped and the single buffer gets set up + * as both SOP and EOP. 
*/ + /* Start by setting up the first buffer */ + bd.next = 0; + bd.bufptr = segs[0].ds_addr; + bd.bufoff = 0; + bd.buflen = segs[0].ds_len; + bd.pktlen = m_length(slot->mbuf, NULL) + padlen; + bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER; + for (seg = 1; seg < nsegs; ++seg) { + /* Save the previous buffer (which isn't EOP) */ + cpsw_cpdma_write_bd(sc, slot, &bd); + if (prev_slot != NULL) + cpsw_cpdma_write_bd_next(sc, prev_slot, slot); + prev_slot = slot; + STAILQ_REMOVE_HEAD(&sc->tx.avail, next); + sc->tx.avail_queue_len--; + STAILQ_INSERT_TAIL(&tmpqueue, slot, next); + ++added; + slot = STAILQ_FIRST(&sc->tx.avail); + + /* Setup next buffer (which isn't SOP) */ + bd.next = 0; + bd.bufptr = segs[seg].ds_addr; + bd.bufoff = 0; + bd.buflen = segs[seg].ds_len; + bd.pktlen = 0; + bd.flags = CPDMA_BD_OWNER; + } + /* Save the final buffer. */ + if (padlen <= 0) + bd.flags |= CPDMA_BD_EOP; + cpsw_cpdma_write_bd(sc, slot, &bd); + if (prev_slot != NULL) + cpsw_cpdma_write_bd_next(sc, prev_slot, slot); + prev_slot = slot; + STAILQ_REMOVE_HEAD(&sc->tx.avail, next); + sc->tx.avail_queue_len--; + STAILQ_INSERT_TAIL(&tmpqueue, slot, next); + ++added; + + if (padlen > 0) { + slot = STAILQ_FIRST(&sc->tx.avail); + STAILQ_REMOVE_HEAD(&sc->tx.avail, next); + sc->tx.avail_queue_len--; + STAILQ_INSERT_TAIL(&tmpqueue, slot, next); + ++added; + + /* Setup buffer of null pad bytes (definitely EOP) */ + cpsw_cpdma_write_bd_next(sc, prev_slot, slot); + prev_slot = slot; + bd.next = 0; + bd.bufptr = sc->null_mbuf_paddr; + bd.bufoff = 0; + bd.buflen = padlen; + bd.pktlen = 0; + bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER; + cpsw_cpdma_write_bd(sc, slot, &bd); + ++nsegs; + } - if (!sc->tx_running) + if (nsegs > sc->tx.longest_chain) + sc->tx.longest_chain = nsegs; + + // TODO: Should we defer the BPF tap until + // after all packets are queued? + BPF_MTAP(sc->ifp, m0); + } + + /* Attach the list of new buffers to the hardware TX queue. */ + last_old_slot = STAILQ_LAST(&sc->tx.active, cpsw_slot, next); + first_new_slot = STAILQ_FIRST(&tmpqueue); + STAILQ_CONCAT(&sc->tx.active, &tmpqueue); + if (first_new_slot == NULL) { return; + } else if (last_old_slot == NULL) { + /* Start a fresh queue. */ + cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot); + } else { + /* Add buffers to end of current queue. */ + cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); + /* If underrun, restart queue. */ + if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) { + cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot); + } + } + sc->tx.queue_adds += added; + sc->tx.active_queue_len += added; + if (sc->tx.active_queue_len > sc->tx.max_active_queue_len) { + sc->tx.max_active_queue_len = sc->tx.active_queue_len; + } +} + +static int +cpsw_tx_dequeue(struct cpsw_softc *sc) +{ + struct cpsw_slot *slot, *last_removed_slot = NULL; + uint32_t flags, removed = 0; - slot = STAILQ_FIRST(&sc->tx_active); - if (slot == NULL && - cpsw_read_4(CPSW_CPDMA_TX_CP(0)) == 0xfffffffc) { + slot = STAILQ_FIRST(&sc->tx.active); + if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) { CPSW_DEBUGF(("TX teardown of an empty queue")); - cpsw_write_4(CPSW_CPDMA_TX_CP(0), 0xfffffffc); - sc->tx_running = 0; - return; + cpsw_write_cp(sc, &sc->tx, 0xfffffffc); + sc->tx.running = 0; + return (0); } - /* Pull completed segments off the hardware TX queue. */ + /* Pull completed buffers off the hardware TX queue. 
*/ while (slot != NULL) { - flags = cpsw_cpdma_read_txbd_flags(slot->index); + flags = cpsw_cpdma_read_bd_flags(sc, slot); if (flags & CPDMA_BD_OWNER) - break; /* Hardware is still using this. */ - - if (flags & CPDMA_BD_TDOWNCMPLT) { - CPSW_DEBUGF(("TX teardown in progress")); - cpsw_write_4(CPSW_CPDMA_TX_CP(0), 0xfffffffc); - sc->tx_running = 0; - return; - } + break; /* Hardware is still using this packet. */ - /* Release dmamap, free mbuf. */ - bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, - BUS_DMASYNC_POSTWRITE); + CPSW_DEBUGF(("TX removing completed packet")); + bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; - STAILQ_REMOVE_HEAD(&sc->tx_active, next); - STAILQ_INSERT_TAIL(&sc->tx_avail, slot, next); - sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + /* Dequeue any additional buffers used by this packet. */ + while (slot != NULL && slot->mbuf == NULL) { + STAILQ_REMOVE_HEAD(&sc->tx.active, next); + STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); + ++removed; + last_removed_slot = slot; + slot = STAILQ_FIRST(&sc->tx.active); + } - last_slot = slot; - last_flags = flags; - ++retires; - slot = STAILQ_FIRST(&sc->tx_active); + /* TearDown complete is only marked on the SOP for the packet. */ + if (flags & CPDMA_BD_TDOWNCMPLT) { + CPSW_DEBUGF(("TX teardown in progress")); + cpsw_write_cp(sc, &sc->tx, 0xfffffffc); + // TODO: Increment a count of dropped TX packets + sc->tx.running = 0; + break; + } } - if (retires != 0) { - /* Tell hardware the last item we dequeued. */ - cpsw_write_4(CPSW_CPDMA_TX_CP(0), - cpsw_cpdma_txbd_paddr(last_slot->index)); - sc->tx_retires += retires; - sc->tx_queued -= retires; + if (removed != 0) { + cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); + sc->tx.queue_removes += removed; + sc->tx.active_queue_len -= removed; + sc->tx.avail_queue_len += removed; + if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) + sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; } + return (removed); } +/* + * + * Miscellaneous interrupts. 
+ * + */ + static void -cpsw_intr_misc(void *arg) +cpsw_intr_rx_thresh(void *arg) { struct cpsw_softc *sc = arg; - uint32_t stat = cpsw_read_4(CPSW_WR_C_MISC_STAT(0)); + uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0)); CPSW_DEBUGF(("stat=%x", stat)); - /* EOI_RX_PULSE */ - cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); + cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); } static void -cpsw_tick(void *msc) +cpsw_intr_misc_host_error(struct cpsw_softc *sc) { - struct cpsw_softc *sc = msc; + uint32_t intstat; + uint32_t dmastat; + int txerr, rxerr, txchan, rxchan; + + printf("\n\n"); + device_printf(sc->dev, + "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); + printf("\n\n"); + intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); + device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); + dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); + device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); + + txerr = (dmastat >> 20) & 15; + txchan = (dmastat >> 16) & 7; + rxerr = (dmastat >> 12) & 15; + rxchan = (dmastat >> 8) & 7; + + switch (txerr) { + case 0: break; + case 1: printf("SOP error on TX channel %d\n", txchan); + break; + case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); + break; + case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); + break; + case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); + break; + case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); + break; + case 6: printf("Packet length error on TX channel %d\n", txchan); + break; + default: printf("Unknown error on TX channel %d\n", txchan); + break; + } - /* Check for TX timeout */ - cpsw_tx_watchdog(sc); + if (txerr != 0) { + printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", + txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); + printf("CPSW_CPDMA_TX%d_CP=0x%x\n", + txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); + cpsw_dump_queue(sc, &sc->tx.active); + } - mii_tick(sc->mii); + switch (rxerr) { + case 0: break; + case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); + break; + case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); + break; + case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); + break; + case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); + break; + default: printf("Unknown RX error on RX channel %d\n", rxchan); + break; + } - /* Check for media type change */ - if(sc->cpsw_media_status != sc->mii->mii_media.ifm_media) { - printf("%s: media type changed (ifm_media=%x)\n", __func__, - sc->mii->mii_media.ifm_media); - cpsw_ifmedia_upd(sc->ifp); + if (rxerr != 0) { + printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", + rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); + printf("CPSW_CPDMA_RX%d_CP=0x%x\n", + rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); + cpsw_dump_queue(sc, &sc->rx.active); } - /* Schedule another timeout one second from now */ - callout_reset(&sc->wd_callout, hz, cpsw_tick, sc); -} + printf("\nALE Table\n"); + cpsw_ale_dump_table(sc); -static void -cpsw_tx_watchdog(struct cpsw_softc *sc) -{ - struct ifnet *ifp = sc->ifp; + // XXX do something useful here?? + panic("CPSW HOST ERROR INTERRUPT"); - CPSW_GLOBAL_LOCK(sc); - if (sc->tx_retires > sc->tx_retires_at_last_tick) { - sc->tx_wd_timer = 0; /* Stuff got sent. */ - } else if (sc->tx_queued == 0) { - sc->tx_wd_timer = 0; /* Nothing to send. */ - } else { - /* There was something to send but we didn't. 
*/ - ++sc->tx_wd_timer; - if (sc->tx_wd_timer > 3) { - sc->tx_wd_timer = 0; - ifp->if_oerrors++; - if_printf(ifp, "watchdog timeout\n"); - cpsw_stop_locked(sc); - cpsw_init_locked(sc); - CPSW_DEBUGF(("watchdog reset completed\n")); - } - } - sc->tx_retires_at_last_tick = sc->tx_retires; - CPSW_GLOBAL_UNLOCK(sc); + // Suppress this interrupt in the future. + cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); + printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); + // The watchdog will probably reset the controller + // in a little while. It will probably fail again. } static void -cpsw_init(void *arg) +cpsw_intr_misc(void *arg) { struct cpsw_softc *sc = arg; - - CPSW_DEBUGF(("")); - CPSW_GLOBAL_LOCK(sc); - cpsw_init_locked(arg); - CPSW_GLOBAL_UNLOCK(sc); + uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); + + if (stat & 16) + CPSW_DEBUGF(("Time sync event interrupt unimplemented")); + if (stat & 8) + cpsw_stats_collect(sc); + if (stat & 4) + cpsw_intr_misc_host_error(sc); + if (stat & 2) + CPSW_DEBUGF(("MDIO link change interrupt unimplemented")); + if (stat & 1) + CPSW_DEBUGF(("MDIO operation completed interrupt unimplemented")); + cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); } -int once = 1; +/* + * + * Periodic Checks and Watchdog. + * + */ static void -cpsw_init_locked(void *arg) +cpsw_tick(void *msc) { - struct ifnet *ifp; - struct cpsw_softc *sc = arg; - uint8_t broadcast_address[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; - uint32_t i; - - CPSW_DEBUGF(("")); - ifp = sc->ifp; - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) - return; - - /* Reset writer */ - cpsw_write_4(CPSW_WR_SOFT_RESET, 1); - while (cpsw_read_4(CPSW_WR_SOFT_RESET) & 1) - ; - - /* Reset SS */ - cpsw_write_4(CPSW_SS_SOFT_RESET, 1); - while (cpsw_read_4(CPSW_SS_SOFT_RESET) & 1) - ; + struct cpsw_softc *sc = msc; - /* Clear table (30) and enable ALE(31) */ - if (once) - cpsw_write_4(CPSW_ALE_CONTROL, 3 << 30); - else - cpsw_write_4(CPSW_ALE_CONTROL, 1 << 31); - once = 0; // FIXME + /* Check for TX timeout */ + cpsw_tx_watchdog(sc); - /* Reset and init Sliver port 1 and 2 */ - for (i = 0; i < 2; i++) { - /* Reset */ - cpsw_write_4(CPSW_SL_SOFT_RESET(i), 1); - while (cpsw_read_4(CPSW_SL_SOFT_RESET(i)) & 1) - ; - /* Set Slave Mapping */ - cpsw_write_4(CPSW_SL_RX_PRI_MAP(i), 0x76543210); - cpsw_write_4(CPSW_PORT_P_TX_PRI_MAP(i + 1), 0x33221100); - cpsw_write_4(CPSW_SL_RX_MAXLEN(i), 0x5f2); - /* Set MAC Address */ - cpsw_write_4(CPSW_PORT_P_SA_HI(i + 1), - sc->mac_addr[3] << 24 | - sc->mac_addr[2] << 16 | - sc->mac_addr[1] << 8 | - sc->mac_addr[0]); - cpsw_write_4(CPSW_PORT_P_SA_LO(i+1), - sc->mac_addr[5] << 8 | - sc->mac_addr[4]); - - /* Set MACCONTROL for ports 0,1: FULLDUPLEX(1), GMII_EN(5), - IFCTL_A(15), IFCTL_B(16) FIXME */ - cpsw_write_4(CPSW_SL_MACCONTROL(i), 1 << 15 | 1 << 5 | 1); - - /* Set ALE port to forwarding(3) */ - cpsw_write_4(CPSW_ALE_PORTCTL(i + 1), 3); + /* Check for media type change */ + mii_tick(sc->mii); + if(sc->cpsw_media_status != sc->mii->mii_media.ifm_media) { + printf("%s: media type changed (ifm_media=%x)\n", __func__, + sc->mii->mii_media.ifm_media); + cpsw_ifmedia_upd(sc->ifp); } - /* Set Host Port Mapping */ - cpsw_write_4(CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); - cpsw_write_4(CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); - - /* Set ALE port to forwarding(3)*/ - cpsw_write_4(CPSW_ALE_PORTCTL(0), 3); - - /* Add own MAC address and broadcast to ALE */ - cpsw_ale_uc_entry_set(sc, 0, sc->mac_addr); - cpsw_ale_mc_entry_set(sc, 7, broadcast_address); - - cpsw_write_4(CPSW_SS_PTYPE, 0); - 
/* Enable statistics for ports 0, 1 and 2 */ - cpsw_write_4(CPSW_SS_STAT_PORT_EN, 7); - - /* Reset CPDMA */ - cpsw_write_4(CPSW_CPDMA_SOFT_RESET, 1); - while (cpsw_read_4(CPSW_CPDMA_SOFT_RESET) & 1) - ; - - /* Make IP hdr aligned with 4 */ - cpsw_write_4(CPSW_CPDMA_RX_BUFFER_OFFSET, 2); + /* Schedule another timeout one second from now */ + callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc); +} - for (i = 0; i < 8; i++) { - cpsw_write_4(CPSW_CPDMA_TX_HDP(i), 0); - cpsw_write_4(CPSW_CPDMA_RX_HDP(i), 0); - cpsw_write_4(CPSW_CPDMA_TX_CP(i), 0); - cpsw_write_4(CPSW_CPDMA_RX_CP(i), 0); - } +static void +cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) +{ + struct cpsw_softc *sc = ifp->if_softc; + struct mii_data *mii; - /* Initialize RX Buffer Descriptors */ - cpsw_write_4(CPSW_CPDMA_RX_FREEBUFFER(0), 0); - cpsw_fill_rx_queue_locked(sc); + CPSW_DEBUGF(("")); + CPSW_TX_LOCK(sc); - /* Clear all interrupt Masks */ - cpsw_write_4(CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); - cpsw_write_4(CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); + mii = sc->mii; + mii_pollstat(mii); - /* Enable TX & RX DMA */ - cpsw_write_4(CPSW_CPDMA_TX_CONTROL, 1); - cpsw_write_4(CPSW_CPDMA_RX_CONTROL, 1); + ifmr->ifm_active = mii->mii_media_active; + ifmr->ifm_status = mii->mii_media_status; - /* Enable TX and RX interrupt receive for core 0 */ - cpsw_write_4(CPSW_WR_C_TX_EN(0), 0xFF); - cpsw_write_4(CPSW_WR_C_RX_EN(0), 0xFF); - //cpsw_write_4(CPSW_WR_C_MISC_EN(0), 0x3F); + CPSW_TX_UNLOCK(sc); +} - /* Enable host Error Interrupt */ - cpsw_write_4(CPSW_CPDMA_DMA_INTMASK_SET, 1); +static int +cpsw_ifmedia_upd(struct ifnet *ifp) +{ + struct cpsw_softc *sc = ifp->if_softc; - /* Enable interrupts for TX and RX Channel 0 */ - cpsw_write_4(CPSW_CPDMA_TX_INTMASK_SET, 1); - cpsw_write_4(CPSW_CPDMA_RX_INTMASK_SET, 1); + CPSW_DEBUGF(("")); + if (ifp->if_flags & IFF_UP) { + CPSW_GLOBAL_LOCK(sc); + sc->cpsw_media_status = sc->mii->mii_media.ifm_media; + mii_mediachg(sc->mii); + cpsw_init_locked(sc); + CPSW_GLOBAL_UNLOCK(sc); + } - /* Ack stalled irqs */ - cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); - cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); - cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 2); - cpsw_write_4(CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); + return (0); +} - /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ - /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ - cpsw_write_4(MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF); +static void +cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) +{ + cpsw_debugf_head("CPSW watchdog"); + if_printf(sc->ifp, "watchdog timeout\n"); + cpsw_shutdown_locked(sc); + cpsw_init_locked(sc); +} - /* Select MII in GMII_SEL, Internal Delay mode */ - //ti_scm_reg_write_4(0x650, 0); +static void +cpsw_tx_watchdog(struct cpsw_softc *sc) +{ + struct ifnet *ifp = sc->ifp; - /* Activate network interface */ - sc->rx_running = 1; - sc->tx_running = 1; - sc->tx_wd_timer = 0; - callout_reset(&sc->wd_callout, hz, cpsw_tick, sc); - sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; - sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + CPSW_GLOBAL_LOCK(sc); + if (sc->tx.active_queue_len == 0 || (ifp->if_flags & IFF_UP) == 0 || + (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !sc->tx.running) { + sc->watchdog.timer = 0; /* Nothing to do. */ + } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { + sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ + } else if (cpsw_tx_dequeue(sc) > 0) { + sc->watchdog.timer = 0; /* We just did something. 
*/ + } else { + /* There was something to do but it didn't get done. */ + ++sc->watchdog.timer; + if (sc->watchdog.timer > 2) { + sc->watchdog.timer = 0; + ++ifp->if_oerrors; + ++sc->watchdog.resets; + cpsw_tx_watchdog_full_reset(sc); + } + } + sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; + CPSW_GLOBAL_UNLOCK(sc); } +/* + * + * ALE support routines. + * + */ + static void cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { - cpsw_write_4(CPSW_ALE_TBLCTL, idx & 1023); - ale_entry[0] = cpsw_read_4(CPSW_ALE_TBLW0); - ale_entry[1] = cpsw_read_4(CPSW_ALE_TBLW1); - ale_entry[2] = cpsw_read_4(CPSW_ALE_TBLW2); + cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); + ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); + ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); + ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); } static void cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { - cpsw_write_4(CPSW_ALE_TBLW0, ale_entry[0]); - cpsw_write_4(CPSW_ALE_TBLW1, ale_entry[1]); - cpsw_write_4(CPSW_ALE_TBLW2, ale_entry[2]); - cpsw_write_4(CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); + cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); + cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); + cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); + cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); } static int -cpsw_ale_find_entry_by_mac(struct cpsw_softc *sc, uint8_t *mac) +cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) { int i; uint32_t ale_entry[3]; - for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { + + /* First two entries are link address and broadcast. */ + for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); - if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && - (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && - (((ale_entry[0] >>24) & 0xFF) == mac[2]) && - (((ale_entry[0] >>16) & 0xFF) == mac[3]) && - (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && - (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { - return (i); + if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */ + ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */ + ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; + cpsw_ale_write_entry(sc, i, ale_entry); } } return CPSW_MAX_ALE_ENTRIES; } static int -cpsw_ale_find_free_entry(struct cpsw_softc *sc) +cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac) { - int i; + int free_index = -1, matching_index = -1, i; uint32_t ale_entry[3]; + + /* Find a matching entry or a free entry. 
*/ for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); + /* Entry Type[61:60] is 0 for free entry */ - if (((ale_entry[1] >> 28) & 3) == 0) { - return i; + if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) { + free_index = i; } - } - return CPSW_MAX_ALE_ENTRIES; -} - - -static int -cpsw_ale_uc_entry_set(struct cpsw_softc *sc, uint8_t port, uint8_t *mac) -{ - int i; - uint32_t ale_entry[3]; - if ((i = cpsw_ale_find_entry_by_mac(sc, mac)) == CPSW_MAX_ALE_ENTRIES) { - i = cpsw_ale_find_free_entry(sc); + if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && + (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && + (((ale_entry[0] >>24) & 0xFF) == mac[2]) && + (((ale_entry[0] >>16) & 0xFF) == mac[3]) && + (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && + (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { + matching_index = i; + break; + } } - if (i == CPSW_MAX_ALE_ENTRIES) - return (ENOMEM); - - /* Set MAC address */ - ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; - ale_entry[1] = mac[0] << 8 | mac[1]; - - /* Entry type[61:60] is addr entry(1) */ - ale_entry[1] |= 0x10 << 24; - - /* Set portmask [67:66] */ - ale_entry[2] = (port & 3) << 2; - - cpsw_ale_write_entry(sc, i, ale_entry); - - return 0; -} - -static int -cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac) -{ - int i; - uint32_t ale_entry[3]; - - if ((i = cpsw_ale_find_entry_by_mac(sc, mac)) == CPSW_MAX_ALE_ENTRIES) { - i = cpsw_ale_find_free_entry(sc); + if (matching_index < 0) { + if (free_index < 0) + return (ENOMEM); + i = free_index; } - if (i == CPSW_MAX_ALE_ENTRIES) - return (ENOMEM); - /* Set MAC address */ ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; ale_entry[1] = mac[0] << 8 | mac[1]; @@ -1481,7 +1931,6 @@ cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac) return 0; } -#ifdef CPSW_DEBUG static void cpsw_ale_dump_table(struct cpsw_softc *sc) { int i; @@ -1504,5 +1953,232 @@ cpsw_ale_dump_table(struct cpsw_softc *sc) { printf("\n"); } } + printf("\n"); +} + +static int +cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge) +{ + uint8_t *mac; + uint32_t ale_entry[3]; + struct ifnet *ifp = sc->ifp; + struct ifmultiaddr *ifma; + int i; + + /* Route incoming packets for our MAC address to Port 0 (host). */ + /* For simplicity, keep this entry at table index 0 in the ALE. */ + if_addr_rlock(ifp); + mac = LLADDR((struct sockaddr_dl *)ifp->if_addr->ifa_addr); + ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; + ale_entry[1] = 0x10 << 24 | mac[0] << 8 | mac[1]; /* addr entry + mac */ + ale_entry[2] = 0; /* port = 0 */ + cpsw_ale_write_entry(sc, 0, ale_entry); + + /* Set outgoing MAC Address for Ports 1 and 2. */ + for (i = 1; i < 3; ++i) { + cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i), + mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); + cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i), + mac[5] << 8 | mac[4]); + } + if_addr_runlock(ifp); + + /* Keep the broadcast address at table entry 1. */ + ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ + ale_entry[1] = 0xd000ffff; /* FW (3 << 30), Addr entry (1 << 24), upper 16 bits of Mac */ + ale_entry[2] = 0x0000001c; /* Forward to all ports */ + cpsw_ale_write_entry(sc, 1, ale_entry); + + /* SIOCDELMULTI doesn't specify the particular address + being removed, so we have to remove all and rebuild. */ + if (purge) + cpsw_ale_remove_all_mc_entries(sc); + + /* Set other multicast addrs desired. 
*/ + if_maddr_rlock(ifp); + TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + cpsw_ale_mc_entry_set(sc, 7, + LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); + } + if_maddr_runlock(ifp); + + return (0); +} + +/* + * + * Statistics and Sysctls. + * + */ + +#if 0 +static void +cpsw_stats_dump(struct cpsw_softc *sc) +{ + int i; + uint32_t r; + + for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { + r = cpsw_read_4(sc, CPSW_STATS_OFFSET + + cpsw_stat_sysctls[i].reg); + CPSW_DEBUGF(("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, + (intmax_t)sc->shadow_stats[i], r, + (intmax_t)sc->shadow_stats[i] + r)); + } } #endif + +static void +cpsw_stats_collect(struct cpsw_softc *sc) +{ + int i; + uint32_t r; + + CPSW_DEBUGF(("Controller shadow statistics updated.")); + + for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { + r = cpsw_read_4(sc, CPSW_STATS_OFFSET + + cpsw_stat_sysctls[i].reg); + sc->shadow_stats[i] += r; + cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r); + } +} + +static int +cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) +{ + struct cpsw_softc *sc; + struct cpsw_stat *stat; + uint64_t result; + + sc = (struct cpsw_softc *)arg1; + stat = &cpsw_stat_sysctls[oidp->oid_number]; + result = sc->shadow_stats[oidp->oid_number]; + result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); + return (sysctl_handle_64(oidp, &result, 0, req)); +} + +static int +cpsw_stat_attached(SYSCTL_HANDLER_ARGS) +{ + struct cpsw_softc *sc; + struct bintime t; + unsigned result; + + sc = (struct cpsw_softc *)arg1; + getbinuptime(&t); + bintime_sub(&t, &sc->attach_uptime); + result = t.sec; + return (sysctl_handle_int(oidp, &result, 0, req)); +} + +static int +cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) +{ + struct cpsw_softc *sc; + struct bintime t; + unsigned result; + + sc = (struct cpsw_softc *)arg1; + if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) { + getbinuptime(&t); + bintime_sub(&t, &sc->init_uptime); + result = t.sec; + } else + result = 0; + return (sysctl_handle_int(oidp, &result, 0, req)); +} + +static void +cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_queue *queue) +{ + struct sysctl_oid_list *parent; + + parent = SYSCTL_CHILDREN(node); + SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", + CTLFLAG_RD, &queue->queue_slots, 0, + "Total buffers currently assigned to this queue"); + SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", + CTLFLAG_RD, &queue->active_queue_len, 0, + "Buffers currently registered with hardware controller"); + SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", + CTLFLAG_RD, &queue->max_active_queue_len, 0, + "Max value of activeBuffers since last driver reset"); + SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", + CTLFLAG_RD, &queue->avail_queue_len, 0, + "Buffers allocated to this queue but not currently " + "registered with hardware controller"); + SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", + CTLFLAG_RD, &queue->max_avail_queue_len, 0, + "Max value of availBuffers since last driver reset"); + SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", + CTLFLAG_RD, &queue->queue_adds, 0, + "Total buffers added to queue"); + SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", + CTLFLAG_RD, &queue->queue_removes, 0, + "Total buffers removed from queue"); + SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", + CTLFLAG_RD, &queue->longest_chain, 0, + "Max buffers used for a single packet"); +} + +static void +cpsw_add_watchdog_sysctls(struct sysctl_ctx_list 
*ctx, struct sysctl_oid *node, struct cpsw_softc *sc) +{ + struct sysctl_oid_list *parent; + + parent = SYSCTL_CHILDREN(node); + SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", + CTLFLAG_RD, &sc->watchdog.resets, 0, + "Total number of watchdog resets"); +} + +static void +cpsw_add_sysctls(struct cpsw_softc *sc) +{ + struct sysctl_ctx_list *ctx; + struct sysctl_oid *stats_node, *queue_node, *node; + struct sysctl_oid_list *parent, *stats_parent, *queue_parent; + int i; + + ctx = device_get_sysctl_ctx(sc->dev); + parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); + + SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", + CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU", + "Time since driver attach"); + + SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "uptime", + CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_uptime, "IU", + "Seconds since driver init"); + + stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", + CTLFLAG_RD, NULL, "CPSW Statistics"); + stats_parent = SYSCTL_CHILDREN(stats_node); + for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { + SYSCTL_ADD_PROC(ctx, stats_parent, i, + cpsw_stat_sysctls[i].oid, + CTLTYPE_U64 | CTLFLAG_RD, sc, 0, + cpsw_stats_sysctl, "IU", + cpsw_stat_sysctls[i].oid); + } + + queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", + CTLFLAG_RD, NULL, "CPSW Queue Statistics"); + queue_parent = SYSCTL_CHILDREN(queue_node); + + node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", + CTLFLAG_RD, NULL, "TX Queue Statistics"); + cpsw_add_queue_sysctls(ctx, node, &sc->tx); + + node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", + CTLFLAG_RD, NULL, "RX Queue Statistics"); + cpsw_add_queue_sysctls(ctx, node, &sc->rx); + + node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", + CTLFLAG_RD, NULL, "Watchdog Statistics"); + cpsw_add_watchdog_sysctls(ctx, node, sc); +} + diff --git a/sys/arm/ti/cpsw/if_cpswreg.h b/sys/arm/ti/cpsw/if_cpswreg.h index dab4f36..46f8417 100644 --- a/sys/arm/ti/cpsw/if_cpswreg.h +++ b/sys/arm/ti/cpsw/if_cpswreg.h @@ -34,8 +34,11 @@ #define CPSW_SS_SOFT_RESET (CPSW_SS_OFFSET + 0x08) #define CPSW_SS_STAT_PORT_EN (CPSW_SS_OFFSET + 0x0C) #define CPSW_SS_PTYPE (CPSW_SS_OFFSET + 0x10) +#define CPSW_SS_FLOW_CONTROL (CPSW_SS_OFFSET + 0x24) #define CPSW_PORT_OFFSET 0x0100 +#define CPSW_PORT_P_MAX_BLKS(p) (CPSW_PORT_OFFSET + 0x08 + ((p) * 0x100)) +#define CPSW_PORT_P_BLK_CNT(p) (CPSW_PORT_OFFSET + 0x0C + ((p) * 0x100)) #define CPSW_PORT_P_TX_PRI_MAP(p) (CPSW_PORT_OFFSET + 0x118 + ((p-1) * 0x100)) #define CPSW_PORT_P0_CPDMA_TX_PRI_MAP (CPSW_PORT_OFFSET + 0x01C) #define CPSW_PORT_P0_CPDMA_RX_CH_MAP (CPSW_PORT_OFFSET + 0x020) @@ -84,10 +87,14 @@ #define CPSW_ALE_TBLW0 (CPSW_ALE_OFFSET + 0x3C) #define CPSW_ALE_PORTCTL(p) (CPSW_ALE_OFFSET + 0x40 + ((p) * 0x04)) +/* SL1 is at 0x0D80, SL2 is at 0x0DC0 */ #define CPSW_SL_OFFSET 0x0D80 #define CPSW_SL_MACCONTROL(p) (CPSW_SL_OFFSET + (0x40 * (p)) + 0x04) +#define CPSW_SL_MACSTATUS(p) (CPSW_SL_OFFSET + (0x40 * (p)) + 0x08) #define CPSW_SL_SOFT_RESET(p) (CPSW_SL_OFFSET + (0x40 * (p)) + 0x0C) #define CPSW_SL_RX_MAXLEN(p) (CPSW_SL_OFFSET + (0x40 * (p)) + 0x10) +#define CPSW_SL_RX_PAUSE(p) (CPSW_SL_OFFSET + (0x40 * (p)) + 0x18) +#define CPSW_SL_TX_PAUSE(p) (CPSW_SL_OFFSET + (0x40 * (p)) + 0x1C) #define CPSW_SL_RX_PRI_MAP(p) (CPSW_SL_OFFSET + (0x40 * (p)) + 0x24) #define MDIO_OFFSET 0x1000 @@ -109,5 +116,22 @@ #define CPSW_WR_C_MISC_STAT(p) (CPSW_WR_OFFSET + (0x10 * (p)) + 0x4C) #define CPSW_CPPI_RAM_OFFSET 0x2000 +#define CPSW_CPPI_RAM_SIZE 0x2000 + +#define CPDMA_BD_SOP (1<<15) +#define 
CPDMA_BD_EOP (1<<14) +#define CPDMA_BD_OWNER (1<<13) +#define CPDMA_BD_EOQ (1<<12) +#define CPDMA_BD_TDOWNCMPLT (1<<11) +#define CPDMA_BD_PKT_ERR_MASK (3<< 4) + +struct cpsw_cpdma_bd { + volatile uint32_t next; + volatile uint32_t bufptr; + volatile uint16_t buflen; + volatile uint16_t bufoff; + volatile uint16_t pktlen; + volatile uint16_t flags; +}; #endif /*_IF_CPSWREG_H */ diff --git a/sys/arm/ti/cpsw/if_cpswvar.h b/sys/arm/ti/cpsw/if_cpswvar.h index 35024c6..2aebb0f 100644 --- a/sys/arm/ti/cpsw/if_cpswvar.h +++ b/sys/arm/ti/cpsw/if_cpswvar.h @@ -35,105 +35,90 @@ #define CPSW_MIIBUS_RETRIES 5 #define CPSW_MIIBUS_DELAY 1000 -#define CPSW_MAX_TX_BUFFERS 128 -#define CPSW_MAX_RX_BUFFERS 128 #define CPSW_MAX_ALE_ENTRIES 1024 +#define CPSW_SYSCTL_COUNT 34 + struct cpsw_slot { + uint32_t bd_offset; /* Offset of corresponding BD within CPPI RAM. */ bus_dmamap_t dmamap; struct mbuf *mbuf; - int index; STAILQ_ENTRY(cpsw_slot) next; }; -STAILQ_HEAD(cpsw_queue, cpsw_slot); +STAILQ_HEAD(cpsw_slots, cpsw_slot); + +struct cpsw_queue { + struct mtx lock; + int running; + struct cpsw_slots active; + struct cpsw_slots avail; + uint32_t queue_adds; /* total bufs added */ + uint32_t queue_removes; /* total bufs removed */ + uint32_t queue_removes_at_last_tick; /* Used by watchdog */ + int queue_slots; + int active_queue_len; + int max_active_queue_len; + int avail_queue_len; + int max_avail_queue_len; + int longest_chain; /* Largest # segments in a single packet. */ + int hdp_offset; +}; struct cpsw_softc { struct ifnet *ifp; phandle_t node; device_t dev; + struct bintime attach_uptime; /* system uptime when attach happened. */ + struct bintime init_uptime; /* system uptime when init happened. */ + + /* TODO: We should set up a child structure for each port; + store mac, phy information, etc, in that structure. */ uint8_t mac_addr[ETHER_ADDR_LEN]; + device_t miibus; struct mii_data *mii; - struct mtx tx_lock; /* transmitter lock */ - struct mtx rx_lock; /* receiver lock */ - struct resource *res[1 + CPSW_INTR_COUNT]; /* resources */ - void *ih_cookie[CPSW_INTR_COUNT]; /* interrupt handlers cookies */ + /* We expect 1 memory resource and 4 interrupts from the device tree. */ + struct resource *res[1 + CPSW_INTR_COUNT]; + + /* Interrupts get recorded here as we initialize them. */ + /* Interrupt teardown just walks this list. */ + struct { + struct resource *res; + void *ih_cookie; + const char *description; + } interrupts[CPSW_INTR_COUNT]; + int interrupt_count; uint32_t cpsw_if_flags; int cpsw_media_status; - struct callout wd_callout; - int tx_wd_timer; + struct { + int resets; + int timer; + struct callout callout; + } watchdog; bus_dma_tag_t mbuf_dtag; - /* RX buffer tracking */ - int rx_running; - struct cpsw_queue rx_active; - struct cpsw_queue rx_avail; - struct cpsw_slot _rx_slots[CPSW_MAX_RX_BUFFERS]; - - /* TX buffer tracking. */ - int tx_running; - struct cpsw_queue tx_active; - struct cpsw_queue tx_avail; - struct cpsw_slot _tx_slots[CPSW_MAX_TX_BUFFERS]; - - /* Statistics */ - uint32_t tx_enqueues; /* total TX bufs added to queue */ - uint32_t tx_retires; /* total TX bufs removed from queue */ - uint32_t tx_retires_at_last_tick; /* used for watchdog */ - /* Note: tx_queued != tx_enqueues - tx_retires - At driver reset, packets can be discarded - from TX queue without being retired. */ - int tx_queued; /* Current bufs in TX queue */ - int tx_max_queued; + /* An mbuf full of nulls for TX padding. 
*/ + bus_dmamap_t null_mbuf_dmamap; + struct mbuf *null_mbuf; + bus_addr_t null_mbuf_paddr; + + /* RX and TX buffer tracking */ + struct cpsw_queue rx, tx; + + /* 64-bit versions of 32-bit hardware statistics counters */ + uint64_t shadow_stats[CPSW_SYSCTL_COUNT]; + + /* CPPI STATERAM has 512 slots for building TX/RX queues. */ + /* TODO: Size here supposedly varies with different versions + of the controller. Check DaVinci specs and find a good + way to adjust this. One option is to have a separate + Device Tree parameter for number slots; another option + is to calculate it from the memory size in the device tree. */ + struct cpsw_slot _slots[CPSW_CPPI_RAM_SIZE / sizeof(struct cpsw_cpdma_bd)]; + struct cpsw_slots avail; }; -#define CPDMA_BD_SOP (1<<15) -#define CPDMA_BD_EOP (1<<14) -#define CPDMA_BD_OWNER (1<<13) -#define CPDMA_BD_EOQ (1<<12) -#define CPDMA_BD_TDOWNCMPLT (1<<11) -#define CPDMA_BD_PKT_ERR_MASK (3<< 4) - -struct cpsw_cpdma_bd { - volatile uint32_t next; - volatile uint32_t bufptr; - volatile uint16_t buflen; - volatile uint16_t bufoff; - volatile uint16_t pktlen; - volatile uint16_t flags; -}; - -/* Read/Write macros */ -#define cpsw_read_4(reg) bus_read_4(sc->res[0], reg) -#define cpsw_write_4(reg, val) bus_write_4(sc->res[0], reg, val) - -#define cpsw_cpdma_txbd_offset(i) \ - (CPSW_CPPI_RAM_OFFSET + ((i)*16)) -#define cpsw_cpdma_txbd_paddr(i) (cpsw_cpdma_txbd_offset(i) + \ - vtophys(rman_get_start(sc->res[0]))) -#define cpsw_cpdma_read_txbd(i, val) \ - bus_read_region_4(sc->res[0], cpsw_cpdma_txbd_offset(i), (uint32_t *) val, 4) -#define cpsw_cpdma_write_txbd(i, val) \ - bus_write_region_4(sc->res[0], cpsw_cpdma_txbd_offset(i), (uint32_t *) val, 4) -#define cpsw_cpdma_write_txbd_next(i, val) \ - bus_write_4(sc->res[0], cpsw_cpdma_txbd_offset(i), val) -#define cpsw_cpdma_read_txbd_flags(i) \ - bus_read_2(sc->res[0], cpsw_cpdma_txbd_offset(i)+14) - -#define cpsw_cpdma_rxbd_offset(i) \ - (CPSW_CPPI_RAM_OFFSET + ((CPSW_MAX_TX_BUFFERS + (i))*16)) -#define cpsw_cpdma_rxbd_paddr(i) (cpsw_cpdma_rxbd_offset(i) + \ - vtophys(rman_get_start(sc->res[0]))) -#define cpsw_cpdma_read_rxbd(i, val) \ - bus_read_region_4(sc->res[0], cpsw_cpdma_rxbd_offset(i), (uint32_t *) val, 4) -#define cpsw_cpdma_write_rxbd(i, val) \ - bus_write_region_4(sc->res[0], cpsw_cpdma_rxbd_offset(i), (uint32_t *) val, 4) -#define cpsw_cpdma_write_rxbd_next(i, val) \ - bus_write_4(sc->res[0], cpsw_cpdma_rxbd_offset(i), val) -#define cpsw_cpdma_read_rxbd_flags(i) \ - bus_read_2(sc->res[0], cpsw_cpdma_rxbd_offset(i)+14) - #endif /*_IF_CPSWVAR_H */ |
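
The queue and watchdog counters introduced by this change are exported through the device's sysctl tree, so they can be sampled from userland without any driver-specific ioctl. Below is a minimal reader sketch, not part of the commit: the "dev.cpsw.0" prefix is an assumption based on the usual dev.<driver>.<unit> layout produced by device_get_sysctl_tree(), and the leaf names are taken from the SYSCTL_ADD_* calls in cpsw_add_queue_sysctls() and cpsw_add_sysctls(); verify the exact OID paths with sysctl -a on the target system.

#include <sys/types.h>
#include <sys/sysctl.h>

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read and print one 32-bit sysctl value by name. */
static void
print_u32(const char *oid)
{
	uint32_t val;
	size_t len = sizeof(val);

	if (sysctlbyname(oid, &val, &len, NULL, 0) == -1)
		printf("%s: %s\n", oid, strerror(errno));
	else
		printf("%s: %u\n", oid, val);
}

int
main(void)
{
	/*
	 * Hypothetical OID paths: "dev.cpsw.0" is assumed; the leaf names
	 * match the sysctls added in this change.
	 */
	print_u32("dev.cpsw.0.queue.tx.activeBuffers");
	print_u32("dev.cpsw.0.queue.tx.longestChain");
	print_u32("dev.cpsw.0.queue.rx.totalDequeued");
	print_u32("dev.cpsw.0.watchdog.resets");
	print_u32("dev.cpsw.0.uptime");
	return (0);
}

Note that the per-port hardware counters under the "stats" node are published through cpsw_stats_sysctl() with sysctl_handle_64(), so those entries would be read into a uint64_t rather than a uint32_t.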