Diffstat (limited to 'drivers/net/ethernet/cadence/macb.c')
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 564
1 file changed, 391 insertions(+), 173 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 033064b..a9b0830 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -14,8 +14,10 @@ #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> +#include <linux/circ_buf.h> #include <linux/slab.h> #include <linux/init.h> +#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -26,37 +28,74 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_net.h> +#include <linux/pinctrl/consumer.h> #include "macb.h" #define RX_BUFFER_SIZE 128 -#define RX_RING_SIZE 512 -#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE) +#define RX_RING_SIZE 512 /* must be power of 2 */ +#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) -/* Make the IP header word-aligned (the ethernet header is 14 bytes) */ -#define RX_OFFSET 2 +#define TX_RING_SIZE 128 /* must be power of 2 */ +#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) -#define TX_RING_SIZE 128 -#define DEF_TX_RING_PENDING (TX_RING_SIZE - 1) -#define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE) +/* level of occupied TX descriptors under which we wake up TX process */ +#define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) -#define TX_RING_GAP(bp) \ - (TX_RING_SIZE - (bp)->tx_pending) -#define TX_BUFFS_AVAIL(bp) \ - (((bp)->tx_tail <= (bp)->tx_head) ? \ - (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \ - (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp)) -#define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1)) +#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ + | MACB_BIT(ISR_ROVR)) +#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ + | MACB_BIT(ISR_RLE) \ + | MACB_BIT(TXERR)) +#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) -#define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1)) +/* + * Graceful stop timeouts in us. 
We should allow up to + * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) + */ +#define MACB_HALT_TIMEOUT 1230 -/* minimum number of free TX descriptors before waking up TX process */ -#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4) +/* Ring buffer accessors */ +static unsigned int macb_tx_ring_wrap(unsigned int index) +{ + return index & (TX_RING_SIZE - 1); +} -#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ - | MACB_BIT(ISR_ROVR)) +static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index) +{ + return &bp->tx_ring[macb_tx_ring_wrap(index)]; +} -static void __macb_set_hwaddr(struct macb *bp) +static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index) +{ + return &bp->tx_skb[macb_tx_ring_wrap(index)]; +} + +static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index) +{ + dma_addr_t offset; + + offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc); + + return bp->tx_ring_dma + offset; +} + +static unsigned int macb_rx_ring_wrap(unsigned int index) +{ + return index & (RX_RING_SIZE - 1); +} + +static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) +{ + return &bp->rx_ring[macb_rx_ring_wrap(index)]; +} + +static void *macb_rx_buffer(struct macb *bp, unsigned int index) +{ + return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index); +} + +void macb_set_hwaddr(struct macb *bp) { u32 bottom; u16 top; @@ -65,31 +104,58 @@ static void __macb_set_hwaddr(struct macb *bp) macb_or_gem_writel(bp, SA1B, bottom); top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); macb_or_gem_writel(bp, SA1T, top); + + /* Clear unused address register sets */ + macb_or_gem_writel(bp, SA2B, 0); + macb_or_gem_writel(bp, SA2T, 0); + macb_or_gem_writel(bp, SA3B, 0); + macb_or_gem_writel(bp, SA3T, 0); + macb_or_gem_writel(bp, SA4B, 0); + macb_or_gem_writel(bp, SA4T, 0); } +EXPORT_SYMBOL_GPL(macb_set_hwaddr); -static void __init macb_get_hwaddr(struct macb *bp) +void macb_get_hwaddr(struct macb *bp) { + struct macb_platform_data *pdata; u32 bottom; u16 top; u8 addr[6]; + int i; - bottom = macb_or_gem_readl(bp, SA1B); - top = macb_or_gem_readl(bp, SA1T); + pdata = bp->pdev->dev.platform_data; - addr[0] = bottom & 0xff; - addr[1] = (bottom >> 8) & 0xff; - addr[2] = (bottom >> 16) & 0xff; - addr[3] = (bottom >> 24) & 0xff; - addr[4] = top & 0xff; - addr[5] = (top >> 8) & 0xff; + /* Check all 4 address register for vaild address */ + for (i = 0; i < 4; i++) { + bottom = macb_or_gem_readl(bp, SA1B + i * 8); + top = macb_or_gem_readl(bp, SA1T + i * 8); + + if (pdata && pdata->rev_eth_addr) { + addr[5] = bottom & 0xff; + addr[4] = (bottom >> 8) & 0xff; + addr[3] = (bottom >> 16) & 0xff; + addr[2] = (bottom >> 24) & 0xff; + addr[1] = top & 0xff; + addr[0] = (top & 0xff00) >> 8; + } else { + addr[0] = bottom & 0xff; + addr[1] = (bottom >> 8) & 0xff; + addr[2] = (bottom >> 16) & 0xff; + addr[3] = (bottom >> 24) & 0xff; + addr[4] = top & 0xff; + addr[5] = (top >> 8) & 0xff; + } - if (is_valid_ether_addr(addr)) { - memcpy(bp->dev->dev_addr, addr, sizeof(addr)); - } else { - netdev_info(bp->dev, "invalid hw address, using random\n"); - eth_hw_addr_random(bp->dev); + if (is_valid_ether_addr(addr)) { + memcpy(bp->dev->dev_addr, addr, sizeof(addr)); + return; + } } + + netdev_info(bp->dev, "invalid hw address, using random\n"); + eth_hw_addr_random(bp->dev); } +EXPORT_SYMBOL_GPL(macb_get_hwaddr); static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { @@ -152,13 +218,17 @@ static void 
macb_handle_link_change(struct net_device *dev) reg = macb_readl(bp, NCFGR); reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); + if (macb_is_gem(bp)) + reg &= ~GEM_BIT(GBE); if (phydev->duplex) reg |= MACB_BIT(FD); if (phydev->speed == SPEED_100) reg |= MACB_BIT(SPD); + if (phydev->speed == SPEED_1000) + reg |= GEM_BIT(GBE); - macb_writel(bp, NCFGR, reg); + macb_or_gem_writel(bp, NCFGR, reg); bp->speed = phydev->speed; bp->duplex = phydev->duplex; @@ -196,7 +266,9 @@ static void macb_handle_link_change(struct net_device *dev) static int macb_mii_probe(struct net_device *dev) { struct macb *bp = netdev_priv(dev); + struct macb_platform_data *pdata; struct phy_device *phydev; + int phy_irq; int ret; phydev = phy_find_first(bp->mii_bus); @@ -205,7 +277,14 @@ static int macb_mii_probe(struct net_device *dev) return -1; } - /* TODO : add pin_irq */ + pdata = dev_get_platdata(&bp->pdev->dev); + if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { + ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int"); + if (!ret) { + phy_irq = gpio_to_irq(pdata->phy_irq_pin); + phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; + } + } /* attach the mac to the phy */ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0, @@ -216,7 +295,10 @@ static int macb_mii_probe(struct net_device *dev) } /* mask with MAC supported features */ - phydev->supported &= PHY_BASIC_FEATURES; + if (macb_is_gem(bp)) + phydev->supported &= PHY_GBIT_FEATURES; + else + phydev->supported &= PHY_BASIC_FEATURES; phydev->advertising = phydev->supported; @@ -228,7 +310,7 @@ static int macb_mii_probe(struct net_device *dev) return 0; } -static int macb_mii_init(struct macb *bp) +int macb_mii_init(struct macb *bp) { struct macb_platform_data *pdata; int err = -ENXIO, i; @@ -284,6 +366,7 @@ err_out_free_mdiobus: err_out: return err; } +EXPORT_SYMBOL_GPL(macb_mii_init); static void macb_update_stats(struct macb *bp) { @@ -297,93 +380,148 @@ static void macb_update_stats(struct macb *bp) *p += __raw_readl(reg); } -static void macb_tx(struct macb *bp) +static int macb_halt_tx(struct macb *bp) { - unsigned int tail; - unsigned int head; - u32 status; + unsigned long halt_time, timeout; + u32 status; - status = macb_readl(bp, TSR); - macb_writel(bp, TSR, status); + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); - netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status); + timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); + do { + halt_time = jiffies; + status = macb_readl(bp, TSR); + if (!(status & MACB_BIT(TGO))) + return 0; - if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { - int i; - netdev_err(bp->dev, "TX %s, resetting buffers\n", - status & MACB_BIT(UND) ? 
- "underrun" : "retry limit exceeded"); + usleep_range(10, 250); + } while (time_before(halt_time, timeout)); - /* Transfer ongoing, disable transmitter, to avoid confusion */ - if (status & MACB_BIT(TGO)) - macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE)); + return -ETIMEDOUT; +} - head = bp->tx_head; +static void macb_tx_error_task(struct work_struct *work) +{ + struct macb *bp = container_of(work, struct macb, tx_error_task); + struct macb_tx_skb *tx_skb; + struct sk_buff *skb; + unsigned int tail; - /*Mark all the buffer as used to avoid sending a lost buffer*/ - for (i = 0; i < TX_RING_SIZE; i++) - bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); + netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n", + bp->tx_tail, bp->tx_head); - /* Add wrap bit */ - bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); + /* Make sure nobody is trying to queue up new packets */ + netif_stop_queue(bp->dev); - /* free transmit buffer in upper layer*/ - for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { - struct ring_info *rp = &bp->tx_skb[tail]; - struct sk_buff *skb = rp->skb; + /* + * Stop transmission now + * (in case we have just queued new packets) + */ + if (macb_halt_tx(bp)) + /* Just complain for now, reinitializing TX path can be good */ + netdev_err(bp->dev, "BUG: halt tx timed out\n"); - BUG_ON(skb == NULL); + /* No need for the lock here as nobody will interrupt us anymore */ - rmb(); + /* + * Treat frames in TX queue including the ones that caused the error. + * Free transmit buffers in upper layer. + */ + for (tail = bp->tx_tail; tail != bp->tx_head; tail++) { + struct macb_dma_desc *desc; + u32 ctrl; + + desc = macb_tx_desc(bp, tail); + ctrl = desc->ctrl; + tx_skb = macb_tx_skb(bp, tail); + skb = tx_skb->skb; + + if (ctrl & MACB_BIT(TX_USED)) { + netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", + macb_tx_ring_wrap(tail), skb->data); + bp->stats.tx_packets++; + bp->stats.tx_bytes += skb->len; + } else { + /* + * "Buffers exhausted mid-frame" errors may only happen + * if the driver is buggy, so complain loudly about those. + * Statistics are updated by hardware. + */ + if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) + netdev_err(bp->dev, + "BUG: TX buffers exhausted mid-frame\n"); - dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, - DMA_TO_DEVICE); - rp->skb = NULL; - dev_kfree_skb_irq(skb); + desc->ctrl = ctrl | MACB_BIT(TX_USED); } - bp->tx_head = bp->tx_tail = 0; - - /* Enable the transmitter again */ - if (status & MACB_BIT(TGO)) - macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE)); + dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len, + DMA_TO_DEVICE); + tx_skb->skb = NULL; + dev_kfree_skb(skb); } - if (!(status & MACB_BIT(COMP))) - /* - * This may happen when a buffer becomes complete - * between reading the ISR and scanning the - * descriptors. Nothing to worry about. 
- */ - return; + /* Make descriptor updates visible to hardware */ + wmb(); + + /* Reinitialize the TX desc queue */ + macb_writel(bp, TBQP, bp->tx_ring_dma); + /* Make TX ring reflect state of hardware */ + bp->tx_head = bp->tx_tail = 0; + + /* Now we are ready to start transmission again */ + netif_wake_queue(bp->dev); + + /* Housework before enabling TX IRQ */ + macb_writel(bp, TSR, macb_readl(bp, TSR)); + macb_writel(bp, IER, MACB_TX_INT_FLAGS); +} + +static void macb_tx_interrupt(struct macb *bp) +{ + unsigned int tail; + unsigned int head; + u32 status; + + status = macb_readl(bp, TSR); + macb_writel(bp, TSR, status); + + netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", + (unsigned long)status); head = bp->tx_head; - for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { - struct ring_info *rp = &bp->tx_skb[tail]; - struct sk_buff *skb = rp->skb; - u32 bufstat; + for (tail = bp->tx_tail; tail != head; tail++) { + struct macb_tx_skb *tx_skb; + struct sk_buff *skb; + struct macb_dma_desc *desc; + u32 ctrl; - BUG_ON(skb == NULL); + desc = macb_tx_desc(bp, tail); + /* Make hw descriptor updates visible to CPU */ rmb(); - bufstat = bp->tx_ring[tail].ctrl; - if (!(bufstat & MACB_BIT(TX_USED))) + ctrl = desc->ctrl; + + if (!(ctrl & MACB_BIT(TX_USED))) break; - netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n", - tail, skb->data); - dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, + tx_skb = macb_tx_skb(bp, tail); + skb = tx_skb->skb; + + netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", + macb_tx_ring_wrap(tail), skb->data); + dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len, DMA_TO_DEVICE); bp->stats.tx_packets++; bp->stats.tx_bytes += skb->len; - rp->skb = NULL; + tx_skb->skb = NULL; dev_kfree_skb_irq(skb); } bp->tx_tail = tail; - if (netif_queue_stopped(bp->dev) && - TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH) + if (netif_queue_stopped(bp->dev) + && CIRC_CNT(bp->tx_head, bp->tx_tail, + TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH) netif_wake_queue(bp->dev); } @@ -392,31 +530,48 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, { unsigned int len; unsigned int frag; - unsigned int offset = 0; + unsigned int offset; struct sk_buff *skb; + struct macb_dma_desc *desc; - len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl); + desc = macb_rx_desc(bp, last_frag); + len = MACB_BFEXT(RX_FRMLEN, desc->ctrl); - netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", - first_frag, last_frag, len); + netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", + macb_rx_ring_wrap(first_frag), + macb_rx_ring_wrap(last_frag), len); - skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET); + /* + * The ethernet header starts NET_IP_ALIGN bytes into the + * first buffer. Since the header is 14 bytes, this makes the + * payload word-aligned. + * + * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy + * the two padding bytes into the skb so that we avoid hitting + * the slowpath in memcpy(), and pull them off afterwards. 
+ */ + skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); if (!skb) { bp->stats.rx_dropped++; - for (frag = first_frag; ; frag = NEXT_RX(frag)) { - bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); + for (frag = first_frag; ; frag++) { + desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); if (frag == last_frag) break; } + + /* Make descriptor updates visible to hardware */ wmb(); + return 1; } - skb_reserve(skb, RX_OFFSET); + offset = 0; + len += NET_IP_ALIGN; skb_checksum_none_assert(skb); skb_put(skb, len); - for (frag = first_frag; ; frag = NEXT_RX(frag)) { + for (frag = first_frag; ; frag++) { unsigned int frag_len = RX_BUFFER_SIZE; if (offset + frag_len > len) { @@ -424,22 +579,24 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, frag_len = len - offset; } skb_copy_to_linear_data_offset(skb, offset, - (bp->rx_buffers + - (RX_BUFFER_SIZE * frag)), - frag_len); + macb_rx_buffer(bp, frag), frag_len); offset += RX_BUFFER_SIZE; - bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); - wmb(); + desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); if (frag == last_frag) break; } + /* Make descriptor updates visible to hardware */ + wmb(); + + __skb_pull(skb, NET_IP_ALIGN); skb->protocol = eth_type_trans(skb, bp->dev); bp->stats.rx_packets++; - bp->stats.rx_bytes += len; - netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n", + bp->stats.rx_bytes += skb->len; + netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", skb->len, skb->csum); netif_receive_skb(skb); @@ -452,8 +609,12 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin, { unsigned int frag; - for (frag = begin; frag != end; frag = NEXT_RX(frag)) - bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); + for (frag = begin; frag != end; frag++) { + struct macb_dma_desc *desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); + } + + /* Make descriptor updates visible to hardware */ wmb(); /* @@ -466,15 +627,18 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin, static int macb_rx(struct macb *bp, int budget) { int received = 0; - unsigned int tail = bp->rx_tail; + unsigned int tail; int first_frag = -1; - for (; budget > 0; tail = NEXT_RX(tail)) { + for (tail = bp->rx_tail; budget > 0; tail++) { + struct macb_dma_desc *desc = macb_rx_desc(bp, tail); u32 addr, ctrl; + /* Make hw descriptor updates visible to CPU */ rmb(); - addr = bp->rx_ring[tail].addr; - ctrl = bp->rx_ring[tail].ctrl; + + addr = desc->addr; + ctrl = desc->ctrl; if (!(addr & MACB_BIT(RX_USED))) break; @@ -517,7 +681,7 @@ static int macb_poll(struct napi_struct *napi, int budget) work_done = 0; - netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n", + netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", (unsigned long)status, budget); work_done = macb_rx(bp, budget); @@ -552,10 +716,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) while (status) { /* close possible race with dev_close */ if (unlikely(!netif_running(dev))) { - macb_writel(bp, IDR, ~0UL); + macb_writel(bp, IDR, -1); break; } + netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status); + if (status & MACB_RX_INT_FLAGS) { /* * There's no point taking any more interrupts @@ -567,14 +733,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) macb_writel(bp, IDR, MACB_RX_INT_FLAGS); if (napi_schedule_prep(&bp->napi)) { - netdev_dbg(bp->dev, "scheduling RX softirq\n"); + netdev_vdbg(bp->dev, "scheduling RX softirq\n"); __napi_schedule(&bp->napi); } } - if (status & 
(MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) | - MACB_BIT(ISR_RLE))) - macb_tx(bp); + if (unlikely(status & (MACB_TX_ERR_FLAGS))) { + macb_writel(bp, IDR, MACB_TX_INT_FLAGS); + schedule_work(&bp->tx_error_task); + break; + } + + if (status & MACB_BIT(TCOMP)) + macb_tx_interrupt(bp); /* * Link change detection isn't possible with RMII, so we'll @@ -626,11 +797,13 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) struct macb *bp = netdev_priv(dev); dma_addr_t mapping; unsigned int len, entry; + struct macb_dma_desc *desc; + struct macb_tx_skb *tx_skb; u32 ctrl; unsigned long flags; -#ifdef DEBUG - netdev_dbg(bp->dev, +#if defined(DEBUG) && defined(VERBOSE_DEBUG) + netdev_vdbg(bp->dev, "start_xmit: len %u head %p data %p tail %p end %p\n", skb->len, skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb)); @@ -642,7 +815,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irqsave(&bp->lock, flags); /* This is a hard error, log it. */ - if (TX_BUFFS_AVAIL(bp) < 1) { + if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) { netif_stop_queue(dev); spin_unlock_irqrestore(&bp->lock, flags); netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n"); @@ -651,13 +824,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - entry = bp->tx_head; - netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry); + entry = macb_tx_ring_wrap(bp->tx_head); + bp->tx_head++; + netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry); mapping = dma_map_single(&bp->pdev->dev, skb->data, len, DMA_TO_DEVICE); - bp->tx_skb[entry].skb = skb; - bp->tx_skb[entry].mapping = mapping; - netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n", + + tx_skb = &bp->tx_skb[entry]; + tx_skb->skb = skb; + tx_skb->mapping = mapping; + netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n", skb->data, (unsigned long)mapping); ctrl = MACB_BF(TX_FRMLEN, len); @@ -665,18 +841,18 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) if (entry == (TX_RING_SIZE - 1)) ctrl |= MACB_BIT(TX_WRAP); - bp->tx_ring[entry].addr = mapping; - bp->tx_ring[entry].ctrl = ctrl; - wmb(); + desc = &bp->tx_ring[entry]; + desc->addr = mapping; + desc->ctrl = ctrl; - entry = NEXT_TX(entry); - bp->tx_head = entry; + /* Make newly initialized descriptor visible to hardware */ + wmb(); skb_tx_timestamp(skb); macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); - if (TX_BUFFS_AVAIL(bp) < 1) + if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) netif_stop_queue(dev); spin_unlock_irqrestore(&bp->lock, flags); @@ -712,7 +888,7 @@ static int macb_alloc_consistent(struct macb *bp) { int size; - size = TX_RING_SIZE * sizeof(struct ring_info); + size = TX_RING_SIZE * sizeof(struct macb_tx_skb); bp->tx_skb = kmalloc(size, GFP_KERNEL); if (!bp->tx_skb) goto out_err; @@ -775,9 +951,6 @@ static void macb_init_rings(struct macb *bp) static void macb_reset_hw(struct macb *bp) { - /* Make sure we have the write buffer for ourselves */ - wmb(); - /* * Disable RX and TX (XXX: Should we halt the transmission * more gracefully?) 
@@ -788,11 +961,11 @@ static void macb_reset_hw(struct macb *bp) macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); /* Clear all status flags */ - macb_writel(bp, TSR, ~0UL); - macb_writel(bp, RSR, ~0UL); + macb_writel(bp, TSR, -1); + macb_writel(bp, RSR, -1); /* Disable all interrupts */ - macb_writel(bp, IDR, ~0UL); + macb_writel(bp, IDR, -1); macb_readl(bp, ISR); } @@ -860,8 +1033,12 @@ static u32 macb_dbw(struct macb *bp) } /* - * Configure the receive DMA engine to use the correct receive buffer size. - * This is a configurable parameter for GEM. + * Configure the receive DMA engine + * - use the correct receive buffer size + * - set the possibility to use INCR16 bursts + * (if not supported by FIFO, it will fallback to default) + * - set both rx/tx packet buffers to full memory size + * These are configurable parameters for GEM. */ static void macb_configure_dma(struct macb *bp) { @@ -870,6 +1047,8 @@ static void macb_configure_dma(struct macb *bp) if (macb_is_gem(bp)) { dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64); + dmacfg |= GEM_BF(FBLDO, 16); + dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); gem_writel(bp, DMACFG, dmacfg); } } @@ -879,9 +1058,10 @@ static void macb_init_hw(struct macb *bp) u32 config; macb_reset_hw(bp); - __macb_set_hwaddr(bp); + macb_set_hwaddr(bp); config = macb_mdc_clk_div(bp); + config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ config |= MACB_BIT(PAE); /* PAuse Enable */ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ config |= MACB_BIT(BIG); /* Receive oversized frames */ @@ -891,6 +1071,8 @@ static void macb_init_hw(struct macb *bp) config |= MACB_BIT(NBC); /* No BroadCast */ config |= macb_dbw(bp); macb_writel(bp, NCFGR, config); + bp->speed = SPEED_10; + bp->duplex = DUPLEX_HALF; macb_configure_dma(bp); @@ -902,13 +1084,8 @@ static void macb_init_hw(struct macb *bp) macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); /* Enable interrupts */ - macb_writel(bp, IER, (MACB_BIT(RCOMP) - | MACB_BIT(RXUBR) - | MACB_BIT(ISR_TUND) - | MACB_BIT(ISR_RLE) - | MACB_BIT(TXERR) - | MACB_BIT(TCOMP) - | MACB_BIT(ISR_ROVR) + macb_writel(bp, IER, (MACB_RX_INT_FLAGS + | MACB_TX_INT_FLAGS | MACB_BIT(HRESP))); } @@ -996,7 +1173,7 @@ static void macb_sethashtable(struct net_device *dev) /* * Enable/Disable promiscuous and multicast modes. 
*/ -static void macb_set_rx_mode(struct net_device *dev) +void macb_set_rx_mode(struct net_device *dev) { unsigned long cfg; struct macb *bp = netdev_priv(dev); @@ -1028,6 +1205,7 @@ static void macb_set_rx_mode(struct net_device *dev) macb_writel(bp, NCFGR, cfg); } +EXPORT_SYMBOL_GPL(macb_set_rx_mode); static int macb_open(struct net_device *dev) { @@ -1043,9 +1221,6 @@ static int macb_open(struct net_device *dev) if (!bp->phy_dev) return -EAGAIN; - if (!is_valid_ether_addr(dev->dev_addr)) - return -EADDRNOTAVAIL; - err = macb_alloc_consistent(bp); if (err) { netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", @@ -1135,7 +1310,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) return nstat; } -static struct net_device_stats *macb_get_stats(struct net_device *dev) +struct net_device_stats *macb_get_stats(struct net_device *dev) { struct macb *bp = netdev_priv(dev); struct net_device_stats *nstat = &bp->stats; @@ -1181,6 +1356,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) return nstat; } +EXPORT_SYMBOL_GPL(macb_get_stats); static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { @@ -1204,25 +1380,55 @@ static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return phy_ethtool_sset(phydev, cmd); } -static void macb_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) +static int macb_get_regs_len(struct net_device *netdev) +{ + return MACB_GREGS_NBR * sizeof(u32); +} + +static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) { struct macb *bp = netdev_priv(dev); + unsigned int tail, head; + u32 *regs_buff = p; + + regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) + | MACB_GREGS_VERSION; + + tail = macb_tx_ring_wrap(bp->tx_tail); + head = macb_tx_ring_wrap(bp->tx_head); + + regs_buff[0] = macb_readl(bp, NCR); + regs_buff[1] = macb_or_gem_readl(bp, NCFGR); + regs_buff[2] = macb_readl(bp, NSR); + regs_buff[3] = macb_readl(bp, TSR); + regs_buff[4] = macb_readl(bp, RBQP); + regs_buff[5] = macb_readl(bp, TBQP); + regs_buff[6] = macb_readl(bp, RSR); + regs_buff[7] = macb_readl(bp, IMR); - strcpy(info->driver, bp->pdev->dev.driver->name); - strcpy(info->version, "$Revision: 1.14 $"); - strcpy(info->bus_info, dev_name(&bp->pdev->dev)); + regs_buff[8] = tail; + regs_buff[9] = head; + regs_buff[10] = macb_tx_dma(bp, tail); + regs_buff[11] = macb_tx_dma(bp, head); + + if (macb_is_gem(bp)) { + regs_buff[12] = gem_readl(bp, USRIO); + regs_buff[13] = gem_readl(bp, DMACFG); + } } -static const struct ethtool_ops macb_ethtool_ops = { +const struct ethtool_ops macb_ethtool_ops = { .get_settings = macb_get_settings, .set_settings = macb_set_settings, - .get_drvinfo = macb_get_drvinfo, + .get_regs_len = macb_get_regs_len, + .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, }; +EXPORT_SYMBOL_GPL(macb_ethtool_ops); -static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct macb *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; @@ -1235,6 +1441,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(phydev, rq, cmd); } +EXPORT_SYMBOL_GPL(macb_ioctl); static const struct net_device_ops macb_netdev_ops = { .ndo_open = macb_open, @@ -1263,7 +1470,7 @@ static const struct of_device_id macb_dt_ids[] = { MODULE_DEVICE_TABLE(of, macb_dt_ids); -static int __devinit 
macb_get_phy_mode_dt(struct platform_device *pdev) +static int macb_get_phy_mode_dt(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -1273,7 +1480,7 @@ static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev) return -ENODEV; } -static int __devinit macb_get_hwaddr_dt(struct macb *bp) +static int macb_get_hwaddr_dt(struct macb *bp) { struct device_node *np = bp->pdev->dev.of_node; if (np) { @@ -1287,11 +1494,11 @@ static int __devinit macb_get_hwaddr_dt(struct macb *bp) return -ENODEV; } #else -static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev) +static int macb_get_phy_mode_dt(struct platform_device *pdev) { return -ENODEV; } -static int __devinit macb_get_hwaddr_dt(struct macb *bp) +static int macb_get_hwaddr_dt(struct macb *bp) { return -ENODEV; } @@ -1306,6 +1513,7 @@ static int __init macb_probe(struct platform_device *pdev) struct phy_device *phydev; u32 config; int err = -ENXIO; + struct pinctrl *pinctrl; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { @@ -1313,6 +1521,15 @@ static int __init macb_probe(struct platform_device *pdev) goto err_out; } + pinctrl = devm_pinctrl_get_select_default(&pdev->dev); + if (IS_ERR(pinctrl)) { + err = PTR_ERR(pinctrl); + if (err == -EPROBE_DEFER) + goto err_out; + + dev_warn(&pdev->dev, "No pinctrl provided\n"); + } + err = -ENOMEM; dev = alloc_etherdev(sizeof(*bp)); if (!dev) @@ -1328,6 +1545,7 @@ static int __init macb_probe(struct platform_device *pdev) bp->dev = dev; spin_lock_init(&bp->lock); + INIT_WORK(&bp->tx_error_task, macb_tx_error_task); bp->pclk = clk_get(&pdev->dev, "pclk"); if (IS_ERR(bp->pclk)) { @@ -1384,7 +1602,9 @@ static int __init macb_probe(struct platform_device *pdev) bp->phy_interface = err; } - if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) + if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) + macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII)); + else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) #if defined(CONFIG_ARCH_AT91) macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN))); @@ -1398,8 +1618,6 @@ static int __init macb_probe(struct platform_device *pdev) macb_or_gem_writel(bp, USRIO, MACB_BIT(MII)); #endif - bp->tx_pending = DEF_TX_RING_PENDING; - err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); |
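
A note on the new MACB_HALT_TIMEOUT value introduced above: 1230 us is consistent with one worst-case frame time at 10 Mbit/s if preamble, SFD and the inter-frame gap are counted (the patch comment only says "1 frame time"; this breakdown is my reading): 1518 + 8 + 12 = 1538 bytes on the wire, 1538 x 8 = 12304 bits, and 12304 bits / 10 Mbit/s = 1230.4 us, truncated to 1230.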
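
The patch also replaces the old TX_BUFFS_AVAIL()/NEXT_TX() arithmetic with free-running tx_head/tx_tail counters, a macb_tx_ring_wrap() masking helper, and the CIRC_CNT()/CIRC_SPACE() macros from <linux/circ_buf.h>; this only works because TX_RING_SIZE and RX_RING_SIZE are powers of 2. Below is a minimal user-space sketch of that accounting, not driver code: the two macros restate the kernel header, TX_RING_SIZE and MACB_TX_WAKEUP_THRESH mirror the driver, and main() with its sample index values is purely illustrative.

/*
 * User-space sketch of the ring accounting used by the patch.
 * CIRC_CNT()/CIRC_SPACE() are restated from <linux/circ_buf.h>.
 */
#include <stdio.h>

#define TX_RING_SIZE            128     /* must be a power of 2 */
#define MACB_TX_WAKEUP_THRESH   (3 * TX_RING_SIZE / 4)

/* Same definitions as <linux/circ_buf.h> */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

/* Mirrors macb_tx_ring_wrap(): map a free-running index onto the ring */
static unsigned int tx_ring_wrap(unsigned int index)
{
        return index & (TX_RING_SIZE - 1);
}

int main(void)
{
        /* head/tail run freely and are only masked when used as indices */
        unsigned int head = 300, tail = 200;

        printf("slot for head: %u\n", tx_ring_wrap(head));   /* 300 & 127 = 44 */
        printf("in flight:     %u\n", CIRC_CNT(head, tail, TX_RING_SIZE));
        printf("free slots:    %u\n", CIRC_SPACE(head, tail, TX_RING_SIZE));

        /* Queue control as in the patch: stop the queue when no descriptor
         * is free, wake it once the in-flight count drops to the threshold. */
        if (CIRC_SPACE(head, tail, TX_RING_SIZE) < 1)
                printf("would netif_stop_queue()\n");
        if (CIRC_CNT(head, tail, TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
                printf("would netif_wake_queue()\n");

        return 0;
}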