Diffstat (limited to 'drivers/net')
62 files changed, 679 insertions, 385 deletions
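A recurring pattern in this series (bnad, ehea, e1000, e1000e, i40e, i40evf, mvneta) is replacing tests on skb->protocol with vlan_get_protocol(), so checksum and TSO offload decisions still see the real L3 protocol when the frame carries a VLAN tag and skb->protocol is ETH_P_8021Q. The sketch below is not taken from any of these patches; it only illustrates the idea, and the helper name example_l4_proto() is invented for illustration.

/*
 * Minimal sketch of the vlan_get_protocol() pattern used by the
 * Tx checksum/TSO paths touched in this series.  With a VLAN tag
 * present, skb->protocol is ETH_P_8021Q and keying offload logic
 * on it silently skips the offload; vlan_get_protocol() looks past
 * the tag and returns the encapsulated L3 protocol instead.
 */
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>

static u8 example_l4_proto(struct sk_buff *skb)
{
        __be16 l3_proto = vlan_get_protocol(skb);      /* not skb->protocol */

        if (l3_proto == htons(ETH_P_IP))
                return ip_hdr(skb)->protocol;
        if (l3_proto == htons(ETH_P_IPV6))
                return ipv6_hdr(skb)->nexthdr;         /* may be an ext header */

        return 0;      /* unknown L3 protocol: request no offload */
}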
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 059c741..3fe45c7 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2177,10 +2177,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; vp->tx_ring[entry].frag[i+1].addr = - cpu_to_le32(pci_map_single( - VORTEX_PCI(vp), - (void *)skb_frag_address(frag), - skb_frag_size(frag), PCI_DMA_TODEVICE)); + cpu_to_le32(skb_frag_dma_map( + &VORTEX_PCI(vp)->dev, + frag, + frag->page_offset, frag->size, DMA_TO_DEVICE)); if (i == skb_shinfo(skb)->nr_frags-1) vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 23578df..3005155 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth) GRETH_REGORIN(greth->regs->control, GRETH_TXEN); } +static inline void greth_enable_tx_and_irq(struct greth_private *greth) +{ + wmb(); /* BDs must been written to memory before enabling TX */ + GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI); +} + static inline void greth_disable_tx(struct greth_private *greth) { GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN); @@ -447,29 +453,30 @@ out: return err; } +static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next) +{ + if (tx_next < tx_last) + return (tx_last - tx_next) - 1; + else + return GRETH_TXBD_NUM - (tx_next - tx_last) - 1; +} static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) { struct greth_private *greth = netdev_priv(dev); struct greth_bd *bdp; - u32 status = 0, dma_addr, ctrl; + u32 status, dma_addr; int curr_tx, nr_frags, i, err = NETDEV_TX_OK; unsigned long flags; + u16 tx_last; nr_frags = skb_shinfo(skb)->nr_frags; + tx_last = greth->tx_last; + rmb(); /* tx_last is updated by the poll task */ - /* Clean TX Ring */ - greth_clean_tx_gbit(dev); - - if (greth->tx_free < nr_frags + 1) { - spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ - ctrl = GRETH_REGLOAD(greth->regs->control); - /* Enable TX IRQ only if not already in poll() routine */ - if (ctrl & GRETH_RXI) - GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); + if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) { netif_stop_queue(dev); - spin_unlock_irqrestore(&greth->devlock, flags); err = NETDEV_TX_BUSY; goto out; } @@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) /* Linear buf */ if (nr_frags != 0) status = GRETH_TXBD_MORE; + else + status = GRETH_BD_IE; if (skb->ip_summed == CHECKSUM_PARTIAL) status |= GRETH_TXBD_CSALL; @@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) /* Enable the descriptor chain by enabling the first descriptor */ bdp = greth->tx_bd_base + greth->tx_next; - greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); - greth->tx_next = curr_tx; - greth->tx_free -= nr_frags + 1; - - wmb(); + greth_write_bd(&bdp->stat, + greth_read_bd(&bdp->stat) | GRETH_BD_EN); spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ - greth_enable_tx(greth); + greth->tx_next = curr_tx; + greth_enable_tx_and_irq(greth); spin_unlock_irqrestore(&greth->devlock, flags); return NETDEV_TX_OK; @@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev) if (greth->tx_free > 0) { 
netif_wake_queue(dev); } - } static inline void greth_update_tx_stats(struct net_device *dev, u32 stat) @@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev) { struct greth_private *greth; struct greth_bd *bdp, *bdp_last_frag; - struct sk_buff *skb; + struct sk_buff *skb = NULL; u32 stat; int nr_frags, i; + u16 tx_last; greth = netdev_priv(dev); + tx_last = greth->tx_last; - while (greth->tx_free < GRETH_TXBD_NUM) { + while (tx_last != greth->tx_next) { - skb = greth->tx_skbuff[greth->tx_last]; + skb = greth->tx_skbuff[tx_last]; nr_frags = skb_shinfo(skb)->nr_frags; /* We only clean fully completed SKBs */ - bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); + bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags); GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); mb(); @@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev) if (stat & GRETH_BD_EN) break; - greth->tx_skbuff[greth->tx_last] = NULL; + greth->tx_skbuff[tx_last] = NULL; greth_update_tx_stats(dev, stat); dev->stats.tx_bytes += skb->len; - bdp = greth->tx_bd_base + greth->tx_last; + bdp = greth->tx_bd_base + tx_last; - greth->tx_last = NEXT_TX(greth->tx_last); + tx_last = NEXT_TX(tx_last); dma_unmap_single(greth->dev, greth_read_bd(&bdp->addr), @@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev) for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - bdp = greth->tx_bd_base + greth->tx_last; + bdp = greth->tx_bd_base + tx_last; dma_unmap_page(greth->dev, greth_read_bd(&bdp->addr), skb_frag_size(frag), DMA_TO_DEVICE); - greth->tx_last = NEXT_TX(greth->tx_last); + tx_last = NEXT_TX(tx_last); } - greth->tx_free += nr_frags+1; dev_kfree_skb(skb); } + if (skb) { /* skb is set only if the above while loop was entered */ + wmb(); + greth->tx_last = tx_last; - if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1))) - netif_wake_queue(dev); + if (netif_queue_stopped(dev) && + (greth_num_free_bds(tx_last, greth->tx_next) > + (MAX_SKB_FRAGS+1))) + netif_wake_queue(dev); + } } static int greth_rx(struct net_device *dev, int limit) @@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget) greth = container_of(napi, struct greth_private, napi); restart_txrx_poll: - if (netif_queue_stopped(greth->netdev)) { - if (greth->gbit_mac) - greth_clean_tx_gbit(greth->netdev); - else - greth_clean_tx(greth->netdev); - } - if (greth->gbit_mac) { + greth_clean_tx_gbit(greth->netdev); work_done += greth_rx_gbit(greth->netdev, budget - work_done); } else { + if (netif_queue_stopped(greth->netdev)) + greth_clean_tx(greth->netdev); work_done += greth_rx(greth->netdev, budget - work_done); } @@ -983,7 +992,8 @@ restart_txrx_poll: spin_lock_irqsave(&greth->devlock, flags); ctrl = GRETH_REGLOAD(greth->regs->control); - if (netif_queue_stopped(greth->netdev)) { + if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) || + (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) { GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI | GRETH_RXI); mask = GRETH_INT_RX | GRETH_INT_RE | diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h index 232a622..ae16ac9 100644 --- a/drivers/net/ethernet/aeroflex/greth.h +++ b/drivers/net/ethernet/aeroflex/greth.h @@ -107,7 +107,7 @@ struct greth_private { u16 tx_next; u16 tx_last; - u16 tx_free; + u16 tx_free; /* only used on 10/100Mbit */ u16 rx_cur; struct greth_regs *regs; /* Address of controller 
registers. */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 346592d..a3c1135 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -272,8 +272,8 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer, struct xgbe_prv_data *pdata = filp->private_data; unsigned int value; - value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd, - pdata->debugfs_xpcs_reg); + value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd, + pdata->debugfs_xpcs_reg); return xgbe_common_read(buffer, count, ppos, value); } @@ -290,8 +290,8 @@ static ssize_t xpcs_reg_value_write(struct file *filp, if (len < 0) return len; - pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd, - pdata->debugfs_xpcs_reg, value); + XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg, + value); return len; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index edaca44..ea27383 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -348,7 +348,7 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) /* Clear MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; - q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); @@ -373,7 +373,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) /* Set MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; - q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); @@ -509,8 +509,8 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); /* Enable all counter interrupts */ - XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff); - XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff); + XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); + XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); } static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata) @@ -1633,6 +1633,9 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) { unsigned int i, count; + if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) + return 0; + for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); @@ -1703,8 +1706,8 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); } -static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size, - unsigned char queue_count) +static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size, + unsigned int queue_count) { unsigned int q_fifo_size = 0; enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256; @@ -1748,6 +1751,10 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size, q_fifo_size = XGBE_FIFO_SIZE_KB(256); break; } + + /* The configured value is not the actual amount of fifo RAM */ + q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size); + q_fifo_size = q_fifo_size / queue_count; /* Set the queue fifo size programmable value */ @@ -1947,6 +1954,32 @@ static void 
xgbe_config_vlan_support(struct xgbe_prv_data *pdata) xgbe_disable_rx_vlan_stripping(pdata); } +static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) +{ + bool read_hi; + u64 val; + + switch (reg_lo) { + /* These registers are always 64 bit */ + case MMC_TXOCTETCOUNT_GB_LO: + case MMC_TXOCTETCOUNT_G_LO: + case MMC_RXOCTETCOUNT_GB_LO: + case MMC_RXOCTETCOUNT_G_LO: + read_hi = true; + break; + + default: + read_hi = false; + }; + + val = XGMAC_IOREAD(pdata, reg_lo); + + if (read_hi) + val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); + + return val; +} + static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) { struct xgbe_mmc_stats *stats = &pdata->mmc_stats; @@ -1954,75 +1987,75 @@ static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) stats->txoctetcount_gb += - XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) stats->txframecount_gb += - XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) stats->txbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) stats->txmulticastframes_g += - XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) stats->tx64octets_gb += - XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) stats->tx65to127octets_gb += - XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) stats->tx128to255octets_gb += - XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) stats->tx256to511octets_gb += - XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) stats->tx512to1023octets_gb += - XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) stats->tx1024tomaxoctets_gb += - XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) stats->txunicastframes_gb += - XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) stats->txmulticastframes_gb += - XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) stats->txbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) stats->txunderflowerror += - XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); + xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) stats->txoctetcount_g += - XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); + 
xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) stats->txframecount_g += - XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) stats->txpauseframes += - XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); + xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) stats->txvlanframes_g += - XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); } static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) @@ -2032,95 +2065,95 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) stats->rxframecount_gb += - XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) stats->rxoctetcount_gb += - XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) stats->rxoctetcount_g += - XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) stats->rxbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) stats->rxmulticastframes_g += - XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) stats->rxcrcerror += - XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); + xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) stats->rxrunterror += - XGMAC_IOREAD(pdata, MMC_RXRUNTERROR); + xgbe_mmc_read(pdata, MMC_RXRUNTERROR); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) stats->rxjabbererror += - XGMAC_IOREAD(pdata, MMC_RXJABBERERROR); + xgbe_mmc_read(pdata, MMC_RXJABBERERROR); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) stats->rxundersize_g += - XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) stats->rxoversize_g += - XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) stats->rx64octets_gb += - XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) stats->rx65to127octets_gb += - XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) stats->rx128to255octets_gb += - XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) stats->rx256to511octets_gb += - XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) stats->rx512to1023octets_gb += - XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) stats->rx1024tomaxoctets_gb += - XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); if 
(XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) stats->rxunicastframes_g += - XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) stats->rxlengtherror += - XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); + xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) stats->rxoutofrangetype += - XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); + xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) stats->rxpauseframes += - XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); + xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) stats->rxfifooverflow += - XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); + xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) stats->rxvlanframes_gb += - XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) stats->rxwatchdogerror += - XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); + xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); } static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) @@ -2131,127 +2164,127 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); stats->txoctetcount_gb += - XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); stats->txframecount_gb += - XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); stats->txbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); stats->txmulticastframes_g += - XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); stats->tx64octets_gb += - XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); stats->tx65to127octets_gb += - XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); stats->tx128to255octets_gb += - XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); stats->tx256to511octets_gb += - XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); stats->tx512to1023octets_gb += - XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); stats->tx1024tomaxoctets_gb += - XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); stats->txunicastframes_gb += - XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); stats->txmulticastframes_gb += - XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); stats->txbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); stats->txunderflowerror += - XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); + xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); stats->txoctetcount_g += - XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); stats->txframecount_g += - XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); stats->txpauseframes += - XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); + 
xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); stats->txvlanframes_g += - XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); stats->rxframecount_gb += - XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); stats->rxoctetcount_gb += - XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); stats->rxoctetcount_g += - XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); stats->rxbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); stats->rxmulticastframes_g += - XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); stats->rxcrcerror += - XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); + xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); stats->rxrunterror += - XGMAC_IOREAD(pdata, MMC_RXRUNTERROR); + xgbe_mmc_read(pdata, MMC_RXRUNTERROR); stats->rxjabbererror += - XGMAC_IOREAD(pdata, MMC_RXJABBERERROR); + xgbe_mmc_read(pdata, MMC_RXJABBERERROR); stats->rxundersize_g += - XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); stats->rxoversize_g += - XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); stats->rx64octets_gb += - XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); stats->rx65to127octets_gb += - XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); stats->rx128to255octets_gb += - XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); stats->rx256to511octets_gb += - XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); stats->rx512to1023octets_gb += - XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); stats->rx1024tomaxoctets_gb += - XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); stats->rxunicastframes_g += - XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); stats->rxlengtherror += - XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); + xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); stats->rxoutofrangetype += - XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); + xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); stats->rxpauseframes += - XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); + xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); stats->rxfifooverflow += - XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); + xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); stats->rxvlanframes_gb += - XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); stats->rxwatchdogerror += - XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); + xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); /* Un-freeze counters */ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index dc84f71..b26d758 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -361,6 +361,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) memset(hw_feat, 0, sizeof(*hw_feat)); + hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR); + /* Hardware feature register 0 */ hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, 
VLHASH); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index a076aca..46f6130 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -361,15 +361,16 @@ static void xgbe_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_features *hw_feat = &pdata->hw_feat; strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, dev_name(pdata->dev), sizeof(drvinfo->bus_info)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d", - XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER), - XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID), - XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER)); + XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER), + XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID), + XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER)); drvinfo->n_stats = XGBE_STATS_COUNT; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 8aa6a93..bdf9cfa 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -172,7 +172,7 @@ static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata) } if (i < pdata->rx_ring_count) { - spin_lock_init(&tx_ring->lock); + spin_lock_init(&rx_ring->lock); channel->rx_ring = rx_ring++; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 07bf70a..e9fe6e6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -183,6 +183,7 @@ #define XGMAC_DRIVER_CONTEXT 1 #define XGMAC_IOCTL_CONTEXT 2 +#define XGBE_FIFO_MAX 81920 #define XGBE_FIFO_SIZE_B(x) (x) #define XGBE_FIFO_SIZE_KB(x) (x * 1024) @@ -526,6 +527,9 @@ struct xgbe_desc_if { * or configurations are present in the device. 
*/ struct xgbe_hw_features { + /* HW Version */ + unsigned int version; + /* HW Feature Register0 */ unsigned int gmii; /* 1000 Mbps support */ unsigned int vlhash; /* VLAN Hash Filter */ diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig index 616dff6..f4054d24 100644 --- a/drivers/net/ethernet/apm/xgene/Kconfig +++ b/drivers/net/ethernet/apm/xgene/Kconfig @@ -1,5 +1,6 @@ config NET_XGENE tristate "APM X-Gene SoC Ethernet Driver" + depends on HAS_DMA select PHYLIB help This is the Ethernet driver for the on-chip ethernet interface on the diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 7dcfb19..d8d07a8 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -84,7 +84,7 @@ config BNX2 config CNIC tristate "QLogic CNIC support" - depends on PCI + depends on PCI && (IPV6 || IPV6=n) select BNX2 select UIO ---help--- diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5ba8af5..c4daa06 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -2233,7 +2233,12 @@ struct shmem2_region { u32 reserved3; /* Offset 0x14C */ u32 reserved4; /* Offset 0x150 */ u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ - #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0) + #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001 + #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00 + #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8 + #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000 + #define LINK_SFP_EEPROM_COMP_CODE_LR 0x00002000 + #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000 u32 reserved5[2]; u32 reserved6[PORT_MAX]; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 53fb4fa..549549e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -154,15 +154,22 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) #define SFP_EEPROM_CON_TYPE_ADDR 0x2 + #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 -#define SFP_EEPROM_COMP_CODE_ADDR 0x3 - #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4) - #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5) - #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6) +#define SFP_EEPROM_10G_COMP_CODE_ADDR 0x3 + #define SFP_EEPROM_10G_COMP_CODE_SR_MASK (1<<4) + #define SFP_EEPROM_10G_COMP_CODE_LR_MASK (1<<5) + #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1<<6) + +#define SFP_EEPROM_1G_COMP_CODE_ADDR 0x6 + #define SFP_EEPROM_1G_COMP_CODE_SX (1<<0) + #define SFP_EEPROM_1G_COMP_CODE_LX (1<<1) + #define SFP_EEPROM_1G_COMP_CODE_CX (1<<2) + #define SFP_EEPROM_1G_COMP_CODE_BASE_T (1<<3) #define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 @@ -3633,8 +3640,8 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, reg_set[i].val); /* Start KR2 work-around timer which handles BCM8073 link-parner */ - vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; - bnx2x_update_link_attr(params, vars->link_attr_sync); + params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; + bnx2x_update_link_attr(params, params->link_attr_sync); } static void bnx2x_disable_kr2(struct link_params *params, @@ -3666,8 +3673,8 @@ static void bnx2x_disable_kr2(struct 
link_params *params, for (i = 0; i < ARRAY_SIZE(reg_set); i++) bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); - vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; - bnx2x_update_link_attr(params, vars->link_attr_sync); + params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; + bnx2x_update_link_attr(params, params->link_attr_sync); vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; } @@ -4810,7 +4817,7 @@ void bnx2x_link_status_update(struct link_params *params, ~FEATURE_CONFIG_PFC_ENABLED; if (SHMEM2_HAS(bp, link_attr_sync)) - vars->link_attr_sync = SHMEM2_RD(bp, + params->link_attr_sync = SHMEM2_RD(bp, link_attr_sync[params->port]); DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", @@ -8057,21 +8064,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u32 sync_offset = 0, phy_idx, media_types; - u8 gport, val[2], check_limiting_mode = 0; + u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0; *edc_mode = EDC_MODE_LIMITING; phy->media_type = ETH_PHY_UNSPECIFIED; /* First check for copper cable */ if (bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0, - SFP_EEPROM_CON_TYPE_ADDR, - 2, + 0, + SFP_EEPROM_FC_TX_TECH_ADDR + 1, (u8 *)val) != 0) { DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); return -EINVAL; } - - switch (val[0]) { + params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK; + params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] << + LINK_SFP_EEPROM_COMP_CODE_SHIFT; + bnx2x_update_link_attr(params, params->link_attr_sync); + switch (val[SFP_EEPROM_CON_TYPE_ADDR]) { case SFP_EEPROM_CON_TYPE_VAL_COPPER: { u8 copper_module_type; @@ -8079,17 +8089,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, /* Check if its active cable (includes SFP+ module) * of passive cable */ - if (bnx2x_read_sfp_module_eeprom(phy, - params, - I2C_DEV_ADDR_A0, - SFP_EEPROM_FC_TX_TECH_ADDR, - 1, - &copper_module_type) != 0) { - DP(NETIF_MSG_LINK, - "Failed to read copper-cable-type" - " from SFP+ EEPROM\n"); - return -EINVAL; - } + copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR]; if (copper_module_type & SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { @@ -8115,16 +8115,18 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, } break; } + case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN: case SFP_EEPROM_CON_TYPE_VAL_LC: case SFP_EEPROM_CON_TYPE_VAL_RJ45: check_limiting_mode = 1; - if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK | - SFP_EEPROM_COMP_CODE_LR_MASK | - SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { + if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] & + (SFP_EEPROM_10G_COMP_CODE_SR_MASK | + SFP_EEPROM_10G_COMP_CODE_LR_MASK | + SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) { DP(NETIF_MSG_LINK, "1G SFP module detected\n"); - gport = params->port; phy->media_type = ETH_PHY_SFP_1G_FIBER; if (phy->req_line_speed != SPEED_1000) { + u8 gport = params->port; phy->req_line_speed = SPEED_1000; if (!CHIP_IS_E1x(bp)) { gport = BP_PATH(bp) + @@ -8134,6 +8136,12 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, "Warning: Link speed was forced to 1000Mbps. 
Current SFP module in port %d is not compliant with 10G Ethernet\n", gport); } + if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] & + SFP_EEPROM_1G_COMP_CODE_BASE_T) { + bnx2x_sfp_set_transmitter(params, phy, 0); + msleep(40); + bnx2x_sfp_set_transmitter(params, phy, 1); + } } else { int idx, cfg_idx = 0; DP(NETIF_MSG_LINK, "10G Optic module detected\n"); @@ -8149,7 +8157,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, break; default: DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", - val[0]); + val[SFP_EEPROM_CON_TYPE_ADDR]); return -EINVAL; } sync_offset = params->shmem_base + @@ -13507,7 +13515,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, sigdet = bnx2x_warpcore_get_sigdet(phy, params); if (!sigdet) { - if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { bnx2x_kr2_recovery(params, vars, phy); DP(NETIF_MSG_LINK, "No sigdet\n"); } @@ -13525,7 +13533,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, /* CL73 has not begun yet */ if (base_page == 0) { - if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { bnx2x_kr2_recovery(params, vars, phy); DP(NETIF_MSG_LINK, "No BP\n"); } @@ -13541,7 +13549,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, ((next_page & 0xe0) == 0x20)))); /* In case KR2 is already disabled, check if we need to re-enable it */ - if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { if (!not_kr2_device) { DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 389f5f8c..d9cce4c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -323,6 +323,9 @@ struct link_params { #define LINK_FLAGS_INT_DISABLED (1<<0) #define PHY_INITIALIZED (1<<1) u32 lfa_base; + + /* The same definitions as the shmem2 parameter */ + u32 link_attr_sync; }; /* Output parameters */ @@ -364,8 +367,6 @@ struct link_vars { u8 rx_tx_asic_rst; u8 turn_to_run_wc_rt; u16 rsrv2; - /* The same definitions as the shmem2 parameter */ - u32 link_attr_sync; }; /***********************************************************/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 900cab4..d1c093d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -6849,6 +6849,37 @@ static void bnx2x__common_init_phy(struct bnx2x *bp) bnx2x_release_phy_lock(bp); } +static void bnx2x_config_endianity(struct bnx2x *bp, u32 val) +{ + REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val); + + /* make sure this value is 0 */ + REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); + + REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val); + REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val); + REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val); + REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val); +} + +static void bnx2x_set_endianity(struct bnx2x *bp) +{ +#ifdef __BIG_ENDIAN + bnx2x_config_endianity(bp, 1); +#else + bnx2x_config_endianity(bp, 0); +#endif +} + +static void bnx2x_reset_endianity(struct bnx2x *bp) +{ + bnx2x_config_endianity(bp, 
0); +} + /** * bnx2x_init_hw_common - initialize the HW at the COMMON phase. * @@ -6915,23 +6946,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); bnx2x_init_pxp(bp); - -#ifdef __BIG_ENDIAN - REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); - /* make sure this value is 0 */ - REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); - -/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ - REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); - REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); - REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); - REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); -#endif - + bnx2x_set_endianity(bp); bnx2x_ilt_init_page_size(bp, INITOP_SET); if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) @@ -13169,9 +13184,15 @@ static void __bnx2x_remove(struct pci_dev *pdev, bnx2x_iov_remove_one(bp); /* Power on: we can't let PCI layer write to us while we are in D3 */ - if (IS_PF(bp)) + if (IS_PF(bp)) { bnx2x_set_power_state(bp, PCI_D0); + /* Set endianity registers to reset values in case next driver + * boots in different endianty environment. + */ + bnx2x_reset_endianity(bp); + } + /* Disable MSI/MSI-X */ bnx2x_disable_msi(bp); diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 27861a6..a6a9f28 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -31,7 +31,7 @@ #include <linux/if_vlan.h> #include <linux/prefetch.h> #include <linux/random.h> -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#if IS_ENABLED(CONFIG_VLAN_8021Q) #define BCM_VLAN 1 #endif #include <net/ip.h> @@ -3685,7 +3685,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr, static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, struct dst_entry **dst) { -#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) +#if IS_ENABLED(CONFIG_IPV6) struct flowi6 fl6; memset(&fl6, 0, sizeof(fl6)); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3ac5d23..cb77ae9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -11617,6 +11617,12 @@ static int tg3_open(struct net_device *dev) struct tg3 *tp = netdev_priv(dev); int err; + if (tp->pcierr_recovery) { + netdev_err(dev, "Failed to open device. PCI error recovery " + "in progress\n"); + return -EAGAIN; + } + if (tp->fw_needed) { err = tg3_request_firmware(tp); if (tg3_asic_rev(tp) == ASIC_REV_57766) { @@ -11674,6 +11680,12 @@ static int tg3_close(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); + if (tp->pcierr_recovery) { + netdev_err(dev, "Failed to close device. 
PCI error recovery " + "in progress\n"); + return -EAGAIN; + } + tg3_ptp_fini(tp); tg3_stop(tp); @@ -17561,6 +17573,7 @@ static int tg3_init_one(struct pci_dev *pdev, tp->rx_mode = TG3_DEF_RX_MODE; tp->tx_mode = TG3_DEF_TX_MODE; tp->irq_sync = 1; + tp->pcierr_recovery = false; if (tg3_debug > 0) tp->msg_enable = tg3_debug; @@ -18071,6 +18084,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); + tp->pcierr_recovery = true; + /* We probably don't have netdev yet */ if (!netdev || !netif_running(netdev)) goto done; @@ -18195,6 +18210,7 @@ static void tg3_io_resume(struct pci_dev *pdev) tg3_phy_start(tp); done: + tp->pcierr_recovery = false; rtnl_unlock(); } diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 461acca..31c9f82 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3407,6 +3407,7 @@ struct tg3 { struct device *hwmon_dev; bool link_up; + bool pcierr_recovery; }; /* Accessor macros for chip and asic attributes diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ff8cae5..ffc92a4 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -2506,7 +2506,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) * For TSO, the TCP checksum field is seeded with pseudo-header sum * excluding the length field. */ - if (skb->protocol == htons(ETH_P_IP)) { + if (vlan_get_protocol(skb) == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); /* Do we really need these? */ @@ -2870,12 +2870,13 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, } if (skb->ip_summed == CHECKSUM_PARTIAL) { + __be16 net_proto = vlan_get_protocol(skb); u8 proto = 0; - if (skb->protocol == htons(ETH_P_IP)) + if (net_proto == htons(ETH_P_IP)) proto = ip_hdr(skb)->protocol; #ifdef NETIF_F_IPV6_CSUM - else if (skb->protocol == htons(ETH_P_IPV6)) { + else if (net_proto == htons(ETH_P_IPV6)) { /* nexthdr may not be TCP immediately. */ proto = ipv6_hdr(skb)->nexthdr; } diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig index 184a063..07d2201 100644 --- a/drivers/net/ethernet/calxeda/Kconfig +++ b/drivers/net/ethernet/calxeda/Kconfig @@ -1,6 +1,7 @@ config NET_CALXEDA_XGMAC tristate "Calxeda 1G/10G XGMAC Ethernet driver" depends on HAS_IOMEM && HAS_DMA + depends on ARCH_HIGHBANK || COMPILE_TEST select CRC32 help This is the driver for the XGMAC Ethernet IP block found on Calxeda diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 18fb9c6..8c34811 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1253,7 +1253,9 @@ freeout: t4_free_sge_resources(adap); goto freeout; } - t4_write_reg(adap, MPS_TRC_RSS_CONTROL, + t4_write_reg(adap, is_t4(adap->params.chip) ? 
+ MPS_TRC_RSS_CONTROL : + MPS_T5_TRC_RSS_CONTROL, RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); return 0; @@ -1761,7 +1763,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 0xd004, 0xd03c, 0xdfc0, 0xdfe0, 0xe000, 0xea7c, - 0xf000, 0x11190, + 0xf000, 0x11110, + 0x11118, 0x11190, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x19124, @@ -1968,7 +1971,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 0xd004, 0xd03c, 0xdfc0, 0xdfe0, 0xe000, 0x11088, - 0x1109c, 0x1117c, + 0x1109c, 0x11110, + 0x11118, 0x1117c, 0x11190, 0x11204, 0x19040, 0x1906c, 0x19078, 0x19080, @@ -5955,7 +5959,8 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(CQ_END); params[4] = FW_PARAM_PFVF(OCQ_START); params[5] = FW_PARAM_PFVF(OCQ_END); - ret = t4_query_params(adap, 0, 0, 0, 6, params, val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, + val); if (ret < 0) goto bye; adap->vres.qp.start = val[0]; @@ -5967,7 +5972,8 @@ static int adap_init0(struct adapter *adap) params[0] = FW_PARAM_DEV(MAXORDIRD_QP); params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); - ret = t4_query_params(adap, 0, 0, 0, 2, params, val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, + val); if (ret < 0) { adap->params.max_ordird_qp = 8; adap->params.max_ird_adapter = 32 * adap->tids.ntids; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a853133..41d0446 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -168,6 +168,34 @@ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) } /* + * t4_report_fw_error - report firmware error + * @adap: the adapter + * + * The adapter firmware can indicate error conditions to the host. + * If the firmware has indicated an error, print out the reason for + * the firmware error. + */ +static void t4_report_fw_error(struct adapter *adap) +{ + static const char *const reason[] = { + "Crash", /* PCIE_FW_EVAL_CRASH */ + "During Device Preparation", /* PCIE_FW_EVAL_PREP */ + "During Device Configuration", /* PCIE_FW_EVAL_CONF */ + "During Device Initialization", /* PCIE_FW_EVAL_INIT */ + "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ + "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ + "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ + "Reserved", /* reserved */ + }; + u32 pcie_fw; + + pcie_fw = t4_read_reg(adap, MA_PCIE_FW); + if (pcie_fw & FW_PCIE_FW_ERR) + dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", + reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]); +} + +/* * Get the reply to a mailbox command and store it in @rpl in big-endian order. */ static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, @@ -300,6 +328,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, dump_mbox(adap, mbox, data_reg); dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", *(const u8 *)cmd, mbox); + t4_report_fw_error(adap); return -ETIMEDOUT; } @@ -566,6 +595,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, #define VPD_BASE 0x400 #define VPD_BASE_OLD 0 #define VPD_LEN 1024 +#define CHELSIO_VPD_UNIQUE_ID 0x82 /** * t4_seeprom_wp - enable/disable EEPROM write protection @@ -603,7 +633,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); if (ret < 0) goto out; - addr = *vpd == 0x82 ? 
VPD_BASE : VPD_BASE_OLD; + + /* The VPD shall have a unique identifier specified by the PCI SIG. + * For chelsio adapters, the identifier is 0x82. The first byte of a VPD + * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software + * is expected to automatically put this entry at the + * beginning of the VPD. + */ + addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD; ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); if (ret < 0) @@ -667,6 +704,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); strim(p->sn); + i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE); memcpy(p->pn, vpd + pn, min(i, PN_LEN)); strim(p->pn); @@ -1394,15 +1432,18 @@ static void pcie_intr_handler(struct adapter *adapter) int fat; - fat = t4_handle_intr_status(adapter, - PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, - sysbus_intr_info) + - t4_handle_intr_status(adapter, - PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, - pcie_port_intr_info) + - t4_handle_intr_status(adapter, PCIE_INT_CAUSE, - is_t4(adapter->params.chip) ? - pcie_intr_info : t5_pcie_intr_info); + if (is_t4(adapter->params.chip)) + fat = t4_handle_intr_status(adapter, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, + sysbus_intr_info) + + t4_handle_intr_status(adapter, + PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, + pcie_port_intr_info) + + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, + pcie_intr_info); + else + fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE, + t5_pcie_intr_info); if (fat) t4_fatal_err(adapter); @@ -1521,6 +1562,9 @@ static void cim_intr_handler(struct adapter *adapter) int fat; + if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR) + t4_report_fw_error(adapter); + fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, cim_intr_info) + t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, @@ -1768,10 +1812,16 @@ static void ma_intr_handler(struct adapter *adap) { u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); - if (status & MEM_PERR_INT_CAUSE) + if (status & MEM_PERR_INT_CAUSE) { dev_alert(adap->pdev_dev, "MA parity error, parity status %#x\n", t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); + if (is_t5(adap->params.chip)) + dev_alert(adap->pdev_dev, + "MA parity error, parity status %#x\n", + t4_read_reg(adap, + MA_PARITY_ERROR_STATUS2)); + } if (status & MEM_WRAP_INT_CAUSE) { v = t4_read_reg(adap, MA_INT_WRAP_STATUS); dev_alert(adap->pdev_dev, "MA address wrap-around error by " @@ -2733,12 +2783,16 @@ retry: /* * Issue the HELLO command to the firmware. If it's not successful * but indicates that we got a "busy" or "timeout" condition, retry - * the HELLO until we exhaust our retry limit. + * the HELLO until we exhaust our retry limit. If we do exceed our + * retry limit, check to see if the firmware left us any error + * information and report that if so. 
*/ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret < 0) { if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) goto retry; + if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR) + t4_report_fw_error(adap); return ret; } @@ -3742,6 +3796,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) lc->link_ok = link_ok; lc->speed = speed; lc->fc = fc; + lc->supported = be16_to_cpu(p->u.info.pcap); t4_os_link_changed(adap, port, link_ok); } if (mod != pi->mod_type) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index e3146e8..39fb325 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -511,6 +511,7 @@ #define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) #define MA_PCIE_FW 0x30b8 #define MA_PARITY_ERROR_STATUS 0x77f4 +#define MA_PARITY_ERROR_STATUS2 0x7804 #define MA_EXT_MEMORY1_BAR 0x7808 #define EDC_0_BASE_ADDR 0x7900 @@ -959,6 +960,7 @@ #define TRCMULTIFILTER 0x00000001U #define MPS_TRC_RSS_CONTROL 0x9808 +#define MPS_T5_TRC_RSS_CONTROL 0xa00c #define RSSCONTROL_MASK 0x00ff0000U #define RSSCONTROL_SHIFT 16 #define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 5f2729e..3409756 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2228,6 +2228,10 @@ struct fw_debug_cmd { #define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT) #define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \ FW_PCIE_FW_MASTER_MASK) +#define FW_PCIE_FW_EVAL_MASK 0x7 +#define FW_PCIE_FW_EVAL_SHIFT 24 +#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \ + FW_PCIE_FW_EVAL_MASK) struct fw_hdr { u8 ver; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index a0b418e..566b17d 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -1994,7 +1994,7 @@ static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe) { swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC; - if (skb->protocol != htons(ETH_P_IP)) + if (vlan_get_protocol(skb) != htons(ETH_P_IP)) return; if (skb->ip_summed == CHECKSUM_PARTIAL) diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index cbc330b..ad3d5d1 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -2674,7 +2674,8 @@ set_itr_now: #define E1000_TX_FLAGS_VLAN_SHIFT 16 static int e1000_tso(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring, struct sk_buff *skb) + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) { struct e1000_context_desc *context_desc; struct e1000_buffer *buffer_info; @@ -2692,7 +2693,7 @@ static int e1000_tso(struct e1000_adapter *adapter, hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); mss = skb_shinfo(skb)->gso_size; - if (skb->protocol == htons(ETH_P_IP)) { + if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -2702,7 +2703,7 @@ static int e1000_tso(struct e1000_adapter *adapter, 0); cmd_length = E1000_TXD_CMD_IP; ipcse = skb_transport_offset(skb) - 1; - } else if (skb->protocol == htons(ETH_P_IPV6)) { + } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; 
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, @@ -2745,7 +2746,8 @@ static int e1000_tso(struct e1000_adapter *adapter, } static bool e1000_tx_csum(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring, struct sk_buff *skb) + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) { struct e1000_context_desc *context_desc; struct e1000_buffer *buffer_info; @@ -2756,7 +2758,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, if (skb->ip_summed != CHECKSUM_PARTIAL) return false; - switch (skb->protocol) { + switch (protocol) { case cpu_to_be16(ETH_P_IP): if (ip_hdr(skb)->protocol == IPPROTO_TCP) cmd_len |= E1000_TXD_CMD_TCP; @@ -3097,6 +3099,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, int count = 0; int tso; unsigned int f; + __be16 protocol = vlan_get_protocol(skb); /* This goes back to the question of how to logically map a Tx queue * to a flow. Right now, performance is impacted slightly negatively @@ -3210,7 +3213,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, first = tx_ring->next_to_use; - tso = e1000_tso(adapter, tx_ring, skb); + tso = e1000_tso(adapter, tx_ring, skb, protocol); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -3220,10 +3223,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (likely(hw->mac_type != e1000_82544)) tx_ring->last_tx_tso = true; tx_flags |= E1000_TX_FLAGS_TSO; - } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) tx_flags |= E1000_TX_FLAGS_CSUM; - if (likely(skb->protocol == htons(ETH_P_IP))) + if (protocol == htons(ETH_P_IP)) tx_flags |= E1000_TX_FLAGS_IPV4; if (unlikely(skb->no_fcs)) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 65c3aef..247335d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5164,7 +5164,8 @@ link_up: #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 #define E1000_TX_FLAGS_VLAN_SHIFT 16 -static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) +static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) { struct e1000_context_desc *context_desc; struct e1000_buffer *buffer_info; @@ -5183,7 +5184,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); mss = skb_shinfo(skb)->gso_size; - if (skb->protocol == htons(ETH_P_IP)) { + if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -5231,7 +5232,8 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) return 1; } -static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) +static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_context_desc *context_desc; @@ -5239,16 +5241,10 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) unsigned int i; u8 css; u32 cmd_len = E1000_TXD_CMD_DEXT; - __be16 protocol; if (skb->ip_summed != CHECKSUM_PARTIAL) return false; - if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) - protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; - else - protocol = skb->protocol; - switch (protocol) { case cpu_to_be16(ETH_P_IP): if (ip_hdr(skb)->protocol == IPPROTO_TCP) @@ -5546,6 +5542,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, int 
count = 0; int tso; unsigned int f; + __be16 protocol = vlan_get_protocol(skb); if (test_bit(__E1000_DOWN, &adapter->state)) { dev_kfree_skb_any(skb); @@ -5620,7 +5617,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, first = tx_ring->next_to_use; - tso = e1000_tso(tx_ring, skb); + tso = e1000_tso(tx_ring, skb, protocol); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -5628,14 +5625,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (tso) tx_flags |= E1000_TX_FLAGS_TSO; - else if (e1000_tx_csum(tx_ring, skb)) + else if (e1000_tx_csum(tx_ring, skb, protocol)) tx_flags |= E1000_TX_FLAGS_CSUM; /* Old method was to assume IPv4 packet by default if TSO was enabled. * 82571 hardware supports TSO capabilities for IPv6 as well... * no longer assume, we must. */ - if (skb->protocol == htons(ETH_P_IP)) + if (protocol == htons(ETH_P_IP)) tx_flags |= E1000_TX_FLAGS_IPV4; if (unlikely(skb->no_fcs)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a51aa37..369848e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2295,7 +2295,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, goto out_drop; /* obtain protocol of skb */ - protocol = skb->protocol; + protocol = vlan_get_protocol(skb); /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_bi[tx_ring->next_to_use]; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 79bf96c..95a3ec2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1597,7 +1597,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, goto out_drop; /* obtain protocol of skb */ - protocol = skb->protocol; + protocol = vlan_get_protocol(skb); /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_bi[tx_ring->next_to_use]; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index c9f1d1b..ade067d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -20,6 +20,7 @@ #include <linux/mbus.h> #include <linux/module.h> #include <linux/interrupt.h> +#include <linux/if_vlan.h> #include <net/ip.h> #include <net/ipv6.h> #include <linux/io.h> @@ -1371,15 +1372,16 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_PARTIAL) { int ip_hdr_len = 0; + __be16 l3_proto = vlan_get_protocol(skb); u8 l4_proto; - if (skb->protocol == htons(ETH_P_IP)) { + if (l3_proto == htons(ETH_P_IP)) { struct iphdr *ip4h = ip_hdr(skb); /* Calculate IPv4 checksum and L4 checksum */ ip_hdr_len = ip4h->ihl; l4_proto = ip4h->protocol; - } else if (skb->protocol == htons(ETH_P_IPV6)) { + } else if (l3_proto == htons(ETH_P_IPV6)) { struct ipv6hdr *ip6h = ipv6_hdr(skb); /* Read l4_protocol from one of IPv6 extra headers */ @@ -1390,7 +1392,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) return MVNETA_TX_L4_CSUM_NOT; return mvneta_txq_desc_csum(skb_network_offset(skb), - skb->protocol, ip_hdr_len, l4_proto); + l3_proto, ip_hdr_len, l4_proto); } return MVNETA_TX_L4_CSUM_NOT; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index bb536aa..abddcf8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad int qpn, u64 *reg_id) { int err; - struct mlx4_spec_list spec_eth_outer = { {NULL} }; - struct mlx4_spec_list spec_vxlan = { {NULL} }; - struct mlx4_spec_list spec_eth_inner = { {NULL} }; - - struct mlx4_net_trans_rule rule = { - .queue_mode = MLX4_NET_TRANS_Q_FIFO, - .exclusive = 0, - .allow_loopback = 1, - .promisc_mode = MLX4_FS_REGULAR, - .priority = MLX4_DOMAIN_NIC, - }; - - __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return 0; /* do nothing */ - rule.port = priv->port; - rule.qpn = qpn; - INIT_LIST_HEAD(&rule.list); - - spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH; - memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); - memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); - - spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ - spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ - - list_add_tail(&spec_eth_outer.list, &rule.list); - list_add_tail(&spec_vxlan.list, &rule.list); - list_add_tail(&spec_eth_inner.list, &rule.list); - - err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id); + err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, + MLX4_DOMAIN_NIC, reg_id); if (err) { en_err(priv, "failed to add vxlan steering rule, err %d\n", err); return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index d80e7a6..ca0f98c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) } EXPORT_SYMBOL_GPL(mlx4_flow_detach); +int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, + int port, int qpn, u16 prio, u64 *reg_id) +{ + int err; + struct mlx4_spec_list spec_eth_outer = { {NULL} }; + struct mlx4_spec_list spec_vxlan = { {NULL} }; + struct mlx4_spec_list spec_eth_inner = { {NULL} }; + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_REGULAR, + }; + + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + rule.port = port; + rule.qpn = qpn; + rule.priority = prio; + INIT_LIST_HEAD(&rule.list); + + spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); + memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + + spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ + spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ + + list_add_tail(&spec_eth_outer.list, &rule.list); + list_add_tail(&spec_vxlan.list, &rule.list); + list_add_tail(&spec_eth_inner.list, &rule.list); + + err = mlx4_flow_attach(dev, &rule, reg_id); + return err; +} +EXPORT_SYMBOL(mlx4_tunnel_steer_add); + int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn) { diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 5020fd4..2f12c88 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -206,7 +206,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) int rx_head = priv->rx_head; int rx = 0; - while (1) { + while (rx < budget) { desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); desc0 = readl(desc 
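
The mlx4 hunks above lift the VXLAN steering-rule construction out of the ethernet driver into mcg.c and export it as mlx4_tunnel_steer_add(), so any consumer can attach the outer-MAC + VXLAN + inner-ETH rule without rebuilding the spec list by hand. A hedged usage sketch: the caller's mdev, mac, port and qpn are assumed to come from its own setup, and the prototype is assumed to be declared in the mlx4 device header:

#include <linux/printk.h>
#include <linux/mlx4/device.h>

/* Illustrative caller, mirroring what mlx4_en_tunnel_steer_add() does. */
static int example_attach_vxlan_rule(struct mlx4_dev *mdev,
				     unsigned char *mac, int port, int qpn,
				     u64 *reg_id)
{
	int err;

	err = mlx4_tunnel_steer_add(mdev, mac, port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err)
		pr_err("failed to add vxlan steering rule, err %d\n", err);
	return err;
}

The reg_id written back by the call is the handle later passed to mlx4_flow_detach() to remove the rule.
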
+ RX_REG_OFFSET_DESC0); @@ -218,7 +218,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) net_dbg_ratelimited("packet error\n"); priv->stats.rx_dropped++; priv->stats.rx_errors++; - continue; + goto rx_next; } len = desc0 & RX_DESC0_FRAME_LEN_MASK; @@ -226,13 +226,19 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) if (len > RX_BUF_SIZE) len = RX_BUF_SIZE; - skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size); + dma_sync_single_for_cpu(&ndev->dev, + priv->rx_mapping[rx_head], + priv->rx_buf_size, DMA_FROM_DEVICE); + skb = netdev_alloc_skb_ip_align(ndev, len); + if (unlikely(!skb)) { - net_dbg_ratelimited("build_skb failed\n"); + net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n"); priv->stats.rx_dropped++; priv->stats.rx_errors++; + goto rx_next; } + memcpy(skb->data, priv->rx_buf[rx_head], len); skb_put(skb, len); skb->protocol = eth_type_trans(skb, ndev); napi_gro_receive(&priv->napi, skb); @@ -244,18 +250,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) if (desc0 & RX_DESC0_MULTICAST) priv->stats.multicast++; +rx_next: writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); rx_head = RX_NEXT(rx_head); priv->rx_head = rx_head; - - if (rx >= budget) - break; } if (rx < budget) { - napi_gro_flush(napi, false); - __napi_complete(napi); + napi_complete(napi); } priv->reg_imr |= RPKT_FINISH_M; @@ -346,10 +349,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) len = ETH_ZLEN; } - txdes1 = readl(desc + TX_REG_OFFSET_DESC1); - txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS; - txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE); - txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK); + dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head], + priv->tx_buf_size, DMA_TO_DEVICE); + + txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); + if (tx_head == TX_DESC_NUM_MASK) + txdes1 |= TX_DESC1_END; writel(txdes1, desc + TX_REG_OFFSET_DESC1); writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); @@ -465,8 +470,7 @@ static int moxart_mac_probe(struct platform_device *pdev) spin_lock_init(&priv->txlock); priv->tx_buf_size = TX_BUF_SIZE; - priv->rx_buf_size = RX_BUF_SIZE + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + priv->rx_buf_size = RX_BUF_SIZE; priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM, &priv->tx_base, diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 8706c0d..a44a03c 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1220,6 +1220,9 @@ static int lpc_eth_open(struct net_device *ndev) __lpc_eth_clock_enable(pldat, true); + /* Suspended PHY makes LPC ethernet core block, so resume now */ + phy_resume(pldat->phy_dev); + /* Reset and initialize */ __lpc_eth_reset(pldat); __lpc_eth_init(pldat); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 188626e..3e96f26 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2556,6 +2556,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) if (skb_is_gso(skb)) { int err; + __be16 l3_proto = vlan_get_protocol(skb); err = skb_cow_head(skb, 0); if (err < 0) @@ -2572,7 +2573,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) << OB_MAC_TRANSPORT_HDR_SHIFT); mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); mac_iocb_ptr->flags2 |= 
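
The moxart changes above fix two classic NAPI problems: the poll loop ignored its budget, so one call could process unbounded work, and the error paths used continue without handing the descriptor back or advancing the ring. They also replace build_skb() on the DMA buffer with a copy into a freshly allocated skb after dma_sync_single_for_cpu(). A condensed sketch of that loop shape; struct xdrv_priv and the xdrv_desc_* accessors are placeholders, everything else is stock kernel API:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

#define XDRV_RX_DESC_NUM	64
#define XDRV_RX_BUF_SIZE	1536

struct xdrv_priv {
	struct napi_struct napi;
	struct net_device *ndev;
	struct device *dma_dev;
	void *rx_buf[XDRV_RX_DESC_NUM];
	dma_addr_t rx_dma[XDRV_RX_DESC_NUM];
	int rx_head;
};

/* Placeholder descriptor accessors. */
bool xdrv_desc_ready(struct xdrv_priv *priv, int idx, int *len);
void xdrv_desc_rearm(struct xdrv_priv *priv, int idx);
void xdrv_rx_irq_enable(struct xdrv_priv *priv);

static int xdrv_rx_poll(struct napi_struct *napi, int budget)
{
	struct xdrv_priv *priv = container_of(napi, struct xdrv_priv, napi);
	int rx = 0, len;

	while (rx < budget && xdrv_desc_ready(priv, priv->rx_head, &len)) {
		struct sk_buff *skb;

		dma_sync_single_for_cpu(priv->dma_dev,
					priv->rx_dma[priv->rx_head],
					XDRV_RX_BUF_SIZE, DMA_FROM_DEVICE);

		skb = netdev_alloc_skb_ip_align(priv->ndev, len);
		if (skb) {
			memcpy(skb->data, priv->rx_buf[priv->rx_head], len);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, priv->ndev);
			napi_gro_receive(napi, skb);
		}
		/* Success or drop: always hand the buffer back, advance the
		 * ring and charge the descriptor against the budget.
		 */
		xdrv_desc_rearm(priv, priv->rx_head);
		priv->rx_head = (priv->rx_head + 1) % XDRV_RX_DESC_NUM;
		rx++;
	}

	if (rx < budget) {
		napi_complete(napi);
		xdrv_rx_irq_enable(priv);
	}
	return rx;
}
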
OB_MAC_TSO_IOCB_LSO; - if (likely(skb->protocol == htons(ETH_P_IP))) { + if (likely(l3_proto == htons(ETH_P_IP))) { struct iphdr *iph = ip_hdr(skb); iph->check = 0; mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; @@ -2580,7 +2581,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) iph->daddr, 0, IPPROTO_TCP, 0); - } else if (skb->protocol == htons(ETH_P_IPV6)) { + } else if (l3_proto == htons(ETH_P_IPV6)) { mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 9e757c7..196e98a 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -5,6 +5,7 @@ config SH_ETH tristate "Renesas SuperH Ethernet support" depends on HAS_DMA + depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST select CRC32 select MII select MDIO_BITBANG diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index c553f6b..cf28dab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -28,7 +28,7 @@ #include "stmmac.h" -static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) +static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_priv *priv = (struct stmmac_priv *)p; unsigned int txsize = priv->dma_tx_size; @@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, skb->data, bmax, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); while (len != 0) { @@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, (skb->data + bmax * i), bmax, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, STMMAC_CHAIN_MODE); priv->hw->desc->set_tx_owner(desc); @@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, (skb->data + bmax * i), len, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, STMMAC_CHAIN_MODE); priv->hw->desc->set_tx_owner(desc); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index de507c3..593e6c4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -220,10 +220,10 @@ enum dma_irq_status { handle_tx = 0x8, }; -#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1) -#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2) -#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3) -#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4) +#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) +#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) +#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) +#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) #define CORE_PCS_ANE_COMPLETE (1 << 5) #define CORE_PCS_LINK_STATUS (1 << 6) @@ -287,7 
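
The stmmac chain_mode hunks above (and the ring_mode counterpart further down) change jumbo_frm() to return int precisely so a failed mapping can be reported: every dma_map_single() result is now checked with dma_mapping_error() before being written into a descriptor, and -1 propagates up instead of a bogus bus address. The core of that pattern as a short sketch; xdrv_map_tx_linear() and the caller-supplied desc_addr are assumptions:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

/* Map the linear part of an skb for TX; 'dev' is the DMA-capable device
 * and 'desc_addr' the descriptor field the address would go into.
 */
static int xdrv_map_tx_linear(struct device *dev, struct sk_buff *skb,
			      dma_addr_t *desc_addr)
{
	dma_addr_t dma = dma_map_single(dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);

	/* Never hand the handle to hardware without this check: with an
	 * IOMMU or swiotlb the mapping really can fail at runtime.
	 */
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	*desc_addr = dma;
	return 0;
}
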
+287,7 @@ struct dma_features { /* Default LPI timers */ #define STMMAC_DEFAULT_LIT_LS 0x3E8 -#define STMMAC_DEFAULT_TWT_LS 0x0 +#define STMMAC_DEFAULT_TWT_LS 0x1E #define STMMAC_CHAIN_MODE 0x1 #define STMMAC_RING_MODE 0x2 @@ -425,7 +425,7 @@ struct stmmac_mode_ops { void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, unsigned int extend_desc); unsigned int (*is_jumbo_frm) (int len, int ehn_desc); - unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); + int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); int (*set_16kib_bfsize)(int mtu); void (*init_desc3)(struct dma_desc *p); void (*refill_desc3) (void *priv, struct dma_desc *p); @@ -445,6 +445,7 @@ struct mac_device_info { int multicast_filter_bins; int unicast_filter_entries; int mcast_bits_log2; + unsigned int rx_csum; }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 71b5419..64d8f56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -153,7 +153,7 @@ enum inter_frame_gap { #define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ #define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ - GMAC_CONTROL_BE) + GMAC_CONTROL_BE | GMAC_CONTROL_DCRS) /* GMAC Frame Filter defines */ #define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index d8ef187..5efe60e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -58,7 +58,11 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw) void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONTROL); - value |= GMAC_CONTROL_IPC; + if (hw->rx_csum) + value |= GMAC_CONTROL_IPC; + else + value &= ~GMAC_CONTROL_IPC; + writel(value, ioaddr + GMAC_CONTROL); value = readl(ioaddr + GMAC_CONTROL); diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 8607488..192c249 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -68,7 +68,7 @@ struct stmmac_counters { unsigned int mmc_rx_octetcount_g; unsigned int mmc_rx_broadcastframe_g; unsigned int mmc_rx_multicastframe_g; - unsigned int mmc_rx_crc_errror; + unsigned int mmc_rx_crc_error; unsigned int mmc_rx_align_error; unsigned int mmc_rx_run_error; unsigned int mmc_rx_jabber_error; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 50617c5..08c483b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc) mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); - mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR); + mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR); mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); diff --git 
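
dwmac1000_rx_ipc_enable() above now honours the new per-device hw->rx_csum flag instead of unconditionally setting GMAC_CONTROL_IPC, which is what lets RX checksum offload be switched at runtime (see the ndo_set_features hook added further down). A sketch of the read-modify-write with hypothetical XDRV_* register and bit names:

#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define XDRV_CONTROL		0x00		/* hypothetical register */
#define XDRV_CONTROL_IPC	BIT(10)		/* hypothetical csum bit */

struct xdrv_hw {
	void __iomem *ioaddr;
	unsigned int rx_csum;
};

/* Returns non-zero when the checksum engine is active after programming. */
static int xdrv_rx_ipc_enable(struct xdrv_hw *hw)
{
	u32 value = readl(hw->ioaddr + XDRV_CONTROL);

	if (hw->rx_csum)
		value |= XDRV_CONTROL_IPC;
	else
		value &= ~XDRV_CONTROL_IPC;
	writel(value, hw->ioaddr + XDRV_CONTROL);

	/* Read back: a core without the engine keeps the bit clear, so the
	 * caller can drop NETIF_F_RXCSUM and fall back to software.
	 */
	value = readl(hw->ioaddr + XDRV_CONTROL);
	return !!(value & XDRV_CONTROL_IPC);
}
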
a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 650a4be..5dd50c6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -28,7 +28,7 @@ #include "stmmac.h" -static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) +static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_priv *priv = (struct stmmac_priv *)p; unsigned int txsize = priv->dma_tx_size; @@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, skb->data, bmax, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + + priv->tx_skbuff_dma[entry].buf = desc->des2; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_RING_MODE); @@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, skb->data + bmax, len, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, STMMAC_RING_MODE); @@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) } else { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, STMMAC_RING_MODE); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index ca01035..58097c0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -34,6 +34,11 @@ #include <linux/ptp_clock_kernel.h> #include <linux/reset.h> +struct stmmac_tx_info { + dma_addr_t buf; + bool map_as_page; +}; + struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; @@ -45,7 +50,7 @@ struct stmmac_priv { u32 tx_count_frames; u32 tx_coal_frames; u32 tx_coal_timer; - dma_addr_t *tx_skbuff_dma; + struct stmmac_tx_info *tx_skbuff_dma; dma_addr_t dma_tx_phy; int tx_coalesce; int hwts_tx_en; @@ -105,6 +110,8 @@ struct stmmac_priv { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_ops; unsigned int default_addend; + struct clk *clk_ptp_ref; + unsigned int clk_ptp_rate; u32 adv_ts; int use_riwt; int irq_wake; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 9af50ba..cf4f38d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -175,7 +175,7 @@ static const struct stmmac_stats stmmac_mmc[] = { STMMAC_MMC_STAT(mmc_rx_octetcount_g), STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), STMMAC_MMC_STAT(mmc_rx_multicastframe_g), - STMMAC_MMC_STAT(mmc_rx_crc_errror), + STMMAC_MMC_STAT(mmc_rx_crc_error), STMMAC_MMC_STAT(mmc_rx_align_error), STMMAC_MMC_STAT(mmc_rx_run_error), STMMAC_MMC_STAT(mmc_rx_jabber_error), diff --git 
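
The new struct stmmac_tx_info above records, per TX slot, both the DMA handle and whether it came from skb_frag_dma_map() (a page mapping) or dma_map_single() (the linear head). That is what lets the completion path choose dma_unmap_page() versus dma_unmap_single() correctly, which keeps DMA-API debugging honest. A minimal sketch of the bookkeeping, with xdrv_* names standing in for the driver's own:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

struct xdrv_tx_info {
	dma_addr_t buf;
	bool map_as_page;
	unsigned int len;
};

/* Map either the linear head (frag == NULL) or a fragment into 'ti'. */
static int xdrv_map_tx(struct device *dev, struct sk_buff *skb,
		       const skb_frag_t *frag, struct xdrv_tx_info *ti)
{
	if (frag) {
		ti->len = skb_frag_size(frag);
		ti->buf = skb_frag_dma_map(dev, frag, 0, ti->len,
					   DMA_TO_DEVICE);
		ti->map_as_page = true;
	} else {
		ti->len = skb_headlen(skb);
		ti->buf = dma_map_single(dev, skb->data, ti->len,
					 DMA_TO_DEVICE);
		ti->map_as_page = false;
	}
	return dma_mapping_error(dev, ti->buf) ? -ENOMEM : 0;
}

/* Completion side: unmap with the API that matches the mapping. */
static void xdrv_unmap_tx(struct device *dev, struct xdrv_tx_info *ti)
{
	if (!ti->buf)
		return;
	if (ti->map_as_page)
		dma_unmap_page(dev, ti->buf, ti->len, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, ti->buf, ti->len, DMA_TO_DEVICE);
	ti->buf = 0;
	ti->map_as_page = false;
}
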
a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 08addd6..6e6ee22 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -275,6 +275,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) */ bool stmmac_eee_init(struct stmmac_priv *priv) { + char *phy_bus_name = priv->plat->phy_bus_name; bool ret = false; /* Using PCS we cannot dial with the phy registers at this stage @@ -284,6 +285,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv) (priv->pcs == STMMAC_PCS_RTBI)) goto out; + /* Never init EEE in case of a switch is attached */ + if (phy_bus_name && (!strcmp(phy_bus_name, "fixed"))) + goto out; + /* MAC core supports the EEE feature. */ if (priv->dma_cap.eee) { int tx_lpi_timer = priv->tx_lpi_timer; @@ -316,10 +321,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv) priv->hw->mac->set_eee_timer(priv->hw, STMMAC_DEFAULT_LIT_LS, tx_lpi_timer); - } else - /* Set HW EEE according to the speed */ - priv->hw->mac->set_eee_pls(priv->hw, - priv->phydev->link); + } + /* Set HW EEE according to the speed */ + priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); @@ -603,16 +607,16 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) /* calculate default added value: * formula is : * addend = (2^32)/freq_div_ratio; - * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz - * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK; - * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to + * where, freq_div_ratio = clk_ptp_ref_i/50MHz + * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i; + * NOTE: clk_ptp_ref_i should be >= 50MHz to * achive 20ns accuracy. * * 2^x * y == (y << x), hence * 2^32 * 50000000 ==> (50000000 << 32) */ temp = (u64) (50000000ULL << 32); - priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK); + priv->default_addend = div_u64(temp, priv->clk_ptp_rate); priv->hw->ptp->config_addend(priv->ioaddr, priv->default_addend); @@ -638,6 +642,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; + /* Fall-back to main clock in case of no PTP ref is passed */ + priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref"); + if (IS_ERR(priv->clk_ptp_ref)) { + priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk); + priv->clk_ptp_ref = NULL; + } else { + clk_prepare_enable(priv->clk_ptp_ref); + priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref); + } + priv->adv_ts = 0; if (priv->dma_cap.atime_stamp && priv->extend_desc) priv->adv_ts = 1; @@ -657,6 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) static void stmmac_release_ptp(struct stmmac_priv *priv) { + if (priv->clk_ptp_ref) + clk_disable_unprepare(priv->clk_ptp_ref); stmmac_ptp_unregister(priv); } @@ -1061,7 +1077,8 @@ static int init_dma_desc_rings(struct net_device *dev) else p = priv->dma_tx + i; p->des2 = 0; - priv->tx_skbuff_dma[i] = 0; + priv->tx_skbuff_dma[i].buf = 0; + priv->tx_skbuff_dma[i].map_as_page = false; priv->tx_skbuff[i] = NULL; } @@ -1100,17 +1117,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) else p = priv->dma_tx + i; - if (priv->tx_skbuff_dma[i]) { - dma_unmap_single(priv->device, - priv->tx_skbuff_dma[i], - priv->hw->desc->get_tx_len(p), - DMA_TO_DEVICE); - priv->tx_skbuff_dma[i] = 0; + if (priv->tx_skbuff_dma[i].buf) { + if (priv->tx_skbuff_dma[i].map_as_page) + dma_unmap_page(priv->device, + 
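
The stmmac_main hunks above drop the hard-coded STMMAC_SYSCLOCK in favour of a clk_ptp_ref clock looked up at init time, falling back to the main interface clock when none is provided, and derive the PTP addend from the actual rate: addend = 2^32 * 50 MHz / clk_ptp_rate, computed with div_u64. A sketch under those assumptions; xdrv_ptp_default_addend() and its parameters are illustrative, not the stmmac API:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/math64.h>

/* Addend for a 50 MHz nominal PTP accumulator, derived from whichever
 * clock actually feeds the timestamping unit.  'main_clk' is the already
 * acquired interface clock used as the fallback.
 */
static u32 xdrv_ptp_default_addend(struct device *dev, struct clk *main_clk,
				   unsigned long *rate_out)
{
	struct clk *ptp_clk;
	unsigned long rate;

	/* Prefer a dedicated "clk_ptp_ref"; fall back to the main clock. */
	ptp_clk = devm_clk_get(dev, "clk_ptp_ref");
	if (IS_ERR(ptp_clk) || clk_prepare_enable(ptp_clk))
		rate = clk_get_rate(main_clk);
	else
		rate = clk_get_rate(ptp_clk);

	*rate_out = rate;

	/* addend = (2^32 * 50 MHz) / rate; rate should be >= 50 MHz to keep
	 * roughly 20 ns resolution, as the comment in the patch explains.
	 */
	return div_u64(50000000ULL << 32, rate);
}
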
priv->tx_skbuff_dma[i].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, + priv->tx_skbuff_dma[i].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); } if (priv->tx_skbuff[i] != NULL) { dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; + priv->tx_skbuff_dma[i].buf = 0; + priv->tx_skbuff_dma[i].map_as_page = false; } } } @@ -1131,7 +1155,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) if (!priv->rx_skbuff) goto err_rx_skbuff; - priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), + priv->tx_skbuff_dma = kmalloc_array(txsize, + sizeof(*priv->tx_skbuff_dma), GFP_KERNEL); if (!priv->tx_skbuff_dma) goto err_tx_skbuff_dma; @@ -1293,12 +1318,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) pr_debug("%s: curr %d, dirty %d\n", __func__, priv->cur_tx, priv->dirty_tx); - if (likely(priv->tx_skbuff_dma[entry])) { - dma_unmap_single(priv->device, - priv->tx_skbuff_dma[entry], - priv->hw->desc->get_tx_len(p), - DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = 0; + if (likely(priv->tx_skbuff_dma[entry].buf)) { + if (priv->tx_skbuff_dma[entry].map_as_page) + dma_unmap_page(priv->device, + priv->tx_skbuff_dma[entry].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, + priv->tx_skbuff_dma[entry].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + priv->tx_skbuff_dma[entry].buf = 0; + priv->tx_skbuff_dma[entry].map_as_page = false; } priv->hw->mode->clean_desc3(priv, p); @@ -1637,6 +1669,13 @@ static int stmmac_hw_setup(struct net_device *dev) /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); + ret = priv->hw->mac->rx_ipc(priv->hw); + if (!ret) { + pr_warn(" RX IPC Checksum Offload disabled\n"); + priv->plat->rx_coe = STMMAC_RX_COE_NONE; + priv->hw->rx_csum = 0; + } + /* Enable the MAC Rx/Tx */ stmmac_set_mac(priv->ioaddr, true); @@ -1887,12 +1926,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!is_jumbo)) { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + goto dma_map_err; + priv->tx_skbuff_dma[entry].buf = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum_insertion, priv->mode); } else { desc = first; entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); + if (unlikely(entry < 0)) + goto dma_map_err; } for (i = 0; i < nfrags; i++) { @@ -1908,7 +1951,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + goto dma_map_err; /* should reuse desc w/o issues */ + + priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].map_as_page = true; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, priv->mode); wmb(); @@ -1975,7 +2022,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->hw->dma->enable_dma_transmission(priv->ioaddr); spin_unlock(&priv->tx_lock); + return NETDEV_TX_OK; +dma_map_err: + dev_err(priv->device, "Tx dma map failed\n"); + dev_kfree_skb(skb); + priv->dev->stats.tx_dropped++; return NETDEV_TX_OK; } @@ -2028,7 +2080,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) priv->rx_skbuff_dma[entry] = dma_map_single(priv->device, skb->data, bfsize, 
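
stmmac_xmit() above gains a dma_map_err label: when a linear or fragment mapping fails mid-frame, the packet is dropped, tx_dropped is bumped, and the function still returns NETDEV_TX_OK, because NETDEV_TX_BUSY would make the stack requeue a packet the driver can never send. A compact sketch of that error path; the descriptor writing and the unwinding of already-mapped fragments are elided, and the function name is hypothetical:

#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static netdev_tx_t xdrv_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	/* DMA through the device behind the netdev; a real driver would
	 * use its own priv pointer here.
	 */
	struct device *dmadev = ndev->dev.parent;
	dma_addr_t dma;

	dma = dma_map_single(dmadev, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(dmadev, dma))
		goto dma_map_err;

	/* ... write descriptors, map each fragment with the same check,
	 * jumping to dma_map_err (after unmapping what was mapped) ...
	 */
	return NETDEV_TX_OK;

dma_map_err:
	dev_err(dmadev, "Tx DMA map failed\n");
	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;	/* packet consumed (dropped), not requeued */
}
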
DMA_FROM_DEVICE); - + if (dma_mapping_error(priv->device, + priv->rx_skbuff_dma[entry])) { + dev_err(priv->device, "Rx dma map failed\n"); + dev_kfree_skb(skb); + break; + } p->des2 = priv->rx_skbuff_dma[entry]; priv->hw->mode->refill_desc3(priv, p); @@ -2055,7 +2112,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) unsigned int entry = priv->cur_rx % rxsize; unsigned int next_entry; unsigned int count = 0; - int coe = priv->plat->rx_coe; + int coe = priv->hw->rx_csum; if (netif_msg_rx_status(priv)) { pr_debug("%s: descriptor ring:\n", __func__); @@ -2276,8 +2333,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) features &= ~NETIF_F_RXCSUM; - else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1) - features &= ~NETIF_F_IPV6_CSUM; + if (!priv->plat->tx_coe) features &= ~NETIF_F_ALL_CSUM; @@ -2292,6 +2348,24 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, return features; } +static int stmmac_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + + /* Keep the COE Type in case of csum is supporting */ + if (features & NETIF_F_RXCSUM) + priv->hw->rx_csum = priv->plat->rx_coe; + else + priv->hw->rx_csum = 0; + /* No check needed because rx_coe has been set before and it will be + * fixed in case of issue. + */ + priv->hw->mac->rx_ipc(priv->hw); + + return 0; +} + /** * stmmac_interrupt - main ISR * @irq: interrupt number. @@ -2572,6 +2646,7 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_stop = stmmac_release, .ndo_change_mtu = stmmac_change_mtu, .ndo_fix_features = stmmac_fix_features, + .ndo_set_features = stmmac_set_features, .ndo_set_rx_mode = stmmac_set_rx_mode, .ndo_tx_timeout = stmmac_tx_timeout, .ndo_do_ioctl = stmmac_ioctl, @@ -2592,7 +2667,6 @@ static const struct net_device_ops stmmac_netdev_ops = { */ static int stmmac_hw_init(struct stmmac_priv *priv) { - int ret; struct mac_device_info *mac; /* Identify the MAC HW device */ @@ -2649,15 +2723,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv) /* To use alternate (extended) or normal descriptor structures */ stmmac_selec_desc_mode(priv); - ret = priv->hw->mac->rx_ipc(priv->hw); - if (!ret) { - pr_warn(" RX IPC Checksum Offload not configured.\n"); - priv->plat->rx_coe = STMMAC_RX_COE_NONE; - } - - if (priv->plat->rx_coe) + if (priv->plat->rx_coe) { + priv->hw->rx_csum = priv->plat->rx_coe; pr_info(" RX Checksum Offload Engine supported (type %d)\n", priv->plat->rx_coe); + } if (priv->plat->tx_coe) pr_info(" TX Checksum insertion supported\n"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b7ad356..c5ee79d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -206,6 +206,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv) { if (priv->ptp_clock) { ptp_clock_unregister(priv->ptp_clock); + priv->ptp_clock = NULL; pr_debug("Removed PTP HW clock successfully on %s\n", priv->dev->name); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 3dbc047..4535df3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -25,8 +25,6 @@ #ifndef __STMMAC_PTP_H__ #define __STMMAC_PTP_H__ -#define STMMAC_SYSCLOCK 62500000 - /* IEEE 1588 PTP register offsets */ #define PTP_TCR 0x0700 /* 
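
The new stmmac_set_features() above is the runtime half of the rx_csum handling: when user space toggles NETIF_F_RXCSUM (ethtool -K ... rx on/off), the driver mirrors it into hw->rx_csum and reprograms the MAC, while fix_features keeps the bit from being offered at all when the platform has no checksum engine. A generic sketch of such a hook; struct xdrv_priv, hw_rx_csum and xdrv_program_rx_csum() are assumptions tied to the earlier register sketch:

#include <linux/netdevice.h>

struct xdrv_priv {
	bool hw_rx_csum;	/* mirrored into the MAC control register */
};

/* Placeholder: rewrites the MAC register, as in the rx_ipc sketch above. */
void xdrv_program_rx_csum(struct xdrv_priv *priv);

static int xdrv_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	struct xdrv_priv *priv = netdev_priv(ndev);

	priv->hw_rx_csum = !!(features & NETIF_F_RXCSUM);
	xdrv_program_rx_csum(priv);
	return 0;
}

static const struct net_device_ops xdrv_netdev_ops = {
	/* .ndo_open, .ndo_stop, .ndo_start_xmit ... */
	.ndo_set_features = xdrv_set_features,
};
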
Timestamp Control Reg */ #define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ diff --git a/drivers/net/fddi/skfp/h/skfbi.h b/drivers/net/fddi/skfp/h/skfbi.h index c1ba26c..3de2f0d 100644 --- a/drivers/net/fddi/skfp/h/skfbi.h +++ b/drivers/net/fddi/skfp/h/skfbi.h @@ -147,11 +147,6 @@ #define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */ #define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */ -/* PCI_BASE_2ND 32 bit 2nd Base address */ -#define PCI_IOBASE 0xffffff00L /* Bit 31..8: I/O Base address */ -#define PCI_IOSIZE 0x000000fcL /* Bit 7..2: I/O Size Requirements */ -#define PCI_IOSPACE 0x00000001L /* Bit 0: I/O Space Indicator */ - /* PCI_SUB_VID 16 bit Subsystem Vendor ID */ /* PCI_SUB_ID 16 bit Subsystem ID */ diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c94e2a2..a854d38 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1036,31 +1036,31 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) /* First check if the EEE ability is supported */ eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS, phydev->addr); - if (eee_cap < 0) - return eee_cap; + if (eee_cap <= 0) + goto eee_exit_err; cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap); if (!cap) - return -EPROTONOSUPPORT; + goto eee_exit_err; /* Check which link settings negotiated and verify it in * the EEE advertising registers. */ eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN, phydev->addr); - if (eee_lp < 0) - return eee_lp; + if (eee_lp <= 0) + goto eee_exit_err; eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, phydev->addr); - if (eee_adv < 0) - return eee_adv; + if (eee_adv <= 0) + goto eee_exit_err; adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); idx = phy_find_setting(phydev->speed, phydev->duplex); if (!(lp & adv & settings[idx].setting)) - return -EPROTONOSUPPORT; + goto eee_exit_err; if (clk_stop_enable) { /* Configure the PHY to stop receiving xMII @@ -1080,7 +1080,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) return 0; /* EEE supported */ } - +eee_exit_err: return -EPROTONOSUPPORT; } EXPORT_SYMBOL(phy_init_eee); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index d6e90c7..6dfcbf5 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2056,7 +2056,6 @@ vmxnet3_set_mc(struct net_device *netdev) if (!netdev_mc_empty(netdev)) { new_table = vmxnet3_copy_mc(netdev); if (new_table) { - new_mode |= VMXNET3_RXM_MCAST; rxConf->mfTableLen = cpu_to_le16( netdev_mc_count(netdev) * ETH_ALEN); new_table_pa = dma_map_single( @@ -2064,15 +2063,18 @@ vmxnet3_set_mc(struct net_device *netdev) new_table, rxConf->mfTableLen, PCI_DMA_TODEVICE); + } + + if (new_table_pa) { + new_mode |= VMXNET3_RXM_MCAST; rxConf->mfTablePA = cpu_to_le64(new_table_pa); } else { - netdev_info(netdev, "failed to copy mcast list" - ", setting ALL_MULTI\n"); + netdev_info(netdev, + "failed to copy mcast list, setting ALL_MULTI\n"); new_mode |= VMXNET3_RXM_ALL_MULTI; } } - if (!(new_mode & VMXNET3_RXM_MCAST)) { rxConf->mfTableLen = 0; rxConf->mfTablePA = 0; @@ -2091,11 +2093,10 @@ vmxnet3_set_mc(struct net_device *netdev) VMXNET3_CMD_UPDATE_MAC_FILTERS); spin_unlock_irqrestore(&adapter->cmd_lock, flags); - if (new_table) { + if (new_table_pa) dma_unmap_single(&adapter->pdev->dev, new_table_pa, rxConf->mfTableLen, PCI_DMA_TODEVICE); - kfree(new_table); - } + kfree(new_table); } void diff --git 
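
The phy_init_eee() change above treats a zero read (no EEE ability advertised) the same as a read error and funnels every failure through one eee_exit_err exit returning -EPROTONOSUPPORT. A condensed sketch of the capability check using the same MMD helpers as the patch; the matching of the negotiated speed/duplex against the settings[] table is omitted here, and the function name is illustrative:

#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/errno.h>

/* Check that both link partners advertise at least one common EEE mode. */
static int xdrv_check_eee(struct phy_device *phydev)
{
	int eee_cap, eee_lp, eee_adv;
	u32 cap, adv, lp;

	eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
					MDIO_MMD_PCS, phydev->addr);
	if (eee_cap <= 0)	/* read error or no EEE ability at all */
		goto not_supported;
	cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
	if (!cap)
		goto not_supported;

	eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
				       MDIO_MMD_AN, phydev->addr);
	eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
					MDIO_MMD_AN, phydev->addr);
	if (eee_lp <= 0 || eee_adv <= 0)
		goto not_supported;

	adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
	lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
	if (!(adv & lp))	/* no common EEE modes */
		goto not_supported;

	return 0;

not_supported:
	return -EPROTONOSUPPORT;
}
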
a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 29ee77f2..3759479 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.2.1.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01020000 +#define VMXNET3_DRIVER_VERSION_NUM 0x01020100 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1fb7b37..beb377b 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1327,7 +1327,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) } else if (vxlan->flags & VXLAN_F_L3MISS) { union vxlan_addr ipa = { .sin.sin_addr.s_addr = tip, - .sa.sa_family = AF_INET, + .sin.sin_family = AF_INET, }; vxlan_ip_miss(dev, &ipa); @@ -1488,7 +1488,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) } else if (vxlan->flags & VXLAN_F_L3MISS) { union vxlan_addr ipa = { .sin6.sin6_addr = msg->target, - .sa.sa_family = AF_INET6, + .sin6.sin6_family = AF_INET6, }; vxlan_ip_miss(dev, &ipa); @@ -1521,7 +1521,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { union vxlan_addr ipa = { .sin.sin_addr.s_addr = pip->daddr, - .sa.sa_family = AF_INET, + .sin.sin_family = AF_INET, }; vxlan_ip_miss(dev, &ipa); @@ -1542,7 +1542,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { union vxlan_addr ipa = { .sin6.sin6_addr = pip6->daddr, - .sa.sa_family = AF_INET6, + .sin6.sin6_family = AF_INET6, }; vxlan_ip_miss(dev, &ipa); diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 334c2ec..da92bfa 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -2423,8 +2423,6 @@ static void at76_delete_device(struct at76_priv *priv) kfree_skb(priv->rx_skb); - usb_put_dev(priv->udev); - at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw", __func__); ieee80211_free_hw(priv->hw); @@ -2558,6 +2556,7 @@ static void at76_disconnect(struct usb_interface *interface) wiphy_info(priv->hw->wiphy, "disconnecting\n"); at76_delete_device(priv); + usb_put_dev(priv->udev); dev_info(&interface->dev, "disconnected\n"); } diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/spectral.c index 5fe29b9..8f68426 100644 --- a/drivers/net/wireless/ath/ath9k/spectral.c +++ b/drivers/net/wireless/ath/ath9k/spectral.c @@ -253,7 +253,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file, if (strncmp("trigger", buf, 7) == 0) { ath9k_spectral_scan_trigger(sc->hw); - } else if (strncmp("background", buf, 9) == 0) { + } else if (strncmp("background", buf, 10) == 0) { ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND); ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n"); } else if (strncmp("chanscan", buf, 8) == 0) { diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 6451d2b..824f5e2 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -51,7 +51,6 @@ config IWLWIFI_LEDS config IWLDVM tristate "Intel Wireless WiFi DVM Firmware support" - depends on m default IWLWIFI help This is the driver that supports the DVM 
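
The vxlan hunks above look cosmetic but fix real breakage: in a designated initializer for a union, naming sub-fields of two different members (.sin.sin_addr and .sa.sa_family) does not merge them; the compiler keeps only the member initialized last, so the address was silently discarded. Keeping every designator inside the same member (.sin or .sin6) preserves both fields. A minimal illustration of the pitfall with a locally defined union xaddr (not the vxlan type):

#include <linux/types.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/socket.h>

union xaddr {
	struct sockaddr_in  sin;
	struct sockaddr_in6 sin6;
	struct sockaddr     sa;
};

static void xaddr_example(__be32 tip)
{
	/* BROKEN: two designators name two different union members, so
	 * only the last one (.sa) survives and sin_addr ends up zeroed.
	 */
	union xaddr bad = {
		.sin.sin_addr.s_addr = tip,
		.sa.sa_family = AF_INET,
	};

	/* CORRECT: both designators stay inside .sin, so the family and
	 * the address are both retained.
	 */
	union xaddr good = {
		.sin.sin_addr.s_addr = tip,
		.sin.sin_family = AF_INET,
	};

	(void)bad;
	(void)good;
}
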
firmware which is @@ -60,7 +59,6 @@ config IWLDVM config IWLMVM tristate "Intel Wireless WiFi MVM Firmware support" - depends on m help This is the driver that supports the MVM firmware which is currently only available for 7260 and 3160 devices. diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 6dc5dd3..ed50de6 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) /* recalculate basic rates */ iwl_calc_basic_rates(priv, ctx); + /* + * force CTS-to-self frames protection if RTS-CTS is not preferred + * one aggregation protection method + */ + if (!priv->hw_params.use_rts_for_aggregation) + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; + if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; @@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, else ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; + if (bss_conf->use_cts_prot) + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; + else + ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; + memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); if (vif->type == NL80211_IFTYPE_AP || diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 4873006..d67a37a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -67,8 +67,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 9 -#define IWL3160_UCODE_API_MAX 9 +#define IWL7260_UCODE_API_MAX 10 +#define IWL3160_UCODE_API_MAX 10 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 9 diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 44b19e0..e93c697 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -67,7 +67,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 9 +#define IWL8000_UCODE_API_MAX 10 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 8 diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c index 33da3df..d4bd550 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c @@ -101,7 +101,7 @@ static bool halbtc_legacy(struct rtl_priv *adapter) bool is_legacy = false; - if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B)) + if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G)) is_legacy = true; return is_legacy; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 361435f..1ac6383 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -317,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ + {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */ {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/ {RTL_USB_DEVICE(0x0eb0, 0x9071, 
rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index e29e15d..f379689 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -576,6 +576,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, init_waitqueue_head(&queue->dealloc_wq); atomic_set(&queue->inflight_packets, 0); + netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, + XENVIF_NAPI_WEIGHT); + if (tx_evtchn == rx_evtchn) { /* feature-split-event-channels == 0 */ err = bind_interdomain_evtchn_to_irqhandler( @@ -629,9 +632,6 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, wake_up_process(queue->task); wake_up_process(queue->dealloc_task); - netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, - XENVIF_NAPI_WEIGHT); - return 0; err_rx_unbind: |
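
The xen-netback change above moves netif_napi_add() ahead of binding the event-channel IRQs: once a handler is installed, an interrupt may fire immediately, and scheduling a NAPI context that has not been initialised yet can dereference an uninitialised napi_struct. The safe ordering in any driver's open/connect path is: initialise NAPI, then request the IRQ, then enable NAPI and the queue. A sketch of that ordering; struct xdrv_priv, xdrv_irq() and xdrv_poll() are placeholders:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct xdrv_priv {
	struct napi_struct napi;
	int irq;
};

/* Placeholders for the driver's own handler and poll routine. */
irqreturn_t xdrv_irq(int irq, void *data);
int xdrv_poll(struct napi_struct *napi, int budget);

static int xdrv_open(struct net_device *ndev)
{
	struct xdrv_priv *priv = netdev_priv(ndev);
	int err;

	/* 1. The NAPI context must exist before any IRQ can schedule it. */
	netif_napi_add(ndev, &priv->napi, xdrv_poll, NAPI_POLL_WEIGHT);

	/* 2. Only now is it safe to let interrupts in. */
	err = request_irq(priv->irq, xdrv_irq, 0, ndev->name, priv);
	if (err) {
		netif_napi_del(&priv->napi);
		return err;
	}

	/* 3. Enable NAPI and the queue last. */
	napi_enable(&priv->napi);
	netif_start_queue(ndev);
	return 0;
}
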