Diffstat (limited to 'drivers/net')
-rw-r--r--   drivers/net/bnx2.c               |   2
-rw-r--r--   drivers/net/bnx2x/bnx2x_link.c   |   3
-rw-r--r--   drivers/net/sfc/efx.c            |  24
-rw-r--r--   drivers/net/sfc/efx.h            |   2
-rw-r--r--   drivers/net/sfc/net_driver.h     |  13
-rw-r--r--   drivers/net/sfc/tx.c             | 111
-rw-r--r--   drivers/net/stmmac/stmmac_main.c |  50
-rw-r--r--   drivers/net/sundance.c           |  23
8 files changed, 80 insertions(+), 148 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 819b55c..6fa7984 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -8395,8 +8395,6 @@ bnx2_remove_one(struct pci_dev *pdev)
         struct net_device *dev = pci_get_drvdata(pdev);
         struct bnx2 *bp = netdev_priv(dev);
 
-        cancel_work_sync(&bp->reset_task);
-
         unregister_netdev(dev);
 
         if (bp->mips_firmware)
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 97cbee2..43b0de2 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -354,9 +354,6 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
         struct bnx2x *bp = params->bp;
         u32 val = 0;
 
-        if ((1 < strict_cos) && (NULL == params))
-                return -EINVAL;
-
         DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
         /**
          * Bitmap of 5bits length. Each bit specifies whether the entry behaves
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 2166c1d..711449c 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -461,9 +461,6 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
                 }
         }
 
-        spin_lock_init(&channel->tx_stop_lock);
-        atomic_set(&channel->tx_stop_count, 1);
-
         rx_queue = &channel->rx_queue;
         rx_queue->efx = efx;
         setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
@@ -1406,11 +1403,11 @@ static void efx_start_all(struct efx_nic *efx)
          * restart the transmit interface early so the watchdog timer stops */
         efx_start_port(efx);
 
-        efx_for_each_channel(channel, efx) {
-                if (efx_dev_registered(efx))
-                        efx_wake_queue(channel);
+        if (efx_dev_registered(efx))
+                netif_tx_wake_all_queues(efx->net_dev);
+
+        efx_for_each_channel(channel, efx)
                 efx_start_channel(channel);
-        }
 
         if (efx->legacy_irq)
                 efx->legacy_irq_enabled = true;
@@ -1498,9 +1495,7 @@ static void efx_stop_all(struct efx_nic *efx)
         /* Stop the kernel transmit interface late, so the watchdog
          * timer isn't ticking over the flush */
         if (efx_dev_registered(efx)) {
-                struct efx_channel *channel;
-                efx_for_each_channel(channel, efx)
-                        efx_stop_queue(channel);
+                netif_tx_stop_all_queues(efx->net_dev);
                 netif_tx_lock_bh(efx->net_dev);
                 netif_tx_unlock_bh(efx->net_dev);
         }
@@ -1896,6 +1891,7 @@ static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
 static int efx_register_netdev(struct efx_nic *efx)
 {
         struct net_device *net_dev = efx->net_dev;
+        struct efx_channel *channel;
         int rc;
 
         net_dev->watchdog_timeo = 5 * HZ;
@@ -1918,6 +1914,14 @@ static int efx_register_netdev(struct efx_nic *efx)
         if (rc)
                 goto fail_locked;
 
+        efx_for_each_channel(channel, efx) {
+                struct efx_tx_queue *tx_queue;
+                efx_for_each_channel_tx_queue(tx_queue, channel) {
+                        tx_queue->core_txq = netdev_get_tx_queue(
+                                efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
+                }
+        }
+
         /* Always start with carrier off; PHY events will detect the link */
         netif_carrier_off(efx->net_dev);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 003fdb3..d43a7e5 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -36,8 +36,6 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_channel *channel);
-extern void efx_wake_queue(struct efx_channel *channel);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 76f2fb1..bdce66d 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -136,6 +136,7 @@ struct efx_tx_buffer {
  * @efx: The associated Efx NIC
  * @queue: DMA queue number
  * @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
@@ -148,8 +149,6 @@ struct efx_tx_buffer {
  *      variable indicates that the queue is empty.  This is to
  *      avoid cache-line ping-pong between the xmit path and the
  *      completion path.
- * @stopped: Stopped count.
- *      Set if this TX queue is currently stopping its port.
  * @insert_count: Current insert pointer
  *      This is the number of buffers that have been added to the
  *      software ring.
@@ -179,7 +178,7 @@ struct efx_tx_queue {
         struct efx_nic *efx ____cacheline_aligned_in_smp;
         unsigned queue;
         struct efx_channel *channel;
-        struct efx_nic *nic;
+        struct netdev_queue *core_txq;
         struct efx_tx_buffer *buffer;
         struct efx_special_buffer txd;
         unsigned int ptr_mask;
@@ -188,7 +187,6 @@ struct efx_tx_queue {
         /* Members used mainly on the completion path */
         unsigned int read_count ____cacheline_aligned_in_smp;
         unsigned int old_write_count;
-        int stopped;
 
         /* Members used only on the xmit path */
         unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -321,7 +319,6 @@ enum efx_rx_alloc_method {
  * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
- * @reset_work: Scheduled reset work thread
  * @work_pending: Is work pending via NAPI?
  * @eventq: Event queue buffer
  * @eventq_mask: Event queue pointer mask
@@ -342,8 +339,6 @@ enum efx_rx_alloc_method {
  * @n_rx_overlength: Count of RX_OVERLENGTH errors
  * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
 * @rx_queue: RX queue for this channel
- * @tx_stop_count: Core TX queue stop count
- * @tx_stop_lock: Core TX queue stop lock
 * @tx_queue: TX queues for this channel
 */
 struct efx_channel {
@@ -382,10 +377,6 @@ struct efx_channel {
         bool rx_pkt_csummed;
 
         struct efx_rx_queue rx_queue;
-
-        atomic_t tx_stop_count;
-        spinlock_t tx_stop_lock;
-
         struct efx_tx_queue tx_queue[2];
 };
 
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index bdb92b4..2f5e9da 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,50 +30,6 @@
  */
 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
-/* We need to be able to nest calls to netif_tx_stop_queue(), partly
- * because of the 2 hardware queues associated with each core queue,
- * but also so that we can inhibit TX for reasons other than a full
- * hardware queue.
- */
-void efx_stop_queue(struct efx_channel *channel)
-{
-        struct efx_nic *efx = channel->efx;
-        struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
-        if (!tx_queue)
-                return;
-
-        spin_lock_bh(&channel->tx_stop_lock);
-        netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
-
-        atomic_inc(&channel->tx_stop_count);
-        netif_tx_stop_queue(
-                netdev_get_tx_queue(efx->net_dev,
-                                    tx_queue->queue / EFX_TXQ_TYPES));
-
-        spin_unlock_bh(&channel->tx_stop_lock);
-}
-
-/* Decrement core TX queue stop count and wake it if the count is 0 */
-void efx_wake_queue(struct efx_channel *channel)
-{
-        struct efx_nic *efx = channel->efx;
-        struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
-        if (!tx_queue)
-                return;
-
-        local_bh_disable();
-        if (atomic_dec_and_lock(&channel->tx_stop_count,
-                                &channel->tx_stop_lock)) {
-                netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
-                netif_tx_wake_queue(
-                        netdev_get_tx_queue(efx->net_dev,
-                                            tx_queue->queue / EFX_TXQ_TYPES));
-                spin_unlock(&channel->tx_stop_lock);
-        }
-        local_bh_enable();
-}
-
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                                struct efx_tx_buffer *buffer)
 {
@@ -234,9 +190,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                          * checked.  Update the xmit path's
                          * copy of read_count.
                          */
-                        ++tx_queue->stopped;
+                        netif_tx_stop_queue(tx_queue->core_txq);
                         /* This memory barrier protects the
-                         * change of stopped from the access
+                         * change of queue state from the access
                          * of read_count. */
                         smp_mb();
                         tx_queue->old_read_count =
@@ -244,10 +200,12 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                         fill_level = (tx_queue->insert_count
                                       - tx_queue->old_read_count);
                         q_space = efx->txq_entries - 1 - fill_level;
-                        if (unlikely(q_space-- <= 0))
-                                goto stop;
+                        if (unlikely(q_space-- <= 0)) {
+                                rc = NETDEV_TX_BUSY;
+                                goto unwind;
+                        }
                         smp_mb();
-                        --tx_queue->stopped;
+                        netif_tx_start_queue(tx_queue->core_txq);
                 }
 
                 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -307,13 +265,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
         /* Mark the packet as transmitted, and free the SKB ourselves */
         dev_kfree_skb_any(skb);
-        goto unwind;
-
- stop:
-        rc = NETDEV_TX_BUSY;
-
-        if (tx_queue->stopped == 1)
-                efx_stop_queue(tx_queue->channel);
 
  unwind:
         /* Work backwards until we hit the original insert pointer value */
@@ -400,32 +351,21 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
         unsigned fill_level;
         struct efx_nic *efx = tx_queue->efx;
-        struct netdev_queue *queue;
 
         EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
         efx_dequeue_buffers(tx_queue, index);
 
         /* See if we need to restart the netif queue.  This barrier
-         * separates the update of read_count from the test of
-         * stopped. */
+         * separates the update of read_count from the test of the
+         * queue state. */
         smp_mb();
-        if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
+        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+            likely(efx->port_enabled)) {
                 fill_level = tx_queue->insert_count - tx_queue->read_count;
                 if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
                         EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
-
-                        /* Do this under netif_tx_lock(), to avoid racing
-                         * with efx_xmit().
-                         */
-                        queue = netdev_get_tx_queue(
-                                efx->net_dev,
-                                tx_queue->queue / EFX_TXQ_TYPES);
-                        __netif_tx_lock(queue, smp_processor_id());
-                        if (tx_queue->stopped) {
-                                tx_queue->stopped = 0;
-                                efx_wake_queue(tx_queue->channel);
-                        }
-                        __netif_tx_unlock(queue);
+                        netif_tx_wake_queue(tx_queue->core_txq);
                 }
         }
 
@@ -487,7 +427,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
         tx_queue->read_count = 0;
         tx_queue->old_read_count = 0;
         tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
-        BUG_ON(tx_queue->stopped);
 
         /* Set up TX descriptor ring */
         efx_nic_init_tx(tx_queue);
@@ -523,12 +462,6 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
         /* Free up TSO header cache */
         efx_fini_tso(tx_queue);
-
-        /* Release queue's stop on port, if any */
-        if (tx_queue->stopped) {
-                tx_queue->stopped = 0;
-                efx_wake_queue(tx_queue->channel);
-        }
 }
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
@@ -770,9 +703,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                          * since the xmit path last checked.  Update
                          * the xmit path's copy of read_count.
                          */
-                        ++tx_queue->stopped;
+                        netif_tx_stop_queue(tx_queue->core_txq);
                         /* This memory barrier protects the change of
-                         * stopped from the access of read_count. */
+                         * queue state from the access of read_count. */
                         smp_mb();
                         tx_queue->old_read_count =
                                 ACCESS_ONCE(tx_queue->read_count);
@@ -784,7 +717,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                                 return 1;
                         }
                         smp_mb();
-                        --tx_queue->stopped;
+                        netif_tx_start_queue(tx_queue->core_txq);
                 }
 
                 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -1124,8 +1057,10 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
         while (1) {
                 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-                if (unlikely(rc))
-                        goto stop;
+                if (unlikely(rc)) {
+                        rc2 = NETDEV_TX_BUSY;
+                        goto unwind;
+                }
 
                 /* Move onto the next fragment? */
                 if (state.in_len == 0) {
@@ -1154,14 +1089,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
         netif_err(efx, tx_err, efx->net_dev,
                   "Out of memory for TSO headers, or PCI mapping error\n");
         dev_kfree_skb_any(skb);
-        goto unwind;
-
- stop:
-        rc2 = NETDEV_TX_BUSY;
-
-        /* Stop the queue if it wasn't stopped before. */
-        if (tx_queue->stopped == 1)
-                efx_stop_queue(tx_queue->channel);
 
  unwind:
         /* Free the DMA mapping we were in the process of writing out */
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 20f803d..34a0af3 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1647,10 +1647,8 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
         pr_info("STMMAC driver:\n\tplatform registration... ");
"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENODEV; - goto out; - } + if (!res) + return -ENODEV; pr_info("\tdone!\n"); if (!request_mem_region(res->start, resource_size(res), @@ -1658,22 +1656,21 @@ static int stmmac_dvr_probe(struct platform_device *pdev) pr_err("%s: ERROR: memory allocation failed" "cannot get the I/O addr 0x%x\n", __func__, (unsigned int)res->start); - ret = -EBUSY; - goto out; + return -EBUSY; } addr = ioremap(res->start, resource_size(res)); if (!addr) { pr_err("%s: ERROR: memory mapping failed\n", __func__); ret = -ENOMEM; - goto out; + goto out_release_region; } ndev = alloc_etherdev(sizeof(struct stmmac_priv)); if (!ndev) { pr_err("%s: ERROR: allocating the device\n", __func__); ret = -ENOMEM; - goto out; + goto out_unmap; } SET_NETDEV_DEV(ndev, &pdev->dev); @@ -1683,8 +1680,8 @@ static int stmmac_dvr_probe(struct platform_device *pdev) if (ndev->irq == -ENXIO) { pr_err("%s: ERROR: MAC IRQ configuration " "information not found\n", __func__); - ret = -ENODEV; - goto out; + ret = -ENXIO; + goto out_free_ndev; } priv = netdev_priv(ndev); @@ -1711,18 +1708,18 @@ static int stmmac_dvr_probe(struct platform_device *pdev) if (priv->plat->init) { ret = priv->plat->init(pdev); if (unlikely(ret)) - goto out; + goto out_free_ndev; } /* MAC HW revice detection */ ret = stmmac_mac_device_setup(ndev); if (ret < 0) - goto out; + goto out_plat_exit; /* Network Device Registration */ ret = stmmac_probe(ndev); if (ret < 0) - goto out; + goto out_plat_exit; /* associate a PHY - it is provided by another platform bus */ if (!driver_for_each_device @@ -1730,7 +1727,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) stmmac_associate_phy)) { pr_err("No PHY device is associated with this MAC!\n"); ret = -ENODEV; - goto out; + goto out_unregister; } pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" @@ -1741,19 +1738,22 @@ static int stmmac_dvr_probe(struct platform_device *pdev) pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id); ret = stmmac_mdio_register(ndev); if (ret < 0) - goto out; + goto out_unregister; pr_debug("registered!\n"); + return 0; -out: - if (ret < 0) { - if (priv->plat->exit) - priv->plat->exit(pdev); - - platform_set_drvdata(pdev, NULL); - release_mem_region(res->start, resource_size(res)); - if (addr != NULL) - iounmap(addr); - } +out_unregister: + unregister_netdev(ndev); +out_plat_exit: + if (priv->plat->exit) + priv->plat->exit(pdev); +out_free_ndev: + free_netdev(ndev); + platform_set_drvdata(pdev, NULL); +out_unmap: + iounmap(addr); +out_release_region: + release_mem_region(res->start, resource_size(res)); return ret; } diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index 3ed2a67..e5662962 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -294,6 +294,9 @@ enum alta_offsets { /* Aliased and bogus values! 
         RxStatus = 0x0c,
 };
+
+#define ASIC_HI_WORD(x) ((x) + 2)
+
 enum ASICCtrl_HiWord_bit {
         GlobalReset = 0x0001,
         RxReset = 0x0002,
@@ -431,6 +434,7 @@ static void netdev_error(struct net_device *dev, int intr_status);
 static void netdev_error(struct net_device *dev, int intr_status);
 static void set_rx_mode(struct net_device *dev);
 static int __set_mac_addr(struct net_device *dev);
+static int sundance_set_mac_addr(struct net_device *dev, void *data);
 static struct net_device_stats *get_stats(struct net_device *dev);
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int netdev_close(struct net_device *dev);
@@ -464,7 +468,7 @@ static const struct net_device_ops netdev_ops = {
         .ndo_do_ioctl           = netdev_ioctl,
         .ndo_tx_timeout         = tx_timeout,
         .ndo_change_mtu         = change_mtu,
-        .ndo_set_mac_address    = eth_mac_addr,
+        .ndo_set_mac_address    = sundance_set_mac_addr,
         .ndo_validate_addr      = eth_validate_addr,
 };
 
@@ -1592,6 +1596,19 @@ static int __set_mac_addr(struct net_device *dev)
         return 0;
 }
 
+/* Invoked with rtnl_lock held */
+static int sundance_set_mac_addr(struct net_device *dev, void *data)
+{
+        const struct sockaddr *addr = data;
+
+        if (!is_valid_ether_addr(addr->sa_data))
+                return -EINVAL;
+        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+        __set_mac_addr(dev);
+
+        return 0;
+}
+
 static const struct {
         const char name[ETH_GSTRING_LEN];
 } sundance_stats[] = {
@@ -1772,10 +1789,10 @@ static int netdev_close(struct net_device *dev)
         }
 
         iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
-                        ioaddr +ASICCtrl + 2);
+                        ioaddr + ASIC_HI_WORD(ASICCtrl));
 
         for (i = 2000; i > 0; i--) {
-                if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
+                if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
                         break;
                 mdelay(1);
         }
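
Note: the sketch below is not part of the commit above. It is a simplified, hedged illustration of the stop/wake pattern the sfc changes adopt: stop the core netdev TX queue from the xmit path when the hardware ring looks full, re-check after a memory barrier, and wake the queue from the completion path once enough descriptors have been reclaimed. The struct example_txq type, its fields, and the function names are hypothetical stand-ins, not sfc code; only the netif_tx_* calls and smp_mb() are the real kernel APIs used by the patch.

/* Illustrative sketch only -- not part of the patch. Hypothetical
 * "example" names stand in for driver-specific state.
 */
#include <linux/netdevice.h>

struct example_txq {
	struct netdev_queue *core_txq;	/* set via netdev_get_tx_queue() */
	unsigned int insert_count;	/* producer index (xmit path) */
	unsigned int read_count;	/* consumer index (completion path) */
	unsigned int entries;		/* descriptor ring size */
};

/* Called on the xmit path when the descriptor ring appears full. */
static netdev_tx_t example_ring_full(struct example_txq *txq)
{
	netif_tx_stop_queue(txq->core_txq);

	/* This barrier pairs with the one in the completion path, so a
	 * concurrent completion either sees the stopped queue or this
	 * path sees its updated read_count. */
	smp_mb();
	if (txq->entries - (txq->insert_count - txq->read_count) > 1) {
		/* Space was freed while we were stopping; carry on. */
		netif_tx_start_queue(txq->core_txq);
		return NETDEV_TX_OK;
	}
	return NETDEV_TX_BUSY;
}

/* Called from the TX completion path after read_count has advanced. */
static void example_tx_done(struct example_txq *txq)
{
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(txq->core_txq)) &&
	    txq->insert_count - txq->read_count < txq->entries / 2)
		netif_tx_wake_queue(txq->core_txq);
}

The design point mirrored from the patch is that, once each hardware queue maps to exactly one core netdev_queue (tx_queue->core_txq), the driver no longer needs its own stop count or spinlock; the queue state kept by the networking core, plus the paired barriers, is sufficient.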