Diffstat (limited to 'drivers'): 47 files changed, 1085 insertions, 466 deletions
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig index 4175a4b..bd995b2 100644 --- a/drivers/infiniband/hw/mlx4/Kconfig +++ b/drivers/infiniband/hw/mlx4/Kconfig @@ -1,5 +1,6 @@ config MLX4_INFINIBAND tristate "Mellanox ConnectX HCA support" + depends on NETDEVICES && NETDEV_10000 && PCI select MLX4_CORE ---help--- This driver provides low-level InfiniBand support for diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index ca00f0a..de579d0 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c @@ -287,7 +287,7 @@ static int elmc_open(struct net_device *dev) elmc_id_attn586(); /* disable interrupts */ - ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, + ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED, dev->name, dev); if (ret) { pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq); diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index 70705d1..0d6ca1e 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c @@ -443,7 +443,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot) * Grab the IRQ */ - err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev); + err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev); if (err) { release_region(dev->base_addr, MC32_IO_EXTENT); pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 13d01f3..d24f54b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -177,6 +177,13 @@ config NET_SB1000 source "drivers/net/arcnet/Kconfig" +config MII + tristate "Generic Media Independent Interface device support" + help + Most ethernet controllers have MII transceiver either as an external + or internal device. It is safe to say Y or M here even if your + ethernet card lacks MII. + source "drivers/net/phy/Kconfig" # @@ -212,13 +219,6 @@ menuconfig NET_ETHERNET if NET_ETHERNET -config MII - tristate "Generic Media Independent Interface device support" - help - Most ethernet controllers have MII transceiver either as an external - or internal device. It is safe to say Y or M here even if your - ethernet card lack MII. 
- config MACB tristate "Atmel MACB support" depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 || ARCH_AT91SAM9G20 || ARCH_AT91SAM9G45 || ARCH_AT91CAP9 diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index c49b643..3bf236b 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -20,8 +20,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.60.00-1" -#define DRV_MODULE_RELDATE "2010/10/06" +#define DRV_MODULE_VERSION "1.60.00-3" +#define DRV_MODULE_RELDATE "2010/10/19" #define BNX2X_BC_VER 0x040200 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) @@ -1180,15 +1180,10 @@ struct bnx2x { TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY /* func init flags */ -#define FUNC_FLG_RSS 0x0001 -#define FUNC_FLG_STATS 0x0002 -/* removed FUNC_FLG_UNMATCHED 0x0004 */ -#define FUNC_FLG_TPA 0x0008 -#define FUNC_FLG_SPQ 0x0010 -#define FUNC_FLG_LEADING 0x0020 /* PF only */ - -#define FUNC_CONFIG(flgs) ((flgs) & (FUNC_FLG_RSS | FUNC_FLG_TPA | \ - FUNC_FLG_LEADING)) +#define FUNC_FLG_STATS 0x0001 +#define FUNC_FLG_TPA 0x0002 +#define FUNC_FLG_SPQ 0x0004 +#define FUNC_FLG_LEADING 0x0008 /* PF only */ struct rxq_pause_params { u16 bd_th_lo; diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 97ef674..1966cee 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -507,8 +507,11 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); pad = cqe->fast_path_cqe.placement_offset; - /* If CQE is marked both TPA_START and TPA_END - it is a non-TPA CQE */ + /* - If CQE is marked both TPA_START and TPA_END it is + * a non-TPA CQE. + * - FP CQE will always have either TPA_START or/and + * TPA_STOP flags set. + */ if ((!fp->disable_tpa) && (TPA_TYPE(cqe_fp_flags) != (TPA_TYPE_START | TPA_TYPE_END))) { @@ -526,9 +529,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) bnx2x_set_skb_rxhash(bp, cqe, skb); goto next_rx; - } - - if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) { + } else { /* TPA_STOP */ DP(NETIF_MSG_RX_STATUS, "calling tpa_stop on queue %d\n", queue); @@ -830,7 +831,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) int i, j; bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN + - BNX2X_FW_IP_HDR_ALIGN_PAD; + IP_HEADER_ALIGNMENT_PADDING; DP(NETIF_MSG_IFUP, "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); @@ -1288,8 +1289,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) if (rc) { BNX2X_ERR("HW init failed, aborting\n"); bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); goto load_error2; } @@ -1522,6 +1521,12 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) { u16 pmcsr; + /* If there is no power capability, silently succeed */ + if (!bp->pm_cap) { + DP(NETIF_MSG_HW, "No power capability. 
Breaking.\n"); + return 0; + } + pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); switch (state) { diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 7f52cec..5bfe0ab 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h @@ -1032,6 +1032,4 @@ static inline void storm_memset_cmng(struct bnx2x *bp, void bnx2x_acquire_phy_lock(struct bnx2x *bp); void bnx2x_release_phy_lock(struct bnx2x *bp); -#define BNX2X_FW_IP_HDR_ALIGN_PAD 2 /* FW places hdr with this padding */ - #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index ead524b..f22e283 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -1111,14 +1111,19 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); - DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", - val, port, addr); + if (!CHIP_IS_E1(bp)) { + DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", + val, port, addr); - REG_WR(bp, addr, val); + REG_WR(bp, addr, val); - val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; + val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; + } } + if (CHIP_IS_E1(bp)) + REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF); + DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); @@ -1212,10 +1217,26 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp) u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; u32 val = REG_RD(bp, addr); - val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | - HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | - HC_CONFIG_0_REG_INT_LINE_EN_0 | - HC_CONFIG_0_REG_ATTN_BIT_EN_0); + /* + * in E1 we must use only PCI configuration space to disable + * MSI/MSIX capablility + * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block + */ + if (CHIP_IS_E1(bp)) { + /* Since IGU_PF_CONF_MSI_MSIX_EN still always on + * Use mask register to prevent from HC sending interrupts + * after we exit the function + */ + REG_WR(bp, HC_REG_INT_MASK + port*4, 0); + + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); @@ -2284,35 +2305,31 @@ void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters) void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) { - if (FUNC_CONFIG(p->func_flgs)) { - struct tstorm_eth_function_common_config tcfg = {0}; - - /* tpa */ - if (p->func_flgs & FUNC_FLG_TPA) - tcfg.config_flags |= - TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA; + struct tstorm_eth_function_common_config tcfg = {0}; + u16 rss_flgs; - /* set rss flags */ - if (p->func_flgs & FUNC_FLG_RSS) { - u16 rss_flgs = (p->rss->mode << - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT); + /* tpa */ + if (p->func_flgs & FUNC_FLG_TPA) + tcfg.config_flags |= + TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA; - if (p->rss->cap & RSS_IPV4_CAP) - rss_flgs |= RSS_IPV4_CAP_MASK; - if (p->rss->cap & RSS_IPV4_TCP_CAP) - rss_flgs |= RSS_IPV4_TCP_CAP_MASK; - if (p->rss->cap & RSS_IPV6_CAP) - rss_flgs |= RSS_IPV6_CAP_MASK; - if (p->rss->cap & RSS_IPV6_TCP_CAP) - rss_flgs |= RSS_IPV6_TCP_CAP_MASK; + /* set rss flags */ + rss_flgs = (p->rss->mode << + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT); - 
tcfg.config_flags |= rss_flgs; - tcfg.rss_result_mask = p->rss->result_mask; + if (p->rss->cap & RSS_IPV4_CAP) + rss_flgs |= RSS_IPV4_CAP_MASK; + if (p->rss->cap & RSS_IPV4_TCP_CAP) + rss_flgs |= RSS_IPV4_TCP_CAP_MASK; + if (p->rss->cap & RSS_IPV6_CAP) + rss_flgs |= RSS_IPV6_CAP_MASK; + if (p->rss->cap & RSS_IPV6_TCP_CAP) + rss_flgs |= RSS_IPV6_TCP_CAP_MASK; - } + tcfg.config_flags |= rss_flgs; + tcfg.rss_result_mask = p->rss->result_mask; - storm_memset_func_cfg(bp, &tcfg, p->func_id); - } + storm_memset_func_cfg(bp, &tcfg, p->func_id); /* Enable the function in the FW */ storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); @@ -2479,23 +2496,17 @@ void bnx2x_pf_init(struct bnx2x *bp) else flags |= FUNC_FLG_TPA; + /* function setup */ + /** * Although RSS is meaningless when there is a single HW queue we * still need it enabled in order to have HW Rx hash generated. - * - * if (is_eth_multi(bp)) - * flags |= FUNC_FLG_RSS; */ - flags |= FUNC_FLG_RSS; - - /* function setup */ - if (flags & FUNC_FLG_RSS) { - rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP | - RSS_IPV6_CAP | RSS_IPV6_TCP_CAP); - rss.mode = bp->multi_mode; - rss.result_mask = MULTI_MASK; - func_init.rss = &rss; - } + rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP | + RSS_IPV6_CAP | RSS_IPV6_TCP_CAP); + rss.mode = bp->multi_mode; + rss.result_mask = MULTI_MASK; + func_init.rss = &rss; func_init.func_flgs = flags; func_init.pf_id = BP_FUNC(bp); @@ -5446,7 +5457,8 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) struct bnx2x_ilt *ilt = BP_ILT(bp); u16 cdu_ilt_start; u32 addr, val; - int i; + u32 main_mem_base, main_mem_size, main_mem_prty_clr; + int i, main_mem_width; DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); @@ -5695,6 +5707,31 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func); bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func); + if (CHIP_IS_E1x(bp)) { + main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ + main_mem_base = HC_REG_MAIN_MEMORY + + BP_PORT(bp) * (main_mem_size * 4); + main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; + main_mem_width = 8; + + val = REG_RD(bp, main_mem_prty_clr); + if (val) + DP(BNX2X_MSG_MCP, "Hmmm... 
Parity errors in HC " + "block during " + "function init (0x%x)!\n", val); + + /* Clear "false" parity errors in MSI-X table */ + for (i = main_mem_base; + i < main_mem_base + main_mem_size * 4; + i += main_mem_width) { + bnx2x_read_dmae(bp, i, main_mem_width / 4); + bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), + i, main_mem_width / 4); + } + /* Clear HC parity attention */ + REG_RD(bp, main_mem_prty_clr); + } + bnx2x_phy_probe(&bp->link_params); return 0; diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 18a8628..1cefe48 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h @@ -800,9 +800,13 @@ #define HC_REG_HC_PRTY_MASK 0x1080a0 /* [R 3] Parity register #0 read */ #define HC_REG_HC_PRTY_STS 0x108094 -#define HC_REG_INT_MASK 0x108108 +/* [RC 3] Parity register #0 read clear */ +#define HC_REG_HC_PRTY_STS_CLR 0x108098 +#define HC_REG_INT_MASK 0x108108 #define HC_REG_LEADING_EDGE_0 0x108040 #define HC_REG_LEADING_EDGE_1 0x108048 +#define HC_REG_MAIN_MEMORY 0x108800 +#define HC_REG_MAIN_MEMORY_SIZE 152 #define HC_REG_P0_PROD_CONS 0x108200 #define HC_REG_P1_PROD_CONS 0x108400 #define HC_REG_PBA_COMMAND 0x108140 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 7703d35..6b9a7bd 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -76,6 +76,7 @@ #include <linux/if_vlan.h> #include <linux/if_bonding.h> #include <linux/jiffies.h> +#include <linux/preempt.h> #include <net/route.h> #include <net/net_namespace.h> #include <net/netns/generic.h> @@ -169,6 +170,10 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link /*----------------------------- Global variables ----------------------------*/ +#ifdef CONFIG_NET_POLL_CONTROLLER +cpumask_var_t netpoll_block_tx; +#endif + static const char * const version = DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; @@ -179,9 +184,6 @@ static int arp_ip_count; static int bond_mode = BOND_MODE_ROUNDROBIN; static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; static int lacp_fast; -#ifdef CONFIG_NET_POLL_CONTROLLER -static int disable_netpoll = 1; -#endif const struct bond_parm_tbl bond_lacp_tbl[] = { { "slow", AD_LACP_SLOW}, @@ -310,6 +312,7 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id) pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id); + block_netpoll_tx(); write_lock_bh(&bond->lock); list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { @@ -344,6 +347,7 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id) out: write_unlock_bh(&bond->lock); + unblock_netpoll_tx(); return res; } @@ -449,11 +453,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) { struct netpoll *np = bond->dev->npinfo->netpoll; slave_dev->npinfo = bond->dev->npinfo; - np->real_dev = np->dev = skb->dev; slave_dev->priv_flags |= IFF_IN_NETPOLL; - netpoll_send_skb(np, skb); + netpoll_send_skb_on_dev(np, skb, slave_dev); slave_dev->priv_flags &= ~IFF_IN_NETPOLL; - np->dev = bond->dev; } else #endif dev_queue_xmit(skb); @@ -1332,9 +1334,14 @@ static bool slaves_support_netpoll(struct net_device *bond_dev) static void bond_poll_controller(struct net_device *bond_dev) { - struct net_device *dev = bond_dev->npinfo->netpoll->real_dev; - if (dev != bond_dev) - netpoll_poll_dev(dev); + struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave; + int i; + + bond_for_each_slave(bond, slave, i) { + 
if (slave->dev && IS_UP(slave->dev)) + netpoll_poll_dev(slave->dev); + } } static void bond_netpoll_cleanup(struct net_device *bond_dev) @@ -1801,23 +1808,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_set_carrier(bond); #ifdef CONFIG_NET_POLL_CONTROLLER - /* - * Netpoll and bonding is broken, make sure it is not initialized - * until it is fixed. - */ - if (disable_netpoll) { + if (slaves_support_netpoll(bond_dev)) { + bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; + if (bond_dev->npinfo) + slave_dev->npinfo = bond_dev->npinfo; + } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) { bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; - } else { - if (slaves_support_netpoll(bond_dev)) { - bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; - if (bond_dev->npinfo) - slave_dev->npinfo = bond_dev->npinfo; - } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) { - bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; - pr_info("New slave device %s does not support netpoll\n", - slave_dev->name); - pr_info("Disabling netpoll support for %s\n", bond_dev->name); - } + pr_info("New slave device %s does not support netpoll\n", + slave_dev->name); + pr_info("Disabling netpoll support for %s\n", bond_dev->name); } #endif read_unlock(&bond->lock); @@ -1889,6 +1888,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) return -EINVAL; } + block_netpoll_tx(); netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE); write_lock_bh(&bond->lock); @@ -1898,6 +1898,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) pr_info("%s: %s not enslaved\n", bond_dev->name, slave_dev->name); write_unlock_bh(&bond->lock); + unblock_netpoll_tx(); return -EINVAL; } @@ -1991,6 +1992,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) } write_unlock_bh(&bond->lock); + unblock_netpoll_tx(); /* must do this from outside any spinlocks */ bond_destroy_slave_symlinks(bond_dev, slave_dev); @@ -2021,10 +2023,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) #ifdef CONFIG_NET_POLL_CONTROLLER read_lock_bh(&bond->lock); - /* Make sure netpoll over stays disabled until fixed. 
*/ - if (!disable_netpoll) - if (slaves_support_netpoll(bond_dev)) - bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; + if (slaves_support_netpoll(bond_dev)) + bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; read_unlock_bh(&bond->lock); if (slave_dev->netdev_ops->ndo_netpoll_cleanup) slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev); @@ -2180,7 +2180,6 @@ static int bond_release_all(struct net_device *bond_dev) out: write_unlock_bh(&bond->lock); - return 0; } @@ -2229,9 +2228,11 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi (old_active) && (new_active->link == BOND_LINK_UP) && IS_UP(new_active->dev)) { + block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_change_active_slave(bond, new_active); write_unlock_bh(&bond->curr_slave_lock); + unblock_netpoll_tx(); } else res = -EINVAL; @@ -2463,9 +2464,11 @@ static void bond_miimon_commit(struct bonding *bond) do_failover: ASSERT_RTNL(); + block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); + unblock_netpoll_tx(); } bond_set_carrier(bond); @@ -2908,11 +2911,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work) } if (do_failover) { + block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); + unblock_netpoll_tx(); } re_arm: @@ -3071,9 +3076,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) do_failover: ASSERT_RTNL(); + block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); + unblock_netpoll_tx(); } bond_set_carrier(bond); @@ -4561,6 +4568,13 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bonding *bond = netdev_priv(dev); + /* + * If we risk deadlock from transmitting this in the + * netpoll path, tell netpoll to queue the frame for later tx + */ + if (is_netpoll_tx_blocked(dev)) + return NETDEV_TX_BUSY; + if (TX_QUEUE_OVERRIDE(bond->params.mode)) { if (!bond_slave_override(bond, skb)) return NETDEV_TX_OK; @@ -5283,6 +5297,13 @@ static int __init bonding_init(void) if (res) goto out; +#ifdef CONFIG_NET_POLL_CONTROLLER + if (!alloc_cpumask_var(&netpoll_block_tx, GFP_KERNEL)) { + res = -ENOMEM; + goto out; + } +#endif + res = register_pernet_subsys(&bond_net_ops); if (res) goto out; @@ -5301,6 +5322,7 @@ static int __init bonding_init(void) if (res) goto err; + register_netdevice_notifier(&bond_netdev_notifier); register_inetaddr_notifier(&bond_inetaddr_notifier); bond_register_ipv6_notifier(); @@ -5310,6 +5332,9 @@ err: rtnl_link_unregister(&bond_link_ops); err_link: unregister_pernet_subsys(&bond_net_ops); +#ifdef CONFIG_NET_POLL_CONTROLLER + free_cpumask_var(netpoll_block_tx); +#endif goto out; } @@ -5324,6 +5349,10 @@ static void __exit bonding_exit(void) rtnl_link_unregister(&bond_link_ops); unregister_pernet_subsys(&bond_net_ops); + +#ifdef CONFIG_NET_POLL_CONTROLLER + free_cpumask_var(netpoll_block_tx); +#endif } module_init(bonding_init); diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 01b4c3f..8fd0174 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -1066,6 +1066,7 @@ static ssize_t bonding_store_primary(struct device *d, if (!rtnl_trylock()) return restart_syscall(); + block_netpoll_tx(); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); @@ -1101,6 +1102,7 @@ static ssize_t 
bonding_store_primary(struct device *d, out: write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); + unblock_netpoll_tx(); rtnl_unlock(); return count; @@ -1146,11 +1148,13 @@ static ssize_t bonding_store_primary_reselect(struct device *d, bond->dev->name, pri_reselect_tbl[new_value].modename, new_value); + block_netpoll_tx(); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); + unblock_netpoll_tx(); out: rtnl_unlock(); return ret; @@ -1232,6 +1236,8 @@ static ssize_t bonding_store_active_slave(struct device *d, if (!rtnl_trylock()) return restart_syscall(); + + block_netpoll_tx(); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); @@ -1288,6 +1294,8 @@ static ssize_t bonding_store_active_slave(struct device *d, out: write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); + unblock_netpoll_tx(); + rtnl_unlock(); return count; diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index c15f213..2c12a5f 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -19,6 +19,7 @@ #include <linux/proc_fs.h> #include <linux/if_bonding.h> #include <linux/kobject.h> +#include <linux/cpumask.h> #include <linux/in6.h> #include "bond_3ad.h" #include "bond_alb.h" @@ -117,6 +118,35 @@ bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave) +#ifdef CONFIG_NET_POLL_CONTROLLER +extern cpumask_var_t netpoll_block_tx; + +static inline void block_netpoll_tx(void) +{ + preempt_disable(); + BUG_ON(cpumask_test_and_set_cpu(smp_processor_id(), + netpoll_block_tx)); +} + +static inline void unblock_netpoll_tx(void) +{ + BUG_ON(!cpumask_test_and_clear_cpu(smp_processor_id(), + netpoll_block_tx)); + preempt_enable(); +} + +static inline int is_netpoll_tx_blocked(struct net_device *dev) +{ + if (unlikely(dev->priv_flags & IFF_IN_NETPOLL)) + return cpumask_test_cpu(smp_processor_id(), netpoll_block_tx); + return 0; +} +#else +#define block_netpoll_tx() +#define unblock_netpoll_tx() +#define is_netpoll_tx_blocked(dev) (0) +#endif + struct bond_params { int mode; int xmit_policy; diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index b11a0cb..c664be2 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -38,14 +38,14 @@ * static struct mcp251x_platform_data mcp251x_info = { * .oscillator_frequency = 8000000, * .board_specific_setup = &mcp251x_setup, - * .model = CAN_MCP251X_MCP2510, * .power_enable = mcp251x_power_enable, * .transceiver_enable = NULL, * }; * * static struct spi_board_info spi_board_info[] = { * { - * .modalias = "mcp251x", + * .modalias = "mcp2510", + * // or "mcp2515" depending on your controller * .platform_data = &mcp251x_info, * .irq = IRQ_EINT13, * .max_speed_hz = 2*1000*1000, @@ -125,6 +125,8 @@ # define CANINTF_TX0IF 0x04 # define CANINTF_RX1IF 0x02 # define CANINTF_RX0IF 0x01 +# define CANINTF_ERR_TX \ + (CANINTF_ERRIF | CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF) #define EFLG 0x2d # define EFLG_EWARN 0x01 # define EFLG_RXWAR 0x02 @@ -222,10 +224,16 @@ static struct can_bittiming_const mcp251x_bittiming_const = { .brp_inc = 1, }; +enum mcp251x_model { + CAN_MCP251X_MCP2510 = 0x2510, + CAN_MCP251X_MCP2515 = 0x2515, +}; + struct mcp251x_priv { struct can_priv can; struct net_device *net; struct spi_device *spi; + enum mcp251x_model model; struct mutex mcp_lock; /* SPI device lock */ @@ -250,6 +258,16 @@ struct mcp251x_priv { int restart_tx; }; +#define 
MCP251X_IS(_model) \ +static inline int mcp251x_is_##_model(struct spi_device *spi) \ +{ \ + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); \ + return priv->model == CAN_MCP251X_MCP##_model; \ +} + +MCP251X_IS(2510); +MCP251X_IS(2515); + static void mcp251x_clean(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); @@ -319,6 +337,20 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg) return val; } +static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg, + uint8_t *v1, uint8_t *v2) +{ + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); + + priv->spi_tx_buf[0] = INSTRUCTION_READ; + priv->spi_tx_buf[1] = reg; + + mcp251x_spi_trans(spi, 4); + + *v1 = priv->spi_rx_buf[2]; + *v2 = priv->spi_rx_buf[3]; +} + static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val) { struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); @@ -346,10 +378,9 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg, static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, int len, int tx_buf_idx) { - struct mcp251x_platform_data *pdata = spi->dev.platform_data; struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); - if (pdata->model == CAN_MCP251X_MCP2510) { + if (mcp251x_is_2510(spi)) { int i; for (i = 1; i < TXBDAT_OFF + len; i++) @@ -392,9 +423,8 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, int buf_idx) { struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); - struct mcp251x_platform_data *pdata = spi->dev.platform_data; - if (pdata->model == CAN_MCP251X_MCP2510) { + if (mcp251x_is_2510(spi)) { int i, len; for (i = 1; i < RXBDAT_OFF; i++) @@ -451,7 +481,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx) priv->net->stats.rx_packets++; priv->net->stats.rx_bytes += frame->can_dlc; - netif_rx(skb); + netif_rx_ni(skb); } static void mcp251x_hw_sleep(struct spi_device *spi) @@ -676,7 +706,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1) if (skb) { frame->can_id = can_id; frame->data[1] = data1; - netif_rx(skb); + netif_rx_ni(skb); } else { dev_err(&net->dev, "cannot allocate error skb\n"); @@ -754,24 +784,39 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) mutex_lock(&priv->mcp_lock); while (!priv->force_quit) { enum can_state new_state; - u8 intf = mcp251x_read_reg(spi, CANINTF); - u8 eflag; + u8 intf, eflag; + u8 clear_intf = 0; int can_id = 0, data1 = 0; + mcp251x_read_2regs(spi, CANINTF, &intf, &eflag); + + /* receive buffer 0 */ if (intf & CANINTF_RX0IF) { mcp251x_hw_rx(spi, 0); - /* Free one buffer ASAP */ - mcp251x_write_bits(spi, CANINTF, intf & CANINTF_RX0IF, - 0x00); + /* + * Free one buffer ASAP + * (The MCP2515 does this automatically.) + */ + if (mcp251x_is_2510(spi)) + mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00); } - if (intf & CANINTF_RX1IF) + /* receive buffer 1 */ + if (intf & CANINTF_RX1IF) { mcp251x_hw_rx(spi, 1); + /* the MCP2515 does this automatically */ + if (mcp251x_is_2510(spi)) + clear_intf |= CANINTF_RX1IF; + } - mcp251x_write_bits(spi, CANINTF, intf, 0x00); + /* any error or tx interrupt we need to clear? 
*/ + if (intf & CANINTF_ERR_TX) + clear_intf |= intf & CANINTF_ERR_TX; + if (clear_intf) + mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00); - eflag = mcp251x_read_reg(spi, EFLG); - mcp251x_write_reg(spi, EFLG, 0x00); + if (eflag) + mcp251x_write_bits(spi, EFLG, eflag, 0x00); /* Update can state */ if (eflag & EFLG_TXBO) { @@ -816,10 +861,14 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) if (intf & CANINTF_ERRIF) { /* Handle overflow counters */ if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) { - if (eflag & EFLG_RX0OVR) + if (eflag & EFLG_RX0OVR) { net->stats.rx_over_errors++; - if (eflag & EFLG_RX1OVR) + net->stats.rx_errors++; + } + if (eflag & EFLG_RX1OVR) { net->stats.rx_over_errors++; + net->stats.rx_errors++; + } can_id |= CAN_ERR_CRTL; data1 |= CAN_ERR_CRTL_RX_OVERFLOW; } @@ -921,16 +970,12 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi) struct net_device *net; struct mcp251x_priv *priv; struct mcp251x_platform_data *pdata = spi->dev.platform_data; - int model = spi_get_device_id(spi)->driver_data; int ret = -ENODEV; if (!pdata) /* Platform data is required for osc freq */ goto error_out; - if (model) - pdata->model = model; - /* Allocate can/net device */ net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX); if (!net) { @@ -947,6 +992,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi) priv->can.clock.freq = pdata->oscillator_frequency / 2; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY; + priv->model = spi_get_device_id(spi)->driver_data; priv->net = net; dev_set_drvdata(&spi->dev, priv); @@ -1120,8 +1166,7 @@ static int mcp251x_can_resume(struct spi_device *spi) #define mcp251x_can_resume NULL #endif -static struct spi_device_id mcp251x_id_table[] = { - { "mcp251x", 0 /* Use pdata.model */ }, +static const struct spi_device_id mcp251x_id_table[] = { { "mcp2510", CAN_MCP251X_MCP2510 }, { "mcp2515", CAN_MCP251X_MCP2515 }, { }, diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h index 5c7bde7..873cb7d 100644 --- a/drivers/net/cxgb4vf/t4vf_common.h +++ b/drivers/net/cxgb4vf/t4vf_common.h @@ -132,15 +132,15 @@ struct rss_params { unsigned int mode; /* RSS mode */ union { struct { - int synmapen:1; /* SYN Map Enable */ - int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */ - int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */ - int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */ - int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */ - int ofdmapen:1; /* Offload Map Enable */ - int tnlmapen:1; /* Tunnel Map Enable */ - int tnlalllookup:1; /* Tunnel All Lookup */ - int hashtoeplitz:1; /* use Toeplitz hash */ + unsigned int synmapen:1; /* SYN Map Enable */ + unsigned int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */ + unsigned int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */ + unsigned int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */ + unsigned int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */ + unsigned int ofdmapen:1; /* Offload Map Enable */ + unsigned int tnlmapen:1; /* Tunnel Map Enable */ + unsigned int tnlalllookup:1; /* Tunnel All Lookup */ + unsigned int hashtoeplitz:1; /* use Toeplitz hash */ } basicvirtual; } u; }; @@ -151,10 +151,10 @@ struct rss_params { union rss_vi_config { struct { u16 defaultq; /* Ingress Queue ID for !tnlalllookup */ - int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */ - int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */ - 
int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */ - int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */ + unsigned int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */ + unsigned int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */ + unsigned int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */ + unsigned int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */ int udpen; /* hash 4-tuple UDP ingress packets */ } basicvirtual; }; diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c index 7c07575..9d8a20b 100644 --- a/drivers/net/dnet.c +++ b/drivers/net/dnet.c @@ -27,7 +27,7 @@ #undef DEBUG /* function for reading internal MAC register */ -u16 dnet_readw_mac(struct dnet *bp, u16 reg) +static u16 dnet_readw_mac(struct dnet *bp, u16 reg) { u16 data_read; @@ -46,7 +46,7 @@ u16 dnet_readw_mac(struct dnet *bp, u16 reg) } /* function for writing internal MAC register */ -void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val) +static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val) { /* load data to write */ dnet_writel(bp, val, MACREG_DATA); @@ -63,11 +63,11 @@ static void __dnet_set_hwaddr(struct dnet *bp) { u16 tmp; - tmp = cpu_to_be16(*((u16 *) bp->dev->dev_addr)); + tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); - tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 2))); + tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2)); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); - tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 4))); + tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4)); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); } @@ -89,11 +89,11 @@ static void __devinit dnet_get_hwaddr(struct dnet *bp) * Mac_addr[15:0]). */ tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG); - *((u16 *) addr) = be16_to_cpu(tmp); + *((__be16 *)addr) = cpu_to_be16(tmp); tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG); - *((u16 *) (addr + 2)) = be16_to_cpu(tmp); + *((__be16 *)(addr + 2)) = cpu_to_be16(tmp); tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG); - *((u16 *) (addr + 4)) = be16_to_cpu(tmp); + *((__be16 *)(addr + 4)) = cpu_to_be16(tmp); if (is_valid_ether_addr(addr)) memcpy(bp->dev->dev_addr, addr, sizeof(addr)); @@ -361,7 +361,7 @@ err_out: } /* For Neptune board: LINK1000 as Link LED and TX as activity LED */ -int dnet_phy_marvell_fixup(struct phy_device *phydev) +static int dnet_phy_marvell_fixup(struct phy_device *phydev) { return phy_write(phydev, 0x18, 0x4148); } diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index b7f15b3..8984d16 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -1717,13 +1717,6 @@ static void e1000_diag_test(struct net_device *netdev, e_info("offline testing starting\n"); - /* - * Link test performed before hardware reset so autoneg doesn't - * interfere with test result - */ - if (e1000_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - if (if_running) /* indicate we're in test mode */ dev_close(netdev); @@ -1747,15 +1740,19 @@ static void e1000_diag_test(struct net_device *netdev, if (e1000_loopback_test(adapter, &data[3])) eth_test->flags |= ETH_TEST_FL_FAILED; + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + /* restore speed, duplex, 
autoneg settings */ adapter->hw.phy.autoneg_advertised = autoneg_advertised; adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; adapter->hw.mac.autoneg = autoneg; - - /* force this routine to wait until autoneg complete/timeout */ - adapter->hw.phy.autoneg_wait_to_complete = 1; e1000e_reset(adapter); - adapter->hw.phy.autoneg_wait_to_complete = 0; clear_bit(__E1000_TESTING, &adapter->state); if (if_running) diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index 44e0ff1..edab9c4 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -159,6 +159,7 @@ struct igb_tx_queue_stats { u64 packets; u64 bytes; u64 restart_queue; + u64 restart_queue2; }; struct igb_rx_queue_stats { @@ -210,11 +211,14 @@ struct igb_ring { /* TX */ struct { struct igb_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; bool detect_tx_hung; }; /* RX */ struct { struct igb_rx_queue_stats rx_stats; + struct u64_stats_sync rx_syncp; u32 rx_buffer_len; }; }; @@ -288,6 +292,9 @@ struct igb_adapter { struct timecompare compare; struct hwtstamp_config hwtstamp_config; + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + /* structs defined in e1000_hw.h */ struct e1000_hw hw; struct e1000_hw_stats stats; @@ -357,7 +364,7 @@ extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *); extern void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_buffer *); extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int); -extern void igb_update_stats(struct igb_adapter *); +extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); extern bool igb_has_link(struct igb_adapter *adapter); extern void igb_set_ethtool_ops(struct net_device *); extern void igb_power_up_link(struct igb_adapter *); diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index 26bf6a1..a70e16b 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -90,8 +90,8 @@ static const struct igb_stats igb_gstrings_stats[] = { #define IGB_NETDEV_STAT(_net_stat) { \ .stat_string = __stringify(_net_stat), \ - .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ - .stat_offset = offsetof(struct net_device_stats, _net_stat) \ + .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ } static const struct igb_stats igb_gstrings_net_stats[] = { IGB_NETDEV_STAT(rx_errors), @@ -111,8 +111,9 @@ static const struct igb_stats igb_gstrings_net_stats[] = { (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) #define IGB_RX_QUEUE_STATS_LEN \ (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) -#define IGB_TX_QUEUE_STATS_LEN \ - (sizeof(struct igb_tx_queue_stats) / sizeof(u64)) + +#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ + #define IGB_QUEUE_STATS_LEN \ ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ IGB_RX_QUEUE_STATS_LEN) + \ @@ -2070,12 +2071,14 @@ static void igb_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct igb_adapter *adapter = netdev_priv(netdev); - struct net_device_stats *net_stats = &netdev->stats; - u64 *queue_stat; - int i, j, k; + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igb_ring *ring; + int i, j; char *p; - igb_update_stats(adapter); + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, net_stats); for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { p = 
(char *)adapter + igb_gstrings_stats[i].stat_offset; @@ -2088,15 +2091,36 @@ static void igb_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } for (j = 0; j < adapter->num_tx_queues; j++) { - queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats; - for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++) - data[i] = queue_stat[k]; + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_bh(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i+1] = ring->tx_stats.bytes; + data[i+2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin_bh(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start)); + data[i+2] += restart2; + + i += IGB_TX_QUEUE_STATS_LEN; } for (j = 0; j < adapter->num_rx_queues; j++) { - queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats; - for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++) - data[i] = queue_stat[k]; + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_bh(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i+1] = ring->rx_stats.bytes; + data[i+2] = ring->rx_stats.drops; + data[i+3] = ring->rx_stats.csum_err; + data[i+4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); + i += IGB_RX_QUEUE_STATS_LEN; } + spin_unlock(&adapter->stats64_lock); } static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 5b04eff..b8dccc0 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -96,7 +96,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *); static void igb_free_all_tx_resources(struct igb_adapter *); static void igb_free_all_rx_resources(struct igb_adapter *); static void igb_setup_mrqc(struct igb_adapter *); -void igb_update_stats(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); static void __devexit igb_remove(struct pci_dev *pdev); static int igb_sw_init(struct igb_adapter *); @@ -113,7 +112,8 @@ static void igb_update_phy_info(unsigned long); static void igb_watchdog(unsigned long); static void igb_watchdog_task(struct work_struct *); static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *); -static struct net_device_stats *igb_get_stats(struct net_device *); +static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); static int igb_change_mtu(struct net_device *, int); static int igb_set_mac(struct net_device *, void *); static void igb_set_uta(struct igb_adapter *adapter); @@ -1536,7 +1536,9 @@ void igb_down(struct igb_adapter *adapter) netif_carrier_off(netdev); /* record the stats before reset*/ - igb_update_stats(adapter); + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, &adapter->stats64); + spin_unlock(&adapter->stats64_lock); adapter->link_speed = 0; adapter->link_duplex = 0; @@ -1689,7 +1691,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, .ndo_start_xmit = igb_xmit_frame_adv, - .ndo_get_stats = igb_get_stats, + .ndo_get_stats64 = igb_get_stats64, .ndo_set_rx_mode = igb_set_rx_mode, .ndo_set_multicast_list = igb_set_rx_mode, .ndo_set_mac_address = igb_set_mac, @@ -2276,6 +2278,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) adapter->max_frame_size = 
netdev->mtu + ETH_HLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + spin_lock_init(&adapter->stats64_lock); #ifdef CONFIG_PCI_IOV if (hw->mac.type == e1000_82576) adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs; @@ -3483,7 +3486,9 @@ static void igb_watchdog_task(struct work_struct *work) } } - igb_update_stats(adapter); + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, &adapter->stats64); + spin_unlock(&adapter->stats64_lock); for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *tx_ring = adapter->tx_ring[i]; @@ -3550,6 +3555,8 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector) int new_val = q_vector->itr_val; int avg_wire_size = 0; struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *ring; + unsigned int packets; /* For non-gigabit speeds, just fix the interrupt rate at 4000 * ints/sec - ITR timer value of 120 ticks. @@ -3559,16 +3566,21 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector) goto set_itr_val; } - if (q_vector->rx_ring && q_vector->rx_ring->total_packets) { - struct igb_ring *ring = q_vector->rx_ring; - avg_wire_size = ring->total_bytes / ring->total_packets; + ring = q_vector->rx_ring; + if (ring) { + packets = ACCESS_ONCE(ring->total_packets); + + if (packets) + avg_wire_size = ring->total_bytes / packets; } - if (q_vector->tx_ring && q_vector->tx_ring->total_packets) { - struct igb_ring *ring = q_vector->tx_ring; - avg_wire_size = max_t(u32, avg_wire_size, - (ring->total_bytes / - ring->total_packets)); + ring = q_vector->tx_ring; + if (ring) { + packets = ACCESS_ONCE(ring->total_packets); + + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + ring->total_bytes / packets); } /* if avg_wire_size isn't set no work was done */ @@ -4077,7 +4089,11 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) /* A reprieve! */ netif_wake_subqueue(netdev, tx_ring->queue_index); - tx_ring->tx_stats.restart_queue++; + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + return 0; } @@ -4214,16 +4230,22 @@ static void igb_reset_task(struct work_struct *work) } /** - * igb_get_stats - Get System Network Statistics + * igb_get_stats64 - Get System Network Statistics * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer * - * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. 
**/ -static struct net_device_stats *igb_get_stats(struct net_device *netdev) +static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { - /* only return the current stats */ - return &netdev->stats; + struct igb_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, &adapter->stats64); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); + + return stats; } /** @@ -4305,15 +4327,17 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) * @adapter: board private structure **/ -void igb_update_stats(struct igb_adapter *adapter) +void igb_update_stats(struct igb_adapter *adapter, + struct rtnl_link_stats64 *net_stats) { - struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; u32 reg, mpc; u16 phy_tmp; int i; u64 bytes, packets; + unsigned int start; + u64 _bytes, _packets; #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF @@ -4331,10 +4355,17 @@ void igb_update_stats(struct igb_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; struct igb_ring *ring = adapter->rx_ring[i]; + ring->rx_stats.drops += rqdpc_tmp; net_stats->rx_fifo_errors += rqdpc_tmp; - bytes += ring->rx_stats.bytes; - packets += ring->rx_stats.packets; + + do { + start = u64_stats_fetch_begin_bh(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; } net_stats->rx_bytes = bytes; @@ -4344,8 +4375,13 @@ void igb_update_stats(struct igb_adapter *adapter) packets = 0; for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *ring = adapter->tx_ring[i]; - bytes += ring->tx_stats.bytes; - packets += ring->tx_stats.packets; + do { + start = u64_stats_fetch_begin_bh(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; } net_stats->tx_bytes = bytes; net_stats->tx_packets = packets; @@ -5397,7 +5433,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && !(test_bit(__IGB_DOWN, &adapter->state))) { netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); } } @@ -5437,8 +5476,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) } tx_ring->total_bytes += total_bytes; tx_ring->total_packets += total_packets; + u64_stats_update_begin(&tx_ring->tx_syncp); tx_ring->tx_stats.bytes += total_bytes; tx_ring->tx_stats.packets += total_packets; + u64_stats_update_end(&tx_ring->tx_syncp); return count < tx_ring->count; } @@ -5480,9 +5521,11 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring, * packets, (aka let the stack check the crc32c) */ if ((skb->len == 60) && - (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) + (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) { + u64_stats_update_begin(&ring->rx_syncp); ring->rx_stats.csum_err++; - + u64_stats_update_end(&ring->rx_syncp); + } /* let the stack verify checksum errors */ return; } @@ -5669,8 +5712,10 @@ next_desc: rx_ring->total_packets += total_packets; rx_ring->total_bytes += total_bytes; + 
u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.packets += total_packets; rx_ring->rx_stats.bytes += total_bytes; + u64_stats_update_end(&rx_ring->rx_syncp); return cleaned; } @@ -5698,8 +5743,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count) if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) { if (!buffer_info->page) { buffer_info->page = netdev_alloc_page(netdev); - if (!buffer_info->page) { + if (unlikely(!buffer_info->page)) { + u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.alloc_failed++; + u64_stats_update_end(&rx_ring->rx_syncp); goto no_buffers; } buffer_info->page_offset = 0; @@ -5714,7 +5761,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count) if (dma_mapping_error(rx_ring->dev, buffer_info->page_dma)) { buffer_info->page_dma = 0; + u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.alloc_failed++; + u64_stats_update_end(&rx_ring->rx_syncp); goto no_buffers; } } @@ -5722,8 +5771,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count) skb = buffer_info->skb; if (!skb) { skb = netdev_alloc_skb_ip_align(netdev, bufsz); - if (!skb) { + if (unlikely(!skb)) { + u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.alloc_failed++; + u64_stats_update_end(&rx_ring->rx_syncp); goto no_buffers; } @@ -5737,7 +5788,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count) if (dma_mapping_error(rx_ring->dev, buffer_info->dma)) { buffer_info->dma = 0; + u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.alloc_failed++; + u64_stats_update_end(&rx_ring->rx_syncp); goto no_buffers; } } diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 2655013..6693323 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -41,14 +41,12 @@ #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> -#include <linux/pm_qos_params.h> #include "igbvf.h" #define DRV_VERSION "1.0.0-k0" char igbvf_driver_name[] = "igbvf"; const char igbvf_driver_version[] = DRV_VERSION; -static struct pm_qos_request_list igbvf_driver_pm_qos_req; static const char igbvf_driver_string[] = "Intel(R) Virtual Function Network Driver"; static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; @@ -2904,8 +2902,6 @@ static int __init igbvf_init_module(void) printk(KERN_INFO "%s\n", igbvf_copyright); ret = pci_register_driver(&igbvf_driver); - pm_qos_add_request(&igbvf_driver_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); return ret; } @@ -2920,7 +2916,6 @@ module_init(igbvf_init_module); static void __exit igbvf_exit_module(void) { pci_unregister_driver(&igbvf_driver); - pm_qos_remove_request(&igbvf_driver_pm_qos_req); } module_exit(igbvf_exit_module); diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index ca142c4..94255f0 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -678,7 +678,14 @@ static int netconsole_netdev_event(struct notifier_block *this, strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ); break; case NETDEV_UNREGISTER: - netpoll_cleanup(&nt->np); + /* + * rtnl_lock already held + */ + if (nt->np.dev) { + __netpoll_cleanup(&nt->np); + dev_put(nt->np.dev); + nt->np.dev = NULL; + } /* Fall through */ case NETDEV_GOING_DOWN: case NETDEV_BONDING_DESLAVE: diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 6dca357..92f89af 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h 
@@ -175,7 +175,10 @@ #define MAX_NUM_CARDS 4 #define MAX_BUFFERS_PER_CMD 32 -#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4) +#define MAX_TSO_HEADER_DESC 2 +#define MGMT_CMD_DESC_RESV 4 +#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ + + MGMT_CMD_DESC_RESV) #define NX_MAX_TX_TIMEOUTS 2 /* diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 29d7b93d0..4b4ac71 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -598,8 +598,14 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, if (nr_desc >= netxen_tx_avail(tx_ring)) { netif_tx_stop_queue(tx_ring->txq); - __netif_tx_unlock_bh(tx_ring->txq); - return -EBUSY; + smp_mb(); + if (netxen_tx_avail(tx_ring) > nr_desc) { + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_tx_wake_queue(tx_ring->txq); + } else { + __netif_tx_unlock_bh(tx_ring->txq); + return -EBUSY; + } } do { @@ -1816,14 +1822,14 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter) if (netxen_rom_fast_read(adapter, offset, &board_type)) return -EIO; - adapter->ahw.board_type = board_type; - if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) { u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I); if ((gpio & 0x8000) == 0) board_type = NETXEN_BRDTYPE_P3_10G_TP; } + adapter->ahw.board_type = board_type; + switch (board_type) { case NETXEN_BRDTYPE_P2_SB35_4G: adapter->ahw.port_type = NETXEN_NIC_GBE; diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index a2d805a..95fe552 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -1763,14 +1763,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) smp_mb(); - if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { - __netif_tx_lock(tx_ring->txq, smp_processor_id()); - if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) { + if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) netif_wake_queue(netdev); - adapter->tx_timeo_cnt = 0; - } - __netif_tx_unlock(tx_ring->txq); - } + adapter->tx_timeo_cnt = 0; } /* * If everything is freed up to consumer then check if the ring is full diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 2c6ceeb..6f11169 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -125,11 +125,6 @@ netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, struct nx_host_tx_ring *tx_ring) { NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer); - - if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) { - netif_stop_queue(adapter->netdev); - smp_mb(); - } } static uint32_t crb_cmd_consumer[4] = { @@ -1209,7 +1204,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter, adapter->max_mc_count = 16; netdev->netdev_ops = &netxen_netdev_ops; - netdev->watchdog_timeo = 2*HZ; + netdev->watchdog_timeo = 5*HZ; netxen_nic_change_mtu(netdev, netdev->mtu); @@ -1254,6 +1249,28 @@ netxen_setup_netdev(struct netxen_adapter *adapter, return 0; } +#ifdef CONFIG_PCIEAER +static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *root = pdev->bus->self; + u32 aer_pos; + + if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && + adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) + return; + + if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT) + return; + + aer_pos = pci_find_ext_capability(root, 
PCI_EXT_CAP_ID_ERR); + if (!aer_pos) + return; + + pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff); +} +#endif + static int __devinit netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1322,6 +1339,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_iounmap; } +#ifdef CONFIG_PCIEAER + netxen_mask_aer_correctable(adapter); +#endif + /* Mezz cards have PCI function 0,2,3 enabled */ switch (adapter->ahw.board_type) { case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: @@ -1825,9 +1846,13 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* 4 fragments per cmd des */ no_of_desc = (frag_count + 3) >> 2; - if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) { + if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) { netif_stop_queue(netdev); - return NETDEV_TX_BUSY; + smp_mb(); + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_start_queue(netdev); + else + return NETDEV_TX_BUSY; } producer = tx_ring->producer; diff --git a/drivers/net/niu.c b/drivers/net/niu.c index c0437fd..781e368 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -7090,24 +7090,20 @@ static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, struct ethtool_rx_flow_spec *fsp) { + u32 tmp; + u16 prt; - fsp->h_u.tcp_ip4_spec.ip4src = (tp->key[3] & TCAM_V4KEY3_SADDR) >> - TCAM_V4KEY3_SADDR_SHIFT; - fsp->h_u.tcp_ip4_spec.ip4dst = (tp->key[3] & TCAM_V4KEY3_DADDR) >> - TCAM_V4KEY3_DADDR_SHIFT; - fsp->m_u.tcp_ip4_spec.ip4src = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> - TCAM_V4KEY3_SADDR_SHIFT; - fsp->m_u.tcp_ip4_spec.ip4dst = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> - TCAM_V4KEY3_DADDR_SHIFT; - - fsp->h_u.tcp_ip4_spec.ip4src = - cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4src); - fsp->m_u.tcp_ip4_spec.ip4src = - cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4src); - fsp->h_u.tcp_ip4_spec.ip4dst = - cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4dst); - fsp->m_u.tcp_ip4_spec.ip4dst = - cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4dst); + tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; + fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); + + tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; + fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); + + tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; + fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); + + tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; + fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> TCAM_V4KEY2_TOS_SHIFT; @@ -7118,54 +7114,40 @@ static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: - fsp->h_u.tcp_ip4_spec.psrc = - ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> - TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; - fsp->h_u.tcp_ip4_spec.pdst = - ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> - TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; - fsp->m_u.tcp_ip4_spec.psrc = - ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> - TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; - fsp->m_u.tcp_ip4_spec.pdst = - ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> - TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; + prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; + fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); + + prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; + fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); - 
fsp->h_u.tcp_ip4_spec.psrc = - cpu_to_be16(fsp->h_u.tcp_ip4_spec.psrc); - fsp->h_u.tcp_ip4_spec.pdst = - cpu_to_be16(fsp->h_u.tcp_ip4_spec.pdst); - fsp->m_u.tcp_ip4_spec.psrc = - cpu_to_be16(fsp->m_u.tcp_ip4_spec.psrc); - fsp->m_u.tcp_ip4_spec.pdst = - cpu_to_be16(fsp->m_u.tcp_ip4_spec.pdst); + prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; + fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); + + prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; + fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); break; case AH_V4_FLOW: case ESP_V4_FLOW: - fsp->h_u.ah_ip4_spec.spi = - (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> - TCAM_V4KEY2_PORT_SPI_SHIFT; - fsp->m_u.ah_ip4_spec.spi = - (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> TCAM_V4KEY2_PORT_SPI_SHIFT; + fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp); - fsp->h_u.ah_ip4_spec.spi = - cpu_to_be32(fsp->h_u.ah_ip4_spec.spi); - fsp->m_u.ah_ip4_spec.spi = - cpu_to_be32(fsp->m_u.ah_ip4_spec.spi); + tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT; + fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp); break; case IP_USER_FLOW: - fsp->h_u.usr_ip4_spec.l4_4_bytes = - (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> - TCAM_V4KEY2_PORT_SPI_SHIFT; - fsp->m_u.usr_ip4_spec.l4_4_bytes = - (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> TCAM_V4KEY2_PORT_SPI_SHIFT; + fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); - fsp->h_u.usr_ip4_spec.l4_4_bytes = - cpu_to_be32(fsp->h_u.usr_ip4_spec.l4_4_bytes); - fsp->m_u.usr_ip4_spec.l4_4_bytes = - cpu_to_be32(fsp->m_u.usr_ip4_spec.l4_4_bytes); + tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT; + fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); fsp->h_u.usr_ip4_spec.proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index 3bbd0aa..84134c7 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c @@ -772,7 +772,7 @@ static int ns83820_setup_rx(struct net_device *ndev) phy_intr(ndev); /* Okay, let it rip */ - spin_lock_irq(&dev->misc_lock); + spin_lock(&dev->misc_lock); dev->IMR_cache |= ISR_PHY; dev->IMR_cache |= ISR_RXRCMP; //dev->IMR_cache |= ISR_RXERR; diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c index e06c6ae..c8cc32c 100644 --- a/drivers/net/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c @@ -113,9 +113,10 @@ static int pch_gbe_set_settings(struct net_device *netdev, pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET); - if (ecmd->speed == -1) + if (ecmd->speed == USHRT_MAX) { ecmd->speed = SPEED_1000; ecmd->duplex = DUPLEX_FULL; + } ret = mii_ethtool_sset(&adapter->mii, ecmd); if (ret) { pr_err("Error: mii_ethtool_sset\n"); diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index e44644f..cf4b49d 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -2394,7 +2394,7 @@ err_disable_device: return ret; } -static const struct pci_device_id pch_gbe_pcidev_id[] = { +static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { {.vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_IOH1_GBE, .subvendor = PCI_ANY_ID, diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index 2568aa6..25e93a5 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c @@ -78,7 +78,25 @@ 
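Both the qlcnic hunk below and the sundance hunk later in this diff extend ethtool statistics the same way: a fixed table of ETH_GSTRING_LEN-sized name strings, a get_sset_count() that reports its length, a get_strings() that copies the table out, and a get_ethtool_stats() that fills a u64 array in matching order. A minimal stand-alone sketch of that pairing follows; the demo_* names and the plain counter array standing in for hardware registers are invented for the example and are not driver code.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_GSTRING_LEN 32	/* same name length ethtool uses */

static const char demo_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"tx_errors",
};
#define DEMO_STATS_LEN (sizeof(demo_strings) / sizeof(demo_strings[0]))

/* stand-in for hardware counters */
static uint64_t demo_counters[DEMO_STATS_LEN] = { 100, 90, 2 };

/* get_sset_count(): how many statistics will be reported */
static int demo_get_sset_count(void) { return DEMO_STATS_LEN; }

/* get_strings(): copy the flat name table into the caller's buffer */
static void demo_get_strings(char *data)
{
	memcpy(data, demo_strings, sizeof(demo_strings));
}

/* get_ethtool_stats(): fill the u64 array in the same order as the names */
static void demo_get_ethtool_stats(uint64_t *data)
{
	memcpy(data, demo_counters, sizeof(demo_counters));
}

int main(void)
{
	char names[DEMO_STATS_LEN][ETH_GSTRING_LEN];
	uint64_t vals[DEMO_STATS_LEN];
	int i;

	demo_get_strings(&names[0][0]);
	demo_get_ethtool_stats(vals);
	for (i = 0; i < demo_get_sset_count(); i++)
		printf("%-16s %llu\n", names[i], (unsigned long long)vals[i]);
	return 0;
}

The only contract is that the string table and the value array stay index-aligned, which is why both drivers append their new per-device statistics after the existing generic ones.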
static const struct qlcnic_stats qlcnic_gstrings_stats[] = { }; +static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx unicast frames", + "rx multicast frames", + "rx broadcast frames", + "rx dropped frames", + "rx errors", + "rx local frames", + "rx numbytes", + "tx unicast frames", + "tx multicast frames", + "tx broadcast frames", + "tx dropped frames", + "tx errors", + "tx local frames", + "tx numbytes", +}; + #define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) +#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats) static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { "Register_Test_on_offline", @@ -625,10 +643,13 @@ static int qlcnic_reg_test(struct net_device *dev) static int qlcnic_get_sset_count(struct net_device *dev, int sset) { + struct qlcnic_adapter *adapter = netdev_priv(dev); switch (sset) { case ETH_SS_TEST: return QLCNIC_TEST_LEN; case ETH_SS_STATS: + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN; return QLCNIC_STATS_LEN; default: return -EOPNOTSUPP; @@ -795,7 +816,8 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, static void qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) { - int index; + struct qlcnic_adapter *adapter = netdev_priv(dev); + int index, i; switch (stringset) { case ETH_SS_TEST: @@ -808,16 +830,43 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) qlcnic_gstrings_stats[index].stat_string, ETH_GSTRING_LEN); } - break; + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) { + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_device_gstrings_stats[i], + ETH_GSTRING_LEN); + } } } +#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \ + (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 0 : VAL1) + +static void +qlcnic_fill_device_stats(int *index, u64 *data, + struct __qlcnic_esw_statistics *stats) +{ + int ind = *index; + + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames); + data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes); + + *index = ind; +} + static void qlcnic_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 * data) { struct qlcnic_adapter *adapter = netdev_priv(dev); - int index; + struct qlcnic_esw_statistics port_stats; + int index, ret; for (index = 0; index < QLCNIC_STATS_LEN; index++) { char *p = @@ -827,6 +876,24 @@ qlcnic_get_ethtool_stats(struct net_device *dev, (qlcnic_gstrings_stats[index].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p:(*(u32 *)p); } + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + + memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics)); + ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func, + QLCNIC_QUERY_RX_COUNTER, &port_stats.rx); + if (ret) + return; + + qlcnic_fill_device_stats(&index, data, &port_stats.rx); + + ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func, + QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); + if (ret) + return; + + qlcnic_fill_device_stats(&index, data, &port_stats.tx); } static int qlcnic_set_tx_csum(struct net_device *dev, u32 data) diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index bc669a4..1760533 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -187,12 +187,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); -/* - * we set our copybreak very high so that we don't have - * to allocate 16k frames all the time (see note in - * rtl8169_open() - */ -static int rx_copybreak = 16383; +static int rx_buf_sz = 16383; static int use_dac; static struct { u32 msg_enable; @@ -484,10 +479,8 @@ struct rtl8169_private { struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ dma_addr_t TxPhyAddr; dma_addr_t RxPhyAddr; - struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */ + void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ - unsigned align; - unsigned rx_buf_sz; struct timer_list timer; u16 cp_cmd; u16 intr_event; @@ -515,8 +508,6 @@ struct rtl8169_private { MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); -module_param(rx_copybreak, int, 0); -MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); module_param(use_dac, int, 0); MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); module_param_named(debug, debug.msg_enable, int, 0); @@ -3196,7 +3187,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features |= NETIF_F_GRO; tp->intr_mask = 0xffff; - tp->align = cfg->align; tp->hw_start = cfg->hw_start; tp->intr_event = cfg->intr_event; tp->napi_event = cfg->napi_event; @@ -3266,18 +3256,6 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } -static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, - unsigned int mtu) -{ - unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; - - if (max_frame != 16383) - printk(KERN_WARNING PFX "WARNING! Changing of MTU on this " - "NIC may lead to frame reception errors!\n"); - - tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; -} - static int rtl8169_open(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -3287,18 +3265,6 @@ static int rtl8169_open(struct net_device *dev) pm_runtime_get_sync(&pdev->dev); /* - * Note that we use a magic value here, its wierd I know - * its done because, some subset of rtl8169 hardware suffers from - * a problem in which frames received that are longer than - * the size set in RxMaxSize register return garbage sizes - * when received. To avoid this we need to turn off filtering, - * which is done by setting a value of 16383 in the RxMaxSize register - * and allocating 16k frames to handle the largest possible rx value - * thats what the magic math below does. 
- */ - rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN); - - /* * Rx and Tx desscriptors needs 256 bytes alignment. * dma_alloc_coherent provides more. */ @@ -3474,7 +3440,7 @@ static void rtl_hw_start_8169(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); + rtl_set_rx_max_size(ioaddr, rx_buf_sz); if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || (tp->mac_version == RTL_GIGA_MAC_VER_02) || @@ -3735,7 +3701,7 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); + rtl_set_rx_max_size(ioaddr, rx_buf_sz); tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; @@ -3915,7 +3881,7 @@ static void rtl_hw_start_8101(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); + rtl_set_rx_max_size(ioaddr, rx_buf_sz); tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; @@ -3956,8 +3922,6 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) rtl8169_down(dev); - rtl8169_set_rxbufsize(tp, dev->mtu); - ret = rtl8169_init_ring(dev); if (ret < 0) goto out; @@ -3978,15 +3942,15 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); } -static void rtl8169_free_rx_skb(struct rtl8169_private *tp, - struct sk_buff **sk_buff, struct RxDesc *desc) +static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, + void **data_buff, struct RxDesc *desc) { struct pci_dev *pdev = tp->pci_dev; - dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz, + dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), rx_buf_sz, PCI_DMA_FROMDEVICE); - dev_kfree_skb(*sk_buff); - *sk_buff = NULL; + kfree(*data_buff); + *data_buff = NULL; rtl8169_make_unusable_by_asic(desc); } @@ -4005,33 +3969,34 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, rtl8169_mark_to_asic(desc, rx_buf_sz); } -static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, +static inline void *rtl8169_align(void *data) +{ + return (void *)ALIGN((long)data, 16); +} + +static struct sk_buff *rtl8169_alloc_rx_data(struct pci_dev *pdev, struct net_device *dev, - struct RxDesc *desc, int rx_buf_sz, - unsigned int align, gfp_t gfp) + struct RxDesc *desc) { - struct sk_buff *skb; + void *data; dma_addr_t mapping; - unsigned int pad; + int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; - pad = align ? align : NET_IP_ALIGN; + data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + if (!data) + return NULL; - skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp); - if (!skb) - goto err_out; - - skb_reserve(skb, align ? 
((pad - 1) & (unsigned long)skb->data) : pad); - - mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz, + if (rtl8169_align(data) != data) { + kfree(data); + data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node); + if (!data) + return NULL; + } + mapping = dma_map_single(&pdev->dev, rtl8169_align(data), rx_buf_sz, PCI_DMA_FROMDEVICE); rtl8169_map_to_asic(desc, mapping, rx_buf_sz); -out: - return skb; - -err_out: - rtl8169_make_unusable_by_asic(desc); - goto out; + return data; } static void rtl8169_rx_clear(struct rtl8169_private *tp) @@ -4039,8 +4004,8 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp) unsigned int i; for (i = 0; i < NUM_RX_DESC; i++) { - if (tp->Rx_skbuff[i]) { - rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i, + if (tp->Rx_databuff[i]) { + rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i, tp->RxDescArray + i); } } @@ -4052,21 +4017,21 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, u32 cur; for (cur = start; end - cur != 0; cur++) { - struct sk_buff *skb; + void *data; unsigned int i = cur % NUM_RX_DESC; WARN_ON((s32)(end - cur) < 0); - if (tp->Rx_skbuff[i]) + if (tp->Rx_databuff[i]) continue; - skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev, - tp->RxDescArray + i, - tp->rx_buf_sz, tp->align, gfp); - if (!skb) + data = rtl8169_alloc_rx_data(tp->pci_dev, dev, + tp->RxDescArray + i); + if (!data) { + rtl8169_make_unusable_by_asic(tp->RxDescArray + i); break; - - tp->Rx_skbuff[i] = skb; + } + tp->Rx_databuff[i] = data; } return cur - start; } @@ -4088,7 +4053,7 @@ static int rtl8169_init_ring(struct net_device *dev) rtl8169_init_ring_indexes(tp); memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); - memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); + memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *)); if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC) goto err_out; @@ -4473,27 +4438,23 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) skb_checksum_none_assert(skb); } -static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, - struct rtl8169_private *tp, int pkt_size, - dma_addr_t addr) +static struct sk_buff *rtl8169_try_rx_copy(void *data, + struct rtl8169_private *tp, + int pkt_size, + dma_addr_t addr) { struct sk_buff *skb; - bool done = false; - - if (pkt_size >= rx_copybreak) - goto out; - - skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); - if (!skb) - goto out; + data = rtl8169_align(data); dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size, PCI_DMA_FROMDEVICE); - skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size); - *sk_buff = skb; - done = true; -out: - return done; + prefetch(data); + skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); + if (skb) + memcpy(skb->data, data, pkt_size); + dma_sync_single_for_device(&tp->pci_dev->dev, addr, pkt_size, + PCI_DMA_FROMDEVICE); + return skb; } /* @@ -4508,7 +4469,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, void __iomem *ioaddr, u32 budget) { unsigned int cur_rx, rx_left; - unsigned int delta, count; + unsigned int count; int polling = (budget != ~(u32)0) ? 
1 : 0; cur_rx = tp->cur_rx; @@ -4537,12 +4498,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev, rtl8169_schedule_work(dev, rtl8169_reset_task); dev->stats.rx_fifo_errors++; } - rtl8169_mark_to_asic(desc, tp->rx_buf_sz); + rtl8169_mark_to_asic(desc, rx_buf_sz); } else { - struct sk_buff *skb = tp->Rx_skbuff[entry]; + struct sk_buff *skb; dma_addr_t addr = le64_to_cpu(desc->addr); int pkt_size = (status & 0x00001FFF) - 4; - struct pci_dev *pdev = tp->pci_dev; /* * The driver does not support incoming fragmented @@ -4552,18 +4512,16 @@ static int rtl8169_rx_interrupt(struct net_device *dev, if (unlikely(rtl8169_fragmented_frame(status))) { dev->stats.rx_dropped++; dev->stats.rx_length_errors++; - rtl8169_mark_to_asic(desc, tp->rx_buf_sz); + rtl8169_mark_to_asic(desc, rx_buf_sz); continue; } - if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) { - dma_sync_single_for_device(&pdev->dev, addr, - pkt_size, PCI_DMA_FROMDEVICE); - rtl8169_mark_to_asic(desc, tp->rx_buf_sz); - } else { - dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz, - PCI_DMA_FROMDEVICE); - tp->Rx_skbuff[entry] = NULL; + skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], + tp, pkt_size, addr); + rtl8169_mark_to_asic(desc, rx_buf_sz); + if (!skb) { + dev->stats.rx_dropped++; + continue; } rtl8169_rx_csum(skb, status); @@ -4592,20 +4550,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, count = cur_rx - tp->cur_rx; tp->cur_rx = cur_rx; - delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC); - if (!delta && count) - netif_info(tp, intr, dev, "no Rx buffer allocated\n"); - tp->dirty_rx += delta; - - /* - * FIXME: until there is periodic timer to try and refill the ring, - * a temporary shortage may definitely kill the Rx process. - * - disable the asic to try and avoid an overflow and kick it again - * after refill ? - * - how do others driver handle this condition (Uh oh...). 
- */ - if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) - netif_emerg(tp, intr, dev, "Rx buffers exhausted\n"); + tp->dirty_rx += count; return count; } diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h index dec7ce4..375ea19 100644 --- a/drivers/net/stmmac/common.h +++ b/drivers/net/stmmac/common.h @@ -235,9 +235,9 @@ struct mii_regs { }; struct mac_device_info { - struct stmmac_ops *mac; - struct stmmac_desc_ops *desc; - struct stmmac_dma_ops *dma; + const struct stmmac_ops *mac; + const struct stmmac_desc_ops *desc; + const struct stmmac_dma_ops *dma; struct mii_regs mii; /* MII register Addresses */ struct mac_link link; }; diff --git a/drivers/net/stmmac/dwmac100.h b/drivers/net/stmmac/dwmac100.h index 97956cbf1..7c6d857 100644 --- a/drivers/net/stmmac/dwmac100.h +++ b/drivers/net/stmmac/dwmac100.h @@ -118,4 +118,4 @@ enum ttc_control { #define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */ #define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ -extern struct stmmac_dma_ops dwmac100_dma_ops; +extern const struct stmmac_dma_ops dwmac100_dma_ops; diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h index 81ee4fd..cfcef0e 100644 --- a/drivers/net/stmmac/dwmac1000.h +++ b/drivers/net/stmmac/dwmac1000.h @@ -205,4 +205,4 @@ enum rtc_control { #define GMAC_MMC_TX_INTR 0x108 #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 -extern struct stmmac_dma_ops dwmac1000_dma_ops; +extern const struct stmmac_dma_ops dwmac1000_dma_ops; diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c index 65667b6..6ae4c3f 100644 --- a/drivers/net/stmmac/dwmac1000_core.c +++ b/drivers/net/stmmac/dwmac1000_core.c @@ -212,7 +212,7 @@ static void dwmac1000_irq_status(void __iomem *ioaddr) } } -struct stmmac_ops dwmac1000_ops = { +static const struct stmmac_ops dwmac1000_ops = { .core_init = dwmac1000_core_init, .rx_coe = dwmac1000_rx_coe_supported, .dump_regs = dwmac1000_dump_regs, diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c index ce6163e3..2c47712 100644 --- a/drivers/net/stmmac/dwmac1000_dma.c +++ b/drivers/net/stmmac/dwmac1000_dma.c @@ -138,7 +138,7 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr) } } -struct stmmac_dma_ops dwmac1000_dma_ops = { +const struct stmmac_dma_ops dwmac1000_dma_ops = { .init = dwmac1000_dma_init, .dump_regs = dwmac1000_dump_dma_regs, .dma_mode = dwmac1000_dma_operation_mode, diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c index 94eeccf..c724fc3 100644 --- a/drivers/net/stmmac/dwmac100_core.c +++ b/drivers/net/stmmac/dwmac100_core.c @@ -168,7 +168,7 @@ static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode) return; } -struct stmmac_ops dwmac100_ops = { +static const struct stmmac_ops dwmac100_ops = { .core_init = dwmac100_core_init, .rx_coe = dwmac100_rx_coe_supported, .dump_regs = dwmac100_dump_mac_regs, diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c index 96aac93..e3e224b 100644 --- a/drivers/net/stmmac/dwmac100_dma.c +++ b/drivers/net/stmmac/dwmac100_dma.c @@ -126,7 +126,7 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, } } -struct stmmac_dma_ops dwmac100_dma_ops = { +const struct stmmac_dma_ops dwmac100_dma_ops = { .init = dwmac100_dma_init, .dump_regs = dwmac100_dump_dma_regs, .dma_mode = dwmac100_dma_operation_mode, diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c index 5d1471d..e5dfb6a 100644 
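The stmmac changes around this point all follow one pattern: the tables of function pointers (stmmac_ops, stmmac_desc_ops, stmmac_dma_ops) are declared const so they end up in read-only data, and the file-local ones become static. A small self-contained sketch of that const ops-table idiom is below; it uses invented demo_* names rather than the stmmac types and runs as ordinary user-space C.

#include <stdio.h>

struct demo_dma_ops {
	void (*init)(void *ioaddr);
	void (*dump_regs)(void *ioaddr);
};

static void demo_init(void *ioaddr)      { printf("init %p\n", ioaddr); }
static void demo_dump_regs(void *ioaddr) { printf("dump %p\n", ioaddr); }

/* Declaring the table const lets the compiler place it in read-only data,
 * so the function pointers cannot be overwritten at run time. */
static const struct demo_dma_ops demo_dma_ops = {
	.init      = demo_init,
	.dump_regs = demo_dump_regs,
};

int main(void)
{
	const struct demo_dma_ops *ops = &demo_dma_ops;

	ops->init(NULL);
	ops->dump_regs(NULL);
	return 0;
}

Because callers only ever read through the pointer, constifying the table costs nothing and removes a class of runtime function-pointer corruption.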
--- a/drivers/net/stmmac/enh_desc.c +++ b/drivers/net/stmmac/enh_desc.c @@ -318,7 +318,7 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p) return p->des01.erx.frame_length; } -struct stmmac_desc_ops enh_desc_ops = { +const struct stmmac_desc_ops enh_desc_ops = { .tx_status = enh_desc_get_tx_status, .rx_status = enh_desc_get_rx_status, .get_tx_len = enh_desc_get_tx_len, diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c index 0dce90c..cd0cc76 100644 --- a/drivers/net/stmmac/norm_desc.c +++ b/drivers/net/stmmac/norm_desc.c @@ -202,7 +202,7 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p) return p->des01.rx.frame_length; } -struct stmmac_desc_ops ndesc_ops = { +const struct stmmac_desc_ops ndesc_ops = { .tx_status = ndesc_get_tx_status, .rx_status = ndesc_get_rx_status, .get_tx_len = ndesc_get_tx_len, diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h index 92154ff..79bdc2e 100644 --- a/drivers/net/stmmac/stmmac.h +++ b/drivers/net/stmmac/stmmac.h @@ -121,5 +121,5 @@ static inline int stmmac_claim_resource(struct platform_device *pdev) extern int stmmac_mdio_unregister(struct net_device *ndev); extern int stmmac_mdio_register(struct net_device *ndev); extern void stmmac_set_ethtool_ops(struct net_device *netdev); -extern struct stmmac_desc_ops enh_desc_ops; -extern struct stmmac_desc_ops ndesc_ops; +extern const struct stmmac_desc_ops enh_desc_ops; +extern const struct stmmac_desc_ops ndesc_ops; diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c index 25a7e38..6d65482 100644 --- a/drivers/net/stmmac/stmmac_ethtool.c +++ b/drivers/net/stmmac/stmmac_ethtool.c @@ -89,8 +89,8 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { }; #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) -void stmmac_ethtool_getdrvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) +static void stmmac_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) { struct stmmac_priv *priv = netdev_priv(dev); @@ -104,7 +104,8 @@ void stmmac_ethtool_getdrvinfo(struct net_device *dev, info->n_stats = STMMAC_STATS_LEN; } -int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +static int stmmac_ethtool_getsettings(struct net_device *dev, + struct ethtool_cmd *cmd) { struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phy = priv->phydev; @@ -126,7 +127,8 @@ int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) return rc; } -int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +static int stmmac_ethtool_setsettings(struct net_device *dev, + struct ethtool_cmd *cmd) { struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phy = priv->phydev; @@ -139,32 +141,32 @@ int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) return rc; } -u32 stmmac_ethtool_getmsglevel(struct net_device *dev) +static u32 stmmac_ethtool_getmsglevel(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); return priv->msg_enable; } -void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level) +static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level) { struct stmmac_priv *priv = netdev_priv(dev); priv->msg_enable = level; } -int stmmac_check_if_running(struct net_device *dev) +static int stmmac_check_if_running(struct net_device *dev) { if (!netif_running(dev)) return -EBUSY; return 0; } -int stmmac_ethtool_get_regs_len(struct net_device *dev) +static int 
stmmac_ethtool_get_regs_len(struct net_device *dev) { return REG_SPACE_SIZE; } -void stmmac_ethtool_gregs(struct net_device *dev, +static void stmmac_ethtool_gregs(struct net_device *dev, struct ethtool_regs *regs, void *space) { int i; @@ -195,7 +197,7 @@ void stmmac_ethtool_gregs(struct net_device *dev, } } -int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data) +static int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data) { if (data) netdev->features |= NETIF_F_HW_CSUM; @@ -205,7 +207,7 @@ int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data) return 0; } -u32 stmmac_ethtool_get_rx_csum(struct net_device *dev) +static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); @@ -378,10 +380,8 @@ static struct ethtool_ops stmmac_ethtool_ops = { .get_wol = stmmac_get_wol, .set_wol = stmmac_set_wol, .get_sset_count = stmmac_get_sset_count, -#ifdef NETIF_F_TSO .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, -#endif }; void stmmac_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index 4283cc5..3ed2a67 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -363,6 +363,19 @@ struct netdev_private { dma_addr_t tx_ring_dma; dma_addr_t rx_ring_dma; struct timer_list timer; /* Media monitoring timer. */ + /* ethtool extra stats */ + struct { + u64 tx_multiple_collisions; + u64 tx_single_collisions; + u64 tx_late_collisions; + u64 tx_deferred; + u64 tx_deferred_excessive; + u64 tx_aborted; + u64 tx_bcasts; + u64 rx_bcasts; + u64 tx_mcasts; + u64 rx_mcasts; + } xstats; /* Frequently used values: keep some adjacent for cache effect. */ spinlock_t lock; int msg_enable; @@ -1486,21 +1499,34 @@ static struct net_device_stats *get_stats(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; - int i; unsigned long flags; + u8 late_coll, single_coll, mult_coll; spin_lock_irqsave(&np->statlock, flags); /* The chip only need report frame silently dropped. 
*/ dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed); dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK); dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK); - dev->stats.collisions += ioread8(ioaddr + StatsLateColl); - dev->stats.collisions += ioread8(ioaddr + StatsMultiColl); - dev->stats.collisions += ioread8(ioaddr + StatsOneColl); dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError); - ioread8(ioaddr + StatsTxDefer); - for (i = StatsTxDefer; i <= StatsMcastRx; i++) - ioread8(ioaddr + i); + + mult_coll = ioread8(ioaddr + StatsMultiColl); + np->xstats.tx_multiple_collisions += mult_coll; + single_coll = ioread8(ioaddr + StatsOneColl); + np->xstats.tx_single_collisions += single_coll; + late_coll = ioread8(ioaddr + StatsLateColl); + np->xstats.tx_late_collisions += late_coll; + dev->stats.collisions += mult_coll + + single_coll + + late_coll; + + np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer); + np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer); + np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort); + np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx); + np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx); + np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx); + np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx); + dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow); dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16; dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow); @@ -1566,6 +1592,21 @@ static int __set_mac_addr(struct net_device *dev) return 0; } +static const struct { + const char name[ETH_GSTRING_LEN]; +} sundance_stats[] = { + { "tx_multiple_collisions" }, + { "tx_single_collisions" }, + { "tx_late_collisions" }, + { "tx_deferred" }, + { "tx_deferred_excessive" }, + { "tx_aborted" }, + { "tx_bcasts" }, + { "rx_bcasts" }, + { "tx_mcasts" }, + { "rx_mcasts" }, +}; + static int check_if_running(struct net_device *dev) { if (!netif_running(dev)) @@ -1624,6 +1665,42 @@ static void set_msglevel(struct net_device *dev, u32 val) np->msg_enable = val; } +static void get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, sundance_stats, sizeof(sundance_stats)); +} + +static int get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(sundance_stats); + default: + return -EOPNOTSUPP; + } +} + +static void get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct netdev_private *np = netdev_priv(dev); + int i = 0; + + get_stats(dev); + data[i++] = np->xstats.tx_multiple_collisions; + data[i++] = np->xstats.tx_single_collisions; + data[i++] = np->xstats.tx_late_collisions; + data[i++] = np->xstats.tx_deferred; + data[i++] = np->xstats.tx_deferred_excessive; + data[i++] = np->xstats.tx_aborted; + data[i++] = np->xstats.tx_bcasts; + data[i++] = np->xstats.rx_bcasts; + data[i++] = np->xstats.tx_mcasts; + data[i++] = np->xstats.rx_mcasts; +} + static const struct ethtool_ops ethtool_ops = { .begin = check_if_running, .get_drvinfo = get_drvinfo, @@ -1633,6 +1710,9 @@ static const struct ethtool_ops ethtool_ops = { .get_link = get_link, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, + .get_strings = get_strings, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_ethtool_stats, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 943c283..5b4c510 100644 
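The tg3 hunk that follows adds EEE support by reaching Clause 45 MMD registers through the two Clause 22 registers 0x0d (MMD access control) and 0x0e (MMD address/data); that is what tg3_phy_cl45_read()/tg3_phy_cl45_write() implement. A sketch of that indirect access sequence is below; the mdio_write()/mdio_read() helpers and the regs[] backing array are stand-ins invented for the example, while the register numbers, the no-increment function code and the EEE advertisement values are taken from the hunk itself.

#include <stdint.h>
#include <stdio.h>

#define MII_MMD_CTRL		0x0d	/* MMD access control register */
#define MII_MMD_DATA		0x0e	/* MMD address/data register */
#define MII_MMD_CTRL_NOINCR	0x4000	/* function = data, no post increment */

static uint16_t regs[32];		/* fake PHY register file for the demo */

static void mdio_write(int reg, uint16_t val) { regs[reg] = val; }
static uint16_t mdio_read(int reg)            { return regs[reg]; }

/* Write one register at 'addr' inside MMD 'devad'. */
static void mmd_indirect_write(uint16_t devad, uint16_t addr, uint16_t val)
{
	mdio_write(MII_MMD_CTRL, devad);			/* select device, address mode */
	mdio_write(MII_MMD_DATA, addr);				/* latch the register address */
	mdio_write(MII_MMD_CTRL, MII_MMD_CTRL_NOINCR | devad);	/* switch to data mode */
	mdio_write(MII_MMD_DATA, val);				/* write the data word */
}

int main(void)
{
	/* e.g. MMD 7, register 0x3c: advertise 100TX (0x0002) and 1000T (0x0004) EEE */
	mmd_indirect_write(7, 0x3c, 0x0006);
	printf("last data word: 0x%04x\n", (unsigned)mdio_read(MII_MMD_DATA));
	return 0;
}

A read follows the same four-step sequence with the final access being a read of register 0x0e instead of a write.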
--- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -69,10 +69,10 @@ #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 114 +#define TG3_MIN_NUM 115 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "September 30, 2010" +#define DRV_MODULE_RELDATE "October 14, 2010" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -1162,6 +1162,52 @@ static void tg3_mdio_fini(struct tg3 *tp) } } +static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; +} + +static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; +} + /* tp->lock is held. */ static inline void tg3_generate_fw_event(struct tg3 *tp) { @@ -1538,6 +1584,17 @@ static void tg3_phy_fini(struct tg3 *tp) } } +static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; +} + static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) { int err; @@ -1701,6 +1758,42 @@ static void tg3_phy_apply_otp(struct tg3 *tp) tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); } +static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) +{ + u32 val; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + tp->setlpicnt = 0; + + if (tp->link_config.autoneg == AUTONEG_ENABLE && + current_link_up == 1 && + (tp->link_config.active_speed == SPEED_1000 || + (tp->link_config.active_speed == SPEED_100 && + tp->link_config.active_duplex == DUPLEX_FULL))) { + u32 eeectl; + + if (tp->link_config.active_speed == SPEED_1000) + eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; + else + eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; + + tw32(TG3_CPMU_EEE_CTRL, eeectl); + + tg3_phy_cl45_read(tp, 0x7, TG3_CL45_D7_EEERES_STAT, &val); + + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + tp->setlpicnt = 2; + } + + if (!tp->setlpicnt) { + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); + } +} + static int tg3_wait_macro_done(struct tg3 *tp) { int limit = 100; @@ -2875,6 +2968,44 @@ static void tg3_phy_copper_begin(struct tg3 *tp) tg3_writephy(tp, MII_TG3_CTRL, new_adv); } + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { + u32 val = 0; + + tw32(TG3_CPMU_EEE_MODE, + tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); + + /* Enable SM_DSP clock and tx 6dB coding. 
*/ + val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | + MII_TG3_AUXCTL_ACTL_SMDSP_ENA | + MII_TG3_AUXCTL_ACTL_TX_6DB; + tg3_writephy(tp, MII_TG3_AUX_CTRL, val); + + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) + tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, + val | MII_TG3_DSP_CH34TP2_HIBW01); + + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + /* Advertise 100-BaseTX EEE ability */ + if (tp->link_config.advertising & + (ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full)) + val |= TG3_CL45_D7_EEEADV_CAP_100TX; + /* Advertise 1000-BaseT EEE ability */ + if (tp->link_config.advertising & + (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) + val |= TG3_CL45_D7_EEEADV_CAP_1000T; + } + tg3_phy_cl45_write(tp, 0x7, TG3_CL45_D7_EEEADV_CAP, val); + + /* Turn off SM_DSP clock. */ + val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | + MII_TG3_AUXCTL_ACTL_TX_6DB; + tg3_writephy(tp, MII_TG3_AUX_CTRL, val); + } + if (tp->link_config.autoneg == AUTONEG_DISABLE && tp->link_config.speed != SPEED_INVALID) { u32 bmcr, orig_bmcr; @@ -3236,6 +3367,8 @@ relink: tw32_f(MAC_MODE, tp->mac_mode); udelay(40); + tg3_phy_eee_adjust(tp, current_link_up); + if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { /* Polled via timer. */ tw32_f(MAC_EVENT, 0); @@ -4440,12 +4573,11 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, u32 opaque_key, u32 dest_idx_unmasked) { struct tg3_rx_buffer_desc *desc; - struct ring_info *map, *src_map; + struct ring_info *map; struct sk_buff *skb; dma_addr_t mapping; int skb_size, dest_idx; - src_map = NULL; switch (opaque_key) { case RXD_OPAQUE_RING_STD: dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; @@ -6087,7 +6219,8 @@ static void tg3_rx_prodring_free(struct tg3 *tp, tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], tp->rx_pkt_map_sz); - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { + if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && + !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { for (i = 0; i <= tp->rx_jmb_ring_mask; i++) tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], TG3_RX_JMB_MAP_SZ); @@ -6114,7 +6247,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, if (tpr != &tp->napi[0].prodring) { memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE(tp)); - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) + if (tpr->rx_jmb_buffers) memset(&tpr->rx_jmb_buffers[0], 0, TG3_RX_JMB_BUFF_RING_SIZE(tp)); goto done; @@ -6157,7 +6290,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, } } - if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)) + if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) || + (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) goto done; memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); @@ -6229,7 +6363,8 @@ static int tg3_rx_prodring_init(struct tg3 *tp, if (!tpr->rx_std) goto err_out; - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { + if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && + !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), GFP_KERNEL); if (!tpr->rx_jmb_buffers) @@ -7536,6 +7671,9 @@ static void tg3_rings_reset(struct tg3 *tp) /* Disable all transmit rings but the first. 
*/ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; else @@ -7741,6 +7879,22 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(TG3_CPMU_LSPD_10MB_CLK, val); } + /* Enable MAC control of LPI */ + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, + TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + tw32_f(TG3_CPMU_EEE_MODE, + TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + TG3_CPMU_EEEMD_LPI_IN_TX | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE); + } + /* This works around an issue with Athlon chipsets on * B3 tigon3 silicon. This bit has no effect on any * other revision. But do not set this on PCI Express @@ -8549,6 +8703,12 @@ static void tg3_timer(unsigned long __opaque) if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) tg3_periodic_fetch_stats(tp); + if (tp->setlpicnt && !--tp->setlpicnt) { + u32 val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, + val | TG3_CPMU_EEEMD_LPI_ENABLE); + } + if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { u32 mac_stat; int phy_event; @@ -9647,6 +9807,9 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (netif_running(dev)) { cmd->speed = tp->link_config.active_speed; cmd->duplex = tp->link_config.active_duplex; + } else { + cmd->speed = SPEED_INVALID; + cmd->duplex = DUPLEX_INVALID; } cmd->phy_address = tp->phy_addr; cmd->transceiver = XCVR_INTERNAL; @@ -12383,6 +12546,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) } } + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && + tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)) + tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { @@ -12702,6 +12870,9 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) case TG3_EEPROM_SB_REVISION_5: offset = TG3_EEPROM_SB_F1R5_EDH_OFF; break; + case TG3_EEPROM_SB_REVISION_6: + offset = TG3_EEPROM_SB_F1R6_EDH_OFF; + break; default: return; } diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index f6b709a..8342190 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -1091,7 +1091,26 @@ #define CPMU_MUTEX_GNT_DRIVER 0x00001000 #define TG3_CPMU_PHY_STRAP 0x00003664 #define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020 -/* 0x3664 --> 0x3800 unused */ +/* 0x3664 --> 0x36b0 unused */ + +#define TG3_CPMU_EEE_MODE 0x000036b0 +#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008 +#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080 +#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100 +#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200 +#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000 +/* 0x36b4 --> 0x36b8 unused */ + +#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc +#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 +#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004 +/* 0x36c0 --> 0x36d0 unused */ + +#define TG3_CPMU_EEE_CTRL 0x000036d0 +#define TG3_CPMU_EEE_CTRL_EXIT_16_5_US 0x0000019d +#define TG3_CPMU_EEE_CTRL_EXIT_36_US 0x00000384 +#define TG3_CPMU_EEE_CTRL_EXIT_20_1_US 0x000001f8 +/* 0x36d4 --> 0x3800 unused */ /* Mbuf cluster free registers */ #define 
MBFREE_MODE 0x00003800 @@ -1915,6 +1934,7 @@ #define TG3_EEPROM_SB_REVISION_3 0x00030000 #define TG3_EEPROM_SB_REVISION_4 0x00040000 #define TG3_EEPROM_SB_REVISION_5 0x00050000 +#define TG3_EEPROM_SB_REVISION_6 0x00060000 #define TG3_EEPROM_MAGIC_HW 0xabcd #define TG3_EEPROM_MAGIC_HW_MSK 0xffff @@ -1934,6 +1954,7 @@ #define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18 #define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c #define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20 +#define TG3_EEPROM_SB_F1R6_EDH_OFF 0x4c #define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700 #define TG3_EEPROM_SB_EDH_MAJ_SHFT 8 #define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff @@ -2059,6 +2080,10 @@ #define MII_TG3_CTRL_AS_MASTER 0x0800 #define MII_TG3_CTRL_ENABLE_AS_MASTER 0x1000 +#define MII_TG3_MMD_CTRL 0x0d /* MMD Access Control register */ +#define MII_TG3_MMD_CTRL_DATA_NOINC 0x4000 +#define MII_TG3_MMD_ADDRESS 0x0e /* MMD Address Data register */ + #define MII_TG3_EXT_CTRL 0x10 /* Extended control register */ #define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001 #define MII_TG3_EXT_CTRL_LNK3_LED_MODE 0x0002 @@ -2076,6 +2101,8 @@ #define MII_TG3_DSP_TAP1 0x0001 #define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 #define MII_TG3_DSP_AADJ1CH0 0x001f +#define MII_TG3_DSP_CH34TP2 0x4022 +#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010 #define MII_TG3_DSP_AADJ1CH3 0x601f #define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002 #define MII_TG3_DSP_EXP1_INT_STAT 0x0f01 @@ -2142,6 +2169,14 @@ #define MII_TG3_TEST1_TRIM_EN 0x0010 #define MII_TG3_TEST1_CRC_EN 0x8000 +/* Clause 45 expansion registers */ +#define TG3_CL45_D7_EEEADV_CAP 0x003c +#define TG3_CL45_D7_EEEADV_CAP_100TX 0x0002 +#define TG3_CL45_D7_EEEADV_CAP_1000T 0x0004 +#define TG3_CL45_D7_EEERES_STAT 0x803e +#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002 +#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004 + /* Fast Ethernet Tranceiver definitions */ #define MII_TG3_FET_PTEST 0x17 @@ -2986,9 +3021,11 @@ struct tg3 { #define TG3_PHYFLG_BER_BUG 0x00008000 #define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000 #define TG3_PHYFLG_PARALLEL_DETECT 0x00020000 +#define TG3_PHYFLG_EEE_CAP 0x00040000 u32 led_ctrl; u32 phy_otp; + u32 setlpicnt; #define TG3_BPN_SIZE 24 char board_part_number[TG3_BPN_SIZE]; diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 6884813..f199561 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -312,13 +312,14 @@ VELOCITY_PARAM(flow_control, "Enable flow control ability"); #define MED_LNK_DEF 0 #define MED_LNK_MIN 0 -#define MED_LNK_MAX 4 +#define MED_LNK_MAX 5 /* speed_duplex[] is used for setting the speed and duplex mode of NIC. 
0: indicate autonegotiation for both speed and duplex mode 1: indicate 100Mbps half duplex mode 2: indicate 100Mbps full duplex mode 3: indicate 10Mbps half duplex mode 4: indicate 10Mbps full duplex mode + 5: indicate 1000Mbps full duplex mode Note: if EEPROM have been set to the force mode, this option is ignored @@ -617,6 +618,9 @@ static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) case SPD_DPX_10_HALF: status = VELOCITY_SPEED_10; break; + case SPD_DPX_1000_FULL: + status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; + break; } vptr->mii_status = status; return status; @@ -922,6 +926,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) /* enable AUTO-NEGO mode */ mii_set_auto_on(vptr); } else { + u16 CTRL1000; u16 ANAR; u8 CHIPGCR; @@ -936,7 +941,11 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); CHIPGCR = readb(®s->CHIPGCR); - CHIPGCR &= ~CHIPGCR_FCGMII; + + if (mii_status & VELOCITY_SPEED_1000) + CHIPGCR |= CHIPGCR_FCGMII; + else + CHIPGCR &= ~CHIPGCR_FCGMII; if (mii_status & VELOCITY_DUPLEX_FULL) { CHIPGCR |= CHIPGCR_FCFDX; @@ -952,7 +961,13 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); } - MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); + velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000); + CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + if ((mii_status & VELOCITY_SPEED_1000) && + (mii_status & VELOCITY_DUPLEX_FULL)) { + CTRL1000 |= ADVERTISE_1000FULL; + } + velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000); if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); @@ -967,7 +982,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) ANAR |= ADVERTISE_100FULL; else ANAR |= ADVERTISE_100HALF; - } else { + } else if (mii_status & VELOCITY_SPEED_10) { if (mii_status & VELOCITY_DUPLEX_FULL) ANAR |= ADVERTISE_10FULL; else @@ -1013,6 +1028,9 @@ static void velocity_print_link_status(struct velocity_info *vptr) } else { VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name); switch (vptr->options.spd_dpx) { + case SPD_DPX_1000_FULL: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n"); + break; case SPD_DPX_100_HALF: VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n"); break; @@ -3170,6 +3188,37 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; + + cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg; + if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + cmd->advertising |= + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + } else { + switch (vptr->options.spd_dpx) { + case SPD_DPX_1000_FULL: + cmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case SPD_DPX_100_HALF: + cmd->advertising |= ADVERTISED_100baseT_Half; + break; + case SPD_DPX_100_FULL: + cmd->advertising |= ADVERTISED_100baseT_Full; + break; + case SPD_DPX_10_HALF: + cmd->advertising |= ADVERTISED_10baseT_Half; + break; + case SPD_DPX_10_FULL: + cmd->advertising |= ADVERTISED_10baseT_Full; + break; + default: + break; + } + } if (status & VELOCITY_SPEED_1000) cmd->speed = SPEED_1000; 
else if (status & VELOCITY_SPEED_100) @@ -3200,14 +3249,35 @@ static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) curr_status &= (~VELOCITY_LINK_FAIL); new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); + new_status |= ((cmd->speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0); new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0); new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0); new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0); - if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) + if ((new_status & VELOCITY_AUTONEG_ENABLE) && + (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) { ret = -EINVAL; - else + } else { + enum speed_opt spd_dpx; + + if (new_status & VELOCITY_AUTONEG_ENABLE) + spd_dpx = SPD_DPX_AUTO; + else if ((new_status & VELOCITY_SPEED_1000) && + (new_status & VELOCITY_DUPLEX_FULL)) { + spd_dpx = SPD_DPX_1000_FULL; + } else if (new_status & VELOCITY_SPEED_100) + spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ? + SPD_DPX_100_FULL : SPD_DPX_100_HALF; + else if (new_status & VELOCITY_SPEED_10) + spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ? + SPD_DPX_10_FULL : SPD_DPX_10_HALF; + else + return -EOPNOTSUPP; + + vptr->options.spd_dpx = spd_dpx; + + velocity_set_media_mode(vptr, new_status); + } return ret; } diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index b5e120b..aa2e69b 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h @@ -848,7 +848,7 @@ enum velocity_owner { * Bits in CHIPGCR register */ -#define CHIPGCR_FCGMII 0x80 +#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */ #define CHIPGCR_FCFDX 0x40 #define CHIPGCR_FCRESV 0x20 #define CHIPGCR_FCMODE 0x10 @@ -1390,7 +1390,8 @@ enum speed_opt { SPD_DPX_100_HALF = 1, SPD_DPX_100_FULL = 2, SPD_DPX_10_HALF = 3, - SPD_DPX_10_FULL = 4 + SPD_DPX_10_FULL = 4, + SPD_DPX_1000_FULL = 5 }; enum velocity_init_type {