Diffstat (limited to 'drivers/net'): 168 files changed, 7484 insertions, 1322 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index fbb3719..682aad8 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c @@ -480,9 +480,13 @@ static int pnp_registered; #ifdef CONFIG_EISA static struct eisa_device_id el3_eisa_ids[] = { + { "TCM5090" }, + { "TCM5091" }, { "TCM5092" }, { "TCM5093" }, + { "TCM5094" }, { "TCM5095" }, + { "TCM5098" }, { "" } }; MODULE_DEVICE_TABLE(eisa, el3_eisa_ids); diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 29df398..1fc4543 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -1383,6 +1383,11 @@ static void rtl8139_hw_start (struct net_device *dev) RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4))); + tp->cur_rx = 0; + + /* init Rx ring buffer DMA address */ + RTL_W32_F (RxBuf, tp->rx_ring_dma); + /* Must enable Tx/Rx before setting transfer thresholds! */ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); @@ -1390,8 +1395,6 @@ static void rtl8139_hw_start (struct net_device *dev) RTL_W32 (RxConfig, tp->rx_config); RTL_W32 (TxConfig, rtl8139_tx_config); - tp->cur_rx = 0; - rtl_check_media (dev, 1); if (tp->chipset >= CH_8139B) { @@ -1406,9 +1409,6 @@ static void rtl8139_hw_start (struct net_device *dev) /* Lock Config[01234] and BMCR register writes */ RTL_W8 (Cfg9346, Cfg9346_Lock); - /* init Rx ring buffer DMA address */ - RTL_W32_F (RxBuf, tp->rx_ring_dma); - /* init Tx buffer DMA addresses */ for (i = 0; i < NUM_TX_DESC; i++) RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs)); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 9e7baec..214a92d 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -28,9 +28,9 @@ if NETDEVICES config COMPAT_NET_DEV_OPS default y - bool "Enable older network device API compatiablity" + bool "Enable older network device API compatibility" ---help--- - This option enables kernel compatiability with older network devices + This option enables kernel compatibility with older network devices that do not use net_device_ops interface. If unsure, say Y. @@ -977,6 +977,8 @@ config ETHOC depends on NET_ETHERNET && HAS_IOMEM select MII select PHYLIB + select CRC32 + select BITREVERSE help Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC. @@ -2056,6 +2058,27 @@ config IGB_DCA driver. DCA is a method for warming the CPU cache before data is used, with the intent of lessening the impact of cache misses. +config IGBVF + tristate "Intel(R) 82576 Virtual Function Ethernet support" + depends on PCI + ---help--- + This driver supports Intel(R) 82576 virtual functions. For more + information on how to identify your adapter, go to the Adapter & + Driver ID Guide at: + + <http://support.intel.com/support/network/adapter/pro100/21397.htm> + + For general information and support, go to the Intel support + website at: + + <http://support.intel.com> + + More specific information on configuring the driver is in + <file:Documentation/networking/e1000.txt>. + + To compile this driver as a module, choose M here. The module + will be called igbvf. 
+ source "drivers/net/ixp2000/Kconfig" config MYRI_SBUS diff --git a/drivers/net/Makefile b/drivers/net/Makefile index edc9a0d..a1c25cb 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/ obj-$(CONFIG_E1000E) += e1000e/ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/ obj-$(CONFIG_IGB) += igb/ +obj-$(CONFIG_IGBVF) += igbvf/ obj-$(CONFIG_IXGBE) += ixgbe/ obj-$(CONFIG_IXGB) += ixgb/ obj-$(CONFIG_IP1000) += ipg.o @@ -101,7 +102,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o obj-$(CONFIG_NET) += Space.o loopback.o obj-$(CONFIG_SEEQ8005) += seeq8005.o obj-$(CONFIG_NET_SB1000) += sb1000.o -obj-$(CONFIG_MAC8390) += mac8390.o 8390.o +obj-$(CONFIG_MAC8390) += mac8390.o obj-$(CONFIG_APNE) += apne.o 8390.o obj-$(CONFIG_PCMCIA_PCNET) += 8390.o obj-$(CONFIG_HP100) += hp100.o diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index d0d0c2f..02f64d5 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c @@ -692,6 +692,17 @@ static struct zorro_driver a2065_driver = { .remove = __devexit_p(a2065_remove_one), }; +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_set_multicast_list = lance_set_multicast, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __devinit a2065_init_one(struct zorro_dev *z, const struct zorro_device_id *ent) { @@ -753,12 +764,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z, priv->rx_ring_mod_mask = RX_RING_MOD_MASK; priv->tx_ring_mod_mask = TX_RING_MOD_MASK; - dev->open = &lance_open; - dev->stop = &lance_close; - dev->hard_start_xmit = &lance_start_xmit; - dev->tx_timeout = &lance_tx_timeout; + dev->netdev_ops = &lance_netdev_ops; dev->watchdog_timeo = 5*HZ; - dev->set_multicast_list = &lance_set_multicast; dev->dma = 0; init_timer(&priv->multicast_timer); diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index e1d72e0..58e8d52 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c @@ -155,6 +155,18 @@ static struct zorro_driver ariadne_driver = { .remove = __devexit_p(ariadne_remove_one), }; +static const struct net_device_ops ariadne_netdev_ops = { + .ndo_open = ariadne_open, + .ndo_stop = ariadne_close, + .ndo_start_xmit = ariadne_start_xmit, + .ndo_tx_timeout = ariadne_tx_timeout, + .ndo_get_stats = ariadne_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __devinit ariadne_init_one(struct zorro_dev *z, const struct zorro_device_id *ent) { @@ -197,13 +209,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z, dev->mem_start = ZTWO_VADDR(mem_start); dev->mem_end = dev->mem_start+ARIADNE_RAM_SIZE; - dev->open = &ariadne_open; - dev->stop = &ariadne_close; - dev->hard_start_xmit = &ariadne_start_xmit; - dev->tx_timeout = &ariadne_tx_timeout; + dev->netdev_ops = &ariadne_netdev_ops; dev->watchdog_timeo = 5*HZ; - dev->get_stats = &ariadne_get_stats; - dev->set_multicast_list = &set_multicast_list; err = register_netdev(dev); if (err) { diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index 4bc6901..627bc75 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c @@ -665,6 +665,20 @@ static void __init am79c961_banner(void) if (net_debug && version_printed++ == 0) printk(KERN_INFO "%s", 
version); } +static const struct net_device_ops am79c961_netdev_ops = { + .ndo_open = am79c961_open, + .ndo_stop = am79c961_close, + .ndo_start_xmit = am79c961_sendpacket, + .ndo_get_stats = am79c961_getstats, + .ndo_set_multicast_list = am79c961_setmulticastlist, + .ndo_tx_timeout = am79c961_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = am79c961_poll_controller, +#endif +}; static int __init am79c961_probe(struct platform_device *pdev) { @@ -732,15 +746,7 @@ static int __init am79c961_probe(struct platform_device *pdev) if (am79c961_hw_init(dev)) goto release; - dev->open = am79c961_open; - dev->stop = am79c961_close; - dev->hard_start_xmit = am79c961_sendpacket; - dev->get_stats = am79c961_getstats; - dev->set_multicast_list = am79c961_setmulticastlist; - dev->tx_timeout = am79c961_timeout; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = am79c961_poll_controller; -#endif + dev->netdev_ops = &am79c961_netdev_ops; ret = register_netdev(dev); if (ret == 0) { diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 442938d..7f4bc8a 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c @@ -577,7 +577,7 @@ static void at91ether_sethashtable(struct net_device *dev) /* * Enable/Disable promiscuous and multicast modes. */ -static void at91ether_set_rx_mode(struct net_device *dev) +static void at91ether_set_multicast_list(struct net_device *dev) { unsigned long cfg; @@ -808,7 +808,7 @@ static int at91ether_close(struct net_device *dev) /* * Transmit packet. */ -static int at91ether_tx(struct sk_buff *skb, struct net_device *dev) +static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct at91_private *lp = netdev_priv(dev); @@ -828,7 +828,7 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; } else { - printk(KERN_ERR "at91_ether.c: at91ether_tx() called, but device is busy!\n"); + printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n"); return 1; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb) on this skb, he also reports -ENETDOWN and printk's, so either we free and return(0) or don't free and return 1 */ @@ -965,6 +965,21 @@ static void at91ether_poll_controller(struct net_device *dev) } #endif +static const struct net_device_ops at91ether_netdev_ops = { + .ndo_open = at91ether_open, + .ndo_stop = at91ether_close, + .ndo_start_xmit = at91ether_start_xmit, + .ndo_get_stats = at91ether_stats, + .ndo_set_multicast_list = at91ether_set_multicast_list, + .ndo_set_mac_address = set_mac_address, + .ndo_do_ioctl = at91ether_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = at91ether_poll_controller, +#endif +}; + /* * Initialize the ethernet interface */ @@ -1005,17 +1020,8 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add spin_lock_init(&lp->lock); ether_setup(dev); - dev->open = at91ether_open; - dev->stop = at91ether_close; - dev->hard_start_xmit = at91ether_tx; - dev->get_stats = at91ether_stats; - dev->set_multicast_list = at91ether_set_rx_mode; - dev->set_mac_address = set_mac_address; + dev->netdev_ops = &at91ether_netdev_ops; dev->ethtool_ops = &at91ether_ethtool_ops; - dev->do_ioctl = at91ether_ioctl; -#ifdef CONFIG_NET_POLL_CONTROLLER - 
dev->poll_controller = at91ether_poll_controller; -#endif SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index cc77087..b72b3d6 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c @@ -153,7 +153,7 @@ struct ep93xx_descs struct ep93xx_priv { struct resource *res; - void *base_addr; + void __iomem *base_addr; int irq; struct ep93xx_descs *descs; @@ -770,7 +770,18 @@ static struct ethtool_ops ep93xx_ethtool_ops = { .get_link = ep93xx_get_link, }; -struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) +static const struct net_device_ops ep93xx_netdev_ops = { + .ndo_open = ep93xx_open, + .ndo_stop = ep93xx_close, + .ndo_start_xmit = ep93xx_xmit, + .ndo_get_stats = ep93xx_get_stats, + .ndo_do_ioctl = ep93xx_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) { struct net_device *dev; @@ -780,12 +791,8 @@ struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); - dev->get_stats = ep93xx_get_stats; dev->ethtool_ops = &ep93xx_ethtool_ops; - dev->hard_start_xmit = ep93xx_xmit; - dev->open = ep93xx_open; - dev->stop = ep93xx_close; - dev->do_ioctl = ep93xx_ioctl; + dev->netdev_ops = &ep93xx_netdev_ops; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c index e380de4..edf770f 100644 --- a/drivers/net/arm/ether1.c +++ b/drivers/net/arm/ether1.c @@ -991,6 +991,18 @@ static void __devinit ether1_banner(void) printk(KERN_INFO "%s", version); } +static const struct net_device_ops ether1_netdev_ops = { + .ndo_open = ether1_open, + .ndo_stop = ether1_close, + .ndo_start_xmit = ether1_sendpacket, + .ndo_get_stats = ether1_getstats, + .ndo_set_multicast_list = ether1_setmulticastlist, + .ndo_tx_timeout = ether1_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __devinit ether1_probe(struct expansion_card *ec, const struct ecard_id *id) { @@ -1031,12 +1043,7 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id) goto free; } - dev->open = ether1_open; - dev->stop = ether1_close; - dev->hard_start_xmit = ether1_sendpacket; - dev->get_stats = ether1_getstats; - dev->set_multicast_list = ether1_setmulticastlist; - dev->tx_timeout = ether1_timeout; + dev->netdev_ops = ðer1_netdev_ops; dev->watchdog_timeo = 5 * HZ / 100; ret = register_netdev(dev); diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c index 21a7bef..ec8a1ae 100644 --- a/drivers/net/arm/ether3.c +++ b/drivers/net/arm/ether3.c @@ -770,6 +770,18 @@ static void __devinit ether3_banner(void) printk(KERN_INFO "%s", version); } +static const struct net_device_ops ether3_netdev_ops = { + .ndo_open = ether3_open, + .ndo_stop = ether3_close, + .ndo_start_xmit = ether3_sendpacket, + .ndo_get_stats = ether3_getstats, + .ndo_set_multicast_list = ether3_setmulticastlist, + .ndo_tx_timeout = ether3_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __devinit ether3_probe(struct expansion_card *ec, const struct ecard_id *id) { @@ -846,12 +858,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id) goto free; } - dev->open = ether3_open; - dev->stop = ether3_close; - 
dev->hard_start_xmit = ether3_sendpacket; - dev->get_stats = ether3_getstats; - dev->set_multicast_list = ether3_setmulticastlist; - dev->tx_timeout = ether3_timeout; + dev->netdev_ops = ðer3_netdev_ops; dev->watchdog_timeo = 5 * HZ / 100; ret = register_netdev(dev); diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 448487e..a740053 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -338,12 +338,12 @@ static int ixp4xx_mdio_register(void) if (cpu_is_ixp43x()) { /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */ if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH)) - return -ENOSYS; + return -ENODEV; mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT; } else { /* All MII PHY accesses use NPE-B Ethernet registers */ if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0)) - return -ENOSYS; + return -ENODEV; mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; } @@ -1174,7 +1174,7 @@ static int __devinit eth_init_one(struct platform_device *pdev) regs_phys = IXP4XX_EthC_BASE_PHYS; break; default: - err = -ENOSYS; + err = -ENODEV; goto err_free; } @@ -1189,15 +1189,10 @@ static int __devinit eth_init_one(struct platform_device *pdev) goto err_free; } - if (register_netdev(dev)) { - err = -EIO; - goto err_npe_rel; - } - port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name); if (!port->mem_res) { err = -EBUSY; - goto err_unreg; + goto err_npe_rel; } port->plat = plat; @@ -1215,20 +1210,25 @@ static int __devinit eth_init_one(struct platform_device *pdev) snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy); port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0, PHY_INTERFACE_MODE_MII); - if (IS_ERR(port->phydev)) { - printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); - return PTR_ERR(port->phydev); - } + if ((err = IS_ERR(port->phydev))) + goto err_free_mem; port->phydev->irq = PHY_POLL; + if ((err = register_netdev(dev))) + goto err_phy_dis; + printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy, npe_name(port->npe)); return 0; -err_unreg: - unregister_netdev(dev); +err_phy_dis: + phy_disconnect(port->phydev); +err_free_mem: + npe_port_tab[NPE_ID(port->id)] = NULL; + platform_set_drvdata(pdev, NULL); + release_resource(port->mem_res); err_npe_rel: npe_release(port->npe); err_free: @@ -1242,6 +1242,7 @@ static int __devexit eth_remove_one(struct platform_device *pdev) struct port *port = netdev_priv(dev); unregister_netdev(dev); + phy_disconnect(port->phydev); npe_port_tab[NPE_ID(port->id)] = NULL; platform_set_drvdata(pdev, NULL); npe_release(port->npe); diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index 2d81f6a..5425ab0 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c @@ -453,6 +453,16 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag, return( ret ); } +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_multicast_list = set_multicast_list, + .ndo_set_mac_address = lance_set_mac_address, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; static unsigned long __init lance_probe1( struct net_device *dev, struct lance_addr *init_rec ) @@ -623,15 +633,9 @@ static unsigned long __init lance_probe1( struct net_device *dev, if (did_version++ == 0) DPRINTK( 1, ( version )); - /* The LANCE-specific entries in the 
device structure. */ - dev->open = &lance_open; - dev->hard_start_xmit = &lance_start_xmit; - dev->stop = &lance_close; - dev->set_multicast_list = &set_multicast_list; - dev->set_mac_address = &lance_set_mac_address; + dev->netdev_ops = &lance_netdev_ops; /* XXX MSch */ - dev->tx_timeout = lance_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; return( 1 ); diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index deb7b53..83a1212 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -2532,8 +2532,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev, * various kernel subsystems to support the mechanics required by a * fixed-high-32-bit system. */ - if ((pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) || - (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) != 0)) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); goto err_dma; } diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index fb57b75..1342418 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c @@ -37,6 +37,7 @@ char atl1e_driver_version[] = DRV_VERSION; */ static struct pci_device_id atl1e_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, /* required last entry */ { 0 } }; diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 0ab2254..4e81712 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -82,6 +82,12 @@ #include "atl1.h" +#define ATLX_DRIVER_VERSION "2.1.3" +MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ + Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ATLX_DRIVER_VERSION); + /* Temporary hack for merging atl1 and atl2 */ #include "atlx.c" diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h index 297a03d..14054b7 100644 --- a/drivers/net/atlx/atlx.h +++ b/drivers/net/atlx/atlx.h @@ -29,12 +29,6 @@ #include <linux/module.h> #include <linux/types.h> -#define ATLX_DRIVER_VERSION "2.1.3" -MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ - Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(ATLX_DRIVER_VERSION); - #define ATLX_ERR_PHY 2 #define ATLX_ERR_PHY_SPEED 7 #define ATLX_ERR_PHY_RES 8 diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 4274e4a..d58c105 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -1004,12 +1004,12 @@ static void au1000_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static void set_rx_mode(struct net_device *dev) +static void au1000_multicast_list(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); if (au1000_debug > 4) - printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags); + printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags); if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ aup->mac->control |= MAC_PROMISCUOUS; @@ -1047,6 +1047,18 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); } +static const struct net_device_ops au1000_netdev_ops = { + .ndo_open = au1000_open, + .ndo_stop = au1000_close, + .ndo_start_xmit = au1000_tx, + .ndo_set_multicast_list = au1000_multicast_list, + .ndo_do_ioctl = au1000_ioctl, + .ndo_tx_timeout = au1000_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + static struct net_device * au1000_probe(int port_num) { static unsigned version_printed = 0; @@ -1197,13 +1209,8 @@ static struct net_device * au1000_probe(int port_num) dev->base_addr = base; dev->irq = irq; - dev->open = au1000_open; - dev->hard_start_xmit = au1000_tx; - dev->stop = au1000_close; - dev->set_multicast_list = &set_rx_mode; - dev->do_ioctl = &au1000_ioctl; + dev->netdev_ops = &au1000_netdev_ops; SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); - dev->tx_timeout = au1000_tx_timeout; dev->watchdog_timeo = ETH_TX_TIMEOUT; /* diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index c49ddd0..b4bb06f 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -35,8 +35,22 @@ #define DRV_VER "2.0.348" #define DRV_NAME "be2net" #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" +#define OC_NAME "Emulex OneConnect 10Gbps NIC" #define DRV_DESC BE_NAME "Driver" +#define BE_VENDOR_ID 0x19a2 +#define BE_DEVICE_ID1 0x211 +#define OC_DEVICE_ID1 0x700 +#define OC_DEVICE_ID2 0x701 + +static inline char *nic_name(struct pci_dev *pdev) +{ + if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2) + return OC_NAME; + else + return BE_NAME; +} + /* Number of bytes of an RX frame that are copied to skb->data */ #define BE_HDR_LEN 64 #define BE_MAX_JUMBO_FRAME_SIZE 9018 diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 04f4b73..9592f22 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c @@ -319,7 +319,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) be_cmd_get_flow_control(&adapter->ctrl, &ecmd->tx_pause, &ecmd->rx_pause); - ecmd->autoneg = AUTONEG_ENABLE; + ecmd->autoneg = 0; } static int @@ -328,7 +328,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) struct be_adapter *adapter = netdev_priv(netdev); int status; - if (ecmd->autoneg != AUTONEG_ENABLE) + if (ecmd->autoneg != 0) return -EINVAL; status = be_cmd_set_flow_control(&adapter->ctrl, ecmd->tx_pause, diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 9b75aa6..5c378b5 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -28,10 +28,10 @@ static unsigned int rx_frag_size = 2048; module_param(rx_frag_size, uint, S_IRUGO); MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); -#define BE_VENDOR_ID 0x19a2 -#define BE2_DEVICE_ID_1 0x0211 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { - { PCI_DEVICE(BE_VENDOR_ID, BE2_DEVICE_ID_1) }, + { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, { 0 } }; MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -1821,11 +1821,11 @@ static int __devinit be_probe(struct pci_dev *pdev, be_msix_enable(adapter); - status = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (!status) { netdev->features 
|= NETIF_F_HIGHDMA; } else { - status = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (status) { dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); goto free_netdev; @@ -1859,7 +1859,7 @@ static int __devinit be_probe(struct pci_dev *pdev, if (status != 0) goto stats_clean; - dev_info(&pdev->dev, BE_NAME " port %d\n", adapter->port_num); + dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); return 0; stats_clean: @@ -1873,7 +1873,7 @@ rel_reg: disable_dev: pci_disable_device(pdev); do_none: - dev_warn(&pdev->dev, BE_NAME " initialization failed\n"); + dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev)); return status; } diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 9afe809..b4da182 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -980,7 +980,6 @@ static int bfin_mac_open(struct net_device *dev) } /* - * * this makes the board clean up everything that it can * and not talk to the outside world. Caused by * an 'ifconfig ethX down' @@ -1005,6 +1004,20 @@ static int bfin_mac_close(struct net_device *dev) return 0; } +static const struct net_device_ops bfin_mac_netdev_ops = { + .ndo_open = bfin_mac_open, + .ndo_stop = bfin_mac_close, + .ndo_start_xmit = bfin_mac_hard_start_xmit, + .ndo_set_mac_address = bfin_mac_set_mac_address, + .ndo_tx_timeout = bfin_mac_timeout, + .ndo_set_multicast_list = bfin_mac_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bfin_mac_poll, +#endif +}; + static int __devinit bfin_mac_probe(struct platform_device *pdev) { struct net_device *ndev; @@ -1086,15 +1099,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) /* Fill in the fields of the device structure with ethernet values. */ ether_setup(ndev); - ndev->open = bfin_mac_open; - ndev->stop = bfin_mac_close; - ndev->hard_start_xmit = bfin_mac_hard_start_xmit; - ndev->set_mac_address = bfin_mac_set_mac_address; - ndev->tx_timeout = bfin_mac_timeout; - ndev->set_multicast_list = bfin_mac_set_multicast_list; -#ifdef CONFIG_NET_POLL_CONTROLLER - ndev->poll_controller = bfin_mac_poll; -#endif + ndev->netdev_ops = &bfin_mac_netdev_ops; ndev->ethtool_ops = &bfin_mac_ethtool_ops; spin_lock_init(&lp->lock); diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 9d268be..b0cb29d 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -54,8 +54,8 @@ #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "2.0.0" -#define DRV_MODULE_RELDATE "April 2, 2009" +#define DRV_MODULE_VERSION "2.0.1" +#define DRV_MODULE_RELDATE "May 6, 2009" #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-4.6.16.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-4.6.16.fw" #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-4.6.17.fw" @@ -2600,6 +2600,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi) /* Tell compiler that status block fields can change. */ barrier(); cons = *bnapi->hw_tx_cons_ptr; + barrier(); if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT)) cons++; return cons; @@ -2879,6 +2880,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi) /* Tell compiler that status block fields can change. 
*/ barrier(); cons = *bnapi->hw_rx_cons_ptr; + barrier(); if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)) cons++; return cons; @@ -3427,8 +3429,8 @@ static int __devinit bnx2_request_firmware(struct bnx2 *bp) { const char *mips_fw_file, *rv2p_fw_file; - const struct bnx2_mips_fw_file *mips; - const struct bnx2_rv2p_fw_file *rv2p; + const struct bnx2_mips_fw_file *mips_fw; + const struct bnx2_rv2p_fw_file *rv2p_fw; int rc; if (CHIP_NUM(bp) == CHIP_NUM_5709) { @@ -3452,21 +3454,21 @@ bnx2_request_firmware(struct bnx2 *bp) rv2p_fw_file); return rc; } - mips = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; - rv2p = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; - if (bp->mips_firmware->size < sizeof(*mips) || - check_mips_fw_entry(bp->mips_firmware, &mips->com) || - check_mips_fw_entry(bp->mips_firmware, &mips->cp) || - check_mips_fw_entry(bp->mips_firmware, &mips->rxp) || - check_mips_fw_entry(bp->mips_firmware, &mips->tpat) || - check_mips_fw_entry(bp->mips_firmware, &mips->txp)) { + mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; + rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; + if (bp->mips_firmware->size < sizeof(*mips_fw) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) { printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n", mips_fw_file); return -EINVAL; } - if (bp->rv2p_firmware->size < sizeof(*rv2p) || - check_fw_section(bp->rv2p_firmware, &rv2p->proc1.rv2p, 8, true) || - check_fw_section(bp->rv2p_firmware, &rv2p->proc2.rv2p, 8, true)) { + if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) || + check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) || + check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) { printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n", rv2p_fw_file); return -EINVAL; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 8c2e5ab..faf094a 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -1465,6 +1465,12 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best, return best; } +static int agg_device_up(const struct aggregator *agg) +{ + return (netif_running(agg->slave->dev) && + netif_carrier_ok(agg->slave->dev)); +} + /** * ad_agg_selection_logic - select an aggregation group for a team * @aggregator: the aggregator we're looking at @@ -1496,14 +1502,13 @@ static void ad_agg_selection_logic(struct aggregator *agg) struct port *port; origin = agg; - active = __get_active_agg(agg); - best = active; + best = (active && agg_device_up(active)) ? 
active : NULL; do { agg->is_active = 0; - if (agg->num_of_ports) + if (agg->num_of_ports && agg_device_up(agg)) best = ad_agg_selection_test(best, agg); } while ((agg = __get_next_agg(agg))); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 8dc6fbb..46d312b 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -370,8 +370,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct if (arp->op_code == htons(ARPOP_REPLY)) { /* update rx hash table for this ARP */ - printk("rar: update orig %s bond_dev %s\n", orig_dev->name, - bond_dev->name); bond = netdev_priv(bond_dev); rlb_update_entry_from_arp(bond, arp); pr_debug("Server received an ARP Reply from client\n"); @@ -1708,10 +1706,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave * Called with RTNL */ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) - __releases(&bond->curr_slave_lock) - __releases(&bond->lock) __acquires(&bond->lock) - __acquires(&bond->curr_slave_lock) + __releases(&bond->lock) { struct bonding *bond = netdev_priv(bond_dev); struct sockaddr *sa = addr; @@ -1747,9 +1743,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) } } - write_unlock_bh(&bond->curr_slave_lock); - read_unlock(&bond->lock); - if (swap_slave) { alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave); alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave); @@ -1757,16 +1750,15 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr, bond->alb_info.rlb_enabled); + read_lock(&bond->lock); alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); if (bond->alb_info.rlb_enabled) { /* inform clients mac address has changed */ rlb_req_update_slave_clients(bond, bond->curr_active_slave); } + read_unlock(&bond->lock); } - read_lock(&bond->lock); - write_lock_bh(&bond->curr_slave_lock); - return 0; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 99610f3..7482402 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2213,33 +2213,24 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; - int i, found = 0; - - if (info->slave_id < 0) { - return -ENODEV; - } + int i, res = -ENODEV; read_lock(&bond->lock); bond_for_each_slave(bond, slave, i) { if (i == (int)info->slave_id) { - found = 1; + res = 0; + strcpy(info->slave_name, slave->dev->name); + info->link = slave->link; + info->state = slave->state; + info->link_failure_count = slave->link_failure_count; break; } } read_unlock(&bond->lock); - if (found) { - strcpy(info->slave_name, slave->dev->name); - info->link = slave->link; - info->state = slave->state; - info->link_failure_count = slave->link_failure_count; - } else { - return -ENODEV; - } - - return 0; + return res; } /*-------------------------------- Monitoring -------------------------------*/ @@ -2570,7 +2561,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { if (!targets[i]) - continue; + break; pr_debug("basa: target %x\n", targets[i]); if (list_empty(&bond->vlan_list)) { pr_debug("basa: empty vlan: arp_send\n"); @@ -2677,7 +2668,6 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 int i; __be32 *targets = 
bond->params.arp_targets; - targets = bond->params.arp_targets; for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n", &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip)); @@ -3303,7 +3293,7 @@ static void bond_info_show_master(struct seq_file *seq) for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) { if (!bond->params.arp_targets[i]) - continue; + break; if (printed) seq_printf(seq, ","); seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); @@ -5168,16 +5158,15 @@ int bond_create(char *name, struct bond_params *params) up_write(&bonding_rwsem); rtnl_unlock(); /* allows sysfs registration of net device */ res = bond_create_sysfs_entry(netdev_priv(bond_dev)); - if (res < 0) { - rtnl_lock(); - down_write(&bonding_rwsem); - bond_deinit(bond_dev); - unregister_netdevice(bond_dev); - goto out_rtnl; - } + if (res < 0) + goto out_unreg; return 0; +out_unreg: + rtnl_lock(); + down_write(&bonding_rwsem); + unregister_netdevice(bond_dev); out_bond: bond_deinit(bond_dev); out_netdev: @@ -5192,7 +5181,6 @@ static int __init bonding_init(void) { int i; int res; - struct bonding *bond; printk(KERN_INFO "%s", version); @@ -5223,13 +5211,6 @@ static int __init bonding_init(void) goto out; err: - list_for_each_entry(bond, &bond_dev_list, bond_list) { - bond_work_cancel_all(bond); - destroy_workqueue(bond->wq); - } - - bond_destroy_sysfs(); - rtnl_lock(); bond_free_all(); rtnl_unlock(); diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 18cf4787..d287315 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -684,17 +684,15 @@ static ssize_t bonding_store_arp_targets(struct device *d, goto out; } /* look for an empty slot to put the target in, and check for dupes */ - for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { + for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { if (targets[i] == newtarget) { /* duplicate */ printk(KERN_ERR DRV_NAME ": %s: ARP target %pI4 is already present\n", bond->dev->name, &newtarget); - if (done) - targets[i] = 0; ret = -EINVAL; goto out; } - if (targets[i] == 0 && !done) { + if (targets[i] == 0) { printk(KERN_INFO DRV_NAME ": %s: adding ARP target %pI4.\n", bond->dev->name, &newtarget); @@ -720,12 +718,16 @@ static ssize_t bonding_store_arp_targets(struct device *d, goto out; } - for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { + for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { if (targets[i] == newtarget) { + int j; printk(KERN_INFO DRV_NAME ": %s: removing ARP target %pI4.\n", bond->dev->name, &newtarget); - targets[i] = 0; + for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++) + targets[j] = targets[j+1]; + + targets[j] = 0; done = 1; } } diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index c9806c5..7a18dc7 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -257,6 +257,23 @@ struct transceiver_ops transceivers[] = struct transceiver_ops* transceiver = &transceivers[0]; +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_send_packet, + .ndo_tx_timeout = e100_tx_timeout, + .ndo_get_stats = e100_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_do_ioctl = e100_ioctl, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_config = e100_set_config, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller 
= e100_netpoll, +#endif +}; + #define tx_done(dev) (*R_DMA_CH0_CMD == 0) /* @@ -300,19 +317,8 @@ etrax_ethernet_init(void) /* fill in our handlers so the network layer can talk to us in the future */ - dev->open = e100_open; - dev->hard_start_xmit = e100_send_packet; - dev->stop = e100_close; - dev->get_stats = e100_get_stats; - dev->set_multicast_list = set_multicast_list; - dev->set_mac_address = e100_set_mac_address; dev->ethtool_ops = &e100_ethtool_ops; - dev->do_ioctl = e100_ioctl; - dev->set_config = e100_set_config; - dev->tx_timeout = e100_tx_timeout; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = e100_netpoll; -#endif + dev->netdev_ops = &e100_netdev_ops; spin_lock_init(&np->lock); spin_lock_init(&np->led_lock); diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index 714df2b..c888e97 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h @@ -85,8 +85,8 @@ struct fl_pg_chunk { struct page *page; void *va; unsigned int offset; - u64 *p_cnt; - DECLARE_PCI_UNMAP_ADDR(mapping); + unsigned long *p_cnt; + dma_addr_t mapping; }; struct rx_desc; diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index ab0e5fe..17858b9 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -1117,8 +1117,8 @@ static void cxgb_down(struct adapter *adapter) spin_unlock_irq(&adapter->work_lock); free_irq_resources(adapter); - flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */ quiesce_rx(adapter); + flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */ } static void schedule_chk_task(struct adapter *adap) @@ -1187,6 +1187,9 @@ static int offload_close(struct t3cdev *tdev) sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group); + /* Flush work scheduled while releasing TIDs */ + flush_scheduled_work(); + tdev->lldev = NULL; cxgb3_set_dummy_ops(tdev); t3_tp_set_offload_mode(adapter, 0); @@ -1232,6 +1235,10 @@ static int cxgb_close(struct net_device *dev) struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; + + if (!adapter->open_device_map) + return 0; + /* Stop link fault interrupts */ t3_xgm_intr_disable(adapter, pi->port_id); t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset); @@ -1247,8 +1254,7 @@ static int cxgb_close(struct net_device *dev) spin_unlock_irq(&adapter->work_lock); if (!(adapter->open_device_map & PORT_MASK)) - cancel_rearming_delayed_workqueue(cxgb3_wq, - &adapter->adap_check_task); + cancel_delayed_work_sync(&adapter->adap_check_task); if (!adapter->open_device_map) cxgb_down(adapter); @@ -2490,13 +2496,16 @@ static void check_link_status(struct adapter *adapter) for_each_port(adapter, i) { struct net_device *dev = adapter->port[i]; struct port_info *p = netdev_priv(dev); + int link_fault; spin_lock_irq(&adapter->work_lock); - if (p->link_fault) { - spin_unlock_irq(&adapter->work_lock); + link_fault = p->link_fault; + spin_unlock_irq(&adapter->work_lock); + + if (link_fault) { + t3_link_fault(adapter, i); continue; } - spin_unlock_irq(&adapter->work_lock); if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) { t3_xgm_intr_disable(adapter, i); @@ -2554,9 +2563,7 @@ static void t3_adap_check_task(struct work_struct *work) adapter->check_task_cnt++; - /* Check link status for PHYs without interrupts */ - if (p->linkpoll_period) - check_link_status(adapter); + check_link_status(adapter); /* Accumulate MAC stats if needed */ if (!p->linkpoll_period || @@ -2680,21 +2687,6 @@ void t3_os_ext_intr_handler(struct adapter *adapter) 
spin_unlock(&adapter->work_lock); } -static void link_fault_task(struct work_struct *work) -{ - struct adapter *adapter = container_of(work, struct adapter, - link_fault_handler_task); - int i; - - for_each_port(adapter, i) { - struct net_device *netdev = adapter->port[i]; - struct port_info *pi = netdev_priv(netdev); - - if (pi->link_fault) - t3_link_fault(adapter, i); - } -} - void t3_os_link_fault_handler(struct adapter *adapter, int port_id) { struct net_device *netdev = adapter->port[port_id]; @@ -2702,7 +2694,6 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id) spin_lock(&adapter->work_lock); pi->link_fault = 1; - queue_work(cxgb3_wq, &adapter->link_fault_handler_task); spin_unlock(&adapter->work_lock); } @@ -2838,6 +2829,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev, struct adapter *adapter = pci_get_drvdata(pdev); int ret; + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + ret = t3_adapter_error(adapter, 0); /* Request a slot reset. */ @@ -2932,8 +2926,13 @@ static int __devinit cxgb_enable_msix(struct adapter *adap) while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0) vectors = err; - if (!err && vectors < (adap->params.nports + 1)) + if (err < 0) + pci_disable_msix(adap->pdev); + + if (!err && vectors < (adap->params.nports + 1)) { + pci_disable_msix(adap->pdev); err = -1; + } if (!err) { for (i = 0; i < vectors; ++i) @@ -3082,7 +3081,6 @@ static int __devinit init_one(struct pci_dev *pdev, INIT_LIST_HEAD(&adapter->adapter_list); INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); - INIT_WORK(&adapter->link_fault_handler_task, link_fault_task); INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 26d3587..b3ee2bc 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, (*d->pg_chunk.p_cnt)--; if (!*d->pg_chunk.p_cnt) pci_unmap_page(pdev, - pci_unmap_addr(&d->pg_chunk, mapping), + d->pg_chunk.mapping, q->alloc_size, PCI_DMA_FROMDEVICE); put_page(d->pg_chunk.page); @@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, q->pg_chunk.offset = 0; mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 0, q->alloc_size, PCI_DMA_FROMDEVICE); - pci_unmap_addr_set(&q->pg_chunk, mapping, mapping); + q->pg_chunk.mapping = mapping; } sd->pg_chunk = q->pg_chunk; @@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) nomem: q->alloc_failed++; break; } - mapping = pci_unmap_addr(&sd->pg_chunk, mapping) + - sd->pg_chunk.offset; + mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; pci_unmap_addr_set(sd, dma_addr, mapping); add_one_rx_chunk(mapping, d, q->gen); @@ -881,7 +880,7 @@ recycle: (*sd->pg_chunk.p_cnt)--; if (!*sd->pg_chunk.p_cnt) pci_unmap_page(adap->pdev, - pci_unmap_addr(&sd->pg_chunk, mapping), + sd->pg_chunk.mapping, fl->alloc_size, PCI_DMA_FROMDEVICE); if (!skb) { @@ -2096,7 +2095,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, (*sd->pg_chunk.p_cnt)--; if (!*sd->pg_chunk.p_cnt) pci_unmap_page(adap->pdev, - pci_unmap_addr(&sd->pg_chunk, mapping), + sd->pg_chunk.mapping, fl->alloc_size, PCI_DMA_FROMDEVICE); diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index 31ed31a..4950d5d 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ 
b/drivers/net/cxgb3/t3_hw.c @@ -1202,7 +1202,6 @@ void t3_link_changed(struct adapter *adapter, int port_id) struct cphy *phy = &pi->phy; struct cmac *mac = &pi->mac; struct link_config *lc = &pi->link_config; - int force_link_down = 0; phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); @@ -1218,14 +1217,9 @@ void t3_link_changed(struct adapter *adapter, int port_id) status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset); if (status & F_LINKFAULTCHANGE) { mac->stats.link_faults++; - force_link_down = 1; + pi->link_fault = 1; } t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low); - - if (force_link_down) { - t3_os_link_fault_handler(adapter, port_id); - return; - } } if (lc->requested_fc & PAUSE_AUTONEG) @@ -1280,6 +1274,11 @@ void t3_link_fault(struct adapter *adapter, int port_id) A_XGM_INT_STATUS + mac->offset); link_fault &= F_LINKFAULTCHANGE; + link_ok = lc->link_ok; + speed = lc->speed; + duplex = lc->duplex; + fc = lc->fc; + phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); if (link_fault) { @@ -1292,9 +1291,6 @@ void t3_link_fault(struct adapter *adapter, int port_id) /* Account link faults only when the phy reports a link up */ if (link_ok) mac->stats.link_faults++; - - msleep(1000); - t3_os_link_fault_handler(adapter, port_id); } else { if (link_ok) t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, @@ -3788,7 +3784,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, adapter->params.info = ai; adapter->params.nports = ai->nports0 + ai->nports1; - adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1); + adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1); adapter->params.rev = t3_read_reg(adapter, A_PL_REV); /* * We used to only run the "adapter check task" once a second if diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 861c867..b62405a 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c @@ -1010,6 +1010,17 @@ static void lance_set_multicast_retry(unsigned long _opaque) lance_set_multicast(dev); } +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_set_multicast_list = lance_set_multicast, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __init dec_lance_probe(struct device *bdev, const int type) { static unsigned version_printed; @@ -1223,12 +1234,8 @@ static int __init dec_lance_probe(struct device *bdev, const int type) printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); - dev->open = &lance_open; - dev->stop = &lance_close; - dev->hard_start_xmit = &lance_start_xmit; - dev->tx_timeout = &lance_tx_timeout; + dev->netdev_ops = &lance_netdev_ops; dev->watchdog_timeo = 5*HZ; - dev->set_multicast_list = &lance_set_multicast; /* lp->ll is the location of the registers for lance card */ lp->ll = ll; diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 5c0b457..0f9ee13 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -2728,7 +2728,7 @@ static void __devexit e100_remove(struct pci_dev *pdev) #define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ #define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ #define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ -static int e100_suspend(struct pci_dev *pdev, pm_message_t state) +static void __e100_shutdown(struct pci_dev *pdev, bool 
*enable_wake) { struct net_device *netdev = pci_get_drvdata(pdev); struct nic *nic = netdev_priv(netdev); @@ -2749,19 +2749,32 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state) E100_82552_SMARTSPEED, smartspeed | E100_82552_REV_ANEG | E100_82552_ANEG_NOW); } - if (pci_enable_wake(pdev, PCI_D3cold, true)) - pci_enable_wake(pdev, PCI_D3hot, true); + *enable_wake = true; } else { - pci_enable_wake(pdev, PCI_D3hot, false); + *enable_wake = false; } pci_disable_device(pdev); - pci_set_power_state(pdev, PCI_D3hot); +} - return 0; +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) { + return pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + return pci_set_power_state(pdev, PCI_D3hot); + } } #ifdef CONFIG_PM +static int e100_suspend(struct pci_dev *pdev, pm_message_t state) +{ + bool wake; + __e100_shutdown(pdev, &wake); + return __e100_power_off(pdev, wake); +} + static int e100_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -2792,7 +2805,10 @@ static int e100_resume(struct pci_dev *pdev) static void e100_shutdown(struct pci_dev *pdev) { - e100_suspend(pdev, PMSG_SUSPEND); + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); } /* ------------------ PCI Error Recovery infrastructure -------------- */ diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index ddc5c53..fffb006 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -156,8 +156,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); static void e1000_restore_vlan(struct e1000_adapter *adapter); -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); #ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); static int e1000_resume(struct pci_dev *pdev); #endif static void e1000_shutdown(struct pci_dev *pdev); @@ -3738,7 +3738,7 @@ static irqreturn_t e1000_intr(int irq, void *data) struct e1000_hw *hw = &adapter->hw; u32 rctl, icr = er32(ICR); - if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags))) + if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) return IRQ_NONE; /* Not our interrupt */ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is @@ -3834,7 +3834,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_buffer *buffer_info; unsigned int i, eop; unsigned int count = 0; - bool cleaned; unsigned int total_tx_bytes=0, total_tx_packets=0; i = tx_ring->next_to_clean; @@ -3843,7 +3842,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { - for (cleaned = false; !cleaned; count++) { + bool cleaned = false; + for ( ; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; cleaned = (i == eop); @@ -3871,7 +3871,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, tx_ring->next_to_clean = i; #define TX_WAKE_THRESHOLD 32 - if (unlikely(cleaned && netif_carrier_ok(netdev) && + if (unlikely(count && netif_carrier_ok(netdev) && E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. 
@@ -4027,8 +4027,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, PCI_DMA_FROMDEVICE); length = le16_to_cpu(rx_desc->length); - - if (unlikely(!(status & E1000_RXD_STAT_EOP))) { + /* !EOP means multiple descriptors were used to store a single + * packet, also make sure the frame isn't just CRC only */ + if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { /* All receives must fit into a single buffer */ E1000_DBG("%s: Receive packet consumed multiple" " buffers\n", netdev->name); @@ -4601,7 +4602,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) return 0; } -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -4664,22 +4665,18 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) ew32(WUC, E1000_WUC_PME_EN); ew32(WUFC, wufc); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); } else { ew32(WUC, 0); ew32(WUFC, 0); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); } e1000_release_manageability(adapter); + *enable_wake = !!wufc; + /* make sure adapter isn't asleep if manageability is enabled */ - if (adapter->en_mng_pt) { - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); - } + if (adapter->en_mng_pt) + *enable_wake = true; if (hw->phy_type == e1000_phy_igp_3) e1000_phy_powerdown_workaround(hw); @@ -4693,12 +4690,29 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return 0; } #ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} + static int e1000_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4753,7 +4767,14 @@ static int e1000_resume(struct pci_dev *pdev) static void e1000_shutdown(struct pci_dev *pdev) { - e1000_suspend(pdev, PMSG_SUSPEND); + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 409b58c..ca82f19 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -621,7 +621,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) struct e1000_buffer *buffer_info; unsigned int i, eop; unsigned int count = 0; - bool cleaned; unsigned int total_tx_bytes = 0, total_tx_packets = 0; i = tx_ring->next_to_clean; @@ -630,7 +629,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { - for (cleaned = 0; !cleaned; count++) { + bool cleaned = false; + for (; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; cleaned = (i == eop); @@ -661,8 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) tx_ring->next_to_clean = i; #define TX_WAKE_THRESHOLD 32 - if (cleaned && netif_carrier_ok(netdev) && - 
e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { + if (count && netif_carrier_ok(netdev) && + e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -4346,7 +4346,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) } } -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -4409,20 +4409,16 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) ew32(WUC, E1000_WUC_PME_EN); ew32(WUFC, wufc); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); } else { ew32(WUC, 0); ew32(WUFC, 0); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); } + *enable_wake = !!wufc; + /* make sure adapter isn't asleep if manageability is enabled */ - if (adapter->flags & FLAG_MNG_PT_ENABLED) { - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); - } + if (adapter->flags & FLAG_MNG_PT_ENABLED) + *enable_wake = true; if (adapter->hw.phy.type == e1000_phy_igp_3) e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); @@ -4435,6 +4431,26 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) pci_disable_device(pdev); + return 0; +} + +static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) +{ + if (sleep && wake) { + pci_prepare_to_sleep(pdev); + return; + } + + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); +} + +static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, + bool wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + /* * The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. 
To @@ -4450,14 +4466,12 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, (devctl & ~PCI_EXP_DEVCTL_CERE)); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + e1000_power_off(pdev, sleep, wake); pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); } else { - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + e1000_power_off(pdev, sleep, wake); } - - return 0; } static void e1000e_disable_l1aspm(struct pci_dev *pdev) @@ -4486,6 +4500,18 @@ static void e1000e_disable_l1aspm(struct pci_dev *pdev) } #ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + if (!retval) + e1000_complete_shutdown(pdev, true, wake); + + return retval; +} + static int e1000_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4549,7 +4575,12 @@ static int e1000_resume(struct pci_dev *pdev) static void e1000_shutdown(struct pci_dev *pdev) { - e1000_suspend(pdev, PMSG_SUSPEND); + bool wake = false; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) + e1000_complete_shutdown(pdev, false, wake); } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 6e317ca..16a4138 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -40,7 +40,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0100" +#define DRV_VERSION "EHEA_0101" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index ac0c5b4..b22dab9 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -545,14 +545,17 @@ static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, x &= (arr_len - 1); pref = skb_array[x]; - prefetchw(pref); - prefetchw(pref + EHEA_CACHE_LINE); - - pref = (skb_array[x]->data); - prefetch(pref); - prefetch(pref + EHEA_CACHE_LINE); - prefetch(pref + EHEA_CACHE_LINE * 2); - prefetch(pref + EHEA_CACHE_LINE * 3); + if (pref) { + prefetchw(pref); + prefetchw(pref + EHEA_CACHE_LINE); + + pref = (skb_array[x]->data); + prefetch(pref); + prefetch(pref + EHEA_CACHE_LINE); + prefetch(pref + EHEA_CACHE_LINE * 2); + prefetch(pref + EHEA_CACHE_LINE * 3); + } + skb = skb_array[skb_index]; skb_array[skb_index] = NULL; return skb; @@ -569,12 +572,14 @@ static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array, x &= (arr_len - 1); pref = skb_array[x]; - prefetchw(pref); - prefetchw(pref + EHEA_CACHE_LINE); + if (pref) { + prefetchw(pref); + prefetchw(pref + EHEA_CACHE_LINE); - pref = (skb_array[x]->data); - prefetchw(pref); - prefetchw(pref + EHEA_CACHE_LINE); + pref = (skb_array[x]->data); + prefetchw(pref); + prefetchw(pref + EHEA_CACHE_LINE); + } skb = skb_array[wqe_index]; skb_array[wqe_index] = NULL; @@ -3080,7 +3085,8 @@ static const struct net_device_ops ehea_netdev_ops = { .ndo_change_mtu = ehea_change_mtu, .ndo_vlan_rx_register = ehea_vlan_rx_register, .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid + .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid, + .ndo_tx_timeout = ehea_tx_watchdog, }; struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, @@ -3142,7 +3148,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 
NETIF_F_HW_VLAN_FILTER | NETIF_F_LLTX; - dev->tx_timeout = &ehea_tx_watchdog; dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; INIT_WORK(&port->reset_task, ehea_reset_port); diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 51ead79..5210bb1 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -542,6 +542,8 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp) } spin_unlock_bh(&eql->queue.lock); + dev_put(slave_dev); + return ret; } diff --git a/drivers/net/fec.c b/drivers/net/fec.c index a515acc..682e7f0 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -1240,6 +1240,7 @@ static void __inline__ fec_phy_ack_intr(void) icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); *icrp = 0x0d000000; } +#endif #ifdef CONFIG_M5272 static void __inline__ fec_get_mac(struct net_device *dev) diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index d374650..9f6a68fb 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -897,6 +897,12 @@ enum { }; static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED; +/* + * Power down phy when interface is down (persists through reboot; + * older Linux and other OSes may not power it up again) + */ +static int phy_power_down = 0; + static inline struct fe_priv *get_nvpriv(struct net_device *dev) { return netdev_priv(dev); @@ -1485,7 +1491,10 @@ static int phy_init(struct net_device *dev) /* restart auto negotiation, power down phy */ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); - mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN); + mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); + if (phy_power_down) { + mii_control |= BMCR_PDOWN; + } if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { return PHY_ERROR; } @@ -1880,6 +1889,7 @@ static void nv_init_tx(struct net_device *dev) np->tx_pkts_in_progress = 0; np->tx_change_owner = NULL; np->tx_end_flip = NULL; + np->tx_stop = 0; for (i = 0; i < np->tx_ring_size; i++) { if (!nv_optimized(np)) { @@ -2530,6 +2540,8 @@ static void nv_tx_timeout(struct net_device *dev) struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); u32 status; + union ring_type put_tx; + int saved_tx_limit; if (np->msi_flags & NV_MSI_X_ENABLED) status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; @@ -2589,24 +2601,32 @@ static void nv_tx_timeout(struct net_device *dev) /* 1) stop tx engine */ nv_stop_tx(dev); - /* 2) check that the packets were not sent already: */ + /* 2) complete any outstanding tx and do not give HW any limited tx pkts */ + saved_tx_limit = np->tx_limit; + np->tx_limit = 0; /* prevent giving HW any limited pkts */ + np->tx_stop = 0; /* prevent waking tx queue */ if (!nv_optimized(np)) nv_tx_done(dev, np->tx_ring_size); else nv_tx_done_optimized(dev, np->tx_ring_size); - /* 3) if there are dead entries: clear everything */ - if (np->get_tx_ctx != np->put_tx_ctx) { - printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); - nv_drain_tx(dev); - nv_init_tx(dev); - setup_hw_rings(dev, NV_SETUP_TX_RING); - } + /* save current HW postion */ + if (np->tx_change_owner) + put_tx.ex = np->tx_change_owner->first_tx_desc; + else + put_tx = np->put_tx; - netif_wake_queue(dev); + /* 3) clear all tx state */ + nv_drain_tx(dev); + nv_init_tx(dev); - /* 4) restart tx engine */ + /* 4) restore state to current HW position */ + np->get_tx = np->put_tx = put_tx; + np->tx_limit = saved_tx_limit; + + /* 5) restart tx engine */ nv_start_tx(dev); + netif_wake_queue(dev); spin_unlock_irq(&np->lock); } @@ -3745,14 
+3765,14 @@ static int nv_napi_poll(struct napi_struct *napi, int budget) mod_timer(&np->nic_poll, jiffies + POLL_WAIT); } spin_unlock_irqrestore(&np->lock, flags); - __napi_complete(napi); + napi_complete(napi); return rx_work; } if (rx_work < budget) { /* re-enable interrupts (msix not enabled in napi) */ - __napi_complete(napi); + napi_complete(napi); writel(np->irqmask, base + NvRegIrqMask); } @@ -5502,7 +5522,7 @@ static int nv_close(struct net_device *dev) nv_drain_rxtx(dev); - if (np->wolenabled) { + if (np->wolenabled || !phy_power_down) { writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); nv_start_rx(dev); } else { @@ -6356,6 +6376,8 @@ module_param(dma_64bit, int, 0); MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); module_param(phy_cross, int, 0); MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0."); +module_param(phy_power_down, int, 0); +MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0)."); MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index b037ce98..a9cbc31 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -1019,6 +1019,22 @@ out_put_phy: #define IS_FEC(match) 0 #endif +static const struct net_device_ops fs_enet_netdev_ops = { + .ndo_open = fs_enet_open, + .ndo_stop = fs_enet_close, + .ndo_get_stats = fs_enet_get_stats, + .ndo_start_xmit = fs_enet_start_xmit, + .ndo_tx_timeout = fs_timeout, + .ndo_set_multicast_list = fs_set_multicast_list, + .ndo_do_ioctl = fs_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fs_enet_netpoll, +#endif +}; + static int __devinit fs_enet_probe(struct of_device *ofdev, const struct of_device_id *match) { @@ -1093,22 +1109,13 @@ static int __devinit fs_enet_probe(struct of_device *ofdev, fep->tx_ring = fpi->tx_ring; fep->rx_ring = fpi->rx_ring; - ndev->open = fs_enet_open; - ndev->hard_start_xmit = fs_enet_start_xmit; - ndev->tx_timeout = fs_timeout; + ndev->netdev_ops = &fs_enet_netdev_ops; ndev->watchdog_timeo = 2 * HZ; - ndev->stop = fs_enet_close; - ndev->get_stats = fs_enet_get_stats; - ndev->set_multicast_list = fs_set_multicast_list; -#ifdef CONFIG_NET_POLL_CONTROLLER - ndev->poll_controller = fs_enet_netpoll; -#endif if (fpi->use_napi) netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight); ndev->ethtool_ops = &fs_ethtool_ops; - ndev->do_ioctl = fs_ioctl; init_timer(&fep->phy_timer_list); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 65f5587..a051918 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -1583,8 +1583,10 @@ static void gfar_reset_task(struct work_struct *work) struct net_device *dev = priv->ndev; if (dev->flags & IFF_UP) { + netif_stop_queue(dev); stop_gfar(dev); startup_gfar(dev); + netif_start_queue(dev); } netif_tx_schedule_all(dev); @@ -1883,8 +1885,17 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) if (unlikely(!newskb)) newskb = skb; - else if (skb) + else if (skb) { + /* + * We need to reset ->data to what it + * was before gfar_new_skb() re-aligned + * it to an RXBUF_ALIGNMENT boundary + * before 
we put the skb back on the + * recycle list. + */ + skb->data = skb->head + NET_SKB_PAD; __skb_queue_head(&priv->rx_recycle, skb); + } } else { /* Increment the number of packets */ dev->stats.rx_packets++; diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 0642d52..cf35296 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -259,7 +259,7 @@ extern const char gfar_driver_version[]; (IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \ - | IEVENT_MAG) + | IEVENT_MAG | IEVENT_BABR) #define IMASK_INIT_CLEAR 0x00000000 #define IMASK_BABR 0x80000000 diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 77e4b5b..806533c 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -2686,6 +2686,32 @@ static int __devinit emac_init_config(struct emac_instance *dev) return 0; } +static const struct net_device_ops emac_netdev_ops = { + .ndo_open = emac_open, + .ndo_stop = emac_close, + .ndo_get_stats = emac_stats, + .ndo_set_multicast_list = emac_set_multicast_list, + .ndo_do_ioctl = emac_ioctl, + .ndo_tx_timeout = emac_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_start_xmit = emac_start_xmit, + .ndo_change_mtu = eth_change_mtu, +}; + +static const struct net_device_ops emac_gige_netdev_ops = { + .ndo_open = emac_open, + .ndo_stop = emac_close, + .ndo_get_stats = emac_stats, + .ndo_set_multicast_list = emac_set_multicast_list, + .ndo_do_ioctl = emac_ioctl, + .ndo_tx_timeout = emac_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_start_xmit = emac_start_xmit_sg, + .ndo_change_mtu = emac_change_mtu, +}; + static int __devinit emac_probe(struct of_device *ofdev, const struct of_device_id *match) { @@ -2827,23 +2853,14 @@ static int __devinit emac_probe(struct of_device *ofdev, if (err != 0) goto err_detach_tah; - /* Fill in the driver function table */ - ndev->open = &emac_open; if (dev->tah_dev) ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; - ndev->tx_timeout = &emac_tx_timeout; ndev->watchdog_timeo = 5 * HZ; - ndev->stop = &emac_close; - ndev->get_stats = &emac_stats; - ndev->set_multicast_list = &emac_set_multicast_list; - ndev->do_ioctl = &emac_ioctl; if (emac_phy_supports_gige(dev->phy_mode)) { - ndev->hard_start_xmit = &emac_start_xmit_sg; - ndev->change_mtu = &emac_change_mtu; + ndev->netdev_ops = &emac_gige_netdev_ops; dev->commac.ops = &emac_commac_sg_ops; - } else { - ndev->hard_start_xmit = &emac_start_xmit; - } + } else + ndev->netdev_ops = &emac_netdev_ops; SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); netif_carrier_off(ndev); diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index f4c315b..472f3f1 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c @@ -111,7 +111,7 @@ void igb_clear_vfta(struct e1000_hw *hw) * Writes value at the given offset in the register array which stores * the VLAN filter table. 
**/ -void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) { array_wr32(E1000_VFTA, offset, value); wrfl(); diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h index a34de52..1d690b4 100644 --- a/drivers/net/igb/e1000_mac.h +++ b/drivers/net/igb/e1000_mac.h @@ -66,7 +66,6 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); s32 igb_check_alt_mac_addr(struct e1000_hw *hw); void igb_reset_adaptive(struct e1000_hw *hw); void igb_update_adaptive(struct e1000_hw *hw); -void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); bool igb_enable_mng_pass_thru(struct e1000_hw *hw); diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c index fe71c7d..840782f 100644 --- a/drivers/net/igb/e1000_mbx.c +++ b/drivers/net/igb/e1000_mbx.c @@ -188,7 +188,7 @@ out: * returns SUCCESS if it successfully received a message notification and * copied it into the receive buffer. **/ -s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; @@ -214,7 +214,7 @@ out: * returns SUCCESS if it successfully copied message into the buffer and * received an ack to that message within delay * timeout period **/ -s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = 0; @@ -232,19 +232,6 @@ out: return ret_val; } -/** - * e1000_init_mbx_ops_generic - Initialize NVM function pointers - * @hw: pointer to the HW structure - * - * Setups up the function pointers to no-op functions - **/ -void e1000_init_mbx_ops_generic(struct e1000_hw *hw) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - mbx->ops.read_posted = igb_read_posted_mbx; - mbx->ops.write_posted = igb_write_posted_mbx; -} - static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) { u32 mbvficr = rd32(E1000_MBVFICR); diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h index 6ec9890..ebc02ea 100644 --- a/drivers/net/igb/e1000_mbx.h +++ b/drivers/net/igb/e1000_mbx.h @@ -67,8 +67,6 @@ s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); s32 igb_check_for_msg(struct e1000_hw *, u16); s32 igb_check_for_ack(struct e1000_hw *, u16); s32 igb_check_for_rst(struct e1000_hw *, u16); diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 6b0697c..e253435 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -152,14 +152,13 @@ static struct notifier_block dca_notifier = { /* for netdump / net console */ static void igb_netpoll(struct net_device *); #endif - #ifdef CONFIG_PCI_IOV -static ssize_t igb_set_num_vfs(struct device *, struct device_attribute *, - const char *, size_t); -static ssize_t igb_show_num_vfs(struct device *, struct device_attribute *, - char *); -DEVICE_ATTR(num_vfs, S_IRUGO | S_IWUSR, igb_show_num_vfs, igb_set_num_vfs); -#endif +static unsigned int max_vfs = 0; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " + "per physical function"); +#endif /* CONFIG_PCI_IOV */ + static pci_ers_result_t 
igb_io_error_detected(struct pci_dev *, pci_channel_state_t); static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); @@ -671,6 +670,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) /* If we can't do MSI-X, try MSI */ msi_only: +#ifdef CONFIG_PCI_IOV + /* disable SR-IOV for non MSI-X configurations */ + if (adapter->vf_data) { + struct e1000_hw *hw = &adapter->hw; + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); + msleep(500); + + kfree(adapter->vf_data); + adapter->vf_data = NULL; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + msleep(100); + dev_info(&adapter->pdev->dev, "IOV Disabled\n"); + } +#endif adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; if (!pci_enable_msi(adapter->pdev)) @@ -1238,6 +1252,46 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (err) goto err_sw_init; +#ifdef CONFIG_PCI_IOV + /* since iov functionality isn't critical to base device function we + * can accept failure. If it fails we don't allow iov to be enabled */ + if (hw->mac.type == e1000_82576) { + /* 82576 supports a maximum of 7 VFs in addition to the PF */ + unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs; + int i; + unsigned char mac_addr[ETH_ALEN]; + + if (num_vfs) { + adapter->vf_data = kcalloc(num_vfs, + sizeof(struct vf_data_storage), + GFP_KERNEL); + if (!adapter->vf_data) { + dev_err(&pdev->dev, + "Could not allocate VF private data - " + "IOV enable failed\n"); + } else { + err = pci_enable_sriov(pdev, num_vfs); + if (!err) { + adapter->vfs_allocated_count = num_vfs; + dev_info(&pdev->dev, + "%d vfs allocated\n", + num_vfs); + for (i = 0; + i < adapter->vfs_allocated_count; + i++) { + random_ether_addr(mac_addr); + igb_set_vf_mac(adapter, i, + mac_addr); + } + } else { + kfree(adapter->vf_data); + adapter->vf_data = NULL; + } + } + } + } + +#endif /* setup the private structure */ err = igb_sw_init(adapter); if (err) @@ -1397,19 +1451,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (err) goto err_register; -#ifdef CONFIG_PCI_IOV - /* since iov functionality isn't critical to base device function we - * can accept failure. 
If it fails we don't allow iov to be enabled */ - if (hw->mac.type == e1000_82576) { - err = pci_enable_sriov(pdev, 0); - if (!err) - err = device_create_file(&netdev->dev, - &dev_attr_num_vfs); - if (err) - dev_err(&pdev->dev, "Failed to initialize IOV\n"); - } - -#endif #ifdef CONFIG_IGB_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; @@ -1965,7 +2006,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rctl; u32 srrctl = 0; - int i, j; + int i; rctl = rd32(E1000_RCTL); @@ -2030,8 +2071,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter) if (adapter->vfs_allocated_count) { u32 vmolr; - j = adapter->rx_ring[0].reg_idx; - /* set all queue drop enable bits */ wr32(E1000_QDE, ALL_QUEUES); srrctl |= E1000_SRRCTL_DROP_EN; @@ -2039,16 +2078,16 @@ static void igb_setup_rctl(struct igb_adapter *adapter) /* disable queue 0 to prevent tail write w/o re-config */ wr32(E1000_RXDCTL(0), 0); - vmolr = rd32(E1000_VMOLR(j)); + vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count)); if (rctl & E1000_RCTL_LPE) vmolr |= E1000_VMOLR_LPE; - if (adapter->num_rx_queues > 0) + if (adapter->num_rx_queues > 1) vmolr |= E1000_VMOLR_RSSE; - wr32(E1000_VMOLR(j), vmolr); + wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr); } for (i = 0; i < adapter->num_rx_queues; i++) { - j = adapter->rx_ring[i].reg_idx; + int j = adapter->rx_ring[i].reg_idx; wr32(E1000_SRRCTL(j), srrctl); } @@ -5422,89 +5461,4 @@ static void igb_vmm_control(struct igb_adapter *adapter) igb_vmdq_set_replication_pf(hw, true); } -#ifdef CONFIG_PCI_IOV -static ssize_t igb_show_num_vfs(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct igb_adapter *adapter = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", adapter->vfs_allocated_count); -} - -static ssize_t igb_set_num_vfs(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct net_device *netdev = to_net_dev(dev); - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - unsigned int num_vfs, i; - unsigned char mac_addr[ETH_ALEN]; - int err; - - sscanf(buf, "%u", &num_vfs); - - if (num_vfs > 7) - num_vfs = 7; - - /* value unchanged do nothing */ - if (num_vfs == adapter->vfs_allocated_count) - return count; - - if (netdev->flags & IFF_UP) - igb_close(netdev); - - igb_reset_interrupt_capability(adapter); - igb_free_queues(adapter); - adapter->tx_ring = NULL; - adapter->rx_ring = NULL; - adapter->vfs_allocated_count = 0; - - /* reclaim resources allocated to VFs since we are changing count */ - if (adapter->vf_data) { - /* disable iov and allow time for transactions to clear */ - pci_disable_sriov(pdev); - msleep(500); - - kfree(adapter->vf_data); - adapter->vf_data = NULL; - wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); - msleep(100); - dev_info(&pdev->dev, "IOV Disabled\n"); - } - - if (num_vfs) { - adapter->vf_data = kcalloc(num_vfs, - sizeof(struct vf_data_storage), - GFP_KERNEL); - if (!adapter->vf_data) { - dev_err(&pdev->dev, "Could not allocate VF private " - "data - IOV enable failed\n"); - } else { - err = pci_enable_sriov(pdev, num_vfs); - if (!err) { - adapter->vfs_allocated_count = num_vfs; - dev_info(&pdev->dev, "%d vfs allocated\n", num_vfs); - for (i = 0; i < adapter->vfs_allocated_count; i++) { - random_ether_addr(mac_addr); - igb_set_vf_mac(adapter, i, mac_addr); - } - } else { - kfree(adapter->vf_data); - adapter->vf_data = NULL; - } - } 
- } - - igb_set_interrupt_capability(adapter); - igb_alloc_queues(adapter); - igb_reset(adapter); - - if (netdev->flags & IFF_UP) - igb_open(netdev); - - return count; -} -#endif /* CONFIG_PCI_IOV */ /* igb_main.c */ diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile new file mode 100644 index 0000000..c2f150d --- /dev/null +++ b/drivers/net/igbvf/Makefile @@ -0,0 +1,38 @@ +################################################################################ +# +# Intel(R) 82576 Virtual Function Linux driver +# Copyright(c) 2009 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 82576 VF ethernet driver +# + +obj-$(CONFIG_IGBVF) += igbvf.o + +igbvf-objs := vf.o \ + mbx.o \ + ethtool.o \ + netdev.o + diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h new file mode 100644 index 0000000..88a4753 --- /dev/null +++ b/drivers/net/igbvf/defines.h @@ -0,0 +1,125 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 1999 - 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* IVAR valid bit */ +#define E1000_IVAR_VALID 0x80 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Device Control */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_MBX 15 + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +/* Additional Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN 
(1 << 11) /* Tx Desc writeback RO bit */ + +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c new file mode 100644 index 0000000..1dcaa69 --- /dev/null +++ b/drivers/net/igbvf/ethtool.c @@ -0,0 +1,540 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for igbvf */ + +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> + +#include "igbvf.h" +#include <linux/if_vlan.h> + + +struct igbvf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; + int base_stat_offset; +}; + +#define IGBVF_STAT(current, base) \ + sizeof(((struct igbvf_adapter *)0)->current), \ + offsetof(struct igbvf_adapter, current), \ + offsetof(struct igbvf_adapter, base) + +static const struct igbvf_stats igbvf_gstrings_stats[] = { + { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) }, + { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) }, + { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) }, + { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) }, + { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) }, + { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) }, + { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) }, + { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) }, + { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) }, + { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) }, + { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) }, + { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) }, + { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) }, +}; + +#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats) + +static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)" +}; + +#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) + +static int igbvf_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status; + + ecmd->supported = SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_1000baseT_Full; + + ecmd->port = -1; + ecmd->transceiver = XCVR_DUMMY1; + + status = er32(STATUS); + if (status & 
E1000_STATUS_LU) { + if (status & E1000_STATUS_SPEED_1000) + ecmd->speed = 1000; + else if (status & E1000_STATUS_SPEED_100) + ecmd->speed = 100; + else + ecmd->speed = 10; + + if (status & E1000_STATUS_FD) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static u32 igbvf_get_link(struct net_device *netdev) +{ + return netif_carrier_ok(netdev); +} + +static int igbvf_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + return -EOPNOTSUPP; +} + +static void igbvf_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + return; +} + +static int igbvf_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + return -EOPNOTSUPP; +} + +static u32 igbvf_get_tx_csum(struct net_device *netdev) +{ + return ((netdev->features & NETIF_F_IP_CSUM) != 0); +} + +static int igbvf_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + else + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + return 0; +} + +static int igbvf_set_tso(struct net_device *netdev, u32 data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + int i; + struct net_device *v_netdev; + + if (data) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + /* disable TSO on all VLANs if they're present */ + if (!adapter->vlgrp) + goto tso_out; + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + v_netdev = vlan_group_get_device(adapter->vlgrp, i); + if (!v_netdev) + continue; + + v_netdev->features &= ~NETIF_F_TSO; + v_netdev->features &= ~NETIF_F_TSO6; + vlan_group_set_device(adapter->vlgrp, i, v_netdev); + } + } + +tso_out: + dev_info(&adapter->pdev->dev, "TSO is %s\n", + data ? 
"Enabled" : "Disabled"); + adapter->flags |= FLAG_TSO_FORCE; + return 0; +} + +static u32 igbvf_get_msglevel(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void igbvf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int igbvf_get_regs_len(struct net_device *netdev) +{ +#define IGBVF_REGS_LEN 8 + return IGBVF_REGS_LEN * sizeof(u32); +} + +static void igbvf_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 revision_id; + + memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); + + pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); + + regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RDLEN(0)); + regs_buff[3] = er32(RDH(0)); + regs_buff[4] = er32(RDT(0)); + + regs_buff[5] = er32(TDLEN(0)); + regs_buff[6] = er32(TDH(0)); + regs_buff[7] = er32(TDT(0)); +} + +static int igbvf_get_eeprom_len(struct net_device *netdev) +{ + return 0; +} + +static int igbvf_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return -EOPNOTSUPP; +} + +static int igbvf_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return -EOPNOTSUPP; +} + +static void igbvf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + char firmware_version[32] = "N/A"; + + strncpy(drvinfo->driver, igbvf_driver_name, 32); + strncpy(drvinfo->version, igbvf_driver_version, 32); + strncpy(drvinfo->fw_version, firmware_version, 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->regdump_len = igbvf_get_regs_len(netdev); + drvinfo->eedump_len = igbvf_get_eeprom_len(netdev); +} + +static void igbvf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct igbvf_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = IGBVF_MAX_RXD; + ring->tx_max_pending = IGBVF_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int igbvf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *temp_ring; + int err; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD); + new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD); + new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring->count) && + (new_rx_count == adapter->rx_ring->count)) { + /* nothing to do */ + return 0; + } + + temp_ring = vmalloc(sizeof(struct igbvf_ring)); + if (!temp_ring) + return -ENOMEM; + + while (test_and_set_bit(__IGBVF_RESETTING, 
&adapter->state)) + msleep(1); + + if (netif_running(adapter->netdev)) + igbvf_down(adapter); + + /* + * We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the tx and rx ring structs. + */ + if (new_tx_count != adapter->tx_ring->count) { + memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); + + temp_ring->count = new_tx_count; + err = igbvf_setup_tx_resources(adapter, temp_ring); + if (err) + goto err_setup; + + igbvf_free_tx_resources(adapter->tx_ring); + + memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring)); + } + + if (new_rx_count != adapter->rx_ring->count) { + memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring)); + + temp_ring->count = new_rx_count; + err = igbvf_setup_rx_resources(adapter, temp_ring); + if (err) + goto err_setup; + + igbvf_free_rx_resources(adapter->rx_ring); + + memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); + } + + err = 0; +err_setup: + if (netif_running(adapter->netdev)) + igbvf_up(adapter); + + clear_bit(__IGBVF_RESETTING, &adapter->state); + vfree(temp_ring); + return err; +} + +static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + + hw->mac.ops.check_for_link(hw); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + + return *data; +} + +static int igbvf_get_self_test_count(struct net_device *netdev) +{ + return IGBVF_TEST_LEN; +} + +static int igbvf_get_stats_count(struct net_device *netdev) +{ + return IGBVF_GLOBAL_STATS_LEN; +} + +static void igbvf_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + set_bit(__IGBVF_TESTING, &adapter->state); + + /* + * Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (igbvf_link_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__IGBVF_TESTING, &adapter->state); + msleep_interruptible(4 * 1000); +} + +static void igbvf_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + return; +} + +static int igbvf_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + return -EOPNOTSUPP; +} + +static int igbvf_phys_id(struct net_device *netdev, u32 data) +{ + return 0; +} + +static int igbvf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (adapter->itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = adapter->itr_setting >> 2; + + return 0; +} + +static int igbvf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { + adapter->itr = IGBVF_START_ITR; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = ec->rx_coalesce_usecs << 2; + adapter->itr_setting = adapter->itr; + } + + writel(adapter->itr, + hw->hw_addr + adapter->rx_ring[0].itr_register); + + return 0; +} + +static int igbvf_nway_reset(struct net_device *netdev) +{ + struct igbvf_adapter 
*adapter = netdev_priv(netdev); + if (netif_running(netdev)) + igbvf_reinit_locked(adapter); + return 0; +} + + +static void igbvf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + int i; + + igbvf_update_stats(adapter); + for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { + char *p = (char *)adapter + + igbvf_gstrings_stats[i].stat_offset; + char *b = (char *)adapter + + igbvf_gstrings_stats[i].base_stat_offset; + data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : + (*(u32 *)p - *(u32 *)b)); + } + +} + +static void igbvf_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { + memcpy(p, igbvf_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static const struct ethtool_ops igbvf_ethtool_ops = { + .get_settings = igbvf_get_settings, + .set_settings = igbvf_set_settings, + .get_drvinfo = igbvf_get_drvinfo, + .get_regs_len = igbvf_get_regs_len, + .get_regs = igbvf_get_regs, + .get_wol = igbvf_get_wol, + .set_wol = igbvf_set_wol, + .get_msglevel = igbvf_get_msglevel, + .set_msglevel = igbvf_set_msglevel, + .nway_reset = igbvf_nway_reset, + .get_link = igbvf_get_link, + .get_eeprom_len = igbvf_get_eeprom_len, + .get_eeprom = igbvf_get_eeprom, + .set_eeprom = igbvf_set_eeprom, + .get_ringparam = igbvf_get_ringparam, + .set_ringparam = igbvf_set_ringparam, + .get_pauseparam = igbvf_get_pauseparam, + .set_pauseparam = igbvf_set_pauseparam, + .get_tx_csum = igbvf_get_tx_csum, + .set_tx_csum = igbvf_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = igbvf_set_tso, + .self_test = igbvf_diag_test, + .get_strings = igbvf_get_strings, + .phys_id = igbvf_phys_id, + .get_ethtool_stats = igbvf_get_ethtool_stats, + .self_test_count = igbvf_get_self_test_count, + .get_stats_count = igbvf_get_stats_count, + .get_coalesce = igbvf_get_coalesce, + .set_coalesce = igbvf_set_coalesce, +}; + +void igbvf_set_ethtool_ops(struct net_device *netdev) +{ + /* have to "undeclare" const on this struct to remove warnings */ + SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops); +} diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h new file mode 100644 index 0000000..4bff35e --- /dev/null +++ b/drivers/net/igbvf/igbvf.h @@ -0,0 +1,332 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _IGBVF_H_ +#define _IGBVF_H_ + +#include <linux/types.h> +#include <linux/timer.h> +#include <linux/io.h> +#include <linux/netdevice.h> + + +#include "vf.h" + +/* Forward declarations */ +struct igbvf_info; +struct igbvf_adapter; + +/* Interrupt defines */ +#define IGBVF_START_ITR 648 /* ~6000 ints/sec */ + +/* Interrupt modes, as used by the IntMode paramter */ +#define IGBVF_INT_MODE_LEGACY 0 +#define IGBVF_INT_MODE_MSI 1 +#define IGBVF_INT_MODE_MSIX 2 + +/* Tx/Rx descriptor defines */ +#define IGBVF_DEFAULT_TXD 256 +#define IGBVF_MAX_TXD 4096 +#define IGBVF_MIN_TXD 80 + +#define IGBVF_DEFAULT_RXD 256 +#define IGBVF_MAX_RXD 4096 +#define IGBVF_MIN_RXD 80 + +#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* RX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGBVF_RX_PTHRESH 16 +#define IGBVF_RX_HTHRESH 8 +#define IGBVF_RX_WTHRESH 1 + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define IGBVF_TX_QUEUE_WAKE 32 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define IGBVF_EEPROM_APME 0x0400 + +#define IGBVF_MNG_VLAN_NONE (-1) + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +enum igbvf_boards { + board_vf, +}; + +struct igbvf_queue_stats { + u64 packets; + u64 bytes; +}; + +/* + * wrappers around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igbvf_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + /* Tx */ + struct { + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + }; + /* Rx */ + struct { + struct page *page; + u64 page_dma; + unsigned int page_offset; + }; + }; + struct page *page; +}; + +union igbvf_desc { + union e1000_adv_rx_desc rx_desc; + union e1000_adv_tx_desc tx_desc; + struct e1000_adv_tx_context_desc tx_context_desc; +}; + +struct igbvf_ring { + struct igbvf_adapter *adapter; /* backlink */ + union igbvf_desc *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. 
in ring */ + + u16 next_to_use; + u16 next_to_clean; + + u16 head; + u16 tail; + + /* array of buffer information structs */ + struct igbvf_buffer *buffer_info; + struct napi_struct napi; + + char name[IFNAMSIZ + 5]; + u32 eims_value; + u32 itr_val; + u16 itr_register; + int set_itr; + + struct sk_buff *rx_skb_top; + + struct igbvf_queue_stats stats; +}; + +/* board specific private data structure */ +struct igbvf_adapter { + struct timer_list watchdog_timer; + struct timer_list blink_timer; + + struct work_struct reset_task; + struct work_struct watchdog_task; + + const struct igbvf_info *ei; + + struct vlan_group *vlgrp; + u32 bd_number; + u32 rx_buffer_len; + u32 polling_interval; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + + spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ + + /* track device up/down/testing state */ + unsigned long state; + + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* + * Tx + */ + struct igbvf_ring *tx_ring /* One per active queue */ + ____cacheline_aligned_in_smp; + + unsigned long tx_queue_len; + unsigned int restart_queue; + u32 txd_cmd; + + bool detect_tx_hung; + u8 tx_timeout_factor; + + u32 tx_int_delay; + u32 tx_abs_int_delay; + + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + + /* Tx stats */ + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + + /* + * Rx + */ + struct igbvf_ring *rx_ring; + + u32 rx_int_delay; + u32 rx_abs_int_delay; + + /* Rx stats */ + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + + unsigned int rx_ps_hdr_size; + u32 max_frame_size; + u32 min_frame_size; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + spinlock_t stats_lock; /* prevent concurrent stats updates */ + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + + /* The VF counters don't clear on read so we have to get a base + * count on driver start up and always subtract that base on + * on the first update, thus the flag.. 
+ */ + struct e1000_vf_stats stats; + u64 zero_base; + + struct igbvf_ring test_tx_ring; + struct igbvf_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + struct msix_entry *msix_entries; + int int_mode; + u32 eims_enable_mask; + u32 eims_other; + u32 int_counter0; + u32 int_counter1; + + u32 eeprom_wol; + u32 wol; + u32 pba; + + bool fc_autoneg; + + unsigned long led_status; + + unsigned int flags; +}; + +struct igbvf_info { + enum e1000_mac_type mac; + unsigned int flags; + u32 pba; + void (*init_ops)(struct e1000_hw *); + s32 (*get_variants)(struct igbvf_adapter *); +}; + +/* hardware capability, feature, and workaround flags */ +#define FLAG_HAS_HW_VLAN_FILTER (1 << 0) +#define FLAG_HAS_JUMBO_FRAMES (1 << 1) +#define FLAG_MSI_ENABLED (1 << 2) +#define FLAG_RX_CSUM_ENABLED (1 << 3) +#define FLAG_TSO_FORCE (1 << 4) + +#define IGBVF_RX_DESC_ADV(R, i) \ + (&((((R).desc))[i].rx_desc)) +#define IGBVF_TX_DESC_ADV(R, i) \ + (&((((R).desc))[i].tx_desc)) +#define IGBVF_TX_CTXTDESC_ADV(R, i) \ + (&((((R).desc))[i].tx_context_desc)) + +enum igbvf_state_t { + __IGBVF_TESTING, + __IGBVF_RESETTING, + __IGBVF_DOWN +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +extern char igbvf_driver_name[]; +extern const char igbvf_driver_version[]; + +extern void igbvf_check_options(struct igbvf_adapter *); +extern void igbvf_set_ethtool_ops(struct net_device *); + +extern int igbvf_up(struct igbvf_adapter *); +extern void igbvf_down(struct igbvf_adapter *); +extern void igbvf_reinit_locked(struct igbvf_adapter *); +extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *); +extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *); +extern void igbvf_free_rx_resources(struct igbvf_ring *); +extern void igbvf_free_tx_resources(struct igbvf_ring *); +extern void igbvf_update_stats(struct igbvf_adapter *); + +extern unsigned int copybreak; + +#endif /* _IGBVF_H_ */ diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c new file mode 100644 index 0000000..819a8ec --- /dev/null +++ b/drivers/net/igbvf/mbx.c @@ -0,0 +1,350 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "mbx.h" + +/** + * e1000_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 e1000_poll_for_msg(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw)) { + countdown--; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 e1000_poll_for_ack(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw)) { + countdown--; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = e1000_poll_for_msg(hw); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size); +out: + return ret_val; +} + +/** + * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* exit if we either can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg*/ + ret_val = mbx->ops.write(hw, msg, size); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = e1000_poll_for_ack(hw); +out: + return ret_val; +} + +/** + * e1000_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. 
+ **/ +static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw) +{ + u32 v2p_mailbox = er32(V2PMAILBOX(0)); + + v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox; + hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * e1000_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. + **/ +static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) +{ + u32 v2p_mailbox = e1000_read_v2p_mailbox(hw); + s32 ret_val = -E1000_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = E1000_SUCCESS; + + hw->dev_spec.vf.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * e1000_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_msg_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +static s32 e1000_check_for_ack_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * + * returns true if the PF has set the reset done bit or else false + **/ +static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | + E1000_V2PMAILBOX_RSTI))) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + /* Take ownership of the buffer */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); + + /* reserve mailbox for vf use */ + if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) + ret_val = E1000_SUCCESS; + + return ret_val; +} + +/** + * e1000_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) +{ + s32 err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = e1000_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_write; + + /* flush any ack or msg as we are going to overwrite mailbox */ + e1000_check_for_ack_vf(hw); + e1000_check_for_msg_vf(hw); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + array_ew32(VMBMEM(0), i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ); + +out_no_write: + return err; +} + +/** + * e1000_read_mbx_vf - Reads a message from the inbox intended for vf + * 
@hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully read message from buffer + **/ +static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) +{ + s32 err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = e1000_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = array_er32(VMBMEM(0), i); + + /* Acknowledge receipt and release mailbox, then we're done */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * e1000_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_vf; + mbx->ops.write = e1000_write_mbx_vf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_vf; + mbx->ops.check_for_ack = e1000_check_for_ack_vf; + mbx->ops.check_for_rst = e1000_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return E1000_SUCCESS; +} + diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h new file mode 100644 index 0000000..4938609 --- /dev/null +++ b/drivers/net/igbvf/mbx.h @@ -0,0 +1,75 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 1999 - 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "vf.h" + +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + clear to send requests */ + +/* We have a total wait time of 1s for vf mailbox posted messages */ +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */ +#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ + +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +void e1000_init_mbx_ops_generic(struct e1000_hw *hw); +s32 e1000_init_mbx_params_vf(struct e1000_hw *); + +#endif /* _E1000_MBX_H_ */ diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c new file mode 100644 index 0000000..b774666 --- /dev/null +++ b/drivers/net/igbvf/netdev.c @@ -0,0 +1,2922 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/pm_qos_params.h> + +#include "igbvf.h" + +#define DRV_VERSION "1.0.0-k0" +char igbvf_driver_name[] = "igbvf"; +const char igbvf_driver_version[] = DRV_VERSION; +static const char igbvf_driver_string[] = + "Intel(R) Virtual Function Network Driver"; +static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; + +static int igbvf_poll(struct napi_struct *napi, int budget); +static void igbvf_reset(struct igbvf_adapter *); +static void igbvf_set_interrupt_capability(struct igbvf_adapter *); +static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); + +static struct igbvf_info igbvf_vf_info = { + .mac = e1000_vfadapt, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_RX_CSUM_ENABLED, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, +}; + +static const struct igbvf_info *igbvf_info_tbl[] = { + [board_vf] = &igbvf_vf_info, +}; + +/** + * igbvf_desc_unused - calculate if we have unused descriptors + **/ +static int igbvf_desc_unused(struct igbvf_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * igbvf_receive_skb - helper function to handle Rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + **/ +static void igbvf_receive_skb(struct igbvf_adapter *adapter, + struct net_device *netdev, + struct sk_buff *skb, + u32 status, u16 vlan) +{ + if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, + le16_to_cpu(vlan) & + E1000_RXD_SPC_VLAN_MASK); + else + netif_receive_skb(skb); + + netdev->last_rx = jiffies; +} + +static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, + u32 status_err, struct sk_buff *skb) +{ + skb->ip_summed = CHECKSUM_NONE; + + /* Ignore Checksum bit is set or checksum is disabled through ethtool */ + if ((status_err & E1000_RXD_STAT_IXSM)) + return; + /* TCP/UDP checksum error bit is set */ + if (status_err & + (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + adapter->hw_csum_good++; +} + +/** + * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: address of ring structure to repopulate + * @cleaned_count: number of buffers to repopulate + **/ +static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, + int cleaned_count) +{ + struct igbvf_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_adv_rx_desc *rx_desc; + struct igbvf_buffer 
*buffer_info; + struct sk_buff *skb; + unsigned int i; + int bufsz; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + if (adapter->rx_ps_hdr_size) + bufsz = adapter->rx_ps_hdr_size; + else + bufsz = adapter->rx_buffer_len; + bufsz += NET_IP_ALIGN; + + while (cleaned_count--) { + rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); + + if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { + if (!buffer_info->page) { + buffer_info->page = alloc_page(GFP_ATOMIC); + if (!buffer_info->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + buffer_info->page_offset = 0; + } else { + buffer_info->page_offset ^= PAGE_SIZE / 2; + } + buffer_info->page_dma = + pci_map_page(pdev, buffer_info->page, + buffer_info->page_offset, + PAGE_SIZE / 2, + PCI_DMA_FROMDEVICE); + } + + if (!buffer_info->skb) { + skb = netdev_alloc_skb(netdev, bufsz); + if (!skb) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; + buffer_info->dma = pci_map_single(pdev, skb->data, + bufsz, + PCI_DMA_FROMDEVICE); + } + /* Refresh the desc even if buffer_addrs didn't change because + * each write-back erases this info. */ + if (adapter->rx_ps_hdr_size) { + rx_desc->read.pkt_addr = + cpu_to_le64(buffer_info->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); + } else { + rx_desc->read.pkt_addr = + cpu_to_le64(buffer_info->dma); + rx_desc->read.hdr_addr = 0; + } + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i == 0) + i = (rx_ring->count - 1); + else + i--; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * igbvf_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, + int *work_done, int work_to_do) +{ + struct igbvf_ring *rx_ring = adapter->rx_ring; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_adv_rx_desc *rx_desc, *next_rxd; + struct igbvf_buffer *buffer_info, *next_buffer; + struct sk_buff *skb; + bool cleaned = false; + int cleaned_count = 0; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i; + u32 length, hlen, staterr; + + i = rx_ring->next_to_clean; + rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + + buffer_info = &rx_ring->buffer_info[i]; + + /* HW will not DMA in data larger than the given buffer, even + * if it parses the (NFS, of course) header to be larger. In + * that case, it fills the header buffer and spills the rest + * into the page. 
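+ * When packet split is enabled, hlen below is therefore capped at
+ * rx_ps_hdr_size, and any payload that spilled into the half-page buffer
+ * is attached to the skb as a page fragment further down.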
+ */ + hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & + E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > adapter->rx_ps_hdr_size) + hlen = adapter->rx_ps_hdr_size; + + length = le16_to_cpu(rx_desc->wb.upper.length); + cleaned = true; + cleaned_count++; + + skb = buffer_info->skb; + prefetch(skb->data - NET_IP_ALIGN); + buffer_info->skb = NULL; + if (!adapter->rx_ps_hdr_size) { + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + skb_put(skb, length); + goto send_up; + } + + if (!skb_shinfo(skb)->nr_frags) { + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_ps_hdr_size + NET_IP_ALIGN, + PCI_DMA_FROMDEVICE); + skb_put(skb, hlen); + } + + if (length) { + pci_unmap_page(pdev, buffer_info->page_dma, + PAGE_SIZE / 2, + PCI_DMA_FROMDEVICE); + buffer_info->page_dma = 0; + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, + buffer_info->page, + buffer_info->page_offset, + length); + + if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || + (page_count(buffer_info->page) != 1)) + buffer_info->page = NULL; + else + get_page(buffer_info->page); + + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } +send_up: + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i); + prefetch(next_rxd); + next_buffer = &rx_ring->buffer_info[i]; + + if (!(staterr & E1000_RXD_STAT_EOP)) { + buffer_info->skb = next_buffer->skb; + buffer_info->dma = next_buffer->dma; + next_buffer->skb = skb; + next_buffer->dma = 0; + goto next_desc; + } + + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + total_bytes += skb->len; + total_packets++; + + igbvf_rx_checksum_adv(adapter, staterr, skb); + + skb->protocol = eth_type_trans(skb, netdev); + + igbvf_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.upper.vlan); + + netdev->last_rx = jiffies; + +next_desc: + rx_desc->wb.upper.status_error = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) { + igbvf_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + + rx_ring->next_to_clean = i; + cleaned_count = igbvf_desc_unused(rx_ring); + + if (cleaned_count) + igbvf_alloc_rx_buffers(rx_ring, cleaned_count); + + adapter->total_rx_packets += total_packets; + adapter->total_rx_bytes += total_bytes; + adapter->net_stats.rx_bytes += total_bytes; + adapter->net_stats.rx_packets += total_packets; + return cleaned; +} + +static void igbvf_put_txbuf(struct igbvf_adapter *adapter, + struct igbvf_buffer *buffer_info) +{ + buffer_info->dma = 0; + if (buffer_info->skb) { + skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, + DMA_TO_DEVICE); + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; +} + +static void igbvf_print_tx_hang(struct igbvf_adapter *adapter) +{ + struct igbvf_ring *tx_ring = adapter->tx_ring; + unsigned int i = tx_ring->next_to_clean; + unsigned int eop = tx_ring->buffer_info[i].next_to_watch; + union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); + + /* detected Tx unit hang */ + dev_err(&adapter->pdev->dev, + "Detected Tx Unit Hang:\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]:\n" + " time_stamp 
<%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + readl(adapter->hw.hw_addr + tx_ring->head), + readl(adapter->hw.hw_addr + tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->wb.status); +} + +/** + * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct igbvf_buffer) * tx_ring->count; + tx_ring->buffer_info = vmalloc(size); + if (!tx_ring->buffer_info) + goto err; + memset(tx_ring->buffer_info, 0, size); + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, + &tx_ring->dma); + + if (!tx_ring->desc) + goto err; + + tx_ring->adapter = adapter; + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; +err: + vfree(tx_ring->buffer_info); + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; +} + +/** + * igbvf_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +int igbvf_setup_rx_resources(struct igbvf_adapter *adapter, + struct igbvf_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct igbvf_buffer) * rx_ring->count; + rx_ring->buffer_info = vmalloc(size); + if (!rx_ring->buffer_info) + goto err; + memset(rx_ring->buffer_info, 0, size); + + desc_len = sizeof(union e1000_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, + &rx_ring->dma); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + rx_ring->adapter = adapter; + + return 0; + +err: + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; +} + +/** + * igbvf_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = tx_ring->adapter; + struct igbvf_buffer *buffer_info; + unsigned long size; + unsigned int i; + + if (!tx_ring->buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + igbvf_put_txbuf(adapter, buffer_info); + } + + size = sizeof(struct igbvf_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->head); + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * igbvf_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: ring to free resources from + * + * Free all transmit software resources + **/ +void igbvf_free_tx_resources(struct igbvf_ring *tx_ring) +{ + struct pci_dev *pdev = tx_ring->adapter->pdev; + + igbvf_clean_tx_ring(tx_ring); + 
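+ /* buffer_info[] was vmalloc'ed in igbvf_setup_tx_resources(); release
+ * it before handing the descriptor ring memory back to the PCI layer.
+ */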
+ vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igbvf_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + **/ +static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) +{ + struct igbvf_adapter *adapter = rx_ring->adapter; + struct igbvf_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + if (!rx_ring->buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + if (adapter->rx_ps_hdr_size){ + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_ps_hdr_size, + PCI_DMA_FROMDEVICE); + } else { + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + } + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + if (buffer_info->page) { + if (buffer_info->page_dma) + pci_unmap_page(pdev, buffer_info->page_dma, + PAGE_SIZE / 2, + PCI_DMA_FROMDEVICE); + put_page(buffer_info->page); + buffer_info->page = NULL; + buffer_info->page_dma = 0; + buffer_info->page_offset = 0; + } + } + + size = sizeof(struct igbvf_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, adapter->hw.hw_addr + rx_ring->head); + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +/** + * igbvf_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ + +void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) +{ + struct pci_dev *pdev = rx_ring->adapter->pdev; + + igbvf_clean_rx_ring(rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * igbvf_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. This functionality is controlled + * by the InterruptThrottleRate module parameter. 
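+ *
+ * For example, while in low_latency an interval of two 9000-byte frames
+ * (bytes/packets > 8000, i.e. TSO or jumbo traffic) moves the ring to
+ * bulk_latency, whereas one or two small packets totalling under 512
+ * bytes drop it back to lowest_latency.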
+ **/ +static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter, + u16 itr_setting, int packets, + int bytes) +{ + unsigned int retval = itr_setting; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void igbvf_set_itr(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + + if (new_itr != adapter->itr) { + /* + * this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + adapter->rx_ring->itr_val = 1952; + + if (adapter->msix_entries) + adapter->rx_ring->set_itr = 1; + else + ew32(ITR, 1952); + } +} + +/** + * igbvf_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * returns true if ring is completely cleaned + **/ +static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = tx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct igbvf_buffer *buffer_info; + struct sk_buff *skb; + union e1000_adv_tx_desc *tx_desc, *eop_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i, eop, count = 0; + bool cleaned = false; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); + + while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + for (cleaned = false; !cleaned; count++) { + tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + skb = buffer_info->skb; + + if (skb) { + unsigned int segs, bytecount; + + /* gso_segs is currently only valid for tcp */ + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + + skb->len; + total_packets += segs; + total_bytes += bytecount; + } + + igbvf_put_txbuf(adapter, buffer_info); + tx_desc->wb.status = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + + if (unlikely(count && + netif_carrier_ok(netdev) && + igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
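+ * The smp_mb() below orders the next_to_clean update above against the
+ * netif_queue_stopped() test that follows.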
+ */ + smp_mb(); + if (netif_queue_stopped(netdev) && + !(test_bit(__IGBVF_DOWN, &adapter->state))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[i].time_stamp && + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + + (adapter->tx_timeout_factor * HZ)) + && !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); + /* detected Tx unit hang */ + igbvf_print_tx_hang(adapter); + + netif_stop_queue(netdev); + } + } + adapter->net_stats.tx_bytes += total_bytes; + adapter->net_stats.tx_packets += total_packets; + return (count < tx_ring->count); +} + +static irqreturn_t igbvf_msix_other(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->int_counter1++; + + netif_carrier_off(netdev); + hw->mac.get_link_status = 1; + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + ew32(EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *tx_ring = adapter->tx_ring; + + + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + + /* auto mask will automatically reenable the interrupt when we write + * EICS */ + if (!igbvf_clean_tx_irq(tx_ring)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(EICS, tx_ring->eims_value); + else + ew32(EIMS, tx_ring->eims_value); + + return IRQ_HANDLED; +} + +static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + + adapter->int_counter0++; + + /* Write the ITR value calculated at the end of the + * previous interrupt. + */ + if (adapter->rx_ring->set_itr) { + writel(adapter->rx_ring->itr_val, + adapter->hw.hw_addr + adapter->rx_ring->itr_register); + adapter->rx_ring->set_itr = 0; + } + + if (napi_schedule_prep(&adapter->rx_ring->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->rx_ring->napi); + } + + return IRQ_HANDLED; +} + +#define IGBVF_NO_QUEUE -1 + +static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, + int tx_queue, int msix_vector) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ivar, index; + + /* 82576 uses a table-based method for assigning vectors. + Each queue has a single entry in the table to which we write + a vector number along with a "valid" bit. Sadly, the layout + of the table is somewhat counterintuitive. 
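+ As encoded below: within IVAR0 entry (queue >> 1), an even Rx queue uses
+ byte 0 and its even Tx counterpart byte 1, while the odd Rx queue uses
+ byte 2 and the odd Tx queue byte 3, each value OR'd with E1000_IVAR_VALID.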
*/ + if (rx_queue > IGBVF_NO_QUEUE) { + index = (rx_queue >> 1); + ivar = array_er32(IVAR0, index); + if (rx_queue & 0x1) { + /* vector goes into third byte of register */ + ivar = ivar & 0xFF00FFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 16; + } else { + /* vector goes into low byte of register */ + ivar = ivar & 0xFFFFFF00; + ivar |= msix_vector | E1000_IVAR_VALID; + } + adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector; + array_ew32(IVAR0, index, ivar); + } + if (tx_queue > IGBVF_NO_QUEUE) { + index = (tx_queue >> 1); + ivar = array_er32(IVAR0, index); + if (tx_queue & 0x1) { + /* vector goes into high byte of register */ + ivar = ivar & 0x00FFFFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 24; + } else { + /* vector goes into second byte of register */ + ivar = ivar & 0xFFFF00FF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 8; + } + adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector; + array_ew32(IVAR0, index, ivar); + } +} + +/** + * igbvf_configure_msix - Configure MSI-X hardware + * + * igbvf_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void igbvf_configure_msix(struct igbvf_adapter *adapter) +{ + u32 tmp; + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct igbvf_ring *rx_ring = adapter->rx_ring; + int vector = 0; + + adapter->eims_enable_mask = 0; + + igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++); + adapter->eims_enable_mask |= tx_ring->eims_value; + if (tx_ring->itr_val) + writel(tx_ring->itr_val, + hw->hw_addr + tx_ring->itr_register); + else + writel(1952, hw->hw_addr + tx_ring->itr_register); + + igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++); + adapter->eims_enable_mask |= rx_ring->eims_value; + if (rx_ring->itr_val) + writel(rx_ring->itr_val, + hw->hw_addr + rx_ring->itr_register); + else + writel(1952, hw->hw_addr + rx_ring->itr_register); + + /* set vector for other causes, i.e. link changes */ + + tmp = (vector++ | E1000_IVAR_VALID); + + ew32(IVAR_MISC, tmp); + + adapter->eims_enable_mask = (1 << (vector)) - 1; + adapter->eims_other = 1 << (vector - 1); + e1e_flush(); +} + +static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } +} + +/** + * igbvf_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) +{ + int err = -ENOMEM; + int i; + + /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ + adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + for (i = 0; i < 3; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix(adapter->pdev, + adapter->msix_entries, 3); + } + + if (err) { + /* MSI-X failed */ + dev_err(&adapter->pdev->dev, + "Failed to initialize MSI-X interrupts.\n"); + igbvf_reset_interrupt_capability(adapter); + } +} + +/** + * igbvf_request_msix - Initialize MSI-X interrupts + * + * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
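+ *
+ * Three vectors are requested in order: "<ifname>-tx-0", "<ifname>-rx-0"
+ * and a third, named after the netdev itself, for link/PF mailbox events.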
+ **/ +static int igbvf_request_msix(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0, vector = 0; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) { + sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); + sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); + } else { + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); + } + + err = request_irq(adapter->msix_entries[vector].vector, + &igbvf_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); + if (err) + goto out; + + adapter->tx_ring->itr_register = E1000_EITR(vector); + adapter->tx_ring->itr_val = 1952; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + &igbvf_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); + if (err) + goto out; + + adapter->rx_ring->itr_register = E1000_EITR(vector); + adapter->rx_ring->itr_val = 1952; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + &igbvf_msix_other, 0, netdev->name, netdev); + if (err) + goto out; + + igbvf_configure_msix(adapter); + return 0; +out: + return err; +} + +/** + * igbvf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64); + + return 0; +} + +/** + * igbvf_request_irq - initialize interrupts + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
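+ *
+ * Despite the generic wording, the VF supports MSI-X only; if the MSI-X
+ * vectors could not be enabled earlier this simply returns an error.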
+ **/ +static int igbvf_request_irq(struct igbvf_adapter *adapter) +{ + int err = -1; + + /* igbvf supports msi-x only */ + if (adapter->msix_entries) + err = igbvf_request_msix(adapter); + + if (!err) + return err; + + dev_err(&adapter->pdev->dev, + "Unable to allocate interrupt, Error: %d\n", err); + + return err; +} + +static void igbvf_free_irq(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector; + + if (adapter->msix_entries) { + for (vector = 0; vector < 3; vector++) + free_irq(adapter->msix_entries[vector].vector, netdev); + } +} + +/** + * igbvf_irq_disable - Mask off interrupt generation on the NIC + **/ +static void igbvf_irq_disable(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(EIMC, ~0); + + if (adapter->msix_entries) + ew32(EIAC, 0); +} + +/** + * igbvf_irq_enable - Enable default interrupt generation settings + **/ +static void igbvf_irq_enable(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(EIAC, adapter->eims_enable_mask); + ew32(EIAM, adapter->eims_enable_mask); + ew32(EIMS, adapter->eims_enable_mask); +} + +/** + * igbvf_poll - NAPI Rx polling callback + * @napi: struct associated with this polling callback + * @budget: amount of packets driver is allowed to process this poll + **/ +static int igbvf_poll(struct napi_struct *napi, int budget) +{ + struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi); + struct igbvf_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + int work_done = 0; + + igbvf_clean_rx_irq(adapter, &work_done, budget); + + /* If not enough Rx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + + if (adapter->itr_setting & 3) + igbvf_set_itr(adapter); + + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + ew32(EIMS, adapter->rx_ring->eims_value); + } + + return work_done; +} + +/** + * igbvf_set_rlpml - set receive large packet maximum length + * @adapter: board private structure + * + * Configure the maximum size of packets that will be received + */ +static void igbvf_set_rlpml(struct igbvf_adapter *adapter) +{ + int max_frame_size = adapter->max_frame_size; + struct e1000_hw *hw = &adapter->hw; + + if (adapter->vlgrp) + max_frame_size += VLAN_TAG_SIZE; + + e1000_rlpml_set_vf(hw, max_frame_size); +} + +static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac.ops.set_vfta(hw, vid, true)) + dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); +} + +static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + igbvf_irq_disable(adapter); + vlan_group_set_device(adapter->vlgrp, vid, NULL); + + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + igbvf_irq_enable(adapter); + + if (hw->mac.ops.set_vfta(hw, vid, false)) + dev_err(&adapter->pdev->dev, + "Failed to remove vlan id %d\n", vid); +} + +static void igbvf_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + adapter->vlgrp = grp; +} + +static void igbvf_restore_vlan(struct igbvf_adapter *adapter) +{ + u16 vid; + + if (!adapter->vlgrp) + return; + + for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { + if (!vlan_group_get_device(adapter->vlgrp, vid)) + continue; + igbvf_vlan_rx_add_vid(adapter->netdev, 
vid); + } + + igbvf_set_rlpml(adapter); +} + +/** + * igbvf_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void igbvf_configure_tx(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *tx_ring = adapter->tx_ring; + u64 tdba; + u32 txdctl, dca_txctrl; + + /* disable transmits */ + txdctl = er32(TXDCTL(0)); + ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); + msleep(10); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc)); + tdba = tx_ring->dma; + ew32(TDBAL(0), (tdba & DMA_32BIT_MASK)); + ew32(TDBAH(0), (tdba >> 32)); + ew32(TDH(0), 0); + ew32(TDT(0), 0); + tx_ring->head = E1000_TDH(0); + tx_ring->tail = E1000_TDT(0); + + /* Turn off Relaxed Ordering on head write-backs. The writebacks + * MUST be delivered in order or it will completely screw up + * our bookkeeping. + */ + dca_txctrl = er32(DCA_TXCTRL(0)); + dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; + ew32(DCA_TXCTRL(0), dca_txctrl); + + /* enable transmits */ + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + ew32(TXDCTL(0), txdctl); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS; + + /* enable Report Status bit */ + adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; + + adapter->tx_queue_len = adapter->netdev->tx_queue_len; +} + +/** + * igbvf_setup_srrctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 srrctl = 0; + + srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | + E1000_SRRCTL_BSIZEHDR_MASK | + E1000_SRRCTL_BSIZEPKT_MASK); + + /* Enable queue drop to avoid head of line blocking */ + srrctl |= E1000_SRRCTL_DROP_EN; + + /* Setup buffer sizes */ + srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> + E1000_SRRCTL_BSIZEPKT_SHIFT; + + if (adapter->rx_buffer_len < 2048) { + adapter->rx_ps_hdr_size = 0; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + } else { + adapter->rx_ps_hdr_size = 128; + srrctl |= adapter->rx_ps_hdr_size << + E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } + + ew32(SRRCTL(0), srrctl); +} + +/** + * igbvf_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset.
+ **/ +static void igbvf_configure_rx(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rxdctl; + + /* disable receives */ + rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + msleep(10); + + rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + rdba = rx_ring->dma; + ew32(RDBAL(0), (rdba & DMA_32BIT_MASK)); + ew32(RDBAH(0), (rdba >> 32)); + ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); + rx_ring->head = E1000_RDH(0); + rx_ring->tail = E1000_RDT(0); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= IGBVF_RX_PTHRESH; + rxdctl |= IGBVF_RX_HTHRESH << 8; + rxdctl |= IGBVF_RX_WTHRESH << 16; + + igbvf_set_rlpml(adapter); + + /* enable receives */ + ew32(RXDCTL(0), rxdctl); +} + +/** + * igbvf_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void igbvf_set_multi(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct dev_mc_list *mc_ptr; + u8 *mta_list = NULL; + int i; + + if (netdev->mc_count) { + mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC); + if (!mta_list) { + dev_err(&adapter->pdev->dev, + "failed to allocate multicast filter list\n"); + return; + } + } + + /* prepare a packed array of only addresses. */ + mc_ptr = netdev->mc_list; + + for (i = 0; i < netdev->mc_count; i++) { + if (!mc_ptr) + break; + memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, + ETH_ALEN); + mc_ptr = mc_ptr->next; + } + + hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); + kfree(mta_list); +} + +/** + * igbvf_configure - configure the hardware for Rx and Tx + * @adapter: private board structure + **/ +static void igbvf_configure(struct igbvf_adapter *adapter) +{ + igbvf_set_multi(adapter->netdev); + + igbvf_restore_vlan(adapter); + + igbvf_configure_tx(adapter); + igbvf_setup_srrctl(adapter); + igbvf_configure_rx(adapter); + igbvf_alloc_rx_buffers(adapter->rx_ring, + igbvf_desc_unused(adapter->rx_ring)); +} + +/* igbvf_reset - bring the hardware into a known good state + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. After reset the device needs to be + * properly configured for Rx, Tx etc. 
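+ * If reset_hw() reports that the PF has not finished resetting, this is
+ * only logged; init_hw() and the MAC address reload still run.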
+ */ +static void igbvf_reset(struct igbvf_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + + /* Allow time for pending master requests to run */ + if (mac->ops.reset_hw(hw)) + dev_err(&adapter->pdev->dev, "PF still resetting\n"); + + mac->ops.init_hw(hw); + + if (is_valid_ether_addr(adapter->hw.mac.addr)) { + memcpy(netdev->dev_addr, adapter->hw.mac.addr, + netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, + netdev->addr_len); + } +} + +int igbvf_up(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + igbvf_configure(adapter); + + clear_bit(__IGBVF_DOWN, &adapter->state); + + napi_enable(&adapter->rx_ring->napi); + if (adapter->msix_entries) + igbvf_configure_msix(adapter); + + /* Clear any pending interrupts. */ + er32(EICR); + igbvf_irq_enable(adapter); + + /* start the watchdog */ + hw->mac.get_link_status = 1; + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + + return 0; +} + +void igbvf_down(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 rxdctl, txdctl; + + /* + * signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__IGBVF_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + + netif_stop_queue(netdev); + + /* disable transmits in the hardware */ + txdctl = er32(TXDCTL(0)); + ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); + + /* flush both disables and wait for them to finish */ + e1e_flush(); + msleep(10); + + napi_disable(&adapter->rx_ring->napi); + + igbvf_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + + netdev->tx_queue_len = adapter->tx_queue_len; + netif_carrier_off(netdev); + + /* record the stats before reset*/ + igbvf_update_stats(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + igbvf_reset(adapter); + igbvf_clean_tx_ring(adapter->tx_ring); + igbvf_clean_rx_ring(adapter->rx_ring); +} + +void igbvf_reinit_locked(struct igbvf_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) + msleep(1); + igbvf_down(adapter); + igbvf_up(adapter); + clear_bit(__IGBVF_RESETTING, &adapter->state); +} + +/** + * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter) + * @adapter: board private structure to initialize + * + * igbvf_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
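+ *
+ * The interrupt defaults set here (itr_setting 3, itr 20000) enable the
+ * adaptive moderation carried out by igbvf_update_itr()/igbvf_set_itr().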
+ **/ +static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + s32 rc; + + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; + adapter->rx_ps_hdr_size = 0; + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + adapter->tx_int_delay = 8; + adapter->tx_abs_int_delay = 32; + adapter->rx_int_delay = 0; + adapter->rx_abs_int_delay = 8; + adapter->itr_setting = 3; + adapter->itr = 20000; + + /* Set various function pointers */ + adapter->ei->init_ops(&adapter->hw); + + rc = adapter->hw.mac.ops.init_params(&adapter->hw); + if (rc) + return rc; + + rc = adapter->hw.mbx.ops.init_params(&adapter->hw); + if (rc) + return rc; + + igbvf_set_interrupt_capability(adapter); + + if (igbvf_alloc_queues(adapter)) + return -ENOMEM; + + spin_lock_init(&adapter->tx_queue_lock); + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igbvf_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__IGBVF_DOWN, &adapter->state); + return 0; +} + +static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + adapter->stats.last_gprc = er32(VFGPRC); + adapter->stats.last_gorc = er32(VFGORC); + adapter->stats.last_gptc = er32(VFGPTC); + adapter->stats.last_gotc = er32(VFGOTC); + adapter->stats.last_mprc = er32(VFMPRC); + adapter->stats.last_gotlbc = er32(VFGOTLBC); + adapter->stats.last_gptlbc = er32(VFGPTLBC); + adapter->stats.last_gorlbc = er32(VFGORLBC); + adapter->stats.last_gprlbc = er32(VFGPRLBC); + + adapter->stats.base_gprc = er32(VFGPRC); + adapter->stats.base_gorc = er32(VFGORC); + adapter->stats.base_gptc = er32(VFGPTC); + adapter->stats.base_gotc = er32(VFGOTC); + adapter->stats.base_mprc = er32(VFMPRC); + adapter->stats.base_gotlbc = er32(VFGOTLBC); + adapter->stats.base_gptlbc = er32(VFGPTLBC); + adapter->stats.base_gorlbc = er32(VFGORLBC); + adapter->stats.base_gprlbc = er32(VFGPRLBC); +} + +/** + * igbvf_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int igbvf_open(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__IGBVF_TESTING, &adapter->state)) + return -EBUSY; + + /* allocate transmit descriptors */ + err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); + if (err) + goto err_setup_rx; + + /* + * before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. 
+ */ + igbvf_configure(adapter); + + err = igbvf_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as igbvf_up() */ + clear_bit(__IGBVF_DOWN, &adapter->state); + + napi_enable(&adapter->rx_ring->napi); + + /* clear any pending interrupts */ + er32(EICR); + + igbvf_irq_enable(adapter); + + /* start the watchdog */ + hw->mac.get_link_status = 1; + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + return 0; + +err_req_irq: + igbvf_free_rx_resources(adapter->rx_ring); +err_setup_rx: + igbvf_free_tx_resources(adapter->tx_ring); +err_setup_tx: + igbvf_reset(adapter); + + return err; +} + +/** + * igbvf_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int igbvf_close(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); + igbvf_down(adapter); + + igbvf_free_irq(adapter); + + igbvf_free_tx_resources(adapter->tx_ring); + igbvf_free_rx_resources(adapter->rx_ring); + + return 0; +} +/** + * igbvf_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int igbvf_set_mac(struct net_device *netdev, void *p) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + if (memcmp(addr->sa_data, hw->mac.addr, 6)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + + return 0; +} + +#define UPDATE_VF_COUNTER(reg, name) \ + { \ + u32 current_counter = er32(reg); \ + if (current_counter < adapter->stats.last_##name) \ + adapter->stats.name += 0x100000000LL; \ + adapter->stats.last_##name = current_counter; \ + adapter->stats.name &= 0xFFFFFFFF00000000LL; \ + adapter->stats.name |= current_counter; \ + } + +/** + * igbvf_update_stats - Update the board statistics counters + * @adapter: board private structure +**/ +void igbvf_update_stats(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* + * Prevent stats update while adapter is being reset, link is down + * or if the pci connection is down. 
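+ * The 32-bit hardware counters are widened to 64 bits by
+ * UPDATE_VF_COUNTER(), which bumps the upper half whenever the current
+ * reading wraps below the previous one.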
+ */ + if (adapter->link_speed == 0) + return; + + if (test_bit(__IGBVF_RESETTING, &adapter->state)) + return; + + if (pci_channel_offline(pdev)) + return; + + UPDATE_VF_COUNTER(VFGPRC, gprc); + UPDATE_VF_COUNTER(VFGORC, gorc); + UPDATE_VF_COUNTER(VFGPTC, gptc); + UPDATE_VF_COUNTER(VFGOTC, gotc); + UPDATE_VF_COUNTER(VFMPRC, mprc); + UPDATE_VF_COUNTER(VFGOTLBC, gotlbc); + UPDATE_VF_COUNTER(VFGPTLBC, gptlbc); + UPDATE_VF_COUNTER(VFGORLBC, gorlbc); + UPDATE_VF_COUNTER(VFGPRLBC, gprlbc); + + /* Fill out the OS statistics structure */ + adapter->net_stats.multicast = adapter->stats.mprc; +} + +static void igbvf_print_link_info(struct igbvf_adapter *adapter) +{ + dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n", + adapter->link_speed, + ((adapter->link_duplex == FULL_DUPLEX) ? + "Full Duplex" : "Half Duplex")); +} + +static bool igbvf_has_link(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 ret_val = E1000_SUCCESS; + bool link_active; + + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + + /* if check for link returns error we will need to reset */ + if (ret_val) + schedule_work(&adapter->reset_task); + + return link_active; +} + +/** + * igbvf_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void igbvf_watchdog(unsigned long data) +{ + struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igbvf_watchdog_task(struct work_struct *work) +{ + struct igbvf_adapter *adapter = container_of(work, + struct igbvf_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link; + int tx_pending = 0; + + link = igbvf_has_link(adapter); + + if (link) { + if (!netif_carrier_ok(netdev)) { + bool txb2b = 1; + + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + igbvf_print_link_info(adapter); + + /* + * tweak tx_queue_len according to speed/duplex + * and adjust the timeout factor + */ + netdev->tx_queue_len = adapter->tx_queue_len; + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = 0; + netdev->tx_queue_len = 10; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = 0; + netdev->tx_queue_len = 100; + /* maybe add some timeout factor ? */ + break; + } + + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + dev_info(&adapter->pdev->dev, "Link is Down\n"); + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } + + if (netif_carrier_ok(netdev)) { + igbvf_update_stats(adapter); + } else { + tx_pending = (igbvf_desc_unused(tx_ring) + 1 < + tx_ring->count); + if (tx_pending) { + /* + * We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + } + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + ew32(EICS, adapter->rx_ring->eims_value); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = 1; + + /* Reset the timer */ + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + (2 * HZ))); +} + +#define IGBVF_TX_FLAGS_CSUM 0x00000001 +#define IGBVF_TX_FLAGS_VLAN 0x00000002 +#define IGBVF_TX_FLAGS_TSO 0x00000004 +#define IGBVF_TX_FLAGS_IPV4 0x00000008 +#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 + +static int igbvf_tso(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) +{ + struct e1000_adv_tx_context_desc *context_desc; + unsigned int i; + int err; + struct igbvf_buffer *buffer_info; + u32 info = 0, tu_cmd = 0; + u32 mss_l4len_idx, l4len; + *hdr_len = 0; + + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) { + dev_err(&adapter->pdev->dev, + "igbvf_tso returning an error\n"); + return err; + } + } + + l4len = tcp_hdrlen(skb); + *hdr_len += l4len; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + i = tx_ring->next_to_use; + + buffer_info = &tx_ring->buffer_info[i]; + context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + /* VLAN MACLEN IPLEN */ + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); + *hdr_len += skb_network_offset(skb); + info |= (skb_transport_header(skb) - skb_network_header(skb)); + *hdr_len += (skb_transport_header(skb) - skb_network_header(skb)); + context_desc->vlan_macip_lens = cpu_to_le32(info); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + + if (skb->protocol == htons(ETH_P_IP)) + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + + /* MSS L4LEN IDX */ + mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); + + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + context_desc->seqnum_seed = 0; + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = 0; + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; +} + +static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags) +{ + struct e1000_adv_tx_context_desc *context_desc; + unsigned int i; + struct igbvf_buffer *buffer_info; + u32 info = 0, tu_cmd = 0; + + if ((skb->ip_summed == CHECKSUM_PARTIAL) || + (tx_flags & IGBVF_TX_FLAGS_VLAN)) { + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); + + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); + if 
(skb->ip_summed == CHECKSUM_PARTIAL) + info |= (skb_transport_header(skb) - + skb_network_header(skb)); + + + context_desc->vlan_macip_lens = cpu_to_le32(info); + + tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + switch (skb->protocol) { + case __constant_htons(ETH_P_IP): + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + break; + case __constant_htons(ETH_P_IPV6): + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + break; + default: + break; + } + } + + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + context_desc->seqnum_seed = 0; + context_desc->mss_l4len_idx = 0; + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = 0; + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; + } + + return false; +} + +static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* there is enough descriptors then we don't need to worry */ + if (igbvf_desc_unused(adapter->tx_ring) >= size) + return 0; + + netif_stop_queue(netdev); + + smp_mb(); + + /* We need to check again just in case room has been made available */ + if (igbvf_desc_unused(adapter->tx_ring) < size) + return -EBUSY; + + netif_wake_queue(netdev); + + ++adapter->restart_queue; + return 0; +} + +#define IGBVF_MAX_TXD_PWR 16 +#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) + +static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb, + unsigned int first) +{ + struct igbvf_buffer *buffer_info; + unsigned int len = skb_headlen(skb); + unsigned int count = 0, i; + unsigned int f; + dma_addr_t *map; + + i = tx_ring->next_to_use; + + if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { + dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); + return 0; + } + + map = skb_shinfo(skb)->dma_maps; + + buffer_info = &tx_ring->buffer_info[i]; + BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); + buffer_info->length = len; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = map[count]; + count++; + + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + struct skb_frag_struct *frag; + + i++; + if (i == tx_ring->count) + i = 0; + + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + + buffer_info = &tx_ring->buffer_info[i]; + BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); + buffer_info->length = len; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = map[count]; + count++; + } + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; +} + +static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + int tx_flags, int count, u32 paylen, + u8 hdr_len) +{ + union e1000_adv_tx_desc *tx_desc = NULL; + struct igbvf_buffer *buffer_info; + u32 olinfo_status = 0, cmd_type_len; + unsigned int i; + + cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | + E1000_ADVTXD_DCMD_DEXT); + + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + cmd_type_len |= E1000_ADVTXD_DCMD_VLE; + + if (tx_flags & IGBVF_TX_FLAGS_TSO) { + cmd_type_len |= E1000_ADVTXD_DCMD_TSE; + + /* insert tcp checksum */ + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + + /* insert ip checksum */ + if (tx_flags & 
IGBVF_TX_FLAGS_IPV4) + olinfo_status |= E1000_TXD_POPTS_IXSM << 8; + + } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) { + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + } + + olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); + + i = tx_ring->next_to_use; + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type_len | buffer_info->length); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + i++; + if (i == tx_ring->count) + i = 0; + } + + tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tail); + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems */ + mmiowb(); +} + +static int igbvf_xmit_frame_ring_adv(struct sk_buff *skb, + struct net_device *netdev, + struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + unsigned int first, tx_flags = 0; + u8 hdr_len = 0; + int count = 0; + int tso = 0; + + if (test_bit(__IGBVF_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * need: count + 4 desc gap to keep tail from touching head: + * + 2 desc gap to keep tail from touching head, + * + 1 desc for skb->data, + * + 1 desc for context descriptor; + * otherwise try again next time + */ + if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + tx_flags |= IGBVF_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); + } + + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= IGBVF_TX_FLAGS_IPV4; + + first = tx_ring->next_to_use; + + tso = skb_is_gso(skb) ? + igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; + if (unlikely(tso < 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= IGBVF_TX_FLAGS_TSO; + else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && + (skb->ip_summed == CHECKSUM_PARTIAL)) + tx_flags |= IGBVF_TX_FLAGS_CSUM; + + /* + * count reflects descriptors mapped, if 0 then mapping error + * has occurred and we need to rewind the descriptor queue + */ + count = igbvf_tx_map_adv(adapter, tx_ring, skb, first); + + if (count) { + igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, + skb->len, hdr_len); + netdev->trans_start = jiffies; + /* Make sure there is space in the ring for the next send. 
*/ + igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +static int igbvf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *tx_ring; + int retval; + + if (test_bit(__IGBVF_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + tx_ring = &adapter->tx_ring[0]; + + retval = igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring); + + return retval; +} + +/** + * igbvf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void igbvf_tx_timeout(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void igbvf_reset_task(struct work_struct *work) +{ + struct igbvf_adapter *adapter; + adapter = container_of(work, struct igbvf_adapter, reset_task); + + igbvf_reinit_locked(adapter); +} + +/** + * igbvf_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats *igbvf_get_stats(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +} + +/** + * igbvf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { + dev_err(&adapter->pdev->dev, "Invalid MTU setting\n"); + return -EINVAL; + } + + /* Jumbo frame size limits */ + if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { + if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { + dev_err(&adapter->pdev->dev, + "Jumbo Frames not supported.\n"); + return -EINVAL; + } + } + +#define MAX_STD_JUMBO_FRAME_SIZE 9234 + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) + msleep(1); + /* igbvf_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + if (netif_running(netdev)) + igbvf_down(adapter); + + /* + * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + * However with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= 1024) + adapter->rx_buffer_len = 1024; + else if (max_frame <= 2048) + adapter->rx_buffer_len = 2048; + else +#if (PAGE_SIZE / 2) > 16384 + adapter->rx_buffer_len = 16384; +#else + adapter->rx_buffer_len = PAGE_SIZE / 2; +#endif + + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + + ETH_FCS_LEN; + + dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igbvf_up(adapter); + else + igbvf_reset(adapter); + + clear_bit(__IGBVF_RESETTING, &adapter->state); + + return 0; +} + +static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + default: + return -EOPNOTSUPP; + } +} + +static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); + igbvf_down(adapter); + igbvf_free_irq(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int igbvf_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_restore_state(pdev); + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + if (netif_running(netdev)) { + err = igbvf_request_irq(adapter); + if (err) + return err; + } + + igbvf_reset(adapter); + + if (netif_running(netdev)) + igbvf_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void igbvf_shutdown(struct pci_dev *pdev) +{ + igbvf_suspend(pdev, PMSG_SUSPEND); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void igbvf_netpoll(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + + igbvf_clean_tx_irq(adapter->tx_ring); + + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * igbvf_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + igbvf_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igbvf_io_slot_reset - called after the pci bus has been reset. 
+ * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igbvf_resume routine. + */ +static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + igbvf_reset(adapter); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * igbvf_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igbvf_resume routine. + */ +static void igbvf_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (igbvf_up(adapter)) { + dev_err(&pdev->dev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static void igbvf_print_device_info(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); + dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n", + /* MAC address */ + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); +} + +static const struct net_device_ops igbvf_netdev_ops = { + .ndo_open = igbvf_open, + .ndo_stop = igbvf_close, + .ndo_start_xmit = igbvf_xmit_frame, + .ndo_get_stats = igbvf_get_stats, + .ndo_set_multicast_list = igbvf_set_multi, + .ndo_set_mac_address = igbvf_set_mac, + .ndo_change_mtu = igbvf_change_mtu, + .ndo_do_ioctl = igbvf_ioctl, + .ndo_tx_timeout = igbvf_tx_timeout, + .ndo_vlan_rx_register = igbvf_vlan_rx_register, + .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = igbvf_netpoll, +#endif +}; + +/** + * igbvf_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igbvf_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igbvf_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
+ **/ +static int __devinit igbvf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct igbvf_adapter *adapter; + struct e1000_hw *hw; + const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; + + static int cards_found; + int err, pci_using_dac; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); + if (!err) + pci_using_dac = 1; + } else { + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + } + + err = pci_request_regions(pdev, igbvf_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct igbvf_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->hw.back = adapter; + adapter->hw.mac.type = ei->mac; + adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + + err = -EIO; + adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if (ei->get_variants) { + err = ei->get_variants(adapter); + if (err) + goto err_ioremap; + } + + /* setup adapter struct */ + err = igbvf_sw_init(adapter); + if (err) + goto err_sw_init; + + /* construct the net_device struct */ + netdev->netdev_ops = &igbvf_netdev_ops; + + igbvf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found++; + + netdev->features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; + + netdev->features |= NETIF_F_IPV6_CSUM; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_SG; + + /*reset the controller to put the device in a known good state */ + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_info(&pdev->dev, + "PF still in reset state, assigning new address\n"); + random_ether_addr(hw->mac.addr); + } else { + err = hw->mac.ops.read_mac_addr(hw); + if (err) { + dev_err(&pdev->dev, "Error reading MAC address\n"); + goto err_hw_init; + } + } + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address: " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + 
netdev->dev_addr[4], netdev->dev_addr[5]); + err = -EIO; + goto err_hw_init; + } + + setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, + (unsigned long) adapter); + + INIT_WORK(&adapter->reset_task, igbvf_reset_task); + INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); + + /* ring size defaults */ + adapter->rx_ring->count = 1024; + adapter->tx_ring->count = 1024; + + /* reset the hardware with the new settings */ + igbvf_reset(adapter); + + /* tell the stack to leave us alone until igbvf_open() is called */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_hw_init; + + igbvf_print_device_info(adapter); + + igbvf_initialize_last_counter_stats(adapter); + + return 0; + +err_hw_init: + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_sw_init: + igbvf_reset_interrupt_capability(adapter); + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igbvf_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igbvf_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void __devexit igbvf_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* + * flush_scheduled_work() may reschedule our watchdog task, so + * explicitly disable watchdog tasks from being rescheduled + */ + set_bit(__IGBVF_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + + flush_scheduled_work(); + + unregister_netdev(netdev); + + igbvf_reset_interrupt_capability(adapter); + + /* + * it is important to delete the napi struct prior to freeing the + * rx ring so that you do not end up with null pointer refs + */ + netif_napi_del(&adapter->rx_ring->napi); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static struct pci_error_handlers igbvf_err_handler = { + .error_detected = igbvf_io_error_detected, + .slot_reset = igbvf_io_slot_reset, + .resume = igbvf_io_resume, +}; + +static struct pci_device_id igbvf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); + +/* PCI Device API Driver */ +static struct pci_driver igbvf_driver = { + .name = igbvf_driver_name, + .id_table = igbvf_pci_tbl, + .probe = igbvf_probe, + .remove = __devexit_p(igbvf_remove), +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = igbvf_suspend, + .resume = igbvf_resume, +#endif + .shutdown = igbvf_shutdown, + .err_handler = &igbvf_err_handler +}; + +/** + * igbvf_init_module - Driver Registration Routine + * + * igbvf_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
+ **/ +static int __init igbvf_init_module(void) +{ + int ret; + printk(KERN_INFO "%s - version %s\n", + igbvf_driver_string, igbvf_driver_version); + printk(KERN_INFO "%s\n", igbvf_copyright); + + ret = pci_register_driver(&igbvf_driver); + pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name, + PM_QOS_DEFAULT_VALUE); + + return ret; +} +module_init(igbvf_init_module); + +/** + * igbvf_exit_module - Driver Exit Cleanup Routine + * + * igbvf_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit igbvf_exit_module(void) +{ + pci_unregister_driver(&igbvf_driver); + pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name); +} +module_exit(igbvf_exit_module); + + +MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); +MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* netdev.c */ diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h new file mode 100644 index 0000000..b9e24ed --- /dev/null +++ b/drivers/net/igbvf/regs.h @@ -0,0 +1,108 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +/* + * Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? 
(0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) +#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) + +/* Statistics registers */ +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 +#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 + +/* These act per VF so an array friendly macro is used */ +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) + +/* Define macros for handling registers */ +#define er32(reg) readl(hw->hw_addr + E1000_##reg) +#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) +#define array_er32(reg, offset) \ + readl(hw->hw_addr + E1000_##reg + (offset << 2)) +#define array_ew32(reg, offset, val) \ + writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) +#define e1e_flush() er32(STATUS) + +#endif diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c new file mode 100644 index 0000000..2a4faf9 --- /dev/null +++ b/drivers/net/igbvf/vf.c @@ -0,0 +1,398 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +#include "vf.h" + +static s32 e1000_check_for_link_vf(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +static s32 e1000_init_hw_vf(struct e1000_hw *hw); +static s32 e1000_reset_hw_vf(struct e1000_hw *hw); + +static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, + u32, u32, u32); +static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); +static s32 e1000_read_mac_addr_vf(struct e1000_hw *); +static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); + +/** + * e1000_init_mac_params_vf - Inits MAC params + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_vf(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + /* VF's have no MTA Registers - PF feature only */ + mac->mta_reg_count = 128; + /* VF's have no access to RAR entries */ + mac->rar_entry_count = 1; + + /* Function pointers */ + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_vf; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_vf; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_vf; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_vf; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf; + /* set mac address */ + mac->ops.rar_set = e1000_rar_set_vf; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_vf; + /* set vlan filter table array */ + mac->ops.set_vfta = e1000_set_vfta_vf; + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_vf - Inits function pointers + * @hw: pointer to the HW structure + **/ +void e1000_init_function_pointers_vf(struct e1000_hw *hw) +{ + hw->mac.ops.init_params = e1000_init_mac_params_vf; + hw->mbx.ops.init_params = e1000_init_mbx_params_vf; +} + +/** + * e1000_get_link_up_info_vf - Gets link info. + * @hw: pointer to the HW structure + * @speed: pointer to 16 bit value to store link speed. + * @duplex: pointer to 16 bit value to store duplex. + * + * Since we cannot read the PHY and get accurate link info, we must rely upon + * the status register's data which is often stale and inaccurate. + **/ +static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) + *speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + if (status & E1000_STATUS_FD) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_vf - Resets the HW + * @hw: pointer to the HW structure + * + * VF's provide a function level reset. This is done using bit 26 of ctrl_reg. + * This is all the reset we can perform on a VF. 
+ **/ +static s32 e1000_reset_hw_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 timeout = E1000_VF_INIT_TIMEOUT; + u32 ret_val = -E1000_ERR_MAC_INIT; + u32 msgbuf[3]; + u8 *addr = (u8 *)(&msgbuf[1]); + u32 ctrl; + + /* assert vf queue/interrupt reset */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + /* we cannot initialize while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw) && timeout) { + timeout--; + udelay(5); + } + + if (timeout) { + /* mailbox timeout can now become active */ + mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; + + /* notify pf of vf reset completion */ + msgbuf[0] = E1000_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1); + + msleep(10); + + /* set our "perm_addr" based on info provided by PF */ + ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + if (!ret_val) { + if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) + memcpy(hw->mac.perm_addr, addr, 6); + else + ret_val = -E1000_ERR_MAC_INIT; + } + } + + return ret_val; +} + +/** + * e1000_init_hw_vf - Inits the HW + * @hw: pointer to the HW structure + * + * Not much to do here except clear the PF Reset indication if there is one. + **/ +static s32 e1000_init_hw_vf(struct e1000_hw *hw) +{ + /* attempt to set and restore our mac address */ + e1000_rar_set_vf(hw, hw->mac.addr, 0); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr_vf - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * e1000_mta_set_generic() + **/ +static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * The bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000_update_mc_addr_list_vf - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. + **/ +void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + u16 *hash_list = (u16 *)&msgbuf[1]; + u32 hash_value; + u32 cnt, i; + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. 
+ * It would be unusual for a server to request that many multicast + * addresses except in large enterprise network environments. + */ + + cnt = (mc_addr_count > 30) ? 30 : mc_addr_count; + msgbuf[0] = E1000_VF_SET_MULTICAST; + msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT; + + for (i = 0; i < cnt; i++) { + hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list); + hash_list[i] = hash_value & 0x0FFFF; + mc_addr_list += ETH_ADDR_LEN; + } + + mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); +} + +/** + * e1000_set_vfta_vf - Set/Unset vlan filter table address + * @hw: pointer to the HW structure + * @vid: determines the vfta register and bit to set/unset + * @set: if true then set bit, else clear bit + **/ +static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + msgbuf[0] = E1000_VF_SET_VLAN; + msgbuf[1] = vid; + /* Setting the 8 bit field MSG INFO to true indicates "add" */ + if (set) + msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT; + + mbx->ops.write_posted(hw, msgbuf, 2); + + err = mbx->ops.read_posted(hw, msgbuf, 2); + + /* if nacked the vlan was rejected */ + if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) + err = -E1000_ERR_MAC_INIT; + + return err; +} + +/** + * e1000_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + + msgbuf[0] = E1000_VF_SET_LPE; + msgbuf[1] = max_size; + + mbx->ops.write_posted(hw, msgbuf, 2); +} + +/** + * e1000_rar_set_vf - set device MAC address + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + **/ +static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + memset(msgbuf, 0, 12); + msgbuf[0] = E1000_VF_SET_MAC_ADDR; + memcpy(msg_addr, addr, 6); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + + /* if nacked the address was rejected, use "perm_addr" */ + if (!ret_val && + (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) + e1000_read_mac_addr_vf(hw); +} + +/** + * e1000_read_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) +{ + int i; + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return E1000_SUCCESS; +} + +/** + * e1000_check_for_link_vf - Check for link for a virtual interface + * @hw: pointer to the HW structure + * + * Checks to see if the underlying PF is still talking to the VF and + * if it is then it reports the link state to the hardware, otherwise + * it reports link down and returns an error. + **/ +static s32 e1000_check_for_link_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + u32 in_msg = 0; + + /* + * We only want to run this if there has been a rst asserted. 
+ * in this case that could mean a link change, device reset, + * or a virtual function reset + */ + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw)) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + if (!(er32(STATUS) & E1000_STATUS_LU)) + goto out; + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error */ + if (mbx->ops.read(hw, &in_msg, 1)) + goto out; + + /* if incoming message isn't clear to send we are waiting on response */ + if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { + /* message is not CTS and is NACK we must have lost CTS status */ + if (in_msg & E1000_VT_MSGTYPE_NACK) + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* the pf is talking, if we timed out in the past we reinit */ + if (!mbx->timeout) { + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link */ + mac->get_link_status = false; + +out: + return ret_val; +} + diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h new file mode 100644 index 0000000..1e8ce374 --- /dev/null +++ b/drivers/net/igbvf/vf.h @@ -0,0 +1,264 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_VF_H_ +#define _E1000_VF_H_ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/if_ether.h> + +#include "regs.h" +#include "defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 + +/* + * Receive Address Register Count + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * These entries are also used for MAC-based filtering. 
+ */ +#define E1000_RAR_ENTRIES_VF 1 + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + u32 data; + struct { + u16 pkt_info; /* RSS/Packet type */ + u16 hdr_info; /* Split Header, + * hdr buffer length */ + } hs_rss; + } lo_dword; + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + u64 buffer_addr; /* Address of descriptor's data buf */ + u32 cmd_type_len; + u32 olinfo_status; + } read; + struct { + u64 rsvd; /* Reserved */ + u32 nxtseq_seed; + u32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + u32 vlan_macip_lens; + u32 seqnum_seed; + u32 type_tucmd_mlhl; + u32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_vfadapt, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +struct e1000_vf_stats { + u64 base_gprc; + u64 base_gptc; + u64 base_gorc; + u64 base_gotc; + u64 base_mprc; + u64 base_gotlbc; + u64 base_gptlbc; + u64 base_gorlbc; + u64 base_gprlbc; + + u32 last_gprc; + u32 last_gptc; + u32 last_gorc; + u32 last_gotc; + u32 last_mprc; + u32 last_gotlbc; + u32 last_gptlbc; + u32 last_gorlbc; + u32 last_gprlbc; + + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; + u64 gotlbc; + u64 gptlbc; + u64 gorlbc; + u64 gprlbc; +}; + +#include "mbx.h" + +struct e1000_mac_operations { + /* Function pointers for the MAC. 
*/ + s32 (*init_params)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*mta_set)(struct e1000_hw *, u32); + void (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*set_vfta)(struct e1000_hw *, u16, bool); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u16 mta_reg_count; + u16 rar_entry_count; + + bool get_link_status; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16); + s32 (*check_for_msg)(struct e1000_hw *); + s32 (*check_for_ack)(struct e1000_hw *); + s32 (*check_for_rst)(struct e1000_hw *); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_mbx_info mbx; + + union { + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* These functions must be implemented by drivers */ +void e1000_rlpml_set_vf(struct e1000_hw *, u16); +void e1000_init_function_pointers_vf(struct e1000_hw *hw); + + +#endif /* _E1000_VF_H_ */ diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index cbc63ff..c5593f4 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -1214,6 +1214,19 @@ static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3) } #endif +static const struct net_device_ops ioc3_netdev_ops = { + .ndo_open = ioc3_open, + .ndo_stop = ioc3_close, + .ndo_start_xmit = ioc3_start_xmit, + .ndo_tx_timeout = ioc3_timeout, + .ndo_get_stats = ioc3_get_stats, + .ndo_set_multicast_list = ioc3_set_multicast_list, + .ndo_do_ioctl = ioc3_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ioc3_set_mac_address, + .ndo_change_mtu = eth_change_mtu, +}; + static int __devinit ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1310,15 +1323,8 @@ static int __devinit ioc3_probe(struct pci_dev *pdev, ioc3_get_eaddr(ip); /* The IOC3-specific entries in the device structure. 
*/ - dev->open = ioc3_open; - dev->hard_start_xmit = ioc3_start_xmit; - dev->tx_timeout = ioc3_timeout; dev->watchdog_timeo = 5 * HZ; - dev->stop = ioc3_close; - dev->get_stats = ioc3_get_stats; - dev->do_ioctl = ioc3_ioctl; - dev->set_multicast_list = ioc3_set_multicast_list; - dev->set_mac_address = ioc3_set_mac_address; + dev->netdev_ops = &ioc3_netdev_ops; dev->ethtool_ops = &ioc3_ethtool_ops; dev->features = NETIF_F_IP_CSUM; diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c index 3126678..73585fd 100644 --- a/drivers/net/isa-skeleton.c +++ b/drivers/net/isa-skeleton.c @@ -181,6 +181,18 @@ out: } #endif +static const struct net_device_ops netcard_netdev_ops = { + .ndo_open = net_open, + .ndo_stop = net_close, + .ndo_start_xmit = net_send_packet, + .ndo_get_stats = net_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = net_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + /* * This is the real probe routine. Linux has a history of friendly device * probes on the ISA bus. A good device probes avoids doing writes, and @@ -303,13 +315,7 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr) np = netdev_priv(dev); spin_lock_init(&np->lock); - dev->open = net_open; - dev->stop = net_close; - dev->hard_start_xmit = net_send_packet; - dev->get_stats = net_get_stats; - dev->set_multicast_list = &set_multicast_list; - - dev->tx_timeout = &net_tx_timeout; + dev->netdev_ops = &netcard_netdev_ops; dev->watchdog_timeo = MY_TX_TIMEOUT; err = register_netdev(dev); diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index de4db0d..4791238 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -885,61 +885,6 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) } /** - * ixgbe_blink_led_start_82598 - Blink LED based on index. - * @hw: pointer to hardware structure - * @index: led number to blink - **/ -static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index) -{ - ixgbe_link_speed speed = 0; - bool link_up = 0; - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - /* - * Link must be up to auto-blink the LEDs on the 82598EB MAC; - * force it if link is down. - */ - hw->mac.ops.check_link(hw, &speed, &link_up, false); - - if (!link_up) { - autoc_reg |= IXGBE_AUTOC_FLU; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - msleep(10); - } - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** - * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index. 
- * @hw: pointer to hardware structure - * @index: led number to stop blinking - **/ -static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index) -{ - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - autoc_reg &= ~IXGBE_AUTOC_FLU; - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg &= ~IXGBE_LED_BLINK(index); - led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register * @hw: pointer to hardware structure * @reg: analog register to read @@ -1128,8 +1073,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .get_link_capabilities = &ixgbe_get_link_capabilities_82598, .led_on = &ixgbe_led_on_generic, .led_off = &ixgbe_led_off_generic, - .blink_led_start = &ixgbe_blink_led_start_82598, - .blink_led_stop = &ixgbe_blink_led_stop_82598, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, .set_rar = &ixgbe_set_rar_generic, .clear_rar = &ixgbe_clear_rar_generic, .set_vmdq = &ixgbe_set_vmdq_82598, diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index beae7e0..29771fb 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -68,8 +68,6 @@ s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw); -s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index); s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw); s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); @@ -991,40 +989,6 @@ s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw) } /** - * ixgbe_blink_led_start_82599 - Blink LED based on index. - * @hw: pointer to hardware structure - * @index: led number to blink - **/ -s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index) -{ - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** - * ixgbe_blink_led_stop_82599 - Stop blinking LED based on index. 
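Both per-MAC blink helpers being deleted above perform the same read-modify-write on the LEDCTL register, which is why a single generic copy (added to ixgbe_common.c later in this diff) can replace them. The sketch below shows only that mask/shift idiom on a plain 32-bit value; the field widths and LED_* macro names are made up for illustration and are not the real IXGBE_LED_* definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative layout only: assume each LED owns an 8-bit field, with a
 * mode sub-field and a blink bit.  The real layout is in IXGBE_LED_*. */
#define LED_MODE_SHIFT(i)	((i) * 8)
#define LED_MODE_MASK(i)	(0x0Fu << LED_MODE_SHIFT(i))
#define LED_BLINK(i)		(0x80u << LED_MODE_SHIFT(i))
#define LED_LINK_ACTIVE		0x4u

static uint32_t led_blink_start(uint32_t ledctl, unsigned int index)
{
	ledctl &= ~LED_MODE_MASK(index);	/* clear the mode field  */
	ledctl |= LED_BLINK(index);		/* turn on the blink bit */
	return ledctl;
}

static uint32_t led_blink_stop(uint32_t ledctl, unsigned int index)
{
	ledctl &= ~LED_MODE_MASK(index);
	ledctl &= ~LED_BLINK(index);
	ledctl |= LED_LINK_ACTIVE << LED_MODE_SHIFT(index);
	return ledctl;
}

int main(void)
{
	uint32_t reg = 0;

	reg = led_blink_start(reg, 1);
	printf("after start: 0x%08x\n", (unsigned int)reg);
	reg = led_blink_stop(reg, 1);
	printf("after stop:  0x%08x\n", (unsigned int)reg);
	return 0;
}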
- * @hw: pointer to hardware structure - * @index: led number to stop blinking - **/ -s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index) -{ - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg &= ~IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array * @hw: pointer to hardware structure **/ @@ -1243,8 +1207,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .get_link_capabilities = &ixgbe_get_link_capabilities_82599, .led_on = &ixgbe_led_on_generic, .led_off = &ixgbe_led_off_generic, - .blink_led_start = &ixgbe_blink_led_start_82599, - .blink_led_stop = &ixgbe_blink_led_stop_82599, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, .set_rar = &ixgbe_set_rar_generic, .clear_rar = &ixgbe_clear_rar_generic, .set_vmdq = &ixgbe_set_vmdq_82599, diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 63ab667..186a650 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -50,7 +50,6 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw); static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); -static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); /** @@ -1377,8 +1376,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, * Clear accounting of old secondary address list, * don't count RAR[0] */ - uc_addr_in_use = hw->addr_ctrl.rar_used_count - - hw->addr_ctrl.mc_addr_in_rar_count - 1; + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; hw->addr_ctrl.rar_used_count -= uc_addr_in_use; hw->addr_ctrl.overflow_promisc = 0; @@ -1493,40 +1491,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) } /** - * ixgbe_add_mc_addr - Adds a multicast address. - * @hw: pointer to hardware structure - * @mc_addr: new multicast address - * - * Adds it to unused receive address register or to the multicast table. 
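With ixgbe_add_mc_addr removed below, every multicast address now lands in the multicast table array (MTA) instead of consuming receive-address registers. The following sketch shows only the bookkeeping of hashing an address to a bucket and setting one bit in a 4096-bit table; the hash itself is a placeholder, since the real vector selection depends on hw->mac.mc_filter_type.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MTA_WORDS	128	/* 128 x 32 bits = 4096 buckets */

static uint32_t mta[MTA_WORDS];

static unsigned int mc_vector(const uint8_t *mac)
{
	/* placeholder hash: take 12 bits from the top of the address */
	return ((mac[4] >> 4) | ((unsigned int)mac[5] << 4)) & 0xFFF;
}

static void set_mta(const uint8_t *mac)
{
	unsigned int vector = mc_vector(mac);
	unsigned int word = (vector >> 5) & (MTA_WORDS - 1);
	unsigned int bit  = vector & 0x1F;

	mta[word] |= 1u << bit;
	printf("addr %02x:..:%02x -> vector %u (word %u, bit %u)\n",
	       mac[0], mac[5], vector, word, bit);
}

int main(void)
{
	uint8_t a[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint8_t b[6] = { 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xfa };

	memset(mta, 0, sizeof(mta));
	set_mta(a);
	set_mta(b);
	return 0;
}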
- **/ -static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) -{ - u32 rar_entries = hw->mac.num_rar_entries; - u32 rar; - - hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", - mc_addr[0], mc_addr[1], mc_addr[2], - mc_addr[3], mc_addr[4], mc_addr[5]); - - /* - * Place this multicast address in the RAR if there is room, - * else put it in the MTA - */ - if (hw->addr_ctrl.rar_used_count < rar_entries) { - /* use RAR from the end up for multicast */ - rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1; - hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV); - hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar); - hw->addr_ctrl.rar_used_count++; - hw->addr_ctrl.mc_addr_in_rar_count++; - } else { - ixgbe_set_mta(hw, mc_addr); - } - - hw_dbg(hw, "ixgbe_add_mc_addr Complete\n"); -} - -/** * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses * @hw: pointer to hardware structure * @mc_addr_list: the list of new multicast addresses @@ -1542,7 +1506,6 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, u32 mc_addr_count, ixgbe_mc_addr_itr next) { u32 i; - u32 rar_entries = hw->mac.num_rar_entries; u32 vmdq; /* @@ -1550,18 +1513,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, * use. */ hw->addr_ctrl.num_mc_addrs = mc_addr_count; - hw->addr_ctrl.rar_used_count -= hw->addr_ctrl.mc_addr_in_rar_count; - hw->addr_ctrl.mc_addr_in_rar_count = 0; hw->addr_ctrl.mta_in_use = 0; - /* Zero out the other receive addresses. */ - hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count, - rar_entries - 1); - for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); - } - /* Clear the MTA */ hw_dbg(hw, " Clearing MTA\n"); for (i = 0; i < hw->mac.mcft_size; i++) @@ -1570,7 +1523,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, /* Add the new addresses */ for (i = 0; i < mc_addr_count; i++) { hw_dbg(hw, " Adding the multicast addresses:\n"); - ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq)); + ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); } /* Enable mta */ @@ -2071,3 +2024,58 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) return 0; } + +/** + * ixgbe_blink_led_start_generic - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + **/ +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) +{ + ixgbe_link_speed speed = 0; + bool link_up = 0; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* + * Link must be up to auto-blink the LEDs; + * Force it if link is down. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (!link_up) { + autoc_reg |= IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + msleep(10); + } + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 
+ * @hw: pointer to hardware structure + * @index: led number to stop blinking + **/ +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + autoc_reg &= ~IXGBE_AUTOC_FLU; + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg &= ~IXGBE_LED_BLINK(index); + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 24f73e7..dd26089 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -76,6 +76,9 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val); s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); + #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #ifndef writeq diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c index 470b676..f4417fc 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c @@ -290,7 +290,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { - u32 i, reg; + u32 i, reg, rx_pba_size; /* If PFC is disabled globally then fall back to LFC. */ if (!dcb_config->pfc_mode_enable) { @@ -301,17 +301,23 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, /* Configure PFC Tx thresholds per TC */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - /* Config and remember Tx */ + if (dcb_config->rx_pba_cfg == pba_equal) + rx_pba_size = IXGBE_RXPBSIZE_64KB; + else + rx_pba_size = (i < 4) ? 
IXGBE_RXPBSIZE_80KB + : IXGBE_RXPBSIZE_48KB; + + reg = ((rx_pba_size >> 5) & 0xFFE0); if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || - dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) { - reg = hw->fc.high_water | IXGBE_FCRTH_FCEN; - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); - reg = hw->fc.low_water | IXGBE_FCRTL_XONE; - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); - } else { - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); - } + dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) + reg |= IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); + + reg = ((rx_pba_size >> 2) & 0xFFE0); + if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || + dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) + reg |= IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); } /* Configure pause time (2 TCs per register) */ diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index aafc120..f0a20fa 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -943,6 +943,24 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, } +static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct ixgbe_hw *hw = &adapter->hw; + int retval = 1; + + switch(hw->device_id) { + case IXGBE_DEV_ID_82599_KX4: + retval = 0; + break; + default: + wol->supported = 0; + retval = 0; + } + + return retval; +} + static void ixgbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { @@ -952,7 +970,8 @@ static void ixgbe_get_wol(struct net_device *netdev, WAKE_BCAST | WAKE_MAGIC; wol->wolopts = 0; - if (!device_can_wakeup(&adapter->pdev->dev)) + if (ixgbe_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) return; if (adapter->wol & IXGBE_WUFC_EX) @@ -974,6 +993,9 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) return -EOPNOTSUPP; + if (ixgbe_wol_exclusion(adapter, wol)) + return wol->wolopts ? -EOPNOTSUPP : 0; + adapter->wol = 0; if (wol->wolopts & WAKE_UCAST) diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 9ef128a..07e778d 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2723,17 +2723,21 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) **/ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) { - /* Start with base case */ - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - #ifdef CONFIG_IXGBE_DCB if (ixgbe_set_dcb_queues(adapter)) - return; + goto done; #endif if (ixgbe_set_rss_queues(adapter)) - return; + goto done; + + /* fallback to base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + +done: + /* Notify the stack of the (possibly) reduced Tx Queue count. 
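In the PFC hunk above, the per-TC XON/XOFF watermarks are now derived from the Rx packet-buffer size rather than the global hw->fc values: low water is roughly pba/32 and high water pba/4, both masked to a 32-byte granule by 0xFFE0. A small standalone check of that arithmetic, using plain byte counts where the driver uses the IXGBE_RXPBSIZE_* register encodings:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the shift-and-mask step from the hunk above.  The enable
 * bits (XONE/FCEN) ride along in the same registers when PFC is on. */
static void pfc_thresholds(uint32_t rx_pba_size, int pfc_enabled)
{
	uint32_t low  = (rx_pba_size >> 5) & 0xFFE0;
	uint32_t high = (rx_pba_size >> 2) & 0xFFE0;

	printf("pba=%6u  FCRTL=0x%05x%s  FCRTH=0x%05x%s\n",
	       (unsigned int)rx_pba_size,
	       (unsigned int)low,  pfc_enabled ? "+XONE" : "",
	       (unsigned int)high, pfc_enabled ? "+FCEN" : "");
}

int main(void)
{
	pfc_thresholds(64 * 1024, 1);	/* pba_equal case          */
	pfc_thresholds(80 * 1024, 1);	/* first four TCs          */
	pfc_thresholds(48 * 1024, 0);	/* remaining TCs, PFC off  */
	return 0;
}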
*/ + adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; } static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, @@ -2837,11 +2841,55 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) } ret = true; } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - for (i = 0; i < dcb_i; i++) { - adapter->rx_ring[i].reg_idx = i << 4; - adapter->tx_ring[i].reg_idx = i << 4; + if (dcb_i == 8) { + /* + * Tx TC0 starts at: descriptor queue 0 + * Tx TC1 starts at: descriptor queue 32 + * Tx TC2 starts at: descriptor queue 64 + * Tx TC3 starts at: descriptor queue 80 + * Tx TC4 starts at: descriptor queue 96 + * Tx TC5 starts at: descriptor queue 104 + * Tx TC6 starts at: descriptor queue 112 + * Tx TC7 starts at: descriptor queue 120 + * + * Rx TC0-TC7 are offset by 16 queues each + */ + for (i = 0; i < 3; i++) { + adapter->tx_ring[i].reg_idx = i << 5; + adapter->rx_ring[i].reg_idx = i << 4; + } + for ( ; i < 5; i++) { + adapter->tx_ring[i].reg_idx = + ((i + 2) << 4); + adapter->rx_ring[i].reg_idx = i << 4; + } + for ( ; i < dcb_i; i++) { + adapter->tx_ring[i].reg_idx = + ((i + 8) << 3); + adapter->rx_ring[i].reg_idx = i << 4; + } + + ret = true; + } else if (dcb_i == 4) { + /* + * Tx TC0 starts at: descriptor queue 0 + * Tx TC1 starts at: descriptor queue 64 + * Tx TC2 starts at: descriptor queue 96 + * Tx TC3 starts at: descriptor queue 112 + * + * Rx TC0-TC3 are offset by 32 queues each + */ + adapter->tx_ring[0].reg_idx = 0; + adapter->tx_ring[1].reg_idx = 64; + adapter->tx_ring[2].reg_idx = 96; + adapter->tx_ring[3].reg_idx = 112; + for (i = 0 ; i < dcb_i; i++) + adapter->rx_ring[i].reg_idx = i << 5; + + ret = true; + } else { + ret = false; } - ret = true; } else { ret = false; } @@ -2992,9 +3040,6 @@ try_msi: } out: - /* Notify the stack of the (possibly) reduced Tx Queue count. 
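The 8-TC branch in the DCB hunk above encodes the 82599 descriptor-queue layout as three small loops. Reproducing that math outside the driver makes it easy to confirm it yields the Tx bases promised in the code comment (0, 32, 64, 80, 96, 104, 112, 120) and an Rx stride of 16 queues per TC:

#include <stdio.h>

/* Same index math as the hunk above: i<<5 for TC0-2, (i+2)<<4 for
 * TC3-4, (i+8)<<3 for TC5-7; Rx is always i<<4. */
static int tx_reg_idx_8tc(int i)
{
	if (i < 3)
		return i << 5;
	if (i < 5)
		return (i + 2) << 4;
	return (i + 8) << 3;
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("TC%d: tx reg_idx %3d, rx reg_idx %3d\n",
		       i, tx_reg_idx_8tc(i), i << 4);
	return 0;
}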
*/ - adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; - return err; } @@ -3601,6 +3646,8 @@ static int ixgbe_resume(struct pci_dev *pdev) ixgbe_reset(adapter); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + if (netif_running(netdev)) { err = ixgbe_open(adapter->netdev); if (err) @@ -3611,9 +3658,9 @@ static int ixgbe_resume(struct pci_dev *pdev) return 0; } - #endif /* CONFIG_PM */ -static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) + +static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -3672,18 +3719,46 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) pci_enable_wake(pdev, PCI_D3cold, 0); } + *enable_wake = !!wufc; + ixgbe_release_hw_control(adapter); pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __ixgbe_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } return 0; } +#endif /* CONFIG_PM */ static void ixgbe_shutdown(struct pci_dev *pdev) { - ixgbe_suspend(pdev, PMSG_SUSPEND); + bool wake; + + __ixgbe_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } } /** @@ -3917,7 +3992,7 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) } hw->mac.ops.setup_sfp(hw); - if (!adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK) + if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) /* This will also work for DA Twinax connections */ schedule_work(&adapter->multispeed_fiber_task); adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK; @@ -4342,7 +4417,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) int count = 0; unsigned int f; - r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; + r_idx = skb->queue_mapping; tx_ring = &adapter->tx_ring[r_idx]; if (adapter->vlgrp && vlan_tx_tag_present(skb)) { @@ -4502,7 +4577,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; static int cards_found; int i, err, pci_using_dac; - u16 pm_value = 0; u32 part_num, eec; err = pci_enable_device(pdev); @@ -4690,11 +4764,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, switch (pdev->device) { case IXGBE_DEV_ID_82599_KX4: -#define IXGBE_PCIE_PMCSR 0x44 - adapter->wol = IXGBE_WUFC_MAG; - pci_read_config_word(pdev, IXGBE_PCIE_PMCSR, &pm_value); - pci_write_config_word(pdev, IXGBE_PCIE_PMCSR, - (pm_value | (1 << 8))); + adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | + IXGBE_WUFC_MC | IXGBE_WUFC_BC); break; default: adapter->wol = 0; diff --git a/drivers/net/jme.c b/drivers/net/jme.c index ece3504..621a7c0 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c @@ -2591,13 +2591,13 @@ static int jme_pci_dma64(struct pci_dev *pdev) { if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && - !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) - if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) return 1; if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && - !pci_set_dma_mask(pdev, DMA_40BIT_MASK)) - if 
(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) + !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40))) return 1; if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c index 8e88486..22e74a0 100644 --- a/drivers/net/mac8390.c +++ b/drivers/net/mac8390.c @@ -304,7 +304,7 @@ struct net_device * __init mac8390_probe(int unit) if (!MACH_IS_MAC) return ERR_PTR(-ENODEV); - dev = alloc_ei_netdev(); + dev = ____alloc_ei_netdev(0); if (!dev) return ERR_PTR(-ENOMEM); @@ -481,15 +481,15 @@ void cleanup_module(void) static const struct net_device_ops mac8390_netdev_ops = { .ndo_open = mac8390_open, .ndo_stop = mac8390_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = __ei_poll, #endif }; diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index 380a1a5..384e072 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c @@ -168,6 +168,17 @@ writereg(struct net_device *dev, int portno, int value) nubus_writew(swab16(value), dev->mem_start + portno); } +static const struct net_device_ops mac89x0_netdev_ops = { + .ndo_open = net_open, + .ndo_stop = net_close, + .ndo_start_xmit = net_send_packet, + .ndo_get_stats = net_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_set_mac_address = set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + /* Probe for the CS8900 card in slot E. We won't bother looking anywhere else until we have a really good reason to do so. */ struct net_device * __init mac89x0_probe(int unit) @@ -280,12 +291,7 @@ struct net_device * __init mac89x0_probe(int unit) printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr); - dev->open = net_open; - dev->stop = net_close; - dev->hard_start_xmit = net_send_packet; - dev->get_stats = net_get_stats; - dev->set_multicast_list = &set_multicast_list; - dev->set_mac_address = &set_mac_address; + dev->netdev_ops = &mac89x0_netdev_ops; err = register_netdev(dev); if (err) diff --git a/drivers/net/macb.c b/drivers/net/macb.c index f505010..e82aee4 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -316,10 +316,11 @@ static void macb_tx(struct macb *bp) dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n", (unsigned long)status); - if (status & MACB_BIT(UND)) { + if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { int i; - printk(KERN_ERR "%s: TX underrun, resetting buffers\n", - bp->dev->name); + printk(KERN_ERR "%s: TX %s, resetting buffers\n", + bp->dev->name, status & MACB_BIT(UND) ? + "underrun" : "retry limit exceeded"); /* Transfer ongoing, disable transmitter, to avoid confusion */ if (status & MACB_BIT(TGO)) @@ -520,27 +521,10 @@ static int macb_poll(struct napi_struct *napi, int budget) macb_writel(bp, RSR, status); work_done = 0; - if (!status) { - /* - * This may happen if an interrupt was pending before - * this function was called last time, and no packets - * have been received since. 
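The jme hunk above swaps the old DMA_64BIT_MASK / DMA_40BIT_MASK constants for the DMA_BIT_MASK(n) helper while keeping the 64 -> 40 -> 32 bit fallback. The snippet below carries a local copy of that macro so it compiles outside the kernel, and fakes the fallback with a stand-in for pci_set_dma_mask():

#include <stdio.h>
#include <stdint.h>

/* Local copy of the kernel helper: an all-ones mask n bits wide,
 * with n == 64 special-cased to avoid an undefined 64-bit shift. */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static int try_dma_mask(unsigned int bits)
{
	/* stand-in for pci_set_dma_mask(); pretend only <= 40 bits works */
	return bits <= 40 ? 0 : -1;
}

int main(void)
{
	static const unsigned int widths[] = { 64, 40, 32 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		int rejected = try_dma_mask(widths[i]);

		printf("DMA_BIT_MASK(%2u) = 0x%016llx -> %s\n",
		       widths[i],
		       (unsigned long long)DMA_BIT_MASK(widths[i]),
		       rejected ? "rejected, fall back" : "accepted");
		if (!rejected)
			break;
	}
	return 0;
}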
- */ - napi_complete(napi); - goto out; - } dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n", (unsigned long)status, budget); - if (!(status & MACB_BIT(REC))) { - dev_warn(&bp->pdev->dev, - "No RX buffers complete, status = %02lx\n", - (unsigned long)status); - napi_complete(napi); - goto out; - } - work_done = macb_rx(bp, budget); if (work_done < budget) napi_complete(napi); @@ -549,7 +533,6 @@ static int macb_poll(struct napi_struct *napi, int budget) * We've done what we can to clean the buffers. Make sure we * get notified when new packets arrive. */ -out: macb_writel(bp, IER, MACB_RX_INT_FLAGS); /* TODO: Handle errors */ @@ -590,7 +573,8 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } } - if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND))) + if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE))) macb_tx(bp); /* @@ -1100,6 +1084,18 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(phydev, if_mii(rq), cmd); } +static const struct net_device_ops macb_netdev_ops = { + .ndo_open = macb_open, + .ndo_stop = macb_close, + .ndo_start_xmit = macb_start_xmit, + .ndo_set_multicast_list = macb_set_rx_mode, + .ndo_get_stats = macb_get_stats, + .ndo_do_ioctl = macb_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __init macb_probe(struct platform_device *pdev) { struct eth_platform_data *pdata; @@ -1175,12 +1171,7 @@ static int __init macb_probe(struct platform_device *pdev) goto err_out_iounmap; } - dev->open = macb_open; - dev->stop = macb_close; - dev->hard_start_xmit = macb_start_xmit; - dev->get_stats = macb_get_stats; - dev->set_multicast_list = macb_set_rx_mode; - dev->do_ioctl = macb_ioctl; + dev->netdev_ops = &macb_netdev_ops; netif_napi_add(dev, &bp->napi, macb_poll, 64); dev->ethtool_ops = &macb_ethtool_ops; diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index 527166e..acd143d 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c @@ -167,6 +167,18 @@ static int macsonic_close(struct net_device* dev) return err; } +static const struct net_device_ops macsonic_netdev_ops = { + .ndo_open = macsonic_open, + .ndo_stop = macsonic_close, + .ndo_start_xmit = sonic_send_packet, + .ndo_set_multicast_list = sonic_multicast_list, + .ndo_tx_timeout = sonic_tx_timeout, + .ndo_get_stats = sonic_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __init macsonic_init(struct net_device *dev) { struct sonic_local* lp = netdev_priv(dev); @@ -198,12 +210,7 @@ static int __init macsonic_init(struct net_device *dev) lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode)); - dev->open = macsonic_open; - dev->stop = macsonic_close; - dev->hard_start_xmit = sonic_send_packet; - dev->get_stats = sonic_get_stats; - dev->set_multicast_list = &sonic_multicast_list; - dev->tx_timeout = sonic_tx_timeout; + dev->netdev_ops = &macsonic_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; /* diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 70d3ef4a..214a8cf 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -376,7 +376,8 @@ static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev) const struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - if (lowerdev->ethtool_ops->get_rx_csum == NULL) + if 
(lowerdev->ethtool_ops == NULL || + lowerdev->ethtool_ops->get_rx_csum == NULL) return 0; return lowerdev->ethtool_ops->get_rx_csum(lowerdev); } @@ -387,7 +388,8 @@ static int macvlan_ethtool_get_settings(struct net_device *dev, const struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - if (!lowerdev->ethtool_ops->get_settings) + if (!lowerdev->ethtool_ops || + !lowerdev->ethtool_ops->get_settings) return -EOPNOTSUPP; return lowerdev->ethtool_ops->get_settings(lowerdev, cmd); @@ -398,7 +400,8 @@ static u32 macvlan_ethtool_get_flags(struct net_device *dev) const struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - if (!lowerdev->ethtool_ops->get_flags) + if (!lowerdev->ethtool_ops || + !lowerdev->ethtool_ops->get_flags) return 0; return lowerdev->ethtool_ops->get_flags(lowerdev); } diff --git a/drivers/net/meth.c b/drivers/net/meth.c index aa08987..dbd3436 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c @@ -127,11 +127,11 @@ static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg) static int mdio_probe(struct meth_private *priv) { int i; - unsigned long p2, p3; + unsigned long p2, p3, flags; /* check if phy is detected already */ if(priv->phy_addr>=0&&priv->phy_addr<32) return 0; - spin_lock(&priv->meth_lock); + spin_lock_irqsave(&priv->meth_lock, flags); for (i=0;i<32;++i){ priv->phy_addr=i; p2=mdio_read(priv,2); @@ -157,7 +157,7 @@ static int mdio_probe(struct meth_private *priv) break; } } - spin_unlock(&priv->meth_lock); + spin_unlock_irqrestore(&priv->meth_lock, flags); if(priv->phy_addr<32) { return 0; } @@ -373,14 +373,14 @@ static int meth_release(struct net_device *dev) static void meth_rx(struct net_device* dev, unsigned long int_status) { struct sk_buff *skb; - unsigned long status; + unsigned long status, flags; struct meth_private *priv = netdev_priv(dev); unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8; - spin_lock(&priv->meth_lock); + spin_lock_irqsave(&priv->meth_lock, flags); priv->dma_ctrl &= ~METH_DMA_RX_INT_EN; mace->eth.dma_ctrl = priv->dma_ctrl; - spin_unlock(&priv->meth_lock); + spin_unlock_irqrestore(&priv->meth_lock, flags); if (int_status & METH_INT_RX_UNDERFLOW) { fifo_rptr = (fifo_rptr - 1) & 0x0f; @@ -452,12 +452,12 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write]; ADVANCE_RX_PTR(priv->rx_write); } - spin_lock(&priv->meth_lock); + spin_lock_irqsave(&priv->meth_lock, flags); /* In case there was underflow, and Rx DMA was disabled */ priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN; mace->eth.dma_ctrl = priv->dma_ctrl; mace->eth.int_stat = METH_INT_RX_THRESHOLD; - spin_unlock(&priv->meth_lock); + spin_unlock_irqrestore(&priv->meth_lock, flags); } static int meth_tx_full(struct net_device *dev) @@ -470,11 +470,11 @@ static int meth_tx_full(struct net_device *dev) static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) { struct meth_private *priv = netdev_priv(dev); - unsigned long status; + unsigned long status, flags; struct sk_buff *skb; unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16; - spin_lock(&priv->meth_lock); + spin_lock_irqsave(&priv->meth_lock, flags); /* Stop DMA notification */ priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN); @@ -527,12 +527,13 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) } mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT; - spin_unlock(&priv->meth_lock); + 
spin_unlock_irqrestore(&priv->meth_lock, flags); } static void meth_error(struct net_device* dev, unsigned status) { struct meth_private *priv = netdev_priv(dev); + unsigned long flags; printk(KERN_WARNING "meth: error status: 0x%08x\n",status); /* check for errors too... */ @@ -547,7 +548,7 @@ static void meth_error(struct net_device* dev, unsigned status) printk(KERN_WARNING "meth: Rx overflow\n"); if (status & (METH_INT_RX_UNDERFLOW)) { printk(KERN_WARNING "meth: Rx underflow\n"); - spin_lock(&priv->meth_lock); + spin_lock_irqsave(&priv->meth_lock, flags); mace->eth.int_stat = METH_INT_RX_UNDERFLOW; /* more underflow interrupts will be delivered, * effectively throwing us into an infinite loop. @@ -555,7 +556,7 @@ static void meth_error(struct net_device* dev, unsigned status) priv->dma_ctrl &= ~METH_DMA_RX_EN; mace->eth.dma_ctrl = priv->dma_ctrl; DPRINTK("Disabled meth Rx DMA temporarily\n"); - spin_unlock(&priv->meth_lock); + spin_unlock_irqrestore(&priv->meth_lock, flags); } mace->eth.int_stat = METH_INT_ERROR; } diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c index 91f50de..a276125 100644 --- a/drivers/net/mlx4/en_cq.c +++ b/drivers/net/mlx4/en_cq.c @@ -125,8 +125,10 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) if (cq->is_tx) del_timer(&cq->timer); - else + else { napi_disable(&cq->napi); + netif_napi_del(&cq->napi); + } mlx4_cq_free(mdev->dev, &cq->mcq); } diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c index eda72dd..510633f 100644 --- a/drivers/net/mlx4/en_main.c +++ b/drivers/net/mlx4/en_main.c @@ -181,7 +181,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) mdev->workqueue = create_singlethread_workqueue("mlx4_en"); if (!mdev->workqueue) { err = -ENOMEM; - goto err_close_nic; + goto err_mr; } /* At this stage all non-port specific tasks are complete: @@ -214,9 +214,8 @@ err_free_netdev: flush_workqueue(mdev->workqueue); /* Stop event queue before we drop down to release shared SW state */ - -err_close_nic: destroy_workqueue(mdev->workqueue); + err_mr: mlx4_mr_free(dev, &mdev->mr); err_uar: diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 303c23d..7bcc49d 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c @@ -348,11 +348,9 @@ static void mlx4_en_tx_timeout(struct net_device *dev) if (netif_msg_timer(priv)) mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port); - if (netif_carrier_ok(dev)) { - priv->port_stats.tx_timeout++; - mlx4_dbg(DRV, priv, "Scheduling watchdog\n"); - queue_work(mdev->workqueue, &priv->watchdog_task); - } + priv->port_stats.tx_timeout++; + mlx4_dbg(DRV, priv, "Scheduling watchdog\n"); + queue_work(mdev->workqueue, &priv->watchdog_task); } @@ -585,7 +583,7 @@ int mlx4_en_start_port(struct net_device *dev) err = mlx4_en_activate_cq(priv, cq); if (err) { mlx4_err(mdev, "Failed activating Rx CQ\n"); - goto rx_err; + goto cq_err; } for (j = 0; j < cq->size; j++) cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; @@ -761,9 +759,14 @@ static void mlx4_en_restart(struct work_struct *work) struct net_device *dev = priv->dev; mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); - mlx4_en_stop_port(dev); - if (mlx4_en_start_port(dev)) - mlx4_err(mdev, "Failed restarting port %d\n", priv->port); + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + mlx4_en_stop_port(dev); + if (mlx4_en_start_port(dev)) + mlx4_err(mdev, "Failed restarting port %d\n", priv->port); + } + mutex_unlock(&mdev->state_lock); } @@ 
-1054,7 +1057,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, * Set driver features */ dev->features |= NETIF_F_SG; - dev->features |= NETIF_F_HW_CSUM; + dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; dev->features |= NETIF_F_HIGHDMA; dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c index c5a4c03..a29abe8 100644 --- a/drivers/net/mlx4/en_port.c +++ b/drivers/net/mlx4/en_port.c @@ -151,6 +151,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) struct mlx4_cmd_mailbox *mailbox; u64 in_mod = reset << 8 | port; int err; + int i; mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); if (IS_ERR(mailbox)) @@ -165,38 +166,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) spin_lock_bh(&priv->stats_lock); - stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) - - be32_to_cpu(mlx4_en_stats->RDROP); - stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) + - be64_to_cpu(mlx4_en_stats->TTOT_prio_1) + - be64_to_cpu(mlx4_en_stats->TTOT_prio_2) + - be64_to_cpu(mlx4_en_stats->TTOT_prio_3) + - be64_to_cpu(mlx4_en_stats->TTOT_prio_4) + - be64_to_cpu(mlx4_en_stats->TTOT_prio_5) + - be64_to_cpu(mlx4_en_stats->TTOT_prio_6) + - be64_to_cpu(mlx4_en_stats->TTOT_prio_7) + - be64_to_cpu(mlx4_en_stats->TTOT_novlan) + - be64_to_cpu(mlx4_en_stats->TTOT_loopbk); - stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) + - be64_to_cpu(mlx4_en_stats->ROCT_prio_1) + - be64_to_cpu(mlx4_en_stats->ROCT_prio_2) + - be64_to_cpu(mlx4_en_stats->ROCT_prio_3) + - be64_to_cpu(mlx4_en_stats->ROCT_prio_4) + - be64_to_cpu(mlx4_en_stats->ROCT_prio_5) + - be64_to_cpu(mlx4_en_stats->ROCT_prio_6) + - be64_to_cpu(mlx4_en_stats->ROCT_prio_7) + - be64_to_cpu(mlx4_en_stats->ROCT_novlan); - - stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) + - be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) + - be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) + - be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) + - be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) + - be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) + - be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) + - be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) + - be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) + - be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk); + stats->rx_packets = 0; + stats->rx_bytes = 0; + for (i = 0; i < priv->rx_ring_num; i++) { + stats->rx_packets += priv->rx_ring[i].packets; + stats->rx_bytes += priv->rx_ring[i].bytes; + } + stats->tx_packets = 0; + stats->tx_bytes = 0; + for (i = 0; i <= priv->tx_ring_num; i++) { + stats->tx_packets += priv->tx_ring[i].packets; + stats->tx_bytes += priv->tx_ring[i].bytes; + } stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + be32_to_cpu(mlx4_en_stats->RdropLength) + diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c index a054520..65ca706 100644 --- a/drivers/net/mlx4/en_resources.c +++ b/drivers/net/mlx4/en_resources.c @@ -94,3 +94,9 @@ void mlx4_en_unmap_buffer(struct mlx4_buf *buf) vunmap(buf->direct.buf); } + +void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) +{ + return; +} + diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index 7e40741..9ee873e 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c @@ -436,8 +436,9 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) /* Initialize page allocators */ err = mlx4_en_init_allocator(priv, ring); if (err) { - mlx4_err(mdev, "Failed initializing ring allocator\n"); - goto err_allocator; + 
mlx4_err(mdev, "Failed initializing ring allocator\n"); + ring_ind--; + goto err_allocator; } /* Fill Rx buffers */ @@ -467,6 +468,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) ring->wqres.db.dma, &ring->srq); if (err){ mlx4_err(mdev, "Failed to allocate srq\n"); + ring_ind--; goto err_srq; } ring->srq.event = mlx4_en_srq_event; @@ -608,6 +610,10 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags, skb_shinfo(skb)->frags, page_alloc, length); + if (unlikely(!used_frags)) { + kfree_skb(skb); + return NULL; + } skb_shinfo(skb)->nr_frags = used_frags; /* Copy headers into the skb linear buffer */ @@ -926,12 +932,6 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv, } } -static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) -{ - return; -} - - static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, int srqn, int cqn, enum mlx4_qp_state *state, @@ -951,7 +951,6 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, if (err) { mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn); goto out; - return err; } qp->event = mlx4_en_sqp_event; diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 4afd599..e5c98a9 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c @@ -112,6 +112,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn); goto err_reserve; } + ring->qp.event = mlx4_en_sqp_event; return 0; @@ -425,7 +426,7 @@ void mlx4_en_poll_tx_cq(unsigned long data) INC_PERF_COUNTER(priv->pstats.tx_poll); - if (!spin_trylock(&ring->comp_lock)) { + if (!spin_trylock_irq(&ring->comp_lock)) { mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); return; } @@ -438,7 +439,7 @@ void mlx4_en_poll_tx_cq(unsigned long data) if (inflight && priv->port_up) mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); - spin_unlock(&ring->comp_lock); + spin_unlock_irq(&ring->comp_lock); } static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, @@ -481,9 +482,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) - if (spin_trylock(&ring->comp_lock)) { + if (spin_trylock_irq(&ring->comp_lock)) { mlx4_en_process_tx_cq(priv->dev, cq); - spin_unlock(&ring->comp_lock); + spin_unlock_irq(&ring->comp_lock); } } diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 102bac9..30bea96 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -976,7 +976,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) nreq = err; goto retry; } - + kfree(entries); goto no_msi; } diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h index e9af32d..ef840ab 100644 --- a/drivers/net/mlx4/mlx4_en.h +++ b/drivers/net/mlx4/mlx4_en.h @@ -538,6 +538,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int srqn, struct mlx4_qp_context *context); +void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); int mlx4_en_map_buffer(struct mlx4_buf *buf); void mlx4_en_unmap_buffer(struct mlx4_buf *buf); diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c index 7cce334..606aa58 100644 --- a/drivers/net/mlx4/port.c +++ b/drivers/net/mlx4/port.c @@ -299,13 +299,14 @@ int 
mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) struct mlx4_cmd_mailbox *mailbox; int err; + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + return 0; + mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); memset(mailbox->buf, 0, 256); - if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) - return 0; ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index a56d9d2..6bb5af3 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -393,12 +393,12 @@ struct mv643xx_eth_private { struct work_struct tx_timeout_task; struct napi_struct napi; + u8 oom; u8 work_link; u8 work_tx; u8 work_tx_end; u8 work_rx; u8 work_rx_refill; - u8 work_rx_oom; int skb_size; struct sk_buff_head rx_recycle; @@ -569,7 +569,7 @@ static int rxq_process(struct rx_queue *rxq, int budget) if (rxq->rx_curr_desc == rxq->rx_ring_size) rxq->rx_curr_desc = 0; - dma_unmap_single(NULL, rx_desc->buf_ptr, + dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, rx_desc->buf_size, DMA_FROM_DEVICE); rxq->rx_desc_count--; rx++; @@ -661,7 +661,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget) dma_get_cache_alignment() - 1); if (skb == NULL) { - mp->work_rx_oom |= 1 << rxq->index; + mp->oom = 1; goto oom; } @@ -678,8 +678,9 @@ static int rxq_refill(struct rx_queue *rxq, int budget) rx_desc = rxq->rx_desc_area + rx; - rx_desc->buf_ptr = dma_map_single(NULL, skb->data, - mp->skb_size, DMA_FROM_DEVICE); + rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, + skb->data, mp->skb_size, + DMA_FROM_DEVICE); rx_desc->buf_size = mp->skb_size; rxq->rx_skb[rx] = skb; wmb(); @@ -718,6 +719,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) { + struct mv643xx_eth_private *mp = txq_to_mp(txq); int nr_frags = skb_shinfo(skb)->nr_frags; int frag; @@ -746,10 +748,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) desc->l4i_chk = 0; desc->byte_cnt = this_frag->size; - desc->buf_ptr = dma_map_page(NULL, this_frag->page, - this_frag->page_offset, - this_frag->size, - DMA_TO_DEVICE); + desc->buf_ptr = dma_map_page(mp->dev->dev.parent, + this_frag->page, + this_frag->page_offset, + this_frag->size, DMA_TO_DEVICE); } } @@ -826,7 +828,8 @@ no_csum: desc->l4i_chk = l4i_chk; desc->byte_cnt = length; - desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); + desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, + length, DMA_TO_DEVICE); __skb_queue_tail(&txq->tx_skb, skb); @@ -956,10 +959,10 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) } if (cmd_sts & TX_FIRST_DESC) { - dma_unmap_single(NULL, desc->buf_ptr, + dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, desc->byte_cnt, DMA_TO_DEVICE); } else { - dma_unmap_page(NULL, desc->buf_ptr, + dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr, desc->byte_cnt, DMA_TO_DEVICE); } @@ -1255,7 +1258,6 @@ static void mib_counters_update(struct mv643xx_eth_private *mp) spin_lock_bh(&mp->mib_counters_lock); p->good_octets_received += mib_read(mp, 0x00); - p->good_octets_received += (u64)mib_read(mp, 0x04) << 32; p->bad_octets_received += mib_read(mp, 0x08); p->internal_mac_transmit_err += mib_read(mp, 0x0c); p->good_frames_received += mib_read(mp, 0x10); @@ -1269,7 +1271,6 @@ static void mib_counters_update(struct mv643xx_eth_private *mp) 
p->frames_512_to_1023_octets += mib_read(mp, 0x30); p->frames_1024_to_max_octets += mib_read(mp, 0x34); p->good_octets_sent += mib_read(mp, 0x38); - p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32; p->good_frames_sent += mib_read(mp, 0x40); p->excessive_collision += mib_read(mp, 0x44); p->multicast_frames_sent += mib_read(mp, 0x48); @@ -1896,9 +1897,9 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index) mp->rx_desc_sram_size); rxq->rx_desc_dma = mp->rx_desc_sram_addr; } else { - rxq->rx_desc_area = dma_alloc_coherent(NULL, size, - &rxq->rx_desc_dma, - GFP_KERNEL); + rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, + size, &rxq->rx_desc_dma, + GFP_KERNEL); } if (rxq->rx_desc_area == NULL) { @@ -1949,7 +1950,7 @@ out_free: if (index == 0 && size <= mp->rx_desc_sram_size) iounmap(rxq->rx_desc_area); else - dma_free_coherent(NULL, size, + dma_free_coherent(mp->dev->dev.parent, size, rxq->rx_desc_area, rxq->rx_desc_dma); @@ -1981,7 +1982,7 @@ static void rxq_deinit(struct rx_queue *rxq) rxq->rx_desc_area_size <= mp->rx_desc_sram_size) iounmap(rxq->rx_desc_area); else - dma_free_coherent(NULL, rxq->rx_desc_area_size, + dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, rxq->rx_desc_area, rxq->rx_desc_dma); kfree(rxq->rx_skb); @@ -2009,9 +2010,9 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) mp->tx_desc_sram_size); txq->tx_desc_dma = mp->tx_desc_sram_addr; } else { - txq->tx_desc_area = dma_alloc_coherent(NULL, size, - &txq->tx_desc_dma, - GFP_KERNEL); + txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, + size, &txq->tx_desc_dma, + GFP_KERNEL); } if (txq->tx_desc_area == NULL) { @@ -2055,7 +2056,7 @@ static void txq_deinit(struct tx_queue *txq) txq->tx_desc_area_size <= mp->tx_desc_sram_size) iounmap(txq->tx_desc_area); else - dma_free_coherent(NULL, txq->tx_desc_area_size, + dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, txq->tx_desc_area, txq->tx_desc_dma); } @@ -2167,8 +2168,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) mp = container_of(napi, struct mv643xx_eth_private, napi); - mp->work_rx_refill |= mp->work_rx_oom; - mp->work_rx_oom = 0; + if (unlikely(mp->oom)) { + mp->oom = 0; + del_timer(&mp->rx_oom); + } work_done = 0; while (work_done < budget) { @@ -2182,8 +2185,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) continue; } - queue_mask = mp->work_tx | mp->work_tx_end | - mp->work_rx | mp->work_rx_refill; + queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; + if (likely(!mp->oom)) + queue_mask |= mp->work_rx_refill; + if (!queue_mask) { if (mv643xx_eth_collect_events(mp)) continue; @@ -2204,7 +2209,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) txq_maybe_wake(mp->txq + queue); } else if (mp->work_rx & queue_mask) { work_done += rxq_process(mp->rxq + queue, work_tbd); - } else if (mp->work_rx_refill & queue_mask) { + } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { work_done += rxq_refill(mp->rxq + queue, work_tbd); } else { BUG(); @@ -2212,7 +2217,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) } if (work_done < budget) { - if (mp->work_rx_oom) + if (mp->oom) mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); napi_complete(napi); wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT); @@ -2274,8 +2279,6 @@ static void port_start(struct mv643xx_eth_private *mp) pscr |= FORCE_LINK_PASS; wrlp(mp, PORT_SERIAL_CONTROL, pscr); - wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); - /* * Configure TX path 
and queues. */ @@ -2374,7 +2377,7 @@ static int mv643xx_eth_open(struct net_device *dev) rxq_refill(mp->rxq + i, INT_MAX); } - if (mp->work_rx_oom) { + if (mp->oom) { mp->rx_oom.expires = jiffies + (HZ / 10); add_timer(&mp->rx_oom); } @@ -2957,6 +2960,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) netif_carrier_off(dev); + wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); + set_rx_coal(mp, 250); set_tx_coal(mp, 0); diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 9eed126..f2c4a66 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -2447,6 +2447,7 @@ static int myri10ge_open(struct net_device *dev) lro_mgr->lro_arr = ss->rx_done.lro_desc; lro_mgr->get_frag_header = myri10ge_get_frag_header; lro_mgr->max_aggr = myri10ge_lro_max_pkts; + lro_mgr->frag_align_pad = 2; if (lro_mgr->max_aggr > MAX_SKB_FRAGS) lro_mgr->max_aggr = MAX_SKB_FRAGS; diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c index eb66f65..7d83896 100644 --- a/drivers/net/ne2k-pci.c +++ b/drivers/net/ne2k-pci.c @@ -374,18 +374,17 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, dev->ethtool_ops = &ne2k_pci_ethtool_ops; NS8390_init(dev, 0); + memcpy(dev->dev_addr, SA_prom, 6); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + i = register_netdev(dev); if (i) goto err_out_free_netdev; - for(i = 0; i < 6; i++) - dev->dev_addr[i] = SA_prom[i]; printk("%s: %s found at %#lx, IRQ %d, %pM.\n", dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq, dev->dev_addr); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - return 0; err_out_free_netdev: diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index eceadf7..bf4af52 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -664,7 +664,7 @@ static int netconsole_netdev_event(struct notifier_block *this, struct netconsole_target *nt; struct net_device *dev = ptr; - if (!(event == NETDEV_CHANGENAME)) + if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER)) goto done; spin_lock_irqsave(&target_list_lock, flags); @@ -675,6 +675,15 @@ static int netconsole_netdev_event(struct notifier_block *this, case NETDEV_CHANGENAME: strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ); break; + case NETDEV_UNREGISTER: + if (!nt->enabled) + break; + netpoll_cleanup(&nt->np); + nt->enabled = 0; + printk(KERN_INFO "netconsole: network logging stopped" + ", interface %s unregistered\n", + dev->name); + break; } } netconsole_target_put(nt); diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 73cac6c..2b17453 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -4834,6 +4834,7 @@ static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) { u64 val = 0; + *ret = 0; switch (rp->rbr_block_size) { case 4 * 1024: val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); @@ -9542,7 +9543,7 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np, plat_dev = platform_device_register_simple("niu", niu_parent_index, NULL, 0); - if (!plat_dev) + if (IS_ERR(plat_dev)) return NULL; for (i = 0; attr_name(niu_parent_attributes[i]); i++) { diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 2fbf9f9..652a368 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -1758,7 +1758,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), 
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"), - PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "LA-PCM.cis"), + PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c index cf24cc3..e707051 100644 --- a/drivers/net/phy/fixed.c +++ b/drivers/net/phy/fixed.c @@ -19,6 +19,7 @@ #include <linux/mii.h> #include <linux/phy.h> #include <linux/phy_fixed.h> +#include <linux/err.h> #define MII_REGS_NUM 29 @@ -207,8 +208,8 @@ static int __init fixed_mdio_bus_init(void) int ret; pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); - if (!pdev) { - ret = -ENOMEM; + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); goto err_pdev; } diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index eb6411c..7a3ec9d 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -69,6 +69,11 @@ #define MII_M1111_COPPER 0 #define MII_M1111_FIBER 1 +#define MII_88E1121_PHY_LED_CTRL 16 +#define MII_88E1121_PHY_LED_PAGE 3 +#define MII_88E1121_PHY_LED_DEF 0x0030 +#define MII_88E1121_PHY_PAGE 22 + #define MII_M1011_PHY_STATUS 0x11 #define MII_M1011_PHY_STATUS_1000 0x8000 #define MII_M1011_PHY_STATUS_100 0x4000 @@ -154,6 +159,30 @@ static int marvell_config_aneg(struct phy_device *phydev) return err; } +static int m88e1121_config_aneg(struct phy_device *phydev) +{ + int err, temp; + + err = phy_write(phydev, MII_BMCR, BMCR_RESET); + if (err < 0) + return err; + + err = phy_write(phydev, MII_M1011_PHY_SCR, + MII_M1011_PHY_SCR_AUTO_CROSS); + if (err < 0) + return err; + + temp = phy_read(phydev, MII_88E1121_PHY_PAGE); + + phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); + phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); + phy_write(phydev, MII_88E1121_PHY_PAGE, temp); + + err = genphy_config_aneg(phydev); + + return err; +} + static int m88e1111_config_init(struct phy_device *phydev) { int err; @@ -429,6 +458,18 @@ static int marvell_read_status(struct phy_device *phydev) return 0; } +static int m88e1121_did_interrupt(struct phy_device *phydev) +{ + int imask; + + imask = phy_read(phydev, MII_M1011_IEVENT); + + if (imask & MII_M1011_IMASK_INIT) + return 1; + + return 0; +} + static struct phy_driver marvell_drivers[] = { { .phy_id = 0x01410c60, @@ -482,6 +523,19 @@ static struct phy_driver marvell_drivers[] = { .driver = {.owner = THIS_MODULE,}, }, { + .phy_id = 0x01410cb0, + .phy_id_mask = 0xfffffff0, + .name = "Marvell 88E1121R", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = &m88e1121_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .did_interrupt = &m88e1121_did_interrupt, + .driver = { .owner = THIS_MODULE }, + }, + { .phy_id = 0x01410cd0, .phy_id_mask = 0xfffffff0, .name = "Marvell 88E1145", diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 3ff1f42..61755cb 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -434,7 +434,7 @@ void phy_start_machine(struct phy_device *phydev, phydev->adjust_state = handler; INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine); - 
schedule_delayed_work(&phydev->state_queue, jiffies + HZ); + schedule_delayed_work(&phydev->state_queue, HZ); } /** @@ -655,6 +655,10 @@ static void phy_change(struct work_struct *work) struct phy_device *phydev = container_of(work, struct phy_device, phy_queue); + if (phydev->drv->did_interrupt && + !phydev->drv->did_interrupt(phydev)) + goto ignore; + err = phy_disable_interrupts(phydev); if (err) @@ -681,6 +685,11 @@ static void phy_change(struct work_struct *work) return; +ignore: + atomic_dec(&phydev->irq_disable); + enable_irq(phydev->irq); + return; + irq_enable_err: disable_irq(phydev->irq); atomic_inc(&phydev->irq_disable); @@ -937,6 +946,5 @@ static void phy_state_machine(struct work_struct *work) if (err < 0) phy_error(phydev); - schedule_delayed_work(&phydev->state_queue, - jiffies + PHY_STATE_TIME * HZ); + schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ); } diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c index a5ac2bd..4f3ada6 100644 --- a/drivers/net/ps3_gelic_wireless.c +++ b/drivers/net/ps3_gelic_wireless.c @@ -2101,6 +2101,9 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl, if (ret) { pr_debug("%s: WEP/WPA setup failed %d\n", __func__, ret); + ret = -EPERM; + gelic_wl_send_iwap_event(wl, NULL); + goto out; } /* start association */ diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 5e8540b..6f97b47 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -160,6 +160,7 @@ MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>," "Florian Fainelli <florian@openwrt.org>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver"); +MODULE_VERSION(DRV_VERSION " " DRV_RELDATE); /* RX and TX interrupts that we handle */ #define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH) diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 0b6e8c8..8247a94 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -3554,54 +3554,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) int handled = 0; int status; + /* loop handling interrupts until we have no new ones or + * we hit a invalid/hotplug case. + */ status = RTL_R16(IntrStatus); + while (status && status != 0xffff) { + handled = 1; - /* hotplug/major error/no more work/shared irq */ - if ((status == 0xffff) || !status) - goto out; - - handled = 1; + /* Handle all of the error cases first. These will reset + * the chip, so just exit the loop. + */ + if (unlikely(!netif_running(dev))) { + rtl8169_asic_down(ioaddr); + break; + } - if (unlikely(!netif_running(dev))) { - rtl8169_asic_down(ioaddr); - goto out; - } + /* Work around for rx fifo overflow */ + if (unlikely(status & RxFIFOOver) && + (tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(dev); + rtl8169_tx_timeout(dev); + break; + } - status &= tp->intr_mask; - RTL_W16(IntrStatus, - (status & RxFIFOOver) ? (status | RxOverflow) : status); + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(dev); + break; + } - if (!(status & tp->intr_event)) - goto out; + if (status & LinkChg) + rtl8169_check_link_status(dev, tp, ioaddr); - /* Work around for rx fifo overflow */ - if (unlikely(status & RxFIFOOver) && - (tp->mac_version == RTL_GIGA_MAC_VER_11)) { - netif_stop_queue(dev); - rtl8169_tx_timeout(dev); - goto out; - } + /* We need to see the lastest version of tp->intr_mask to + * avoid ignoring an MSI interrupt and having to wait for + * another event which may never come. 
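The r8169 rework above turns the one-shot interrupt handler into a loop that keeps acknowledging and re-reading IntrStatus until nothing is pending (or 0xffff signals a removed device), which is what keeps MSI from stalling when events arrive back to back. A toy model of that control flow with a simulated status register; none of these names are the real driver API:

#include <stdio.h>
#include <stdint.h>

/* Simulated interrupt-status register: each read returns the next
 * pending value, the way new events can appear while the previous
 * batch is still being handled. */
static uint16_t fake_status[] = { 0x0041, 0x0004, 0x0000 };
static unsigned int fake_idx;

static uint16_t read_intr_status(void)
{
	return fake_status[fake_idx];
}

static void ack_intr_status(uint16_t bits)
{
	printf("  ack 0x%04x\n", bits);
	fake_idx++;		/* pretend the ack exposes the next batch */
}

int main(void)
{
	int handled = 0;
	uint16_t status = read_intr_status();

	/* Loop until nothing is pending, or 0xffff says the device is gone. */
	while (status && status != 0xffff) {
		handled = 1;
		printf("servicing status 0x%04x\n", status);
		ack_intr_status(status);
		status = read_intr_status();
	}
	printf("handled=%d\n", handled);
	return 0;
}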
+ */ + smp_rmb(); + if (status & tp->intr_mask & tp->napi_event) { + RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); + tp->intr_mask = ~tp->napi_event; + + if (likely(napi_schedule_prep(&tp->napi))) + __napi_schedule(&tp->napi); + else if (netif_msg_intr(tp)) { + printk(KERN_INFO "%s: interrupt %04x in poll\n", + dev->name, status); + } + } - if (unlikely(status & SYSErr)) { - rtl8169_pcierr_interrupt(dev); - goto out; + /* We only get a new MSI interrupt when all active irq + * sources on the chip have been acknowledged. So, ack + * everything we've seen and check if new sources have become + * active to avoid blocking all interrupts from the chip. + */ + RTL_W16(IntrStatus, + (status & RxFIFOOver) ? (status | RxOverflow) : status); + status = RTL_R16(IntrStatus); } - if (status & LinkChg) - rtl8169_check_link_status(dev, tp, ioaddr); - - if (status & tp->napi_event) { - RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); - tp->intr_mask = ~tp->napi_event; - - if (likely(napi_schedule_prep(&tp->napi))) - __napi_schedule(&tp->napi); - else if (netif_msg_intr(tp)) { - printk(KERN_INFO "%s: interrupt %04x in poll\n", - dev->name, status); - } - } -out: return IRQ_RETVAL(handled); } @@ -3617,13 +3627,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete(napi); - tp->intr_mask = 0xffff; - /* - * 20040426: the barrier is not strictly required but the - * behavior of the irq handler could be less predictable - * without it. Btw, the lack of flush for the posted pci - * write is safe - FR + + /* We need for force the visibility of tp->intr_mask + * for other CPUs, as we can loose an MSI interrupt + * and potentially wait for a retransmit timeout if we don't. + * The posted write to IntrMask is safe, as it will + * eventually make it to the chip and we won't loose anything + * until it does. 
*/ + tp->intr_mask = 0xffff; smp_wmb(); RTL_W16(IntrMask, tp->intr_event); } diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index dee23b1..7269a42 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -448,9 +448,6 @@ static void efx_init_channels(struct efx_nic *efx) WARN_ON(channel->rx_pkt != NULL); efx_rx_strategy(channel); - - netif_napi_add(channel->napi_dev, &channel->napi_str, - efx_poll, napi_weight); } } @@ -1321,6 +1318,8 @@ static int efx_init_napi(struct efx_nic *efx) efx_for_each_channel(channel, efx) { channel->napi_dev = efx->net_dev; + netif_napi_add(channel->napi_dev, &channel->napi_str, + efx_poll, napi_weight); } return 0; } @@ -1330,6 +1329,8 @@ static void efx_fini_napi(struct efx_nic *efx) struct efx_channel *channel; efx_for_each_channel(channel, efx) { + if (channel->napi_dev) + netif_napi_del(&channel->napi_str); channel->napi_dev = NULL; } } diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index d4629ab..466a8ab 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -1176,9 +1176,9 @@ void falcon_sim_phy_event(struct efx_nic *efx) EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE); if (EFX_IS10G(efx)) - EFX_SET_OWORD_FIELD(phy_event, XG_PHY_INTR, 1); + EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1); else - EFX_SET_OWORD_FIELD(phy_event, G_PHY0_INTR, 1); + EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1); falcon_generate_event(&efx->channel[0], &phy_event); } diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 7b18827..3ab28bb 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -1188,6 +1188,19 @@ out: return ret; } +static const struct net_device_ops sh_eth_netdev_ops = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, + .ndo_set_multicast_list = sh_eth_set_multicast_list, + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_do_ioctl = sh_eth_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + static int sh_eth_drv_probe(struct platform_device *pdev) { int ret, i, devno = 0; @@ -1240,13 +1253,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp->edmac_endian = pd->edmac_endian; /* set function */ - ndev->open = sh_eth_open; - ndev->hard_start_xmit = sh_eth_start_xmit; - ndev->stop = sh_eth_close; - ndev->get_stats = sh_eth_get_stats; - ndev->set_multicast_list = sh_eth_set_multicast_list; - ndev->do_ioctl = sh_eth_do_ioctl; - ndev->tx_timeout = sh_eth_tx_timeout; + ndev->netdev_ops = &sh_eth_netdev_ops; ndev->watchdog_timeo = TX_TIMEOUT; mdp->post_rx = POST_RX >> (devno << 1); diff --git a/drivers/net/skge.c b/drivers/net/skge.c index b8978d4..c11cdd0 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -2674,7 +2674,7 @@ static int skge_down(struct net_device *dev) if (netif_msg_ifdown(skge)) printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); - netif_stop_queue(dev); + netif_tx_disable(dev); if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) del_timer_sync(&skge->link_timer); @@ -2881,7 +2881,6 @@ static void skge_tx_clean(struct net_device *dev) } skge->tx_ring.to_clean = e; - netif_wake_queue(dev); } static void skge_tx_timeout(struct net_device *dev) @@ -2893,6 +2892,7 @@ static void skge_tx_timeout(struct net_device *dev) skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); skge_tx_clean(dev); + netif_wake_queue(dev); } static int skge_change_mtu(struct 
net_device *dev, int new_mtu) diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 912308e..329f890 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -369,7 +369,7 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, * MN10300/AM33 configuration */ -#include <asm/unit/smc91111.h> +#include <unit/smc91111.h> #else diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 6da6781..eb7db03 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -317,7 +317,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx) goto out; } - SMSC_WARNING(HW, "Timed out waiting for MII write to finish"); + SMSC_WARNING(HW, "Timed out waiting for MII read to finish"); reg = -EIO; out: diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index e0d8477..a39c0b9 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c @@ -331,6 +331,18 @@ out: return ERR_PTR(err); } +static const struct net_device_ops sun3_82586_netdev_ops = { + .ndo_open = sun3_82586_open, + .ndo_stop = sun3_82586_close, + .ndo_start_xmit = sun3_82586_send_packet, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = sun3_82586_timeout, + .ndo_get_stats = sun3_82586_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) { int i, size, retval; @@ -381,13 +393,8 @@ static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq); - dev->open = sun3_82586_open; - dev->stop = sun3_82586_close; - dev->get_stats = sun3_82586_get_stats; - dev->tx_timeout = sun3_82586_timeout; + dev->netdev_ops = &sun3_82586_netdev_ops; dev->watchdog_timeo = HZ/20; - dev->hard_start_xmit = sun3_82586_send_packet; - dev->set_multicast_list = set_multicast_list; dev->if_port = 0; return 0; diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index d91e95b..0ce2db6 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -862,6 +862,22 @@ static int __devinit tc35815_init_dev_addr(struct net_device *dev) return 0; } +static const struct net_device_ops tc35815_netdev_ops = { + .ndo_open = tc35815_open, + .ndo_stop = tc35815_close, + .ndo_start_xmit = tc35815_send_packet, + .ndo_get_stats = tc35815_get_stats, + .ndo_set_multicast_list = tc35815_set_multicast_list, + .ndo_tx_timeout = tc35815_tx_timeout, + .ndo_do_ioctl = tc35815_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tc35815_poll_controller, +#endif +}; + static int __devinit tc35815_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -904,21 +920,12 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev, ioaddr = pcim_iomap_table(pdev)[1]; /* Initialize the device structure. 
*/ - dev->open = tc35815_open; - dev->hard_start_xmit = tc35815_send_packet; - dev->stop = tc35815_close; - dev->get_stats = tc35815_get_stats; - dev->set_multicast_list = tc35815_set_multicast_list; - dev->do_ioctl = tc35815_ioctl; + dev->netdev_ops = &tc35815_netdev_ops; dev->ethtool_ops = &tc35815_ethtool_ops; - dev->tx_timeout = tc35815_tx_timeout; dev->watchdog_timeo = TC35815_TX_TIMEOUT; #ifdef TC35815_NAPI netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); #endif -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = tc35815_poll_controller; -#endif dev->irq = pdev->irq; dev->base_addr = (unsigned long)ioaddr; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 6a736dd..201be42 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -2190,7 +2190,14 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp, if (!(tmp & EEPROM_ADDR_COMPLETE)) return -EBUSY; - *val = tr32(GRC_EEPROM_DATA); + tmp = tr32(GRC_EEPROM_DATA); + + /* + * The data will always be opposite the native endian + * format. Perform a blind byteswap to compensate. + */ + *val = swab32(tmp); + return 0; } @@ -10663,7 +10670,13 @@ static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, memcpy(&data, buf + i, 4); - tw32(GRC_EEPROM_DATA, be32_to_cpu(data)); + /* + * The SEEPROM interface expects the data to always be opposite + * the native endian format. We accomplish this by reversing + * all the operations that would have been performed on the + * data from a call to tg3_nvram_read_be32(). + */ + tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); val = tr32(GRC_EEPROM_ADDR); tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index bb43e7f..0f78f99 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c @@ -1561,6 +1561,18 @@ static const struct ethtool_ops tsi108_ethtool_ops = { .set_settings = tsi108_set_settings, }; +static const struct net_device_ops tsi108_netdev_ops = { + .ndo_open = tsi108_open, + .ndo_stop = tsi108_close, + .ndo_start_xmit = tsi108_send_packet, + .ndo_set_multicast_list = tsi108_set_rx_mode, + .ndo_get_stats = tsi108_get_stats, + .ndo_do_ioctl = tsi108_do_ioctl, + .ndo_set_mac_address = tsi108_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + static int tsi108_init_one(struct platform_device *pdev) { @@ -1616,14 +1628,8 @@ tsi108_init_one(struct platform_device *pdev) data->phy_type = einfo->phy_type; data->irq_num = einfo->irq_num; data->id = pdev->id; - dev->open = tsi108_open; - dev->stop = tsi108_close; - dev->hard_start_xmit = tsi108_send_packet; - dev->set_mac_address = tsi108_set_mac; - dev->set_multicast_list = tsi108_set_rx_mode; - dev->get_stats = tsi108_get_stats; netif_napi_add(dev, &data->napi, tsi108_poll, 64); - dev->do_ioctl = tsi108_do_ioctl; + dev->netdev_ops = &tsi108_netdev_ops; dev->ethtool_ops = &tsi108_ethtool_ops; /* Apparently, the Linux networking code won't use scatter-gather diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a1b0697..735bf41 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -93,7 +93,6 @@ struct tun_file { atomic_t count; struct tun_struct *tun; struct net *net; - wait_queue_head_t read_wait; }; struct tun_sock; @@ -156,6 +155,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file) tfile->tun = tun; tun->tfile = tfile; dev_hold(tun->dev); + sock_hold(tun->sk); atomic_inc(&tfile->count); out: @@ -165,11 +165,8 @@ out: static void __tun_detach(struct tun_struct *tun) { - struct 
tun_file *tfile = tun->tfile; - /* Detach from net device */ netif_tx_lock_bh(tun->dev); - tfile->tun = NULL; tun->tfile = NULL; netif_tx_unlock_bh(tun->dev); @@ -333,12 +330,19 @@ static void tun_net_uninit(struct net_device *dev) /* Inform the methods they need to stop using the dev. */ if (tfile) { - wake_up_all(&tfile->read_wait); + wake_up_all(&tun->socket.wait); if (atomic_dec_and_test(&tfile->count)) __tun_detach(tun); } } +static void tun_free_netdev(struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + + sock_put(tun->sk); +} + /* Net device open. */ static int tun_net_open(struct net_device *dev) { @@ -393,7 +397,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev) /* Notify and wake up reader process */ if (tun->flags & TUN_FASYNC) kill_fasync(&tun->fasync, SIGIO, POLL_IN); - wake_up_interruptible(&tun->tfile->read_wait); + wake_up_interruptible(&tun->socket.wait); return 0; drop: @@ -490,7 +494,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait) DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); - poll_wait(file, &tfile->read_wait, wait); + poll_wait(file, &tun->socket.wait, wait); if (!skb_queue_empty(&tun->readq)) mask |= POLLIN | POLLRDNORM; @@ -518,7 +522,7 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun, int err; /* Under a page? Don't bother with paged skb. */ - if (prepad + len < PAGE_SIZE) + if (prepad + len < PAGE_SIZE || !linear) linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, @@ -565,7 +569,8 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { align = NET_IP_ALIGN; - if (unlikely(len < ETH_HLEN)) + if (unlikely(len < ETH_HLEN || + (gso.hdr_len && gso.hdr_len < ETH_HLEN))) return -EINVAL; } @@ -762,7 +767,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, goto out; } - add_wait_queue(&tfile->read_wait, &wait); + add_wait_queue(&tun->socket.wait, &wait); while (len) { current->state = TASK_INTERRUPTIBLE; @@ -793,7 +798,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, } current->state = TASK_RUNNING; - remove_wait_queue(&tfile->read_wait, &wait); + remove_wait_queue(&tun->socket.wait, &wait); out: tun_put(tun); @@ -810,7 +815,7 @@ static void tun_setup(struct net_device *dev) tun->group = -1; dev->ethtool_ops = &tun_ethtool_ops; - dev->destructor = free_netdev; + dev->destructor = tun_free_netdev; } /* Trivial set of netlink ops to allow deleting tun or tap @@ -847,7 +852,7 @@ static void tun_sock_write_space(struct sock *sk) static void tun_sock_destruct(struct sock *sk) { - dev_put(container_of(sk, struct tun_sock, sk)->tun->dev); + free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev); } static struct proto tun_proto = { @@ -861,7 +866,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) struct sock *sk; struct tun_struct *tun; struct net_device *dev; - struct tun_file *tfile = file->private_data; int err; dev = __dev_get_by_name(net, ifr->ifr_name); @@ -919,13 +923,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (!sk) goto err_free_dev; - /* This ref count is for tun->sk. 
*/ - dev_hold(dev); + init_waitqueue_head(&tun->socket.wait); sock_init_data(&tun->socket, sk); sk->sk_write_space = tun_sock_write_space; - sk->sk_destruct = tun_sock_destruct; sk->sk_sndbuf = INT_MAX; - sk->sk_sleep = &tfile->read_wait; tun->sk = sk; container_of(sk, struct tun_sock, sk)->tun = tun; @@ -941,11 +942,13 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) err = -EINVAL; err = register_netdevice(tun->dev); if (err < 0) - goto err_free_dev; + goto err_free_sk; + + sk->sk_destruct = tun_sock_destruct; err = tun_attach(tun, file); if (err < 0) - goto err_free_dev; + goto failed; } DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); @@ -1265,7 +1268,6 @@ static int tun_chr_open(struct inode *inode, struct file * file) atomic_set(&tfile->count, 0); tfile->tun = NULL; tfile->net = get_net(current->nsproxy->net_ns); - init_waitqueue_head(&tfile->read_wait); file->private_data = tfile; return 0; } @@ -1283,14 +1285,16 @@ static int tun_chr_close(struct inode *inode, struct file *file) __tun_detach(tun); /* If desireable, unregister the netdevice. */ - if (!(tun->flags & TUN_PERSIST)) { - sock_put(tun->sk); + if (!(tun->flags & TUN_PERSIST)) unregister_netdevice(tun->dev); - } rtnl_unlock(); } + tun = tfile->tun; + if (tun) + sock_put(tun->sk); + put_net(tfile->net); kfree(tfile); diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index d3f39e8..44f8392 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -1394,7 +1394,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { - upsmr |= UCC_GETH_UPSMR_RPM; + if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) + upsmr |= UCC_GETH_UPSMR_RPM; switch (ugeth->max_speed) { case SPEED_10: upsmr |= UCC_GETH_UPSMR_R10M; diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 8ee2103..dfc6cf7 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -180,6 +180,20 @@ config USB_NET_CDCETHER IEEE 802 "local assignment" bit is set in the address, a "usbX" name is used instead. +config USB_NET_CDC_EEM + tristate "CDC EEM support" + depends on USB_USBNET && EXPERIMENTAL + help + This option supports devices conforming to the Communication Device + Class (CDC) Ethernet Emulation Model, a specification that's easy to + implement in device firmware. The CDC EEM specifications are available + from <http://www.usb.org/>. + + This driver creates an interface named "ethX", where X depends on + what other networking devices you have in use. However, if the + IEEE 802 "local assignment" bit is set in the address, a "usbX" + name is used instead. 
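The framing this option enables is deliberately simple: each Ethernet frame (with its CRC32 appended) is wrapped in a two-byte little-endian header whose low 14 bits hold the frame length, bit 14 flags a calculated CRC and bit 15 distinguishes data from link commands. A hedged sketch of that header handling, independent of the cdc_eem.c driver added later in this patch (the macro and helper names here are invented for illustration):

	/*
	 * Minimal sketch of the EEM data-packet header layout
	 * (b0..13 frame length, b14 CRC-present, b15 data/command).
	 * Names are illustrative; the real framing code is in
	 * eem_tx_fixup()/eem_rx_fixup() in cdc_eem.c below.
	 */
	#include <linux/types.h>
	#include <linux/bitops.h>
	#include <asm/unaligned.h>

	#define EEM_HDR_LEN_MASK	0x3fff	/* b0..13: frame length, CRC included */
	#define EEM_HDR_CRC		BIT(14)	/* b14: frame carries a real CRC32    */
	#define EEM_HDR_CMD		BIT(15)	/* b15: 1 = link command, 0 = data    */

	/* Write a data-packet header in front of a frame of 'len' bytes. */
	static inline void eem_put_data_header(u8 *hdr, u16 len)
	{
		put_unaligned_le16(EEM_HDR_CRC | (len & EEM_HDR_LEN_MASK), hdr);
	}

	/* Decode a received header; command packets use the low bits differently. */
	static inline bool eem_header_is_cmd(const u8 *hdr)
	{
		return get_unaligned_le16(hdr) & EEM_HDR_CMD;
	}

	static inline u16 eem_data_header_len(const u8 *hdr)
	{
		return get_unaligned_le16(hdr) & EEM_HDR_LEN_MASK;
	}

In the driver below, eem_tx_fixup() applies exactly this encode after appending the CRC32 and, when the transfer would otherwise end on a wMaxPacketSize boundary, bundles a zero-length EEM header so the peer never sees bogus single-byte padding.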
+ config USB_NET_DM9601 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" depends on USB_USBNET diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index 88a87eeb3..c8aef62 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150) += rtl8150.o obj-$(CONFIG_USB_HSO) += hso.o obj-$(CONFIG_USB_NET_AX8817X) += asix.o obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o +obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o obj-$(CONFIG_USB_NET_DM9601) += dm9601.o obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o obj-$(CONFIG_USB_NET_GL620A) += gl620a.o diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c new file mode 100644 index 0000000..80e0177 --- /dev/null +++ b/drivers/net/usb/cdc_eem.c @@ -0,0 +1,381 @@ +/* + * USB CDC EEM network interface driver + * Copyright (C) 2009 Oberthur Technologies + * by Omar Laazimani, Olivier Condemine + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ctype.h> +#include <linux/ethtool.h> +#include <linux/workqueue.h> +#include <linux/mii.h> +#include <linux/usb.h> +#include <linux/crc32.h> +#include <linux/usb/cdc.h> +#include <linux/usb/usbnet.h> + + +/* + * This driver is an implementation of the CDC "Ethernet Emulation + * Model" (EEM) specification, which encapsulates Ethernet frames + * for transport over USB using a simpler USB device model than the + * previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet"). + * + * For details, see www.usb.org/developers/devclass_docs/CDC_EEM10.pdf + * + * This version has been tested with GIGAntIC WuaoW SIM Smart Card on 2.6.24, + * 2.6.27 and 2.6.30rc2 kernel. + * It has also been validated on Openmoko Om 2008.12 (based on 2.6.24 kernel). 
+ * build on 23-April-2009 + */ + +#define EEM_HEAD 2 /* 2 byte header */ + +/*-------------------------------------------------------------------------*/ + +static void eem_linkcmd_complete(struct urb *urb) +{ + dev_kfree_skb(urb->context); + usb_free_urb(urb); +} + +static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb) +{ + struct urb *urb; + int status; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + goto fail; + + usb_fill_bulk_urb(urb, dev->udev, dev->out, + skb->data, skb->len, eem_linkcmd_complete, skb); + + status = usb_submit_urb(urb, GFP_ATOMIC); + if (status) { + usb_free_urb(urb); +fail: + dev_kfree_skb(skb); + devwarn(dev, "link cmd failure\n"); + return; + } +} + +static int eem_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int status = 0; + + status = usbnet_get_endpoints(dev, intf); + if (status < 0) { + usb_set_intfdata(intf, NULL); + usb_driver_release_interface(driver_of(intf), intf); + return status; + } + + /* no jumbogram (16K) support for now */ + + dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN; + + return 0; +} + +/* + * EEM permits packing multiple Ethernet frames into USB transfers + * (a "bundle"), but for TX we don't try to do that. + */ +static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb, + gfp_t flags) +{ + struct sk_buff *skb2 = NULL; + u16 len = skb->len; + u32 crc = 0; + int padlen = 0; + + /* When ((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket) is + * zero, stick two bytes of zero length EEM packet on the end. + * Else the framework would add invalid single byte padding, + * since it can't know whether ZLPs will be handled right by + * all the relevant hardware and software. + */ + if (!((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket)) + padlen += 2; + + if (!skb_cloned(skb)) { + int headroom = skb_headroom(skb); + int tailroom = skb_tailroom(skb); + + if ((tailroom >= ETH_FCS_LEN + padlen) + && (headroom >= EEM_HEAD)) + goto done; + + if ((headroom + tailroom) + > (EEM_HEAD + ETH_FCS_LEN + padlen)) { + skb->data = memmove(skb->head + + EEM_HEAD, + skb->data, + skb->len); + skb_set_tail_pointer(skb, len); + goto done; + } + } + + skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags); + if (!skb2) + return NULL; + + dev_kfree_skb_any(skb); + skb = skb2; + +done: + /* we don't use the "no Ethernet CRC" option */ + crc = crc32_le(~0, skb->data, skb->len); + crc = ~crc; + + put_unaligned_le32(crc, skb_put(skb, 4)); + + /* EEM packet header format: + * b0..13: length of ethernet frame + * b14: bmCRC (1 == valid Ethernet CRC) + * b15: bmType (0 == data) + */ + len = skb->len; + put_unaligned_le16(BIT(14) | len, skb_push(skb, 2)); + + /* Bundle a zero length EEM packet if needed */ + if (padlen) + put_unaligned_le16(0, skb_put(skb, 2)); + + return skb; +} + +static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + /* + * Our task here is to strip off framing, leaving skb with one + * data frame for the usbnet framework code to process. But we + * may have received multiple EEM payloads, or command payloads. + * So we must process _everything_ as if it's a header, except + * maybe the last data payload + * + * REVISIT the framework needs updating so that when we consume + * all payloads (the last or only message was a command, or a + * zero length EEM packet) that is not accounted as an rx_error. + */ + do { + struct sk_buff *skb2 = NULL; + u16 header; + u16 len = 0; + + /* incomplete EEM header? 
*/ + if (skb->len < EEM_HEAD) + return 0; + + /* + * EEM packet header format: + * b0..14: EEM type dependant (Data or Command) + * b15: bmType + */ + header = get_unaligned_le16(skb->data); + skb_pull(skb, EEM_HEAD); + + /* + * The bmType bit helps to denote when EEM + * packet is data or command : + * bmType = 0 : EEM data payload + * bmType = 1 : EEM (link) command + */ + if (header & BIT(15)) { + u16 bmEEMCmd; + + /* + * EEM (link) command packet: + * b0..10: bmEEMCmdParam + * b11..13: bmEEMCmd + * b14: bmReserved (must be 0) + * b15: 1 (EEM command) + */ + if (header & BIT(14)) { + devdbg(dev, "reserved command %04x\n", header); + continue; + } + + bmEEMCmd = (header >> 11) & 0x7; + switch (bmEEMCmd) { + + /* Responding to echo requests is mandatory. */ + case 0: /* Echo command */ + len = header & 0x7FF; + + /* bogus command? */ + if (skb->len < len) + return 0; + + skb2 = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb2)) + goto next; + skb_trim(skb2, len); + put_unaligned_le16(BIT(15) | (1 << 11) | len, + skb_push(skb2, 2)); + eem_linkcmd(dev, skb2); + break; + + /* + * Host may choose to ignore hints. + * - suspend: peripheral ready to suspend + * - response: suggest N millisec polling + * - response complete: suggest N sec polling + */ + case 2: /* Suspend hint */ + case 3: /* Response hint */ + case 4: /* Response complete hint */ + continue; + + /* + * Hosts should never receive host-to-peripheral + * or reserved command codes; or responses to an + * echo command we didn't send. + */ + case 1: /* Echo response */ + case 5: /* Tickle */ + default: /* reserved */ + devwarn(dev, "unexpected link command %d\n", + bmEEMCmd); + continue; + } + + } else { + u32 crc, crc2; + int is_last; + + /* zero length EEM packet? */ + if (header == 0) + continue; + + /* + * EEM data packet header : + * b0..13: length of ethernet frame + * b14: bmCRC + * b15: 0 (EEM data) + */ + len = header & 0x3FFF; + + /* bogus EEM payload? */ + if (skb->len < len) + return 0; + + /* bogus ethernet frame? */ + if (len < (ETH_HLEN + ETH_FCS_LEN)) + goto next; + + /* + * Treat the last payload differently: framework + * code expects our "fixup" to have stripped off + * headers, so "skb" is a data packet (or error). + * Else if it's not the last payload, keep "skb" + * for further processing. 
+ */ + is_last = (len == skb->len); + if (is_last) + skb2 = skb; + else { + skb2 = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb2)) + return 0; + } + + crc = get_unaligned_le32(skb2->data + + len - ETH_FCS_LEN); + skb_trim(skb2, len - ETH_FCS_LEN); + + /* + * The bmCRC helps to denote when the CRC field in + * the Ethernet frame contains a calculated CRC: + * bmCRC = 1 : CRC is calculated + * bmCRC = 0 : CRC = 0xDEADBEEF + */ + if (header & BIT(14)) + crc2 = ~crc32_le(~0, skb2->data, len); + else + crc2 = 0xdeadbeef; + + if (is_last) + return crc == crc2; + + if (unlikely(crc != crc2)) { + dev->stats.rx_errors++; + dev_kfree_skb_any(skb2); + } else + usbnet_skb_return(dev, skb2); + } + +next: + skb_pull(skb, len); + } while (skb->len); + + return 1; +} + +static const struct driver_info eem_info = { + .description = "CDC EEM Device", + .flags = FLAG_ETHER, + .bind = eem_bind, + .rx_fixup = eem_rx_fixup, + .tx_fixup = eem_tx_fixup, +}; + +/*-------------------------------------------------------------------------*/ + +static const struct usb_device_id products[] = { +{ + USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_EEM, + USB_CDC_PROTO_EEM), + .driver_info = (unsigned long) &eem_info, +}, +{ + /* EMPTY == end of list */ +}, +}; +MODULE_DEVICE_TABLE(usb, products); + +static struct usb_driver eem_driver = { + .name = "cdc_eem", + .id_table = products, + .probe = usbnet_probe, + .disconnect = usbnet_disconnect, + .suspend = usbnet_suspend, + .resume = usbnet_resume, +}; + + +static int __init eem_init(void) +{ + return usb_register(&eem_driver); +} +module_init(eem_init); + +static void __exit eem_exit(void) +{ + usb_deregister(&eem_driver); +} +module_exit(eem_exit); + +MODULE_AUTHOR("Omar Laazimani <omar.oberthur@gmail.com>"); +MODULE_DESCRIPTION("USB CDC EEM"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index a8228d8..2138535 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -899,6 +899,7 @@ static int pegasus_start_xmit(struct sk_buff *skb, struct net_device *net) /* cleanup should already have been scheduled */ break; case -ENODEV: /* disconnect() upcoming */ + case -EPERM: netif_device_detach(pegasus->net); break; default: diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index dc16653..5a72833 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -941,6 +941,16 @@ static int smsc95xx_reset(struct usbnet *dev) if (netif_msg_ifup(dev)) devdbg(dev, "ID_REV = 0x%08x", read_buf); + /* Configure GPIO pins as LED outputs */ + write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED | + LED_GPIO_CFG_FDX_LED; + ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf); + if (ret < 0) { + devwarn(dev, "Failed to write LED_GPIO_CFG register, ret=%d", + ret); + return ret; + } + /* Init Tx */ write_buf = 0; ret = smsc95xx_write_reg(dev, FLOW, write_buf); @@ -1231,6 +1241,11 @@ static const struct usb_device_id products[] = { USB_DEVICE(0x0424, 0x9500), .driver_info = (unsigned long) &smsc95xx_info, }, + { + /* SMSC9512/9514 USB Hub & Ethernet Device */ + USB_DEVICE(0x0424, 0xec00), + .driver_info = (unsigned long) &smsc95xx_info, + }, { }, /* END */ }; MODULE_DEVICE_TABLE(usb, products); diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h index 66b5c84..86bc449 100644 --- a/drivers/net/usb/smsc95xx.h +++ b/drivers/net/usb/smsc95xx.h @@ -99,6 +99,9 @@ #define PM_CTL_WUPS_MULTI_ (0x00000003) #define LED_GPIO_CFG (0x24) +#define LED_GPIO_CFG_SPD_LED (0x01000000) 
+#define LED_GPIO_CFG_LNK_LED (0x00100000) +#define LED_GPIO_CFG_FDX_LED (0x00010000) #define GPIO_CFG (0x28) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 015db1c..8e56fcf 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -210,14 +210,11 @@ rx_drop: static struct net_device_stats *veth_get_stats(struct net_device *dev) { - struct veth_priv *priv; - struct net_device_stats *dev_stats; - int cpu; + struct veth_priv *priv = netdev_priv(dev); + struct net_device_stats *dev_stats = &dev->stats; + unsigned int cpu; struct veth_net_stats *stats; - priv = netdev_priv(dev); - dev_stats = &dev->stats; - dev_stats->rx_packets = 0; dev_stats->tx_packets = 0; dev_stats->rx_bytes = 0; @@ -225,16 +222,17 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev) dev_stats->tx_dropped = 0; dev_stats->rx_dropped = 0; - for_each_online_cpu(cpu) { - stats = per_cpu_ptr(priv->stats, cpu); + if (priv->stats) + for_each_online_cpu(cpu) { + stats = per_cpu_ptr(priv->stats, cpu); - dev_stats->rx_packets += stats->rx_packets; - dev_stats->tx_packets += stats->tx_packets; - dev_stats->rx_bytes += stats->rx_bytes; - dev_stats->tx_bytes += stats->tx_bytes; - dev_stats->tx_dropped += stats->tx_dropped; - dev_stats->rx_dropped += stats->rx_dropped; - } + dev_stats->rx_packets += stats->rx_packets; + dev_stats->tx_packets += stats->tx_packets; + dev_stats->rx_bytes += stats->rx_bytes; + dev_stats->tx_bytes += stats->tx_bytes; + dev_stats->tx_dropped += stats->tx_dropped; + dev_stats->rx_dropped += stats->rx_dropped; + } return dev_stats; } @@ -261,6 +259,8 @@ static int veth_close(struct net_device *dev) netif_carrier_off(dev); netif_carrier_off(priv->peer); + free_percpu(priv->stats); + priv->stats = NULL; return 0; } @@ -291,15 +291,6 @@ static int veth_dev_init(struct net_device *dev) return 0; } -static void veth_dev_free(struct net_device *dev) -{ - struct veth_priv *priv; - - priv = netdev_priv(dev); - free_percpu(priv->stats); - free_netdev(dev); -} - static const struct net_device_ops veth_netdev_ops = { .ndo_init = veth_dev_init, .ndo_open = veth_open, @@ -317,7 +308,7 @@ static void veth_setup(struct net_device *dev) dev->netdev_ops = &veth_netdev_ops; dev->ethtool_ops = &veth_ethtool_ops; dev->features |= NETIF_F_LLTX; - dev->destructor = veth_dev_free; + dev->destructor = free_netdev; } /* diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index fb53ef8..754a4b1 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -377,7 +377,7 @@ static void velocity_print_info(struct velocity_info *vptr); static int velocity_open(struct net_device *dev); static int velocity_change_mtu(struct net_device *dev, int mtu); static int velocity_xmit(struct sk_buff *skb, struct net_device *dev); -static int velocity_intr(int irq, void *dev_instance); +static irqreturn_t velocity_intr(int irq, void *dev_instance); static void velocity_set_multi(struct net_device *dev); static struct net_device_stats *velocity_get_stats(struct net_device *dev); static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); @@ -2215,7 +2215,7 @@ out: * efficiently as possible. 
*/ -static int velocity_intr(int irq, void *dev_instance) +static irqreturn_t velocity_intr(int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct velocity_info *vptr = netdev_priv(dev); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 9c82a39..4d1d479 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -616,10 +616,11 @@ static int virtnet_open(struct net_device *dev) static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, struct scatterlist *data, int out, int in) { - struct scatterlist sg[VIRTNET_SEND_COMMAND_SG_MAX + 2]; + struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2]; struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = ~0; unsigned int tmp; + int i; if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { BUG(); /* Caller should know better */ @@ -637,7 +638,8 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, sg_init_table(sg, out + in); sg_set_buf(&sg[0], &ctrl, sizeof(ctrl)); - memcpy(&sg[1], data, sizeof(struct scatterlist) * (out + in - 2)); + for_each_sg(data, s, out + in - 2, i) + sg_set_buf(&sg[i + 1], sg_virt(s), s->length); sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); if (vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) != 0) @@ -692,7 +694,7 @@ static void virtnet_set_rx_mode(struct net_device *dev) promisc = ((dev->flags & IFF_PROMISC) != 0); allmulti = ((dev->flags & IFF_ALLMULTI) != 0); - sg_set_buf(sg, &promisc, sizeof(promisc)); + sg_init_one(sg, &promisc, sizeof(promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, @@ -700,7 +702,7 @@ static void virtnet_set_rx_mode(struct net_device *dev) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", promisc ? 
"en" : "dis"); - sg_set_buf(sg, &allmulti, sizeof(allmulti)); + sg_init_one(sg, &allmulti, sizeof(allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, @@ -716,6 +718,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) return; } + sg_init_table(sg, 2); + /* Store the unicast list and count in the front of the buffer */ mac_data->entries = dev->uc_count; addr = dev->uc_list; @@ -744,24 +748,24 @@ static void virtnet_set_rx_mode(struct net_device *dev) kfree(buf); } -static void virnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) +static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; - sg_set_buf(&sg, &vid, sizeof(vid)); + sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); } -static void virnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) +static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; - sg_set_buf(&sg, &vid, sizeof(vid)); + sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) @@ -794,8 +798,8 @@ static const struct net_device_ops virtnet_netdev = { .ndo_set_mac_address = virtnet_set_mac_address, .ndo_set_rx_mode = virtnet_set_rx_mode, .ndo_change_mtu = virtnet_change_mtu, - .ndo_vlan_rx_add_vid = virnet_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = virnet_vlan_rx_kill_vid, + .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, #endif diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c index 7be0ae10..c2eeac4 100644 --- a/drivers/net/vxge/vxge-traffic.c +++ b/drivers/net/vxge/vxge-traffic.c @@ -115,7 +115,7 @@ enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp) VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON| VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON| VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR| - VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR), 0, 32), + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32), &vp_reg->kdfcctl_errors_mask); __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask); diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 3bf7d3f..765a7f5 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -1249,7 +1249,7 @@ static int __devinit hss_init_one(struct platform_device *pdev) return -ENOMEM; if ((port->npe = npe_request(0)) == NULL) { - err = -ENOSYS; + err = -ENODEV; goto err_free; } @@ -1311,7 +1311,7 @@ static int __init hss_init_module(void) if ((ixp4xx_read_feature_bits() & (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) != (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) - return -ENOSYS; + return -ENODEV; spin_lock_init(&npe_lock); diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index 02419bf..f9fc389 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c @@ -819,10 +819,9 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq, roq_data = (struct i2400m_roq_data *) &skb->cb; i2400m_net_erx(i2400m, skb, roq_data->cs); } - else { + else __i2400m_roq_queue(i2400m, roq, skb, sn, nsn); - __i2400m_roq_update_ws(i2400m, roq, 
sn + 1); - } + __i2400m_roq_update_ws(i2400m, roq, sn + 1); i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS, old_ws, len, sn, nsn, roq->ws); } diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index ca4151a..1785132 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -505,27 +505,52 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg) #ifdef CONFIG_PM struct usb_device *usb_dev = i2400mu->usb_dev; #endif + unsigned is_autosuspend = 0; struct i2400m *i2400m = &i2400mu->i2400m; +#ifdef CONFIG_PM + if (usb_dev->auto_pm > 0) + is_autosuspend = 1; +#endif + d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event); if (i2400m->updown == 0) goto no_firmware; - d_printf(1, dev, "fw up, requesting standby\n"); + if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) { + /* ugh -- the device is connected and this suspend + * request is an autosuspend one (not a system standby + * / hibernate). + * + * The only way the device can go to standby is if the + * link with the base station is in IDLE mode; that + * were the case, we'd be in status + * I2400M_SS_CONNECTED_IDLE. But we are not. + * + * If we *tell* him to go power save now, it'll reset + * as a precautionary measure, so if this is an + * autosuspend thing, say no and it'll come back + * later, when the link is IDLE + */ + result = -EBADF; + d_printf(1, dev, "fw up, link up, not-idle, autosuspend: " + "not entering powersave\n"); + goto error_not_now; + } + d_printf(1, dev, "fw up: entering powersave\n"); atomic_dec(&i2400mu->do_autopm); result = i2400m_cmd_enter_powersave(i2400m); atomic_inc(&i2400mu->do_autopm); -#ifdef CONFIG_PM - if (result < 0 && usb_dev->auto_pm == 0) { + if (result < 0 && !is_autosuspend) { /* System suspend, can't fail */ dev_err(dev, "failed to suspend, will reset on resume\n"); result = 0; } -#endif if (result < 0) goto error_enter_powersave; i2400mu_notification_release(i2400mu); - d_printf(1, dev, "fw up, got standby\n"); + d_printf(1, dev, "powersave requested\n"); error_enter_powersave: +error_not_now: no_firmware: d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n", iface, pm_msg.event, result); diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 8a08235..3d94e7d 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -430,6 +430,7 @@ config RTL8187 ASUS P5B Deluxe Toshiba Satellite Pro series of laptops Asus Wireless Link + Linksys WUSB54GC-EU Thanks to Realtek for their support! 
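The i2400mu_suspend() rework above draws a line between runtime autosuspend and a system suspend: with the data path connected, asking the device to enter powersave would reset the link, so an autosuspend request is refused and retried later, while a system suspend always proceeds. A condensed sketch of that decision, assuming the driver's private i2400m.h definitions (the helper name is illustrative, not part of the patch):

	#include <linux/types.h>
	#include <linux/errno.h>
	#include "i2400m.h"	/* driver-private: struct i2400m, I2400M_SS_* states */

	/*
	 * Return 0 when there is nothing to do (no firmware loaded), a
	 * negative error to refuse an autosuspend while the data path is
	 * up, or 1 when it is safe to request powersave.
	 */
	static int i2400mu_powersave_ok(struct i2400m *i2400m, bool is_autosuspend)
	{
		if (i2400m->updown == 0)
			return 0;	/* no firmware: nothing to suspend */
		if (is_autosuspend &&
		    i2400m->state == I2400M_SS_DATA_PATH_CONNECTED)
			return -EBADF;	/* device would reset; retry once the
					 * link drops to I2400M_SS_CONNECTED_IDLE */
		return 1;		/* system suspend, or link already idle */
	}

In the patch itself this check is folded directly into i2400mu_suspend(), with usb_dev->auto_pm (under CONFIG_PM) supplying the is_autosuspend flag.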
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index f21a617..9eabf4d 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -6467,6 +6467,7 @@ static int airo_get_encode(struct net_device *dev, { struct airo_info *local = dev->ml_priv; int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + int wep_key_len; u8 buf[16]; if (!local->wep_capable) @@ -6500,8 +6501,13 @@ static int airo_get_encode(struct net_device *dev, dwrq->flags |= index + 1; /* Copy the key to the user buffer */ - dwrq->length = get_wep_key(local, index, &buf[0], sizeof(buf)); - memcpy(extra, buf, dwrq->length); + wep_key_len = get_wep_key(local, index, &buf[0], sizeof(buf)); + if (wep_key_len < 0) { + dwrq->length = 0; + } else { + dwrq->length = wep_key_len; + memcpy(extra, buf, dwrq->length); + } return 0; } @@ -6614,7 +6620,7 @@ static int airo_get_encodeext(struct net_device *dev, struct airo_info *local = dev->ml_priv; struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; - int idx, max_key_len; + int idx, max_key_len, wep_key_len; u8 buf[16]; if (!local->wep_capable) @@ -6658,8 +6664,13 @@ static int airo_get_encodeext(struct net_device *dev, memset(extra, 0, 16); /* Copy the key to the user buffer */ - ext->key_len = get_wep_key(local, idx, &buf[0], sizeof(buf)); - memcpy(extra, buf, ext->key_len); + wep_key_len = get_wep_key(local, idx, &buf[0], sizeof(buf)); + if (wep_key_len < 0) { + ext->key_len = 0; + } else { + ext->key_len = wep_key_len; + memcpy(extra, buf, ext->key_len); + } return 0; } @@ -6713,11 +6724,11 @@ static int airo_set_auth(struct net_device *dev, local->config.authType = AUTH_ENCRYPT; } else return -EINVAL; - break; /* Commit the changes to flags if needed */ if (local->config.authType != currentAuthType) set_bit (FLAG_COMMIT, &local->flags); + break; } case IW_AUTH_WPA_ENABLED: diff --git a/drivers/net/wireless/ar9170/hw.h b/drivers/net/wireless/ar9170/hw.h index 13091bd..53e250a 100644 --- a/drivers/net/wireless/ar9170/hw.h +++ b/drivers/net/wireless/ar9170/hw.h @@ -310,7 +310,7 @@ struct ar9170_tx_control { struct ar9170_rx_head { u8 plcp[12]; -}; +} __packed; struct ar9170_rx_tail { union { @@ -318,16 +318,16 @@ struct ar9170_rx_tail { u8 rssi_ant0, rssi_ant1, rssi_ant2, rssi_ant0x, rssi_ant1x, rssi_ant2x, rssi_combined; - }; + } __packed; u8 rssi[7]; - }; + } __packed; u8 evm_stream0[6], evm_stream1[6]; u8 phy_err; u8 SAidx, DAidx; u8 error; u8 status; -}; +} __packed; #define AR9170_ENC_ALG_NONE 0x0 #define AR9170_ENC_ALG_WEP64 0x1 diff --git a/drivers/net/wireless/ar9170/usb.c b/drivers/net/wireless/ar9170/usb.c index ad29684..fddda47 100644 --- a/drivers/net/wireless/ar9170/usb.c +++ b/drivers/net/wireless/ar9170/usb.c @@ -59,6 +59,8 @@ static struct usb_device_id ar9170_usb_ids[] = { { USB_DEVICE(0x0cf3, 0x9170) }, /* Atheros TG121N */ { USB_DEVICE(0x0cf3, 0x1001) }, + /* Cace Airpcap NX */ + { USB_DEVICE(0xcace, 0x0300) }, /* D-Link DWA 160A */ { USB_DEVICE(0x07d1, 0x3c10) }, /* Netgear WNDA3100 */ @@ -67,6 +69,8 @@ static struct usb_device_id ar9170_usb_ids[] = { { USB_DEVICE(0x0846, 0x9001) }, /* Zydas ZD1221 */ { USB_DEVICE(0x0ace, 0x1221) }, + /* ZyXEL NWD271N */ + { USB_DEVICE(0x0586, 0x3417) }, /* Z-Com UB81 BG */ { USB_DEVICE(0x0cde, 0x0023) }, /* Z-Com UB82 ABG */ @@ -619,6 +623,39 @@ static int ar9170_usb_open(struct ar9170 *ar) return 0; } +static int ar9170_usb_init_device(struct ar9170_usb *aru) +{ + int err; + + err = ar9170_usb_alloc_rx_irq_urb(aru); + if (err) + goto err_out; + 
+ err = ar9170_usb_alloc_rx_bulk_urbs(aru); + if (err) + goto err_unrx; + + err = ar9170_usb_upload_firmware(aru); + if (err) { + err = ar9170_echo_test(&aru->common, 0x60d43110); + if (err) { + /* force user invention, by disabling the device */ + err = usb_driver_set_configuration(aru->udev, -1); + dev_err(&aru->udev->dev, "device is in a bad state. " + "please reconnect it!\n"); + goto err_unrx; + } + } + + return 0; + +err_unrx: + ar9170_usb_cancel_urbs(aru); + +err_out: + return err; +} + static int ar9170_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -654,32 +691,16 @@ static int ar9170_usb_probe(struct usb_interface *intf, err = ar9170_usb_reset(aru); if (err) - goto err_unlock; + goto err_freehw; err = ar9170_usb_request_firmware(aru); if (err) - goto err_unlock; + goto err_freehw; - err = ar9170_usb_alloc_rx_irq_urb(aru); + err = ar9170_usb_init_device(aru); if (err) goto err_freefw; - err = ar9170_usb_alloc_rx_bulk_urbs(aru); - if (err) - goto err_unrx; - - err = ar9170_usb_upload_firmware(aru); - if (err) { - err = ar9170_echo_test(&aru->common, 0x60d43110); - if (err) { - /* force user invention, by disabling the device */ - err = usb_driver_set_configuration(aru->udev, -1); - dev_err(&aru->udev->dev, "device is in a bad state. " - "please reconnect it!\n"); - goto err_unrx; - } - } - err = ar9170_usb_open(ar); if (err) goto err_unrx; @@ -699,7 +720,7 @@ err_freefw: release_firmware(aru->init_values); release_firmware(aru->firmware); -err_unlock: +err_freehw: usb_set_intfdata(intf, NULL); usb_put_dev(udev); ieee80211_free_hw(ar->hw); @@ -726,12 +747,65 @@ static void ar9170_usb_disconnect(struct usb_interface *intf) ieee80211_free_hw(aru->common.hw); } +#ifdef CONFIG_PM +static int ar9170_suspend(struct usb_interface *intf, + pm_message_t message) +{ + struct ar9170_usb *aru = usb_get_intfdata(intf); + + if (!aru) + return -ENODEV; + + aru->common.state = AR9170_IDLE; + ar9170_usb_cancel_urbs(aru); + + return 0; +} + +static int ar9170_resume(struct usb_interface *intf) +{ + struct ar9170_usb *aru = usb_get_intfdata(intf); + int err; + + if (!aru) + return -ENODEV; + + usb_unpoison_anchored_urbs(&aru->rx_submitted); + usb_unpoison_anchored_urbs(&aru->tx_submitted); + + /* + * FIXME: firmware upload will fail on resume. + * but this is better than a hang! 
+ */ + + err = ar9170_usb_init_device(aru); + if (err) + goto err_unrx; + + err = ar9170_usb_open(&aru->common); + if (err) + goto err_unrx; + + return 0; + +err_unrx: + aru->common.state = AR9170_IDLE; + ar9170_usb_cancel_urbs(aru); + + return err; +} +#endif /* CONFIG_PM */ + static struct usb_driver ar9170_driver = { .name = "ar9170usb", .probe = ar9170_usb_probe, .disconnect = ar9170_usb_disconnect, .id_table = ar9170_usb_ids, .soft_unbind = 1, +#ifdef CONFIG_PM + .suspend = ar9170_suspend, + .resume = ar9170_resume, +#endif /* CONFIG_PM */ }; static int __init ar9170_init(void) diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 0c02f1c..8d93ca4 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -250,6 +250,8 @@ static struct usb_device_id dev_table[] = { { USB_DEVICE(0x03eb, 0x7617), USB_DEVICE_DATA(BOARD_505A) }, /* Siemens Gigaset USB WLAN Adapter 11 */ { USB_DEVICE(0x1690, 0x0701), USB_DEVICE_DATA(BOARD_505A) }, + /* OQO Model 01+ Internal Wi-Fi */ + { USB_DEVICE(0x1557, 0x0002), USB_DEVICE_DATA(BOARD_505A) }, /* * at76c505amx-rfmd */ @@ -1871,18 +1873,18 @@ static void at76_dwork_hw_scan(struct work_struct *work) if (ret != CMD_STATUS_COMPLETE) { queue_delayed_work(priv->hw->workqueue, &priv->dwork_hw_scan, SCAN_POLL_INTERVAL); - goto exit; + mutex_unlock(&priv->mtx); + return; } - ieee80211_scan_completed(priv->hw, false); - if (is_valid_ether_addr(priv->bssid)) at76_join(priv); - ieee80211_wake_queues(priv->hw); - -exit: mutex_unlock(&priv->mtx); + + ieee80211_scan_completed(priv->hw, false); + + ieee80211_wake_queues(priv->hw); } static int at76_hw_scan(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index a08bc8a..32df27a 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -214,7 +214,7 @@ static struct pci_driver ath5k_pci_driver = { * Prototypes - MAC 802.11 stack related functions */ static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb); -static int ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel); +static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan); static int ath5k_reset_wake(struct ath5k_softc *sc); static int ath5k_start(struct ieee80211_hw *hw); static void ath5k_stop(struct ieee80211_hw *hw); @@ -1038,16 +1038,13 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) if (chan->center_freq != sc->curchan->center_freq || chan->hw_value != sc->curchan->hw_value) { - sc->curchan = chan; - sc->curband = &sc->sbands[chan->band]; - /* * To switch channels clear any pending DMA operations; * wait long enough for the RX fifo to drain, reset the * hardware at the new frequency, and then re-enable * the relevant bits of the h/w. */ - return ath5k_reset(sc, true, true); + return ath5k_reset(sc, chan); } return 0; @@ -2314,7 +2311,7 @@ ath5k_init(struct ath5k_softc *sc) sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | AR5K_INT_FATAL | AR5K_INT_GLOBAL; - ret = ath5k_reset(sc, false, false); + ret = ath5k_reset(sc, NULL); if (ret) goto done; @@ -2599,18 +2596,25 @@ drop_packet: return NETDEV_TX_OK; } +/* + * Reset the hardware. If chan is not NULL, then also pause rx/tx + * and change to the given channel. 
+ */ static int -ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel) +ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan) { struct ath5k_hw *ah = sc->ah; int ret; ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); - if (stop) { + if (chan) { ath5k_hw_set_imr(ah, 0); ath5k_txq_cleanup(sc); ath5k_rx_stop(sc); + + sc->curchan = chan; + sc->curband = &sc->sbands[chan->band]; } ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true); if (ret) { @@ -2648,7 +2652,7 @@ ath5k_reset_wake(struct ath5k_softc *sc) { int ret; - ret = ath5k_reset(sc, true, true); + ret = ath5k_reset(sc, sc->curchan); if (!ret) ieee80211_wake_queues(sc->hw); diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c index 9770bb3..4904a07 100644 --- a/drivers/net/wireless/ath5k/debug.c +++ b/drivers/net/wireless/ath5k/debug.c @@ -424,7 +424,7 @@ ath5k_debug_dump_bands(struct ath5k_softc *sc) for (b = 0; b < IEEE80211_NUM_BANDS; b++) { struct ieee80211_supported_band *band = &sc->sbands[b]; - char bname[5]; + char bname[6]; switch (band->band) { case IEEE80211_BAND_2GHZ: strcpy(bname, "2 GHz"); diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c index 9e2faae..b48b29d 100644 --- a/drivers/net/wireless/ath5k/phy.c +++ b/drivers/net/wireless/ath5k/phy.c @@ -1487,28 +1487,35 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR, { s8 tmp; s16 min_pwrL, min_pwrR; - s16 pwr_i = pwrL[0]; - - do { - pwr_i--; - tmp = (s8) ath5k_get_interpolated_value(pwr_i, - pwrL[0], pwrL[1], - stepL[0], stepL[1]); - - } while (tmp > 1); - - min_pwrL = pwr_i; - - pwr_i = pwrR[0]; - do { - pwr_i--; - tmp = (s8) ath5k_get_interpolated_value(pwr_i, - pwrR[0], pwrR[1], - stepR[0], stepR[1]); - - } while (tmp > 1); + s16 pwr_i; + + if (pwrL[0] == pwrL[1]) + min_pwrL = pwrL[0]; + else { + pwr_i = pwrL[0]; + do { + pwr_i--; + tmp = (s8) ath5k_get_interpolated_value(pwr_i, + pwrL[0], pwrL[1], + stepL[0], stepL[1]); + } while (tmp > 1); + + min_pwrL = pwr_i; + } - min_pwrR = pwr_i; + if (pwrR[0] == pwrR[1]) + min_pwrR = pwrR[0]; + else { + pwr_i = pwrR[0]; + do { + pwr_i--; + tmp = (s8) ath5k_get_interpolated_value(pwr_i, + pwrR[0], pwrR[1], + stepR[0], stepR[1]); + } while (tmp > 1); + + min_pwrR = pwr_i; + } /* Keep the right boundary so that it works for both curves */ return max(min_pwrL, min_pwrR); diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c index 7a17d31..5f72c11 100644 --- a/drivers/net/wireless/ath5k/reset.c +++ b/drivers/net/wireless/ath5k/reset.c @@ -26,7 +26,7 @@ \*****************************/ #include <linux/pci.h> /* To determine if a card is pci-e */ -#include <linux/bitops.h> /* For get_bitmask_order */ +#include <linux/log2.h> #include "ath5k.h" #include "reg.h" #include "base.h" @@ -69,10 +69,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, /* Get exponent * ALGO: coef_exp = 14 - highest set bit position */ - coef_exp = get_bitmask_order(coef_scaled); + coef_exp = ilog2(coef_scaled); /* Doesn't make sense if it's zero*/ - if (!coef_exp) + if (!coef_scaled || !coef_exp) return -EINVAL; /* Note: we've shifted coef_scaled by 24 */ @@ -359,7 +359,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial) mode |= AR5K_PHY_MODE_FREQ_5GHZ; if (ah->ah_radio == AR5K_RF5413) - clock |= AR5K_PHY_PLL_40MHZ_5413; + clock = AR5K_PHY_PLL_40MHZ_5413; else clock |= AR5K_PHY_PLL_40MHZ; diff --git a/drivers/net/wireless/ath9k/pci.c b/drivers/net/wireless/ath9k/pci.c index 
6dbc585..168411d 100644 --- a/drivers/net/wireless/ath9k/pci.c +++ b/drivers/net/wireless/ath9k/pci.c @@ -93,14 +93,14 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (pci_enable_device(pdev)) return -EIO; - ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { printk(KERN_ERR "ath9k: 32-bit DMA not available\n"); goto bad; } - ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { printk(KERN_ERR "ath9k: 32-bit DMA consistent " diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c index 71cb18d..dd1f301 100644 --- a/drivers/net/wireless/ath9k/recv.c +++ b/drivers/net/wireless/ath9k/recv.c @@ -493,6 +493,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush) int hdrlen, padsize, retval; bool decrypt_error = false; u8 keyix; + __le16 fc; spin_lock_bh(&sc->rx.rxbuflock); @@ -606,6 +607,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush) /* see if any padding is done by the hw and remove it */ hdr = (struct ieee80211_hdr *)skb->data; hdrlen = ieee80211_get_hdrlen_from_skb(skb); + fc = hdr->frame_control; /* The MAC header is padded to have 32-bit boundary if the * packet payload is non-zero. The general calculation for @@ -690,7 +692,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush) sc->rx.rxotherant = 0; } - if (ieee80211_is_beacon(hdr->frame_control) && + if (ieee80211_is_beacon(fc) && (sc->sc_flags & SC_OP_WAIT_FOR_BEACON)) { sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON; ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP); diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 857d841..27eef8f 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -1502,7 +1502,6 @@ static const struct net_device_ops atmel_netdev_ops = { .ndo_set_mac_address = atmel_set_mac_address, .ndo_start_xmit = start_tx, .ndo_do_ioctl = atmel_ioctl, - .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index e228c1d..eae680b 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -555,11 +555,32 @@ address_error: return 1; } +static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb) +{ + unsigned char *f = skb->data + ring->frameoffset; + + return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF); +} + +static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb) +{ + struct b43_rxhdr_fw4 *rxhdr; + unsigned char *frame; + + /* This poisons the RX buffer to detect DMA failures. 
*/ + + rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); + rxhdr->frame_len = 0; + + B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2); + frame = skb->data + ring->frameoffset; + memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */); +} + static int setup_rx_descbuffer(struct b43_dmaring *ring, struct b43_dmadesc_generic *desc, struct b43_dmadesc_meta *meta, gfp_t gfp_flags) { - struct b43_rxhdr_fw4 *rxhdr; dma_addr_t dmaaddr; struct sk_buff *skb; @@ -568,6 +589,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring, skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); if (unlikely(!skb)) return -ENOMEM; + b43_poison_rx_buffer(ring, skb); dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { /* ugh. try to realloc in zone_dma */ @@ -578,6 +600,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring, skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); if (unlikely(!skb)) return -ENOMEM; + b43_poison_rx_buffer(ring, skb); dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { @@ -592,9 +615,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring, ring->ops->fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0); - rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); - rxhdr->frame_len = 0; - return 0; } @@ -1483,12 +1503,17 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) len = le16_to_cpu(rxhdr->frame_len); } while (len == 0 && i++ < 5); if (unlikely(len == 0)) { - /* recycle the descriptor buffer. */ - sync_descbuffer_for_device(ring, meta->dmaaddr, - ring->rx_buffersize); - goto drop; + dmaaddr = meta->dmaaddr; + goto drop_recycle_buffer; } } + if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) { + /* Something went wrong with the DMA. + * The device did not touch the buffer and did not overwrite the poison. */ + b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n"); + dmaaddr = meta->dmaaddr; + goto drop_recycle_buffer; + } if (unlikely(len > ring->rx_buffersize)) { /* The data did not fit into one descriptor buffer * and is split over multiple buffers. @@ -1501,6 +1526,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) while (1) { desc = ops->idx2desc(ring, *slot, &meta); /* recycle the descriptor buffer. */ + b43_poison_rx_buffer(ring, meta->skb); sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize); *slot = next_slot(ring, *slot); @@ -1519,8 +1545,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); if (unlikely(err)) { b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n"); - sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); - goto drop; + goto drop_recycle_buffer; } unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); @@ -1530,6 +1555,11 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) b43_rx(ring->dev, skb, rxhdr); drop: return; + +drop_recycle_buffer: + /* Poison and recycle the RX buffer. 
*/ + b43_poison_rx_buffer(ring, skb); + sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); } void b43_dma_rx(struct b43_dmaring *ring) diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 4896e08..79b685e 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -3974,6 +3974,11 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev, phy->next_txpwr_check_time = jiffies; /* PHY TX errors counter. */ atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); + +#if B43_DEBUG + phy->phy_locked = 0; + phy->radio_locked = 0; +#endif } static void setup_struct_wldev_for_init(struct b43_wldev *dev) diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c index 026b61c..e176b6e 100644 --- a/drivers/net/wireless/b43/phy_common.c +++ b/drivers/net/wireless/b43/phy_common.c @@ -131,12 +131,16 @@ void b43_radio_lock(struct b43_wldev *dev) { u32 macctl; +#if B43_DEBUG + B43_WARN_ON(dev->phy.radio_locked); + dev->phy.radio_locked = 1; +#endif + macctl = b43_read32(dev, B43_MMIO_MACCTL); - B43_WARN_ON(macctl & B43_MACCTL_RADIOLOCK); macctl |= B43_MACCTL_RADIOLOCK; b43_write32(dev, B43_MMIO_MACCTL, macctl); - /* Commit the write and wait for the device - * to exit any radio register access. */ + /* Commit the write and wait for the firmware + * to finish any radio register access. */ b43_read32(dev, B43_MMIO_MACCTL); udelay(10); } @@ -145,11 +149,15 @@ void b43_radio_unlock(struct b43_wldev *dev) { u32 macctl; +#if B43_DEBUG + B43_WARN_ON(!dev->phy.radio_locked); + dev->phy.radio_locked = 0; +#endif + /* Commit any write */ b43_read16(dev, B43_MMIO_PHY_VER); /* unlock */ macctl = b43_read32(dev, B43_MMIO_MACCTL); - B43_WARN_ON(!(macctl & B43_MACCTL_RADIOLOCK)); macctl &= ~B43_MACCTL_RADIOLOCK; b43_write32(dev, B43_MMIO_MACCTL, macctl); } diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h index c9f5430..b2d9910 100644 --- a/drivers/net/wireless/b43/phy_common.h +++ b/drivers/net/wireless/b43/phy_common.h @@ -245,8 +245,10 @@ struct b43_phy { atomic_t txerr_cnt; #ifdef CONFIG_B43_DEBUG - /* PHY registers locked by b43_phy_lock()? */ + /* PHY registers locked (w.r.t. firmware) */ bool phy_locked; + /* Radio registers locked (w.r.t. 
firmware) */ + bool radio_locked; #endif /* B43_DEBUG */ }; diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 2399328..527525c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -1192,7 +1192,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv) return -ENOMEM; } } else - iwl_rx_queue_reset(priv, rxq); + iwl3945_rx_queue_reset(priv, rxq); iwl3945_rx_replenish(priv); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h index ab7aaf6..5518884 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945.h @@ -215,6 +215,7 @@ extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm); extern int iwl3945_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, int count, u32 id); extern void iwl3945_rx_replenish(void *data); +extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); extern void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq); extern int iwl3945_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data); diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index e5ca251..9452461 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -46,7 +46,7 @@ #include "iwl-6000-hw.h" /* Highest firmware API version supported */ -#define IWL5000_UCODE_API_MAX 1 +#define IWL5000_UCODE_API_MAX 2 #define IWL5150_UCODE_API_MAX 2 /* Lowest firmware API version supported */ diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index edfa5e1..bd438d8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -101,8 +101,8 @@ struct iwl_cfg iwl6000_2agn_cfg = { .eeprom_ver = EEPROM_5000_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, .mod_params = &iwl50_mod_params, - .valid_tx_ant = ANT_BC, - .valid_rx_ant = ANT_BC, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, .need_pll_cfg = false, }; @@ -117,8 +117,8 @@ struct iwl_cfg iwl6050_2agn_cfg = { .eeprom_ver = EEPROM_5000_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, .mod_params = &iwl50_mod_params, - .valid_tx_ant = ANT_BC, - .valid_rx_ant = ANT_BC, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, .need_pll_cfg = false, }; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 3889158..f46ba24 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -669,13 +669,6 @@ static int iwl_set_mode(struct iwl_priv *priv, int mode) if (!iwl_is_ready_rf(priv)) return -EAGAIN; - cancel_delayed_work(&priv->scan_check); - if (iwl_scan_cancel_timeout(priv, 100)) { - IWL_WARN(priv, "Aborted scan still in progress after 100ms\n"); - IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n"); - return -EAGAIN; - } - iwl_commit_rxon(priv); return 0; @@ -976,11 +969,9 @@ void iwl_rx_handle(struct iwl_priv *priv) rxq->queue[i] = NULL; - dma_sync_single_range_for_cpu( - &priv->pci_dev->dev, rxb->real_dma_addr, - rxb->aligned_dma_addr - rxb->real_dma_addr, - priv->hw_params.rx_buf_size, - PCI_DMA_FROMDEVICE); + pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, + priv->hw_params.rx_buf_size + 256, + PCI_DMA_FROMDEVICE); pkt = (struct iwl_rx_packet *)rxb->skb->data; /* Reclaim a command buffer only if this packet is a response @@ 
-1031,9 +1022,6 @@ void iwl_rx_handle(struct iwl_priv *priv) rxb->skb = NULL; } - pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, - priv->hw_params.rx_buf_size + 256, - PCI_DMA_FROMDEVICE); spin_lock_irqsave(&rxq->lock, flags); list_add_tail(&rxb->list, &priv->rxq.rx_used); spin_unlock_irqrestore(&rxq->lock, flags); @@ -3641,7 +3629,9 @@ static struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x0085, 0x1112, iwl6000_2ag_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1122, iwl6000_2ag_cfg)}, {IWL_PCI_DEVICE(0x422B, PCI_ANY_ID, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, PCI_ANY_ID, iwl6000_2agn_cfg)}, {IWL_PCI_DEVICE(0x4238, PCI_ANY_ID, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, PCI_ANY_ID, iwl6000_2agn_cfg)}, {IWL_PCI_DEVICE(0x0082, PCI_ANY_ID, iwl6000_2agn_cfg)}, {IWL_PCI_DEVICE(0x0085, PCI_ANY_ID, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x0086, PCI_ANY_ID, iwl6050_3agn_cfg)}, diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index 2f12424..6e98314 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -223,7 +223,7 @@ #define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) /* EEPROM GP */ -#define CSR_EEPROM_GP_VALID_MSK (0x00000006) +#define CSR_EEPROM_GP_VALID_MSK (0x00000007) #define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000) #define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index ec9a138..cf7f0db 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -360,12 +360,16 @@ struct iwl_host_cmd { /** * struct iwl_rx_queue - Rx queue + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) + * @dma_addr: bus address of buffer of receive buffer descriptors (rbd) * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free * @rx_free: list of free SKBs for use * @rx_used: List of Rx buffers with no SKB * @need_update: flag to indicate we need to update read/write index + * @rb_stts: driver's pointer to receive buffer status + * @rb_stts_dma: bus address of receive buffer status * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index 23644cf..6330b91 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -227,9 +227,6 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, /* The HW is no longer scanning */ clear_bit(STATUS_SCAN_HW, &priv->status); - /* The scan completion notification came in, so kill that timer... */ - cancel_delayed_work(&priv->scan_check); - IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n", (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ? 
"2.4" : "5.2", @@ -712,6 +709,8 @@ static void iwl_bg_request_scan(struct work_struct *data) mutex_lock(&priv->mutex); + cancel_delayed_work(&priv->scan_check); + if (!iwl_is_ready(priv)) { IWL_WARN(priv, "request scan called when driver not ready.\n"); goto done; @@ -925,11 +924,13 @@ void iwl_bg_scan_completed(struct work_struct *work) IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; + cancel_delayed_work(&priv->scan_check); ieee80211_scan_completed(priv->hw, false); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + /* Since setting the TXPOWER may have been deferred while * performing the scan, fire one off */ mutex_lock(&priv->mutex); diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 5798fe4..44ab03a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -719,6 +719,14 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, { unsigned long flags; int ret = 0; + __le16 key_flags = 0; + + key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == priv->hw_params.bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; @@ -738,6 +746,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, "no space for a new key"); + priv->stations[sta_id].sta.key.key_flags = key_flags; + + /* This copy is acutally not needed: we get the key with each TX */ memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); @@ -754,9 +765,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv, { u8 sta_id = IWL_INVALID_STATION; unsigned long flags; - __le16 key_flags = 0; int i; - DECLARE_MAC_BUF(mac); sta_id = iwl_find_station(priv, addr); if (sta_id == IWL_INVALID_STATION) { @@ -771,16 +780,8 @@ void iwl_update_tkip_key(struct iwl_priv *priv, return; } - key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags &= ~STA_KEY_FLG_INVALID; - - if (sta_id == priv->hw_params.bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].sta.key.key_flags = key_flags; priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; for (i = 0; i < 5; i++) diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 1f117a4..71d5b8a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -799,6 +799,22 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdr_len); + + /* Total # bytes to be transmitted */ + len = (u16)skb->len; + tx_cmd->len = cpu_to_le16(len); + + if (info->control.hw_key) + iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); + + /* TODO need this for burst mode later on */ + iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id); + + /* set is_hcca to 0; it probably will never be implemented */ + iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0); + + iwl_update_tx_stats(priv, le16_to_cpu(fc), len); + /* * Use the first empty entry in this queue's command buffer array * to contain the Tx command and MAC header 
concatenated together @@ -819,21 +835,30 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) else len_org = 0; + /* Tell NIC about any 2-byte padding after MAC header */ + if (len_org) + tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + /* Physical address of this Tx command's header (not MAC header!), * within command buffer array. */ txcmd_phys = pci_map_single(priv->pci_dev, - out_cmd, sizeof(struct iwl_cmd), + &out_cmd->hdr, len, PCI_DMA_BIDIRECTIONAL); pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys); - pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd)); + pci_unmap_len_set(&out_cmd->meta, len, len); /* Add buffer containing Tx command and MAC(!) header to TFD's * first entry */ - txcmd_phys += offsetof(struct iwl_cmd, hdr); priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, txcmd_phys, len, 1, 0); - if (info->control.hw_key) - iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + if (qc) + priv->stations[sta_id].tid[tid].seq_number = seq_number; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } /* Set up TFD's 2nd entry to point directly to remainder of skb, * if any (802.11 null frames have no payload). */ @@ -846,41 +871,29 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) 0, 0); } - /* Tell NIC about any 2-byte padding after MAC header */ - if (len_org) - tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; - - /* Total # bytes to be transmitted */ - len = (u16)skb->len; - tx_cmd->len = cpu_to_le16(len); - /* TODO need this for burst mode later on */ - iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id); - - /* set is_hcca to 0; it probably will never be implemented */ - iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0); - - iwl_update_tx_stats(priv, le16_to_cpu(fc), len); - scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + - offsetof(struct iwl_tx_cmd, scratch); + offsetof(struct iwl_tx_cmd, scratch); + + len = sizeof(struct iwl_tx_cmd) + + sizeof(struct iwl_cmd_header) + hdr_len; + /* take back ownership of DMA buffer to enable update */ + pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, + len, PCI_DMA_BIDIRECTIONAL); tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); - if (!ieee80211_has_morefrags(hdr->frame_control)) { - txq->need_update = 1; - if (qc) - priv->stations[sta_id].tid[tid].seq_number = seq_number; - } else { - wait_write_ptr = 1; - txq->need_update = 0; - } - + IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", + le16_to_cpu(out_cmd->hdr.sequence)); + IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags)); iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); - iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); /* Set up entry for this TFD in Tx byte-count array */ - priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len); + priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, + le16_to_cpu(tx_cmd->len)); + + pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, + len, PCI_DMA_BIDIRECTIONAL); /* Tell device the write index *just past* this latest filled TFD */ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); @@ -968,18 +981,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) INDEX_TO_SEQ(q->write_ptr)); if (out_cmd->meta.flags & CMD_SIZE_HUGE) out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; - len = (idx == TFD_CMD_SLOTS) ? 
- IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd); - - phys_addr = pci_map_single(priv->pci_dev, out_cmd, - len, PCI_DMA_BIDIRECTIONAL); - pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr); - pci_unmap_len_set(&out_cmd->meta, len, len); - phys_addr += offsetof(struct iwl_cmd, hdr); + len = sizeof(struct iwl_cmd) - sizeof(struct iwl_cmd_meta); + len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0; - priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, - phys_addr, fix_size, 1, - U32_PAD(cmd->len)); #ifdef CONFIG_IWLWIFI_DEBUG switch (out_cmd->hdr.cmd) { @@ -1007,6 +1011,15 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) /* Set up entry in queue's byte count circular buffer */ priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0); + phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, + fix_size, PCI_DMA_BIDIRECTIONAL); + pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr); + pci_unmap_len_set(&out_cmd->meta, len, fix_size); + + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, fix_size, 1, + U32_PAD(cmd->len)); + /* Increment and update queue's write index */ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); ret = iwl_txq_update_write_ptr(priv, txq); diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index ce72928..ff4d0e4 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -782,13 +782,6 @@ static int iwl3945_set_mode(struct iwl_priv *priv, int mode) if (!iwl_is_ready_rf(priv)) return -EAGAIN; - cancel_delayed_work(&priv->scan_check); - if (iwl_scan_cancel_timeout(priv, 100)) { - IWL_WARN(priv, "Aborted scan still in progress after 100ms\n"); - IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n"); - return -EAGAIN; - } - iwl3945_commit_rxon(priv); return 0; @@ -972,7 +965,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) dma_addr_t phys_addr; dma_addr_t txcmd_phys; int txq_id = skb_get_queue_mapping(skb); - u16 len, idx, len_org, hdr_len; + u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */ u8 id; u8 unicast; u8 sta_id; @@ -1074,6 +1067,40 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) /* Copy MAC header from skb into command buffer */ memcpy(tx->hdr, hdr, hdr_len); + + if (info->control.hw_key) + iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id); + + /* TODO need this for burst mode later on */ + iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id); + + /* set is_hcca to 0; it probably will never be implemented */ + iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0); + + /* Total # bytes to be transmitted */ + len = (u16)skb->len; + tx->len = cpu_to_le16(len); + + + tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; + tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + if (qc) + priv->stations_39[sta_id].tid[tid].seq_number = seq_number; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", + le16_to_cpu(out_cmd->hdr.sequence)); + IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags)); + iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr, + ieee80211_hdrlen(fc)); + /* * Use the first empty entry in this queue's command buffer array * to contain the Tx command and MAC header concatenated together @@ -1096,22 +1123,18 @@ static int 
iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) /* Physical address of this Tx command's header (not MAC header!), * within command buffer array. */ - txcmd_phys = pci_map_single(priv->pci_dev, - out_cmd, sizeof(struct iwl_cmd), - PCI_DMA_TODEVICE); + txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr, + len, PCI_DMA_TODEVICE); + /* we do not map meta data ... so we can safely access address to + * provide to unmap command*/ pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys); - pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd)); - /* Add buffer containing Tx command and MAC(!) header to TFD's - * first entry */ - txcmd_phys += offsetof(struct iwl_cmd, hdr); + pci_unmap_len_set(&out_cmd->meta, len, len); /* Add buffer containing Tx command and MAC(!) header to TFD's * first entry */ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, txcmd_phys, len, 1, 0); - if (info->control.hw_key) - iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id); /* Set up TFD's 2nd entry to point directly to remainder of skb, * if any (802.11 null frames have no payload). */ @@ -1124,32 +1147,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) 0, U32_PAD(len)); } - /* Total # bytes to be transmitted */ - len = (u16)skb->len; - tx->len = cpu_to_le16(len); - - /* TODO need this for burst mode later on */ - iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id); - - /* set is_hcca to 0; it probably will never be implemented */ - iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0); - - tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; - tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; - - if (!ieee80211_has_morefrags(hdr->frame_control)) { - txq->need_update = 1; - if (qc) - priv->stations_39[sta_id].tid[tid].seq_number = seq_number; - } else { - wait_write_ptr = 1; - txq->need_update = 0; - } - - iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx)); - - iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr, - ieee80211_hdrlen(fc)); /* Tell device the write index *just past* this latest filled TFD */ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); @@ -1661,6 +1658,36 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv) spin_unlock_irqrestore(&rxq->lock, flags); } +void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, + rxq->pool[i].real_dma_addr, + priv->hw_params.rx_buf_size, + PCI_DMA_FROMDEVICE); + priv->alloc_rxb_skb--; + dev_kfree_skb(rxq->pool[i].skb); + rxq->pool[i].skb = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + /* * this should be called while priv->lock is locked */ @@ -1685,6 +1712,33 @@ void iwl3945_rx_replenish(void *data) spin_unlock_irqrestore(&priv->lock, flags); } +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, + rxq->pool[i].real_dma_addr, + priv->hw_params.rx_buf_size, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxq->pool[i].skb); + } + } + + pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->dma_addr); + pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->bd = NULL; + rxq->rb_stts = NULL; +} + + /* Convert linear signal-to-noise ratio into dB */ static u8 ratio2dB[100] = { /* 0 1 2 3 4 5 6 7 8 9 */ @@ -1802,9 +1856,9 @@ static void iwl3945_rx_handle(struct iwl_priv *priv) rxq->queue[i] = NULL; - pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr, - priv->hw_params.rx_buf_size, - PCI_DMA_FROMDEVICE); + pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, + priv->hw_params.rx_buf_size, + PCI_DMA_FROMDEVICE); pkt = (struct iwl_rx_packet *)rxb->skb->data; /* Reclaim a command buffer only if this packet is a response @@ -1852,9 +1906,6 @@ static void iwl3945_rx_handle(struct iwl_priv *priv) rxb->skb = NULL; } - pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, - priv->hw_params.rx_buf_size, - PCI_DMA_FROMDEVICE); spin_lock_irqsave(&rxq->lock, flags); list_add_tail(&rxb->list, &priv->rxq.rx_used); spin_unlock_irqrestore(&rxq->lock, flags); @@ -3240,6 +3291,8 @@ static void iwl3945_bg_request_scan(struct work_struct *data) mutex_lock(&priv->mutex); + cancel_delayed_work(&priv->scan_check); + if (!iwl_is_ready(priv)) { IWL_WARN(priv, "request scan called when driver not ready.\n"); goto done; @@ -4075,7 +4128,7 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (!static_key) { sta_id = iwl3945_hw_find_station(priv, addr); if (sta_id == IWL_INVALID_STATION) { - IWL_DEBUG_MAC80211(priv, "leave - %pMnot in station map.\n", + IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n", addr); return -EINVAL; } @@ -4913,6 +4966,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) hw->wiphy->custom_regulatory = true; + hw->wiphy->max_scan_ssids = 1; /* WILL FIX */ + /* Default value; 4 EDCA QOS priorities */ hw->queues = 4; @@ -5194,12 +5249,12 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); iwl_rfkill_unregister(priv); - cancel_delayed_work(&priv->rfkill_poll); + cancel_delayed_work_sync(&priv->rfkill_poll); iwl3945_dealloc_ucode_pci(priv); if (priv->rxq.bd) - iwl_rx_queue_free(priv, &priv->rxq); + iwl3945_rx_queue_free(priv, &priv->rxq); iwl3945_hw_txq_ctx_free(priv); iwl3945_unset_hw_params(priv); diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c index 63d7e19..8e66977 100644 --- a/drivers/net/wireless/libertas/rx.c +++ b/drivers/net/wireless/libertas/rx.c @@ -170,6 +170,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) lbs_deb_rx("rx err: frame received with bad length\n"); dev->stats.rx_length_errors++; ret = 0; + dev_kfree_skb(skb); goto done; } @@ -181,6 +182,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) lbs_pr_alert("rxpd not ok\n"); dev->stats.rx_errors++; ret = 0; + dev_kfree_skb(skb); goto done; 
} diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 57a0268..a9a9704 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -893,8 +893,7 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) rx_desc->next_rx_desc_phys_addr = cpu_to_le32(rxq->rx_desc_dma + nexti * sizeof(*rx_desc)); - rx_desc->rx_ctrl = - cpu_to_le32(MWL8K_RX_CTRL_OWNED_BY_HOST); + rx_desc->rx_ctrl = MWL8K_RX_CTRL_OWNED_BY_HOST; } return 0; @@ -3720,12 +3719,12 @@ err_free_reg: return rc; } -static void __devexit mwl8k_remove(struct pci_dev *pdev) +static void __devexit mwl8k_shutdown(struct pci_dev *pdev) { printk(KERN_ERR "===>%s(%u)\n", __func__, __LINE__); } -static void __devexit mwl8k_shutdown(struct pci_dev *pdev) +static void __devexit mwl8k_remove(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct mwl8k_priv *priv; diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c index 081428d..632fac8 100644 --- a/drivers/net/wireless/orinoco/hw.c +++ b/drivers/net/wireless/orinoco/hw.c @@ -372,15 +372,13 @@ int __orinoco_hw_set_tkip_key(hermes_t *hw, int key_idx, int set_tx, } /* Wait upto 100ms for tx queue to empty */ - k = 100; - do { - k--; + for (k = 100; k > 0; k--) { udelay(1000); ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY, &xmitting); - if (ret) + if (ret || !xmitting) break; - } while ((k > 0) && xmitting); + } if (k == 0) ret = -ETIMEDOUT; diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h index 2dda5fe..ecf8b6e 100644 --- a/drivers/net/wireless/p54/p54.h +++ b/drivers/net/wireless/p54/p54.h @@ -14,9 +14,9 @@ * published by the Free Software Foundation. */ -#ifdef CONFIG_MAC80211_LEDS +#ifdef CONFIG_P54_LEDS #include <linux/leds.h> -#endif /* CONFIG_MAC80211_LEDS */ +#endif /* CONFIG_P54_LEDS */ enum p54_control_frame_types { P54_CONTROL_TYPE_SETUP = 0, @@ -116,7 +116,7 @@ enum fw_state { FW_STATE_RESETTING, }; -#ifdef CONFIG_MAC80211_LEDS +#ifdef CONFIG_P54_LEDS #define P54_LED_MAX_NAME_LEN 31 @@ -129,7 +129,7 @@ struct p54_led_dev { unsigned int registered; }; -#endif /* CONFIG_MAC80211_LEDS */ +#endif /* CONFIG_P54_LEDS */ struct p54_common { struct ieee80211_hw *hw; @@ -177,10 +177,10 @@ struct p54_common { u8 privacy_caps; u8 rx_keycache_size; /* LED management */ - #ifdef CONFIG_MAC80211_LEDS +#ifdef CONFIG_P54_LEDS struct p54_led_dev assoc_led; struct p54_led_dev tx_led; - #endif /* CONFIG_MAC80211_LEDS */ +#endif /* CONFIG_P54_LEDS */ u16 softled_state; /* bit field of glowing LEDs */ }; diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c index 0c1b057..c8f0232 100644 --- a/drivers/net/wireless/p54/p54common.c +++ b/drivers/net/wireless/p54/p54common.c @@ -2543,8 +2543,6 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len) priv->basic_rate_mask = 0x15f; skb_queue_head_init(&priv->tx_queue); dev->flags = IEEE80211_HW_RX_INCLUDES_FCS | - IEEE80211_HW_SUPPORTS_PS | - IEEE80211_HW_PS_NULLFUNC_STACK | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index e3569a0..b1610ea 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -492,8 +492,8 @@ static int __devinit p54p_probe(struct pci_dev *pdev, goto err_disable_dev; } - if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || - pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || + 
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto err_free_reg; } diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index 2b222aa..d1fe577 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -457,9 +457,10 @@ static int p54spi_wq_tx(struct p54s_priv *priv) struct ieee80211_tx_info *info; struct p54_tx_info *minfo; struct p54s_tx_info *dinfo; + unsigned long flags; int ret = 0; - spin_lock_bh(&priv->tx_lock); + spin_lock_irqsave(&priv->tx_lock, flags); while (!list_empty(&priv->tx_pending)) { entry = list_entry(priv->tx_pending.next, @@ -467,7 +468,7 @@ static int p54spi_wq_tx(struct p54s_priv *priv) list_del_init(&entry->tx_list); - spin_unlock_bh(&priv->tx_lock); + spin_unlock_irqrestore(&priv->tx_lock, flags); dinfo = container_of((void *) entry, struct p54s_tx_info, tx_list); @@ -479,16 +480,14 @@ static int p54spi_wq_tx(struct p54s_priv *priv) ret = p54spi_tx_frame(priv, skb); - spin_lock_bh(&priv->tx_lock); - if (ret < 0) { p54_free_skb(priv->hw, skb); - goto out; + return ret; } - } -out: - spin_unlock_bh(&priv->tx_lock); + spin_lock_irqsave(&priv->tx_lock, flags); + } + spin_unlock_irqrestore(&priv->tx_lock, flags); return ret; } @@ -498,12 +497,13 @@ static void p54spi_op_tx(struct ieee80211_hw *dev, struct sk_buff *skb) struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct p54_tx_info *mi = (struct p54_tx_info *) info->rate_driver_data; struct p54s_tx_info *di = (struct p54s_tx_info *) mi->data; + unsigned long flags; BUILD_BUG_ON(sizeof(*di) > sizeof((mi->data))); - spin_lock_bh(&priv->tx_lock); + spin_lock_irqsave(&priv->tx_lock, flags); list_add_tail(&di->tx_list, &priv->tx_pending); - spin_unlock_bh(&priv->tx_lock); + spin_unlock_irqrestore(&priv->tx_lock, flags); queue_work(priv->hw->workqueue, &priv->work); } @@ -604,6 +604,7 @@ out: static void p54spi_op_stop(struct ieee80211_hw *dev) { struct p54s_priv *priv = dev->priv; + unsigned long flags; if (mutex_lock_interruptible(&priv->mutex)) { /* FIXME: how to handle this error? 
*/ @@ -615,9 +616,9 @@ static void p54spi_op_stop(struct ieee80211_hw *dev) cancel_work_sync(&priv->work); p54spi_power_off(priv); - spin_lock_bh(&priv->tx_lock); + spin_lock_irqsave(&priv->tx_lock, flags); INIT_LIST_HEAD(&priv->tx_pending); - spin_unlock_bh(&priv->tx_lock); + spin_unlock_irqrestore(&priv->tx_lock, flags); priv->fw_state = FW_STATE_OFF; mutex_unlock(&priv->mutex); diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index da6640a..6cc6cbc 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -71,6 +71,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x1260, 0xee22)}, /* SMC 2862W-G version 2 */ {USB_DEVICE(0x13b1, 0x000a)}, /* Linksys WUSB54G ver 2 */ {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ + {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */ diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index db91db7..bebf735 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2558,6 +2558,11 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf) mutex_init(&priv->command_lock); spin_lock_init(&priv->stats_lock); + /* because rndis_command() sleeps we need to use workqueue */ + priv->workqueue = create_singlethread_workqueue("rndis_wlan"); + INIT_WORK(&priv->work, rndis_wext_worker); + INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats); + /* try bind rndis_host */ retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS); if (retval < 0) @@ -2603,16 +2608,17 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf) disassociate(usbdev, 1); netif_carrier_off(usbdev->net); - /* because rndis_command() sleeps we need to use workqueue */ - priv->workqueue = create_singlethread_workqueue("rndis_wlan"); - INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats); queue_delayed_work(priv->workqueue, &priv->stats_work, round_jiffies_relative(STATS_UPDATE_JIFFIES)); - INIT_WORK(&priv->work, rndis_wext_worker); return 0; fail: + cancel_delayed_work_sync(&priv->stats_work); + cancel_work_sync(&priv->work); + flush_workqueue(priv->workqueue); + destroy_workqueue(priv->workqueue); + kfree(priv); return retval; } diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c index 07d378e..7b3ee8c 100644 --- a/drivers/net/wireless/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/rt2x00/rt2x00debug.c @@ -138,7 +138,7 @@ void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, if (cipher == CIPHER_TKIP_NO_MIC) cipher = CIPHER_TKIP; - if (cipher == CIPHER_NONE || cipher > CIPHER_MAX) + if (cipher == CIPHER_NONE || cipher >= CIPHER_MAX) return; /* Remove CIPHER_NONE index */ diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 05f94e2..5752aaa 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -646,10 +646,8 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) * Register HW. 
*/ status = ieee80211_register_hw(rt2x00dev->hw); - if (status) { - rt2x00lib_remove_hw(rt2x00dev); + if (status) return status; - } set_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags); diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index 43fa0f8..9730b4f 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -369,8 +369,6 @@ int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state) if (retval) return retval; - rt2x00pci_free_reg(rt2x00dev); - pci_save_state(pci_dev); pci_disable_device(pci_dev); return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); @@ -381,7 +379,6 @@ int rt2x00pci_resume(struct pci_dev *pci_dev) { struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); struct rt2x00_dev *rt2x00dev = hw->priv; - int retval; if (pci_set_power_state(pci_dev, PCI_D0) || pci_enable_device(pci_dev) || @@ -390,20 +387,7 @@ int rt2x00pci_resume(struct pci_dev *pci_dev) return -EIO; } - retval = rt2x00pci_alloc_reg(rt2x00dev); - if (retval) - return retval; - - retval = rt2x00lib_resume(rt2x00dev); - if (retval) - goto exit_free_reg; - - return 0; - -exit_free_reg: - rt2x00pci_free_reg(rt2x00dev); - - return retval; + return rt2x00lib_resume(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00pci_resume); #endif /* CONFIG_PM */ diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index 7d50ca8..5015448 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -702,8 +702,6 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state) if (retval) return retval; - rt2x00usb_free_reg(rt2x00dev); - /* * Decrease usbdev refcount. */ @@ -717,24 +715,10 @@ int rt2x00usb_resume(struct usb_interface *usb_intf) { struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); struct rt2x00_dev *rt2x00dev = hw->priv; - int retval; usb_get_dev(interface_to_usbdev(usb_intf)); - retval = rt2x00usb_alloc_reg(rt2x00dev); - if (retval) - return retval; - - retval = rt2x00lib_resume(rt2x00dev); - if (retval) - goto exit_free_reg; - - return 0; - -exit_free_reg: - rt2x00usb_free_reg(rt2x00dev); - - return retval; + return rt2x00lib_resume(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00usb_resume); #endif /* CONFIG_PM */ diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 420fff4..853b2b2 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2369,6 +2369,8 @@ static struct usb_device_id rt73usb_device_table[] = { /* Buffalo */ { USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) }, /* CNet */ { USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x1371, 0x9032), USB_DEVICE_DATA(&rt73usb_ops) }, diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h index 9718f61..edeff82 100644 --- a/drivers/net/wireless/rtl818x/rtl8187.h +++ b/drivers/net/wireless/rtl818x/rtl8187.h @@ -120,6 +120,12 @@ struct rtl8187_priv { __le64 buf; struct sk_buff_head queue; } b_tx_status; /* This queue is used by both -b and non-b devices */ + struct mutex io_mutex; + union { + u8 bits8; + __le16 bits16; + __le32 bits32; + } *io_dmabuf; }; void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data); @@ 
-129,10 +135,14 @@ static inline u8 rtl818x_ioread8_idx(struct rtl8187_priv *priv, { u8 val; + mutex_lock(&priv->io_mutex); usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), RTL8187_REQ_GET_REG, RTL8187_REQT_READ, - (unsigned long)addr, idx & 0x03, &val, - sizeof(val), HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits8, sizeof(val), HZ / 2); + + val = priv->io_dmabuf->bits8; + mutex_unlock(&priv->io_mutex); return val; } @@ -147,10 +157,14 @@ static inline u16 rtl818x_ioread16_idx(struct rtl8187_priv *priv, { __le16 val; + mutex_lock(&priv->io_mutex); usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), RTL8187_REQ_GET_REG, RTL8187_REQT_READ, - (unsigned long)addr, idx & 0x03, &val, - sizeof(val), HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits16, sizeof(val), HZ / 2); + + val = priv->io_dmabuf->bits16; + mutex_unlock(&priv->io_mutex); return le16_to_cpu(val); } @@ -165,10 +179,14 @@ static inline u32 rtl818x_ioread32_idx(struct rtl8187_priv *priv, { __le32 val; + mutex_lock(&priv->io_mutex); usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), RTL8187_REQ_GET_REG, RTL8187_REQT_READ, - (unsigned long)addr, idx & 0x03, &val, - sizeof(val), HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits32, sizeof(val), HZ / 2); + + val = priv->io_dmabuf->bits32; + mutex_unlock(&priv->io_mutex); return le32_to_cpu(val); } @@ -181,10 +199,15 @@ static inline u32 rtl818x_ioread32(struct rtl8187_priv *priv, __le32 *addr) static inline void rtl818x_iowrite8_idx(struct rtl8187_priv *priv, u8 *addr, u8 val, u8 idx) { + mutex_lock(&priv->io_mutex); + + priv->io_dmabuf->bits8 = val; usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, - (unsigned long)addr, idx & 0x03, &val, - sizeof(val), HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits8, sizeof(val), HZ / 2); + + mutex_unlock(&priv->io_mutex); } static inline void rtl818x_iowrite8(struct rtl8187_priv *priv, u8 *addr, u8 val) @@ -195,12 +218,15 @@ static inline void rtl818x_iowrite8(struct rtl8187_priv *priv, u8 *addr, u8 val) static inline void rtl818x_iowrite16_idx(struct rtl8187_priv *priv, __le16 *addr, u16 val, u8 idx) { - __le16 buf = cpu_to_le16(val); + mutex_lock(&priv->io_mutex); + priv->io_dmabuf->bits16 = cpu_to_le16(val); usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, - (unsigned long)addr, idx & 0x03, &buf, sizeof(buf), - HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits16, sizeof(val), HZ / 2); + + mutex_unlock(&priv->io_mutex); } static inline void rtl818x_iowrite16(struct rtl8187_priv *priv, __le16 *addr, @@ -212,12 +238,15 @@ static inline void rtl818x_iowrite16(struct rtl8187_priv *priv, __le16 *addr, static inline void rtl818x_iowrite32_idx(struct rtl8187_priv *priv, __le32 *addr, u32 val, u8 idx) { - __le32 buf = cpu_to_le32(val); + mutex_lock(&priv->io_mutex); + priv->io_dmabuf->bits32 = cpu_to_le32(val); usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, - (unsigned long)addr, idx & 0x03, &buf, sizeof(buf), - HZ / 2); + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits32, sizeof(val), HZ / 2); + + mutex_unlock(&priv->io_mutex); } static inline void rtl818x_iowrite32(struct rtl8187_priv *priv, __le32 *addr, diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index fd81884..d51ba0a 100644 --- 
a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c @@ -71,6 +71,8 @@ static struct usb_device_id rtl8187_table[] __devinitdata = { {USB_DEVICE(0x18E8, 0x6232), .driver_info = DEVICE_RTL8187}, /* AirLive */ {USB_DEVICE(0x1b75, 0x8187), .driver_info = DEVICE_RTL8187}, + /* Linksys */ + {USB_DEVICE(0x1737, 0x0073), .driver_info = DEVICE_RTL8187B}, {} }; @@ -1329,6 +1331,14 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, priv = dev->priv; priv->is_rtl8187b = (id->driver_info == DEVICE_RTL8187B); + /* allocate "DMA aware" buffer for register accesses */ + priv->io_dmabuf = kmalloc(sizeof(*priv->io_dmabuf), GFP_KERNEL); + if (!priv->io_dmabuf) { + err = -ENOMEM; + goto err_free_dev; + } + mutex_init(&priv->io_mutex); + SET_IEEE80211_DEV(dev, &intf->dev); usb_set_intfdata(intf, dev); priv->udev = udev; @@ -1495,7 +1505,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, err = ieee80211_register_hw(dev); if (err) { printk(KERN_ERR "rtl8187: Cannot register device\n"); - goto err_free_dev; + goto err_free_dmabuf; } mutex_init(&priv->conf_mutex); skb_queue_head_init(&priv->b_tx_status.queue); @@ -1506,6 +1516,8 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, return 0; + err_free_dmabuf: + kfree(priv->io_dmabuf); err_free_dev: ieee80211_free_hw(dev); usb_set_intfdata(intf, NULL); @@ -1526,6 +1538,7 @@ static void __devexit rtl8187_disconnect(struct usb_interface *intf) priv = dev->priv; usb_reset_device(priv->udev); usb_put_dev(interface_to_usbdev(intf)); + kfree(priv->io_dmabuf); ieee80211_free_hw(dev); } diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c index 78df281..a098193 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c +++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c @@ -88,9 +88,15 @@ static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data) rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80); udelay(10); + mutex_lock(&priv->io_mutex); + + priv->io_dmabuf->bits16 = data; usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, - addr, 0x8225, &data, sizeof(data), HZ / 2); + addr, 0x8225, &priv->io_dmabuf->bits16, sizeof(data), + HZ / 2); + + mutex_unlock(&priv->io_mutex); rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); udelay(10); diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c index a12a721..5a4ad15 100644 --- a/drivers/net/xtsonic.c +++ b/drivers/net/xtsonic.c @@ -108,6 +108,18 @@ static int xtsonic_close(struct net_device *dev) return err; } +static const struct net_device_ops xtsonic_netdev_ops = { + .ndo_open = xtsonic_open, + .ndo_stop = xtsonic_close, + .ndo_start_xmit = sonic_send_packet, + .ndo_get_stats = sonic_get_stats, + .ndo_set_multicast_list = sonic_multicast_list, + .ndo_tx_timeout = sonic_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __init sonic_probe1(struct net_device *dev) { static unsigned version_printed = 0; @@ -205,12 +217,7 @@ static int __init sonic_probe1(struct net_device *dev) lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode)); - dev->open = xtsonic_open; - dev->stop = xtsonic_close; - dev->hard_start_xmit = sonic_send_packet; - dev->get_stats = sonic_get_stats; - dev->set_multicast_list = &sonic_multicast_list; - dev->tx_timeout = sonic_tx_timeout; 
+ dev->netdev_ops = &xtsonic_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; /* |