Diffstat (limited to 'drivers/net/ethernet/broadcom')
23 files changed, 1418 insertions, 641 deletions
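Two mechanical conversions recur through the bcm63xx_enet.c and bnx2 hunks below: open-coded zeroed DMA allocations (dma_alloc_coherent() with GFP_KERNEL | __GFP_ZERO) become dma_zalloc_coherent(), and the removed devm_request_and_ioremap() becomes devm_ioremap_resource(), which validates the resource itself and reports failure through an ERR_PTR-encoded pointer rather than NULL. The platform-data hunks likewise switch raw pdev->dev.platform_data reads to dev_get_platdata(). A minimal probe-path sketch of all three idioms follows; foo_probe and the 4096-byte ring size are hypothetical, not from the patch:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	dma_addr_t ring_dma;
	void *ring, *pd;

	/* devm_ioremap_resource() checks 'res' itself and returns an
	 * ERR_PTR() on failure, so the caller propagates PTR_ERR() instead
	 * of a guessed -ENOMEM, and the explicit NULL check on the memory
	 * resource can be dropped from the probe path. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* dma_zalloc_coherent() is dma_alloc_coherent() with __GFP_ZERO
	 * folded in, which is exactly what the converted sites wanted. */
	ring = dma_zalloc_coherent(&pdev->dev, 4096, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* dev_get_platdata() replaces raw pdev->dev.platform_data reads. */
	pd = dev_get_platdata(&pdev->dev);
	if (!pd)
		dev_info(&pdev->dev, "no platform data\n");

	return 0;
}
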
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 52c9603..2fa5b86 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -130,7 +130,7 @@ config BNX2X_SRIOV config BGMAC tristate "BCMA bus GBit core support" - depends on BCMA_HOST_SOC && HAS_DMA + depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX select PHYLIB ---help--- This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index b1bcd4b..8ac48fb 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -948,8 +948,7 @@ static int bcm_enet_open(struct net_device *dev) /* allocate rx dma ring */ size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, - GFP_KERNEL | __GFP_ZERO); + p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); if (!p) { ret = -ENOMEM; goto out_freeirq_tx; @@ -960,8 +959,7 @@ static int bcm_enet_open(struct net_device *dev) /* allocate tx dma ring */ size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, - GFP_KERNEL | __GFP_ZERO); + p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); if (!p) { ret = -ENOMEM; goto out_free_rx_ring; @@ -1747,11 +1745,10 @@ static int bcm_enet_probe(struct platform_device *pdev) if (!bcm_enet_shared_base[0]) return -ENODEV; - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); - if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx) + if (!res_irq || !res_irq_rx || !res_irq_tx) return -ENODEV; ret = 0; @@ -1767,9 +1764,10 @@ static int bcm_enet_probe(struct platform_device *pdev) if (ret) goto out; - priv->base = devm_request_and_ioremap(&pdev->dev, res_mem); - if (priv->base == NULL) { - ret = -ENOMEM; + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res_mem); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); goto out; } @@ -1800,7 +1798,7 @@ static int bcm_enet_probe(struct platform_device *pdev) priv->rx_ring_size = BCMENET_DEF_RX_DESC; priv->tx_ring_size = BCMENET_DEF_TX_DESC; - pd = pdev->dev.platform_data; + pd = dev_get_platdata(&pdev->dev); if (pd) { memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); priv->has_phy = pd->has_phy; @@ -1964,7 +1962,7 @@ static int bcm_enet_remove(struct platform_device *pdev) } else { struct bcm63xx_enet_platform_data *pd; - pd = pdev->dev.platform_data; + pd = dev_get_platdata(&pdev->dev); if (pd && pd->mii_config) pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, bcm_enet_mdio_write_mii); @@ -2742,7 +2740,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev) priv->tx_ring_size = BCMENET_DEF_TX_DESC; priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; - pd = pdev->dev.platform_data; + pd = dev_get_platdata(&pdev->dev); if (pd) { memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); memcpy(priv->used_ports, pd->used_ports, @@ -2836,7 +2834,6 @@ static int bcm_enetsw_remove(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); - platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; } diff --git 
a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 6a2de1d..e838a3f 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -1,6 +1,6 @@ /* bnx2.c: Broadcom NX2 network driver. * - * Copyright (c) 2004-2011 Broadcom Corporation + * Copyright (c) 2004-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -58,8 +58,8 @@ #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" -#define DRV_MODULE_VERSION "2.2.3" -#define DRV_MODULE_RELDATE "June 27, 2012" +#define DRV_MODULE_VERSION "2.2.4" +#define DRV_MODULE_RELDATE "Aug 05, 2013" #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" @@ -853,9 +853,8 @@ bnx2_alloc_mem(struct bnx2 *bp) bp->status_stats_size = status_blk_size + sizeof(struct statistics_block); - status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, - &bp->status_blk_mapping, - GFP_KERNEL | __GFP_ZERO); + status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, + &bp->status_blk_mapping, GFP_KERNEL); if (status_blk == NULL) goto alloc_mem_err; @@ -3908,136 +3907,121 @@ init_cpu_err: return rc; } -static int -bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) +static void +bnx2_setup_wol(struct bnx2 *bp) { - u16 pmcsr; + int i; + u32 val, wol_msg; - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); + if (bp->wol) { + u32 advertising; + u8 autoneg; - switch (state) { - case PCI_D0: { - u32 val; + autoneg = bp->autoneg; + advertising = bp->advertising; - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | - PCI_PM_CTRL_PME_STATUS); + if (bp->phy_port == PORT_TP) { + bp->autoneg = AUTONEG_SPEED; + bp->advertising = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_Autoneg; + } - if (pmcsr & PCI_PM_CTRL_STATE_MASK) - /* delay required during transition out of D3hot */ - msleep(20); + spin_lock_bh(&bp->phy_lock); + bnx2_setup_phy(bp, bp->phy_port); + spin_unlock_bh(&bp->phy_lock); - val = BNX2_RD(bp, BNX2_EMAC_MODE); - val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; - val &= ~BNX2_EMAC_MODE_MPKT; - BNX2_WR(bp, BNX2_EMAC_MODE, val); + bp->autoneg = autoneg; + bp->advertising = advertising; - val = BNX2_RD(bp, BNX2_RPM_CONFIG); - val &= ~BNX2_RPM_CONFIG_ACPI_ENA; - BNX2_WR(bp, BNX2_RPM_CONFIG, val); - break; - } - case PCI_D3hot: { - int i; - u32 val, wol_msg; - - if (bp->wol) { - u32 advertising; - u8 autoneg; - - autoneg = bp->autoneg; - advertising = bp->advertising; - - if (bp->phy_port == PORT_TP) { - bp->autoneg = AUTONEG_SPEED; - bp->advertising = ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_Autoneg; - } + bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); - spin_lock_bh(&bp->phy_lock); - bnx2_setup_phy(bp, bp->phy_port); - spin_unlock_bh(&bp->phy_lock); + val = BNX2_RD(bp, BNX2_EMAC_MODE); - bp->autoneg = autoneg; - bp->advertising = advertising; + /* Enable port mode. 
*/ + val &= ~BNX2_EMAC_MODE_PORT; + val |= BNX2_EMAC_MODE_MPKT_RCVD | + BNX2_EMAC_MODE_ACPI_RCVD | + BNX2_EMAC_MODE_MPKT; + if (bp->phy_port == PORT_TP) { + val |= BNX2_EMAC_MODE_PORT_MII; + } else { + val |= BNX2_EMAC_MODE_PORT_GMII; + if (bp->line_speed == SPEED_2500) + val |= BNX2_EMAC_MODE_25G_MODE; + } - bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); + BNX2_WR(bp, BNX2_EMAC_MODE, val); - val = BNX2_RD(bp, BNX2_EMAC_MODE); + /* receive all multicast */ + for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { + BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + 0xffffffff); + } + BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE); - /* Enable port mode. */ - val &= ~BNX2_EMAC_MODE_PORT; - val |= BNX2_EMAC_MODE_MPKT_RCVD | - BNX2_EMAC_MODE_ACPI_RCVD | - BNX2_EMAC_MODE_MPKT; - if (bp->phy_port == PORT_TP) - val |= BNX2_EMAC_MODE_PORT_MII; - else { - val |= BNX2_EMAC_MODE_PORT_GMII; - if (bp->line_speed == SPEED_2500) - val |= BNX2_EMAC_MODE_25G_MODE; - } + val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN; + BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, val); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA); - BNX2_WR(bp, BNX2_EMAC_MODE, val); + /* Need to enable EMAC and RPM for WOL. */ + BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); - /* receive all multicast */ - for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { - BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), - 0xffffffff); - } - BNX2_WR(bp, BNX2_EMAC_RX_MODE, - BNX2_EMAC_RX_MODE_SORT_MODE); + val = BNX2_RD(bp, BNX2_RPM_CONFIG); + val &= ~BNX2_RPM_CONFIG_ACPI_ENA; + BNX2_WR(bp, BNX2_RPM_CONFIG, val); - val = 1 | BNX2_RPM_SORT_USER0_BC_EN | - BNX2_RPM_SORT_USER0_MC_EN; - BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); - BNX2_WR(bp, BNX2_RPM_SORT_USER0, val); - BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | - BNX2_RPM_SORT_USER0_ENA); + wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; + } else { + wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; + } - /* Need to enable EMAC and RPM for WOL. 
*/ - BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, - BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | - BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | - BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); + if (!(bp->flags & BNX2_FLAG_NO_WOL)) + bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0); - val = BNX2_RD(bp, BNX2_RPM_CONFIG); - val &= ~BNX2_RPM_CONFIG_ACPI_ENA; - BNX2_WR(bp, BNX2_RPM_CONFIG, val); +} - wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; - } - else { - wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; - } +static int +bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) +{ + switch (state) { + case PCI_D0: { + u32 val; + + pci_enable_wake(bp->pdev, PCI_D0, false); + pci_set_power_state(bp->pdev, PCI_D0); - if (!(bp->flags & BNX2_FLAG_NO_WOL)) - bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, - 1, 0); + val = BNX2_RD(bp, BNX2_EMAC_MODE); + val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; + val &= ~BNX2_EMAC_MODE_MPKT; + BNX2_WR(bp, BNX2_EMAC_MODE, val); - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + val = BNX2_RD(bp, BNX2_RPM_CONFIG); + val &= ~BNX2_RPM_CONFIG_ACPI_ENA; + BNX2_WR(bp, BNX2_RPM_CONFIG, val); + break; + } + case PCI_D3hot: { + bnx2_setup_wol(bp); + pci_wake_from_d3(bp->pdev, bp->wol); if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) { if (bp->wol) - pmcsr |= 3; - } - else { - pmcsr |= 3; - } - if (bp->wol) { - pmcsr |= PCI_PM_CTRL_PME_ENABLE; + pci_set_power_state(bp->pdev, PCI_D3hot); + } else { + pci_set_power_state(bp->pdev, PCI_D3hot); } - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - pmcsr); /* No more memory access after this point until * device is brought back to D0. */ - udelay(50); break; } default: @@ -6317,7 +6301,6 @@ bnx2_open(struct net_device *dev) netif_carrier_off(dev); - bnx2_set_power_state(bp, PCI_D0); bnx2_disable_int(bp); rc = bnx2_setup_int_mode(bp, disable_msi); @@ -6724,7 +6707,6 @@ bnx2_close(struct net_device *dev) bnx2_del_napi(bp); bp->link_up = 0; netif_carrier_off(bp->dev); - bnx2_set_power_state(bp, PCI_D3hot); return 0; } @@ -7081,6 +7063,9 @@ bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) else { bp->wol = 0; } + + device_set_wakeup_enable(&bp->pdev->dev, bp->wol); + return 0; } @@ -7156,9 +7141,6 @@ bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, struct bnx2 *bp = netdev_priv(dev); int rc; - if (!netif_running(dev)) - return -EAGAIN; - /* parameters already validated in ethtool_get_eeprom */ rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); @@ -7173,9 +7155,6 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, struct bnx2 *bp = netdev_priv(dev); int rc; - if (!netif_running(dev)) - return -EAGAIN; - /* parameters already validated in ethtool_set_eeprom */ rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); @@ -7535,8 +7514,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) { struct bnx2 *bp = netdev_priv(dev); - bnx2_set_power_state(bp, PCI_D0); - memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS); if (etest->flags & ETH_TEST_FL_OFFLINE) { int i; @@ -7585,8 +7562,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) etest->flags |= ETH_TEST_FL_FAILED; } - if (!netif_running(bp->dev)) - bnx2_set_power_state(bp, PCI_D3hot); } static void @@ -7658,8 +7633,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) switch (state) { case ETHTOOL_ID_ACTIVE: - bnx2_set_power_state(bp, PCI_D0); - bp->leds_save = BNX2_RD(bp, 
BNX2_MISC_CFG); BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); return 1; /* cycle on/off once per second */ @@ -7680,9 +7653,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) case ETHTOOL_ID_INACTIVE: BNX2_WR(bp, BNX2_EMAC_LED, 0); BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save); - - if (!netif_running(dev)) - bnx2_set_power_state(bp, PCI_D3hot); break; } @@ -8130,8 +8100,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) goto err_out_release; } - bnx2_set_power_state(bp, PCI_D0); - /* Configure byte swap and enable write to the reg_window registers. * Rely on CPU to do target byte swapping on big endian systems * The chip's target access swapping will not swap all accesses @@ -8170,13 +8138,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) { - if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) + if (pdev->msix_cap) bp->flags |= BNX2_FLAG_MSIX_CAP; } if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 && BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) { - if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) + if (pdev->msi_cap) bp->flags |= BNX2_FLAG_MSI_CAP; } @@ -8369,6 +8337,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->wol = 0; } + if (bp->flags & BNX2_FLAG_NO_WOL) + device_set_wakeup_capable(&bp->pdev->dev, false); + else + device_set_wakeup_enable(&bp->pdev->dev, bp->wol); + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bp->tx_quick_cons_trip_int = bp->tx_quick_cons_trip; @@ -8609,46 +8582,52 @@ bnx2_remove_one(struct pci_dev *pdev) } static int -bnx2_suspend(struct pci_dev *pdev, pm_message_t state) +bnx2_suspend(struct device *device) { + struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct bnx2 *bp = netdev_priv(dev); - /* PCI register 4 needs to be saved whether netif_running() or not. - * MSI address and data need to be saved if using MSI and - * netif_running(). 
- */ - pci_save_state(pdev); - if (!netif_running(dev)) - return 0; - - cancel_work_sync(&bp->reset_task); - bnx2_netif_stop(bp, true); - netif_device_detach(dev); - del_timer_sync(&bp->timer); - bnx2_shutdown_chip(bp); - bnx2_free_skbs(bp); - bnx2_set_power_state(bp, pci_choose_state(pdev, state)); + if (netif_running(dev)) { + cancel_work_sync(&bp->reset_task); + bnx2_netif_stop(bp, true); + netif_device_detach(dev); + del_timer_sync(&bp->timer); + bnx2_shutdown_chip(bp); + __bnx2_free_irq(bp); + bnx2_free_skbs(bp); + } + bnx2_setup_wol(bp); return 0; } static int -bnx2_resume(struct pci_dev *pdev) +bnx2_resume(struct device *device) { + struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct bnx2 *bp = netdev_priv(dev); - pci_restore_state(pdev); if (!netif_running(dev)) return 0; bnx2_set_power_state(bp, PCI_D0); netif_device_attach(dev); + bnx2_request_irq(bp); bnx2_init_nic(bp, 1); bnx2_netif_start(bp, true); return 0; } +#ifdef CONFIG_PM_SLEEP +static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume); +#define BNX2_PM_OPS (&bnx2_pm_ops) + +#else + +#define BNX2_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ /** * bnx2_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -8694,24 +8673,28 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct bnx2 *bp = netdev_priv(dev); - pci_ers_result_t result; - int err; + pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; + int err = 0; rtnl_lock(); if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset\n"); - result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); - if (netif_running(dev)) { - bnx2_set_power_state(bp, PCI_D0); - bnx2_init_nic(bp, 1); - } - result = PCI_ERS_RESULT_RECOVERED; + if (netif_running(dev)) + err = bnx2_init_nic(bp, 1); + + if (!err) + result = PCI_ERS_RESULT_RECOVERED; + } + + if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) { + bnx2_napi_enable(bp); + dev_close(dev); } rtnl_unlock(); @@ -8748,6 +8731,28 @@ static void bnx2_io_resume(struct pci_dev *pdev) rtnl_unlock(); } +static void bnx2_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp; + + if (!dev) + return; + + bp = netdev_priv(dev); + if (!bp) + return; + + rtnl_lock(); + if (netif_running(dev)) + dev_close(bp->dev); + + if (system_state == SYSTEM_POWER_OFF) + bnx2_set_power_state(bp, PCI_D3hot); + + rtnl_unlock(); +} + static const struct pci_error_handlers bnx2_err_handler = { .error_detected = bnx2_io_error_detected, .slot_reset = bnx2_io_slot_reset, @@ -8759,9 +8764,9 @@ static struct pci_driver bnx2_pci_driver = { .id_table = bnx2_pci_tbl, .probe = bnx2_init_one, .remove = bnx2_remove_one, - .suspend = bnx2_suspend, - .resume = bnx2_resume, + .driver.pm = BNX2_PM_OPS, .err_handler = &bnx2_err_handler, + .shutdown = bnx2_shutdown, }; module_pci_driver(bnx2_pci_driver); diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index 172efbe..18cb2d2 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h @@ -1,6 +1,6 @@ /* bnx2.h: Broadcom NX2 network driver. 
* - * Copyright (c) 2004-2011 Broadcom Corporation + * Copyright (c) 2004-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 00b88cb..0c33802 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -825,15 +825,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ -#define BNX2X_DB_SHIFT 7 /* 128 bytes*/ +#define BNX2X_DB_SHIFT 3 /* 8 bytes*/ #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) #error "Min DB doorbell stride is 8" #endif -#define DPM_TRIGER_TYPE 0x40 #define DOORBELL(bp, cid, val) \ do { \ - writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \ - DPM_TRIGER_TYPE); \ + writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \ } while (0) /* TX CSUM helpers */ @@ -1100,13 +1098,27 @@ struct bnx2x_port { extern struct workqueue_struct *bnx2x_wq; #define BNX2X_MAX_NUM_OF_VFS 64 -#define BNX2X_VF_CID_WND 0 +#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */ #define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) -#define BNX2X_CLIENTS_PER_VF 1 -#define BNX2X_FIRST_VF_CID 256 + +/* We need to reserve doorbell addresses for all VF and queue combinations */ #define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF) + +/* The doorbell is configured to have the same number of CIDs for PFs and for + * VFs. For this reason the PF CID zone is as large as the VF zone. + */ +#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS +#define BNX2X_MAX_NUM_VF_QUEUES 64 #define BNX2X_VF_ID_INVALID 0xFF +/* the number of VF CIDS multiplied by the amount of bytes reserved for each + * cid must not exceed the size of the VF doorbell + */ +#define BNX2X_VF_BAR_SIZE 512 +#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT)) +#error "VF doorbell bar size is 512" +#endif + /* * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is * control by the number of fast-path status blocks supported by the @@ -1331,7 +1343,7 @@ enum { BNX2X_SP_RTNL_ENABLE_SRIOV, BNX2X_SP_RTNL_VFPF_MCAST, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, - BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, + BNX2X_SP_RTNL_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, BNX2X_SP_RTNL_TX_RESUME, @@ -1650,10 +1662,10 @@ struct bnx2x { dma_addr_t fw_stats_data_mapping; int fw_stats_data_sz; - /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB + /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB * context size we need 8 ILT entries. 
*/ -#define ILT_MAX_L2_LINES 8 +#define ILT_MAX_L2_LINES 32 struct hw_context context[ILT_MAX_L2_LINES]; struct bnx2x_ilt *ilt; @@ -1869,7 +1881,7 @@ extern int num_queues; #define FUNC_FLG_TPA 0x0008 #define FUNC_FLG_SPQ 0x0010 #define FUNC_FLG_LEADING 0x0020 /* PF only */ - +#define FUNC_FLG_LEADING_STATS 0x0040 struct bnx2x_func_init_params { /* dma */ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ @@ -2069,9 +2081,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf); -#define BNX2X_ILT_ZALLOC(x, y, size) \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ - GFP_KERNEL | __GFP_ZERO) +#define BNX2X_ILT_ZALLOC(x, y, size) \ + x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) #define BNX2X_ILT_FREE(x, y, size) \ do { \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 0cc2611..2361bf2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1948,7 +1948,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) } } -static int bnx2x_init_rss_pf(struct bnx2x *bp) +static int bnx2x_init_rss(struct bnx2x *bp) { int i; u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); @@ -1972,8 +1972,8 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp) return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); } -int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, - bool config_hash) +int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + bool config_hash, bool enable) { struct bnx2x_config_rss_params params = {NULL}; @@ -1988,17 +1988,21 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); - __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); - - /* RSS configuration */ - __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); - if (rss_obj->udp_rss_v4) - __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); - if (rss_obj->udp_rss_v6) - __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); + if (enable) { + __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); + + /* RSS configuration */ + __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v4) + __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v6) + __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); + } else { + __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); + } /* Hash bits */ params.rss_result_mask = MULTI_MASK; @@ -2007,11 +2011,14 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, if (config_hash) { /* RSS keys */ - prandom_bytes(params.rss_key, sizeof(params.rss_key)); + prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4); __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); } - return bnx2x_config_rss(bp, ¶ms); + if (IS_PF(bp)) + return bnx2x_config_rss(bp, ¶ms); + else + return bnx2x_vfpf_config_rss(bp, ¶ms); } static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) @@ -2066,7 +2073,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp) rparam.mcast_obj = &bp->mcast_obj; __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); - /* Add a DEL command... */ + /* Add a DEL command... 
- Since we're doing a driver cleanup only, + * we take a lock surrounding both the initial send and the CONTs, + * as we don't want a true completion to disrupt us in the middle. + */ + netif_addr_lock_bh(bp->dev); rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); if (rc < 0) BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", @@ -2078,11 +2089,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp) if (rc < 0) { BNX2X_ERR("Failed to clean multi-cast object: %d\n", rc); + netif_addr_unlock_bh(bp->dev); return; } rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); } + netif_addr_unlock_bh(bp->dev); } #ifndef BNX2X_STOP_ON_ERROR @@ -2438,9 +2451,7 @@ int bnx2x_load_cnic(struct bnx2x *bp) } /* Initialize Rx filter. */ - netif_addr_lock_bh(bp->dev); - bnx2x_set_rx_mode(bp->dev); - netif_addr_unlock_bh(bp->dev); + bnx2x_set_rx_mode_inner(bp); /* re-read iscsi info */ bnx2x_get_iscsi_info(bp); @@ -2647,38 +2658,32 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* initialize FW coalescing state machines in RAM */ bnx2x_update_coalesce(bp); + } - /* setup the leading queue */ - rc = bnx2x_setup_leading(bp); - if (rc) { - BNX2X_ERR("Setup leading failed!\n"); - LOAD_ERROR_EXIT(bp, load_error3); - } - - /* set up the rest of the queues */ - for_each_nondefault_eth_queue(bp, i) { - rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); - if (rc) { - BNX2X_ERR("Queue setup failed\n"); - LOAD_ERROR_EXIT(bp, load_error3); - } - } + /* setup the leading queue */ + rc = bnx2x_setup_leading(bp); + if (rc) { + BNX2X_ERR("Setup leading failed!\n"); + LOAD_ERROR_EXIT(bp, load_error3); + } - /* setup rss */ - rc = bnx2x_init_rss_pf(bp); + /* set up the rest of the queues */ + for_each_nondefault_eth_queue(bp, i) { + if (IS_PF(bp)) + rc = bnx2x_setup_queue(bp, &bp->fp[i], false); + else /* VF */ + rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); if (rc) { - BNX2X_ERR("PF RSS init failed\n"); + BNX2X_ERR("Queue %d setup failed\n", i); LOAD_ERROR_EXIT(bp, load_error3); } + } - } else { /* vf */ - for_each_eth_queue(bp, i) { - rc = bnx2x_vfpf_setup_q(bp, i); - if (rc) { - BNX2X_ERR("Queue setup failed\n"); - LOAD_ERROR_EXIT(bp, load_error3); - } - } + /* setup rss */ + rc = bnx2x_init_rss(bp); + if (rc) { + BNX2X_ERR("PF RSS init failed\n"); + LOAD_ERROR_EXIT(bp, load_error3); } /* Now when Clients are configured we are ready to work */ @@ -2710,9 +2715,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Start fast path */ /* Initialize Rx filter. 
*/ - netif_addr_lock_bh(bp->dev); - bnx2x_set_rx_mode(bp->dev); - netif_addr_unlock_bh(bp->dev); + bnx2x_set_rx_mode_inner(bp); /* Start the Tx */ switch (load_mode) { @@ -4789,6 +4792,11 @@ int bnx2x_resume(struct pci_dev *pdev) void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, u32 cid) { + if (!cxt) { + BNX2X_ERR("bad context pointer %p\n", cxt); + return; + } + /* ustorm cxt validation */ cxt->ustorm_ag_context.cdu_usage = CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index c07a6d0..da8fcaa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -51,8 +51,7 @@ extern int int_mode; #define BNX2X_PCI_ALLOC(x, y, size) \ do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ - GFP_KERNEL | __GFP_ZERO); \ + x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ if (x == NULL) \ goto alloc_mem_err; \ DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ @@ -106,9 +105,10 @@ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link); * @rss_obj: RSS object to use * @ind_table: indirection table to configure * @config_hash: re-configure RSS hash keys configuration + * @enable: enabled or disabled configuration */ -int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, - bool config_hash); +int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + bool config_hash, bool enable); /** * bnx2x__init_func_obj - init function object @@ -418,6 +418,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); * netif_addr_lock_bh() */ void bnx2x_set_rx_mode(struct net_device *dev); +void bnx2x_set_rx_mode_inner(struct bnx2x *bp); /** * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. @@ -980,7 +981,7 @@ static inline int func_by_vn(struct bnx2x *bp, int vn) static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash) { - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash); + return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true); } /** diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index c5f2251..2612e3c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3281,14 +3281,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) DP(BNX2X_MSG_ETHTOOL, "rss re-configured, UDP 4-tupple %s\n", udp_rss_requested ? "enabled" : "disabled"); - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); + return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); } else if ((info->flow_type == UDP_V6_FLOW) && (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; DP(BNX2X_MSG_ETHTOOL, "rss re-configured, UDP 4-tupple %s\n", udp_rss_requested ? 
"enabled" : "disabled"); - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); + return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); } return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 9d64b98..6645684 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params, struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; if (vars->line_speed == SPEED_AUTO_NEG && (CHIP_IS_E1x(bp) || - CHIP_IS_E2(bp))) + CHIP_IS_E2(bp))) { bnx2x_set_parallel_detection(phy, params); if (params->phy[INT_PHY].config_init) params->phy[INT_PHY].config_init(phy, params, vars); + } } /* Init external phy*/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 815f2de..634a793 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -6893,7 +6893,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); - REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); + if (!CHIP_REV_IS_SLOW(bp)) /* enable hw interrupt from doorbell Q */ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); @@ -8063,7 +8063,10 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) int bnx2x_setup_leading(struct bnx2x *bp) { - return bnx2x_setup_queue(bp, &bp->fp[0], 1); + if (IS_PF(bp)) + return bnx2x_setup_queue(bp, &bp->fp[0], true); + else /* VF */ + return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); } /** @@ -8077,8 +8080,10 @@ int bnx2x_set_int_mode(struct bnx2x *bp) { int rc = 0; - if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) + if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) { + BNX2X_ERR("VF not loaded since interrupt mode not msix\n"); return -EINVAL; + } switch (int_mode) { case BNX2X_INT_MODE_MSIX: @@ -9647,11 +9652,9 @@ sp_rtnl_not_reset: } } - if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, - &bp->sp_rtnl_state)) { - DP(BNX2X_MSG_SP, - "sending set storm rx mode vf pf channel message from rtnl sp-task\n"); - bnx2x_vfpf_storm_rx_mode(bp); + if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { + DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n"); + bnx2x_set_rx_mode_inner(bp); } if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, @@ -11649,9 +11652,11 @@ static int bnx2x_init_bp(struct bnx2x *bp) * second status block for the L2 queue, and a third status block for * CNIC if supported. 
*/ - if (CNIC_SUPPORT(bp)) + if (IS_VF(bp)) + bp->min_msix_vec_cnt = 1; + else if (CNIC_SUPPORT(bp)) bp->min_msix_vec_cnt = 3; - else + else /* PF w/o cnic */ bp->min_msix_vec_cnt = 2; BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); @@ -11868,34 +11873,48 @@ static int bnx2x_set_mc_list(struct bnx2x *bp) void bnx2x_set_rx_mode(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - u32 rx_mode = BNX2X_RX_MODE_NORMAL; if (bp->state != BNX2X_STATE_OPEN) { DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); return; + } else { + /* Schedule an SP task to handle rest of change */ + DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n"); + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); } +} + +void bnx2x_set_rx_mode_inner(struct bnx2x *bp) +{ + u32 rx_mode = BNX2X_RX_MODE_NORMAL; DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); - if (dev->flags & IFF_PROMISC) + netif_addr_lock_bh(bp->dev); + + if (bp->dev->flags & IFF_PROMISC) { rx_mode = BNX2X_RX_MODE_PROMISC; - else if ((dev->flags & IFF_ALLMULTI) || - ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && - CHIP_IS_E1(bp))) + } else if ((bp->dev->flags & IFF_ALLMULTI) || + ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && + CHIP_IS_E1(bp))) { rx_mode = BNX2X_RX_MODE_ALLMULTI; - else { + } else { if (IS_PF(bp)) { /* some multicasts */ if (bnx2x_set_mc_list(bp) < 0) rx_mode = BNX2X_RX_MODE_ALLMULTI; + /* release bh lock, as bnx2x_set_uc_list might sleep */ + netif_addr_unlock_bh(bp->dev); if (bnx2x_set_uc_list(bp) < 0) rx_mode = BNX2X_RX_MODE_PROMISC; + netif_addr_lock_bh(bp->dev); } else { /* configuring mcast to a vf involves sleeping (when we - * wait for the pf's response). Since this function is - * called from non sleepable context we must schedule - * a work item for this purpose + * wait for the pf's response). */ smp_mb__before_clear_bit(); set_bit(BNX2X_SP_RTNL_VFPF_MCAST, @@ -11913,22 +11932,20 @@ void bnx2x_set_rx_mode(struct net_device *dev) /* Schedule the rx_mode command */ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); + netif_addr_unlock_bh(bp->dev); return; } if (IS_PF(bp)) { bnx2x_set_storm_rx_mode(bp); + netif_addr_unlock_bh(bp->dev); } else { - /* configuring rx mode to storms in a vf involves sleeping (when - * we wait for the pf's response). 
Since this function is - * called from non sleepable context we must schedule - * a work item for this purpose + /* VF will need to request the PF to make this change, and so + * the VF needs to release the bottom-half lock prior to the + * request (as it will likely require sleep on the VF side) */ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); + netif_addr_unlock_bh(bp->dev); + bnx2x_vfpf_storm_rx_mode(bp); } } @@ -12550,19 +12567,16 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp) * @dev: pci device * */ -static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, - int cnic_cnt, bool is_vf) +static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt) { - int pos, index; + int index; u16 control = 0; - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - /* * If MSI-X is not supported - return number of SBs needed to support * one fast path queue: one FP queue + SB for CNIC */ - if (!pos) { + if (!pdev->msix_cap) { dev_info(&pdev->dev, "no msix capability found\n"); return 1 + cnic_cnt; } @@ -12575,11 +12589,11 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, * without the default SB. * For VFs there is no default SB, then we return (index+1). */ - pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); + pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control); index = control & PCI_MSIX_FLAGS_QSIZE; - return is_vf ? index + 1 : index; + return index; } static int set_max_cos_est(int chip_id) @@ -12659,10 +12673,13 @@ static int bnx2x_init_one(struct pci_dev *pdev, is_vf = set_is_vf(ent->driver_data); cnic_cnt = is_vf ? 0 : 1; - max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf); + max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt); + + /* add another SB for VF as it has no default SB */ + max_non_def_sbs += is_vf ? 1 : 0; /* Maximum number of RSS queues: one IGU SB goes to CNIC */ - rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt; + rss_count = max_non_def_sbs - cnic_cnt; if (rss_count < 1) return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 8e627b8..5ecf267 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -6335,6 +6335,7 @@ #define PCI_ID_VAL2 0x438 #define PCI_ID_VAL3 0x43c +#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C #define GRC_CONFIG_REG_PF_INIT_VF 0x624 #define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf /* First VF_NUM for PF is encoded in this register. diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 8f03c98..9fbeee5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -159,16 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending( } } -static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, - struct bnx2x_exe_queue_obj *o) -{ - spin_lock_bh(&o->lock); - - __bnx2x_exe_queue_reset_pending(bp, o); - - spin_unlock_bh(&o->lock); -} - /** * bnx2x_exe_queue_step - execute one execution chunk atomically * @@ -176,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, * @o: queue * @ramrod_flags: flags * - * (Atomicity is ensured using the exe_queue->lock). + * (Should be called while holding the exe_queue->lock). 
*/ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o, @@ -187,8 +177,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, memset(&spacer, 0, sizeof(spacer)); - spin_lock_bh(&o->lock); - /* Next step should not be performed until the current is finished, * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to * properly clear object internals without sending any command to the FW @@ -200,7 +188,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n"); __bnx2x_exe_queue_reset_pending(bp, o); } else { - spin_unlock_bh(&o->lock); return 1; } } @@ -228,10 +215,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, } /* Sanity check */ - if (!cur_len) { - spin_unlock_bh(&o->lock); + if (!cur_len) return 0; - } rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); if (rc < 0) @@ -245,7 +230,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, */ __bnx2x_exe_queue_reset_pending(bp, o); - spin_unlock_bh(&o->lock); return rc; } @@ -432,12 +416,219 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) return true; } +/** + * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details: Non-blocking implementation; should be called under execution + * queue lock. + */ +static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + if (o->head_reader) { + DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n"); + return -EBUSY; + } + + DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n"); + return 0; +} + +/** + * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock; notice it might release + * and reclaim it during its run. + */ +static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int rc; + unsigned long ramrod_flags = o->saved_ramrod_flags; + + DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n", + ramrod_flags); + o->head_exe_request = false; + o->saved_ramrod_flags = 0; + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags); + if (rc != 0) { + BNX2X_ERR("execution of pending commands failed with rc %d\n", + rc); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } +} + +/** + * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run + * + * @bp: device handle + * @o: vlan_mac object + * @ramrod_flags: ramrod flags of missed execution + * + * @details Should be called under execution queue lock. + */ +static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long ramrod_flags) +{ + o->head_exe_request = true; + o->saved_ramrod_flags = ramrod_flags; + DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n", + ramrod_flags); +} + +/** + * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would perform it - possibly releasing and + * reclaiming the execution queue lock. 
+ */ +static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + /* It's possible a new pending execution was added since this writer + * executed. If so, execute again. [Ad infinitum] + */ + while (o->head_exe_request) { + DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n"); + __bnx2x_vlan_mac_h_exec_pending(bp, o); + } +} + +/** + * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would perform it - + * possibly releasing and reclaiming the execution queue lock. + */ +void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + spin_lock_bh(&o->exe_queue.lock); + __bnx2x_vlan_mac_h_write_unlock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); +} + +/** + * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under the execution queue lock. May sleep. May + * release and reclaim execution queue lock during its run. + */ +static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + /* If we got here, we're holding lock --> no WRITER exists */ + o->head_reader++; + DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n", + o->head_reader); + + return 0; +} + +/** + * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details May sleep. Claims and releases execution queue lock during its run. + */ +int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int rc; + + spin_lock_bh(&o->exe_queue.lock); + rc = __bnx2x_vlan_mac_h_read_lock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); + + return rc; +} + +/** + * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would be performed if this was the last + * reader. possibly releasing and reclaiming the execution queue lock. + */ +static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + if (!o->head_reader) { + BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n"); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } else { + o->head_reader--; + DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n", + o->head_reader); + } + + /* It's possible a new pending execution was added, and that this reader + * was last - if so we need to execute the command. + */ + if (!o->head_reader && o->head_exe_request) { + DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n"); + + /* Writer release will do the trick */ + __bnx2x_vlan_mac_h_write_unlock(bp, o); + } +} + +/** + * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would be performed if this + * was the last reader. Claims and releases the execution queue lock + * during its run. 
+ */ +void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + spin_lock_bh(&o->exe_queue.lock); + __bnx2x_vlan_mac_h_read_unlock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); +} + static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, int n, u8 *base, u8 stride, u8 size) { struct bnx2x_vlan_mac_registry_elem *pos; u8 *next = base; int counter = 0; + int read_lock; + + DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n"); + read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); + if (read_lock != 0) + BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n"); /* traverse list */ list_for_each_entry(pos, &o->head, link) { @@ -449,6 +640,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, next += stride + size; } } + + if (read_lock == 0) { + DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); + } + return counter * ETH_ALEN; } @@ -1397,6 +1594,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp, return -EBUSY; } +static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long *ramrod_flags) +{ + int rc = 0; + + spin_lock_bh(&o->exe_queue.lock); + + DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n"); + rc = __bnx2x_vlan_mac_h_write_trylock(bp, o); + + if (rc != 0) { + __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags); + + /* Calling function should not diffrentiate between this case + * and the case in which there is already a pending ramrod + */ + rc = 1; + } else { + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + } + spin_unlock_bh(&o->exe_queue.lock); + + return rc; +} + /** * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod * @@ -1414,19 +1637,27 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp, struct bnx2x_raw_obj *r = &o->raw; int rc; + /* Clearing the pending list & raw state should be made + * atomically (as execution flow assumes they represent the same). 
+ */ + spin_lock_bh(&o->exe_queue.lock); + /* Reset pending list */ - bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); + __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); /* Clear pending */ r->clear_pending(r); + spin_unlock_bh(&o->exe_queue.lock); + /* If ramrod failed this is most likely a SW bug */ if (cqe->message.error) return -EINVAL; /* Run the next bulk of pending commands if requested */ if (test_bit(RAMROD_CONT, ramrod_flags)) { - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags); + if (rc < 0) return rc; } @@ -1719,9 +1950,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd( * @p: * */ -int bnx2x_config_vlan_mac( - struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p) +int bnx2x_config_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p) { int rc = 0; struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; @@ -1752,7 +1982,8 @@ int bnx2x_config_vlan_mac( /* Execute commands if required */ if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj, + &p->ramrod_flags); if (rc < 0) return rc; } @@ -1775,8 +2006,9 @@ int bnx2x_config_vlan_mac( return rc; /* Make a next step */ - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, - ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, + p->vlan_mac_obj, + &p->ramrod_flags); if (rc < 0) return rc; } @@ -1806,10 +2038,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, unsigned long *ramrod_flags) { struct bnx2x_vlan_mac_registry_elem *pos = NULL; - int rc = 0; struct bnx2x_vlan_mac_ramrod_params p; struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; + int read_lock; + int rc = 0; /* Clear pending commands first */ @@ -1844,6 +2077,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, __clear_bit(RAMROD_EXEC, &p.ramrod_flags); __clear_bit(RAMROD_CONT, &p.ramrod_flags); + DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n"); + read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); + if (read_lock != 0) + return read_lock; + list_for_each_entry(pos, &o->head, link) { if (pos->vlan_mac_flags == *vlan_mac_flags) { p.user_req.vlan_mac_flags = pos->vlan_mac_flags; @@ -1851,11 +2089,15 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, rc = bnx2x_config_vlan_mac(bp, &p); if (rc < 0) { BNX2X_ERR("Failed to add a new DEL command\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); return rc; } } } + DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); + p.ramrod_flags = *ramrod_flags; __set_bit(RAMROD_CONT, &p.ramrod_flags); @@ -1887,6 +2129,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, struct bnx2x_credit_pool_obj *vlans_pool) { INIT_LIST_HEAD(&o->head); + o->head_reader = 0; + o->head_exe_request = false; + o->saved_ramrod_flags = 0; o->macs_pool = macs_pool; o->vlans_pool = vlans_pool; @@ -4171,6 +4416,16 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp, rss_obj->config_rss = bnx2x_setup_rss; } +int validate_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac) +{ + if (!vlan_mac->get_n_elements) { + BNX2X_ERR("vlan mac object was not intialized\n"); + return -EINVAL; + } + return 0; +} + /********************** Queue state object ***********************************/ /** diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 798dfe9..658f4e3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -285,6 +285,12 @@ struct bnx2x_vlan_mac_obj { * entries. */ struct list_head head; + /* Implement a simple reader/writer lock on the head list. + * all these fields should only be accessed under the exe_queue lock + */ + u8 head_reader; /* Num. of readers accessing head list */ + bool head_exe_request; /* Pending execution request. */ + unsigned long saved_ramrod_flags; /* Ramrods of pending execution */ /* TODO: Add it's initialization in the init functions */ struct bnx2x_exe_queue_obj exe_queue; @@ -1302,8 +1308,16 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, struct bnx2x_credit_pool_obj *macs_pool, struct bnx2x_credit_pool_obj *vlans_pool); +int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); int bnx2x_config_vlan_mac(struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p); + struct bnx2x_vlan_mac_ramrod_params *p); int bnx2x_vlan_mac_move(struct bnx2x *bp, struct bnx2x_vlan_mac_ramrod_params *p, @@ -1393,4 +1407,6 @@ int bnx2x_config_rss(struct bnx2x *bp, void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, u8 *ind_table); +int validate_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac); #endif /* BNX2X_SP_VERBS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index e8706e1..b26eb83 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -170,6 +170,11 @@ enum bnx2x_vfop_qteardown_state { BNX2X_VFOP_QTEARDOWN_DONE }; +enum bnx2x_vfop_rss_state { + BNX2X_VFOP_RSS_CONFIG, + BNX2X_VFOP_RSS_DONE +}; + #define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, @@ -265,11 +270,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); - if (vfq_is_leading(q)) { - __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags); - __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags); - } - /* Setup-op rx parameters */ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; @@ -398,7 +398,11 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf) BNX2X_Q_LOGICAL_STATE_STOPPED) { DP(BNX2X_MSG_IOV, "Entered qdtor but queue was already stopped. 
Aborting gracefully\n"); - goto op_done; + + /* next state */ + vfop->state = BNX2X_VFOP_QDTOR_DONE; + + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); } /* next state */ @@ -432,8 +436,10 @@ op_err: op_done: case BNX2X_VFOP_QDTOR_DONE: /* invalidate the context */ - qdtor->cxt->ustorm_ag_context.cdu_usage = 0; - qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; + if (qdtor->cxt) { + qdtor->cxt->ustorm_ag_context.cdu_usage = 0; + qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; + } bnx2x_vfop_end(bp, vf, vfop); return; default: @@ -465,7 +471,8 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, cmd->block); } - DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid); + DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n", + vf->abs_vfid, vfop->rc); return -ENOMEM; } @@ -474,10 +481,18 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) { struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); if (vf) { + /* the first igu entry belonging to VFs of this PF */ + if (!BP_VFDB(bp)->first_vf_igu_entry) + BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; + + /* the first igu entry belonging to this VF */ if (!vf_sb_count(vf)) vf->igu_base_id = igu_sb_id; + ++vf_sb_count(vf); + ++vf->sb_count; } + BP_VFDB(bp)->vf_sbs_pool++; } /* VFOP MAC/VLAN helpers */ @@ -491,12 +506,20 @@ static inline void bnx2x_vfop_credit(struct bnx2x *bp, * and a valid credit counter */ if (!vfop->rc && args->credit) { - int cnt = 0; struct list_head *pos; + int read_lock; + int cnt = 0; + + read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); + if (read_lock) + DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); list_for_each(pos, &obj->head) cnt++; + if (!read_lock) + bnx2x_vlan_mac_h_read_unlock(bp, obj); + atomic_set(args->credit, cnt); } } @@ -692,6 +715,7 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -711,6 +735,9 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); /* set extra args */ @@ -731,6 +758,7 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -753,6 +781,9 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); /* set extra args */ @@ -773,6 +804,7 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, int qid, u16 vid, bool add) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -793,6 +825,9 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, ramrod->user_req.u.vlan.vlan = vid; /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); /* set extra args */ @@ -812,6 +847,7 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ 
-831,6 +867,9 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); /* set extra args */ @@ -851,6 +890,7 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -870,6 +910,9 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); /* set extra args */ @@ -980,21 +1023,25 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf) case BNX2X_VFOP_QFLR_CLR_VLAN: /* vlan-clear-all: driver-only, don't consume credit */ vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; - vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true); + if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) + vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, + true); if (vfop->rc) goto op_err; - return; + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); case BNX2X_VFOP_QFLR_CLR_MAC: /* mac-clear-all: driver only consume credit */ vfop->state = BNX2X_VFOP_QFLR_TERMINATE; - vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true); + if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) + vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, + true); DP(BNX2X_MSG_IOV, "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d", vf->abs_vfid, vfop->rc); if (vfop->rc) goto op_err; - return; + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); case BNX2X_VFOP_QFLR_TERMINATE: qstate = &vfop->op_p->qctor.qstate; @@ -1291,10 +1338,13 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + /* for non leading queues skip directly to qdown state */ if (vfop) { vfop->args.qx.qid = qid; - bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE, - bnx2x_vfop_qdown, cmd->done); + bnx2x_vfop_opset(qid == LEADING_IDX ? + BNX2X_VFOP_QTEARDOWN_RXMODE : + BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown, + cmd->done); return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown, cmd->block); } @@ -1447,15 +1497,16 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) * both known */ static void -bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc) +bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) { + struct vf_pf_resc_request *resc = &vf->alloc_resc; u16 vlan_count = 0; /* will be set only during VF-ACQUIRE */ resc->num_rxqs = 0; resc->num_txqs = 0; - /* no credit calculcis for macs (just yet) */ + /* no credit calculations for macs (just yet) */ resc->num_mac_filters = 1; /* divvy up vlan rules */ @@ -1467,13 +1518,14 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc) resc->num_mc_filters = 0; /* num_sbs already set */ + resc->num_sbs = vf->sb_count; } /* FLR routines: */ static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) { /* reset the state variables */ - bnx2x_iov_static_resc(bp, &vf->alloc_resc); + bnx2x_iov_static_resc(bp, vf); vf->state = VF_FREE; } @@ -1693,8 +1745,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp) /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match * the Pf doorbell size although the 2 are independent.
*/ - REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, - BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT); + REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); /* No security checks for now - * configure single rule (out of 16) mask = 0x1, value = 0x0, @@ -1761,7 +1812,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) { int sb_id; u32 val; - u8 fid; + u8 fid, current_pf = 0; /* IGU in normal mode - read CAM */ for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { @@ -1769,16 +1820,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) continue; fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); - if (!(fid & IGU_FID_ENCODE_IS_PF)) + if (fid & IGU_FID_ENCODE_IS_PF) + current_pf = fid & IGU_FID_PF_NUM_MASK; + else if (current_pf == BP_ABS_FUNC(bp)) bnx2x_vf_set_igu_info(bp, sb_id, (fid & IGU_FID_VF_NUM_MASK)); - DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) : (fid & IGU_FID_VF_NUM_MASK)), sb_id, GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); } + DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); } static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) @@ -1844,23 +1897,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) return 0; } -static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp) -{ - int i; - u8 queue_count = 0; - - if (IS_SRIOV(bp)) - for_each_vf(bp, i) - queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs); - - return queue_count; -} - /* must be called after PF bars are mapped */ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, - int num_vfs_param) + int num_vfs_param) { - int err, i, qcount; + int err, i; struct bnx2x_sriov *iov; struct pci_dev *dev = bp->pdev; @@ -1958,12 +1999,13 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ bnx2x_get_vf_igu_cam_info(bp); - /* get the total queue count and allocate the global queue arrays */ - qcount = bnx2x_iov_get_max_queue_count(bp); - /* allocate the queue arrays for all VFs */ - bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue), - GFP_KERNEL); + bp->vfdb->vfqs = kzalloc( + BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), + GFP_KERNEL); + + DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); + if (!bp->vfdb->vfqs) { BNX2X_ERR("failed to allocate vf queue array\n"); err = -ENOMEM; @@ -2084,49 +2126,14 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, q_type); DP(BNX2X_MSG_IOV, - "initialized vf %d's queue object. 
func id set to %d\n", - vf->abs_vfid, q->sp_obj.func_id); - - /* mac/vlan objects are per queue, but only those - * that belong to the leading queue are initialized - */ - if (vfq_is_leading(q)) { - /* mac */ - bnx2x_init_mac_obj(bp, &q->mac_obj, - cl_id, q->cid, func_id, - bnx2x_vf_sp(bp, vf, mac_rdata), - bnx2x_vf_sp_map(bp, vf, mac_rdata), - BNX2X_FILTER_MAC_PENDING, - &vf->filter_state, - BNX2X_OBJ_TYPE_RX_TX, - &bp->macs_pool); - /* vlan */ - bnx2x_init_vlan_obj(bp, &q->vlan_obj, - cl_id, q->cid, func_id, - bnx2x_vf_sp(bp, vf, vlan_rdata), - bnx2x_vf_sp_map(bp, vf, vlan_rdata), - BNX2X_FILTER_VLAN_PENDING, - &vf->filter_state, - BNX2X_OBJ_TYPE_RX_TX, - &bp->vlans_pool); - - /* mcast */ - bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, - q->cid, func_id, func_id, - bnx2x_vf_sp(bp, vf, mcast_rdata), - bnx2x_vf_sp_map(bp, vf, mcast_rdata), - BNX2X_FILTER_MCAST_PENDING, - &vf->filter_state, - BNX2X_OBJ_TYPE_RX_TX); - - vf->leading_rss = cl_id; - } + "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", + vf->abs_vfid, q->sp_obj.func_id, q->cid); } /* called by bnx2x_nic_load */ int bnx2x_iov_nic_init(struct bnx2x *bp) { - int vfid, qcount, i; + int vfid; if (!IS_SRIOV(bp)) { DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); @@ -2155,7 +2162,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); /* init statically provisioned resources */ - bnx2x_iov_static_resc(bp, &vf->alloc_resc); + bnx2x_iov_static_resc(bp, vf); /* queues are initialized during VF-ACQUIRE */ @@ -2191,13 +2198,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) } /* Final VF init */ - qcount = 0; - for_each_vf(bp, i) { - struct bnx2x_virtf *vf = BP_VF(bp, i); + for_each_vf(bp, vfid) { + struct bnx2x_virtf *vf = BP_VF(bp, vfid); /* fill in the BDF and bars */ - vf->bus = bnx2x_vf_bus(bp, i); - vf->devfn = bnx2x_vf_devfn(bp, i); + vf->bus = bnx2x_vf_bus(bp, vfid); + vf->devfn = bnx2x_vf_devfn(bp, vfid); bnx2x_vf_set_bars(bp, vf); DP(BNX2X_MSG_IOV, @@ -2206,10 +2212,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) (unsigned)vf->bars[0].bar, vf->bars[0].size, (unsigned)vf->bars[1].bar, vf->bars[1].size, (unsigned)vf->bars[2].bar, vf->bars[2].size); - - /* set local queue arrays */ - vf->vfqs = &bp->vfdb->vfqs[qcount]; - qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs); } return 0; @@ -2515,6 +2517,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) for_each_vfq(vf, j) { struct bnx2x_vf_queue *rxq = vfq_get(vf, j); + dma_addr_t q_stats_addr = + vf->fw_stat_map + j * vf->stats_stride; + /* collect stats from active queues only */ if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == BNX2X_Q_LOGICAL_STATE_STOPPED) @@ -2522,13 +2527,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) /* create stats query entry for this queue */ cur_query_entry->kind = STATS_TYPE_QUEUE; - cur_query_entry->index = vfq_cl_id(vf, rxq); + cur_query_entry->index = vfq_stat_id(vf, rxq); cur_query_entry->funcID = cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); cur_query_entry->address.hi = - cpu_to_le32(U64_HI(vf->fw_stat_map)); + cpu_to_le32(U64_HI(q_stats_addr)); cur_query_entry->address.lo = - cpu_to_le32(U64_LO(vf->fw_stat_map)); + cpu_to_le32(U64_LO(q_stats_addr)); DP(BNX2X_MSG_IOV, "added address %x %x for vf %d queue %d client %d\n", cur_query_entry->address.hi, @@ -2537,6 +2542,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) cur_query_entry++; cur_data_offset += sizeof(struct per_queue_stats); stats_count++; + + /* all stats are coalesced to the leading queue */ + if (vf->cfg_flags &
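/* coalesced mode: the single leading-queue entry covers all of this VF's queues */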
VF_CFG_STATS_COALESCE) + break; } } bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; @@ -2555,6 +2564,11 @@ void bnx2x_iov_sp_task(struct bnx2x *bp) for_each_vf(bp, i) { struct bnx2x_virtf *vf = BP_VF(bp, i); + if (!vf) { + BNX2X_ERR("VF was null! skipping...\n"); + continue; + } + if (!list_empty(&vf->op_list_head) && atomic_read(&vf->op_in_progress)) { DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); @@ -2702,7 +2716,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q = vfq_get(vf, i); if (!q) { - DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i); + BNX2X_ERR("q number %d was not allocated\n", i); return -EINVAL; } @@ -2930,6 +2944,43 @@ op_done: bnx2x_vfop_end(bp, vf, vfop); } +static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + enum bnx2x_vfop_rss_state state; + + if (!vfop) { + BNX2X_ERR("vfop was null\n"); + return; + } + + state = vfop->state; + bnx2x_vfop_reset_wq(vf); + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + switch (state) { + case BNX2X_VFOP_RSS_CONFIG: + /* next state */ + vfop->state = BNX2X_VFOP_RSS_DONE; + bnx2x_config_rss(bp, &vfop->op_p->rss); + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); +op_err: + BNX2X_ERR("RSS error: rc %d\n", vfop->rc); +op_done: + case BNX2X_VFOP_RSS_DONE: + bnx2x_vfop_end(bp, vf, vfop); + return; + default: + bnx2x_vfop_default(state); + } +op_pending: + return; +} + int bnx2x_vfop_release_cmd(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vfop_cmd *cmd) @@ -2944,6 +2995,21 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp, return -ENOMEM; } +int bnx2x_vfop_rss_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, + cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, + cmd->block); + } + return -ENOMEM; +} + /* VF release ~ VF close + VF release-resources * Release is the ultimate SW shutdown and is called whenever an * irrecoverable error is encountered. @@ -2955,6 +3021,8 @@ void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) .block = block, }; int rc; + + DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); @@ -2983,6 +3051,12 @@ static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs tlv) { + /* we don't lock the channel for unsupported tlvs */ + if (!bnx2x_tlv_supported(tlv)) { + BNX2X_ERR("attempting to lock with unsupported tlv. 
Aborting\n"); + return; + } + /* lock the channel */ mutex_lock(&vf->op_mutex); @@ -2997,19 +3071,32 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs expected_tlv) { + enum channel_tlvs current_tlv; + + if (!vf) { + BNX2X_ERR("VF was %p\n", vf); + return; + } + + current_tlv = vf->op_current; + + /* we don't unlock the channel for unsupported tlvs */ + if (!bnx2x_tlv_supported(expected_tlv)) + return; + WARN(expected_tlv != vf->op_current, "lock mismatch: expected %d found %d", expected_tlv, vf->op_current); + /* record the locking op */ + vf->op_current = CHANNEL_TLV_NONE; + /* lock the channel */ mutex_unlock(&vf->op_mutex); /* log the unlock */ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", vf->abs_vfid, vf->op_current); - - /* record the locking op */ - vf->op_current = CHANNEL_TLV_NONE; } int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) @@ -3040,11 +3127,77 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) return bnx2x_enable_sriov(bp); } } +#define IGU_ENTRY_SIZE 4 int bnx2x_enable_sriov(struct bnx2x *bp) { int rc = 0, req_vfs = bp->requested_nr_virtfn; + int vf_idx, sb_idx, vfq_idx, qcount, first_vf; + u32 igu_entry, address; + u16 num_vf_queues; + + if (req_vfs == 0) + return 0; + + first_vf = bp->vfdb->sriov.first_vf_in_pf; + + /* statically distribute vf sb pool between VFs */ + num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, + BP_VFDB(bp)->vf_sbs_pool / req_vfs); + + /* zero previous values learned from igu cam */ + for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + vf->sb_count = 0; + vf_sb_count(BP_VF(bp, vf_idx)) = 0; + } + bp->vfdb->vf_sbs_pool = 0; + + /* prepare IGU cam */ + sb_idx = BP_VFDB(bp)->first_vf_igu_entry; + address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; + for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { + for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { + igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | + vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | + IGU_REG_MAPPING_MEMORY_VALID; + DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", + sb_idx, vf_idx); + REG_WR(bp, address, igu_entry); + sb_idx++; + address += IGU_ENTRY_SIZE; + } + } + + /* Reinitialize vf database according to igu cam */ + bnx2x_get_vf_igu_cam_info(bp); + + DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", + BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); + + qcount = 0; + for_each_vf(bp, vf_idx) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + /* set local queue arrays */ + vf->vfqs = &bp->vfdb->vfqs[qcount]; + qcount += vf_sb_count(vf); + } + /* prepare msix vectors in VF configuration space */ + for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); + REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, + num_vf_queues); + } + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + /* enable sriov. This will probe all the VFs, and consequentially cause + * the "acquire" messages to appear on the VF PF channel. 
+ */ + DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); + pci_disable_sriov(bp->pdev); rc = pci_enable_sriov(bp->pdev, req_vfs); if (rc) { BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); @@ -3072,9 +3225,8 @@ void bnx2x_disable_sriov(struct bnx2x *bp) pci_disable_sriov(bp->pdev); } -static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, - struct bnx2x_virtf **vf, - struct pf_vf_bulletin_content **bulletin) +int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf, + struct pf_vf_bulletin_content **bulletin) { if (bp->state != BNX2X_STATE_OPEN) { BNX2X_ERR("vf ndo called though PF is down\n"); @@ -3097,7 +3249,13 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, *bulletin = BP_VF_BULLETIN(bp, vfidx); if (!*vf) { - BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", + BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", + vfidx); + return -EINVAL; + } + + if (!(*vf)->vfqs) { + BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", vfidx); return -EINVAL; } @@ -3125,8 +3283,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); if (rc) return rc; - mac_obj = &bnx2x_vfq(vf, 0, mac_obj); - vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); + mac_obj = &bnx2x_leading_vfq(vf, mac_obj); + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); if (!mac_obj || !vlan_obj) { BNX2X_ERR("VF partially initialized\n"); return -EINVAL; @@ -3138,10 +3296,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, ivi->spoofchk = 1; /*always enabled */ if (vf->state == VF_ENABLED) { /* mac and vlan are in vlan_mac objects */ - mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, - 0, ETH_ALEN); - vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan, - 0, VLAN_HLEN); + if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj))) + mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, + 0, ETH_ALEN); + if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj))) + vlan_obj->get_n_elements(bp, vlan_obj, 1, + (u8 *)&ivi->vlan, 0, + VLAN_HLEN); } else { /* mac */ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) @@ -3209,14 +3370,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) return rc; } - /* is vf initialized and queue set up? */ q_logical_state = - bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); if (vf->state == VF_ENABLED && q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { /* configure the mac in device on this vf's queue */ unsigned long ramrod_flags = 0; - struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); + struct bnx2x_vlan_mac_obj *mac_obj = + &bnx2x_leading_vfq(vf, mac_obj); + + rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); + if (rc) + return rc; /* must lock vfpf channel to protect against vf flows */ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); @@ -3276,18 +3441,21 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) /* is vf initialized and queue set up? 
*/ q_logical_state = - bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); if (vf->state == VF_ENABLED && q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { /* configure the vlan in device on this vf's queue */ unsigned long ramrod_flags = 0; unsigned long vlan_mac_flags = 0; struct bnx2x_vlan_mac_obj *vlan_obj = - &bnx2x_vfq(vf, 0, vlan_obj); + &bnx2x_leading_vfq(vf, vlan_obj); struct bnx2x_vlan_mac_ramrod_params ramrod_param; struct bnx2x_queue_state_params q_params = {NULL}; struct bnx2x_queue_update_params *update_params; + rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)); + if (rc) + return rc; memset(&ramrod_param, 0, sizeof(ramrod_param)); /* must lock vfpf channel to protect against vf flows */ @@ -3307,7 +3475,7 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); q_params.cmd = BNX2X_Q_CMD_UPDATE; - q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj); + q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); update_params = &q_params.params.update; __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, &update_params->update_flags); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index d143a7c..2a8c1dc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -81,6 +81,7 @@ struct bnx2x_vf_queue { u32 cid; u16 index; u16 sb_idx; + bool is_leading; }; /* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: @@ -194,6 +195,7 @@ struct bnx2x_virtf { #define VF_CFG_INT_SIMD 0x0008 #define VF_CACHE_LINE 0x0010 #define VF_CFG_VLAN 0x0020 +#define VF_CFG_STATS_COALESCE 0x0040 u8 state; #define VF_FREE 0 /* VF ready to be acquired holds no resc */ @@ -213,6 +215,7 @@ struct bnx2x_virtf { /* dma */ dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ + u16 stats_stride; dma_addr_t spq_map; dma_addr_t bulletin_map; @@ -239,7 +242,10 @@ struct bnx2x_virtf { u8 igu_base_id; /* base igu status block id */ struct bnx2x_vf_queue *vfqs; -#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) +#define LEADING_IDX 0 +#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX) +#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) +#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var) u8 index; /* index in the vf array */ u8 abs_vfid; @@ -358,6 +364,10 @@ struct bnx2x_vf_sp { struct client_init_ramrod_data init_data; struct client_update_ramrod_data update_data; } q_data; + + union { + struct eth_rss_update_ramrod_data e2; + } rss_rdata; }; struct hw_dma { @@ -403,6 +413,10 @@ struct bnx2x_vfdb { #define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32) u32 flrd_vfs[FLRD_VFS_DWORDS]; + + /* the number of msix vectors belonging to this PF designated for VFs */ + u16 vf_sbs_pool; + u16 first_vf_igu_entry; }; /* queue access */ @@ -411,11 +425,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index) { return &(vf->vfqs[index]); } -static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq) -{ - return (vfq->index == 0); -} - /* FW ids */ static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) { @@ -434,7 +443,10 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) { - return vfq_cl_id(vf, q); + if (vf->cfg_flags & VF_CFG_STATS_COALESCE) + return vf->leading_rss; + else + return
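/* per-queue stats mode: each queue reports via its own client id */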
vfq_cl_id(vf, q); } static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) @@ -691,6 +703,10 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vfop_cmd *cmd); +int bnx2x_vfop_rss_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd); + /* VF release ~ VF close + VF release-resources * * Release is the ultimate SW shutdown and is called whenever an @@ -730,9 +746,12 @@ int bnx2x_vfpf_release(struct bnx2x *bp); int bnx2x_vfpf_release(struct bnx2x *bp); int bnx2x_vfpf_init(struct bnx2x *bp); void bnx2x_vfpf_close_vf(struct bnx2x *bp); -int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx); +int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool is_leading); int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); +int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params); int bnx2x_vfpf_set_mcast(struct net_device *dev); int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); @@ -758,7 +777,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp); void bnx2x_disable_sriov(struct bnx2x *bp); static inline int bnx2x_vf_headroom(struct bnx2x *bp) { - return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF; + return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF; } void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); @@ -793,7 +812,7 @@ static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} -static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; } +static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; } static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) {return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 2088063..6cfb887 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -257,17 +257,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) /* humble our request */ req->resc_request.num_txqs = - bp->acquire_resp.resc.num_txqs; + min(req->resc_request.num_txqs, + bp->acquire_resp.resc.num_txqs); req->resc_request.num_rxqs = - bp->acquire_resp.resc.num_rxqs; + min(req->resc_request.num_rxqs, + bp->acquire_resp.resc.num_rxqs); req->resc_request.num_sbs = - bp->acquire_resp.resc.num_sbs; + min(req->resc_request.num_sbs, + bp->acquire_resp.resc.num_sbs); req->resc_request.num_mac_filters = - bp->acquire_resp.resc.num_mac_filters; + min(req->resc_request.num_mac_filters, + bp->acquire_resp.resc.num_mac_filters); req->resc_request.num_vlan_filters = - bp->acquire_resp.resc.num_vlan_filters; + min(req->resc_request.num_vlan_filters, + bp->acquire_resp.resc.num_vlan_filters); req->resc_request.num_mc_filters = - bp->acquire_resp.resc.num_mc_filters; + min(req->resc_request.num_mc_filters, + bp->acquire_resp.resc.num_mc_filters); /* Clear response buffer */ memset(&bp->vf2pf_mbox->resp, 0, @@ -293,7 +299,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) bp->common.flash_size = 0; bp->flags |= 
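/* a VF exposes no WoL and no storage (iSCSI/FCoE) capabilities */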
NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG; - bp->igu_sb_cnt = 1; + bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs; bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, sizeof(bp->fw_ver)); @@ -373,6 +379,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp) req->stats_addr = bp->fw_stats_data_mapping + offsetof(struct bnx2x_fw_stats_data, queue_stats); + req->stats_stride = sizeof(struct per_queue_stats); + /* add list termination tlv */ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -452,12 +460,60 @@ free_irq: bnx2x_free_irq(bp); } +static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q) +{ + u8 cl_id = vfq_cl_id(vf, q); + u8 func_id = FW_VF_HANDLE(vf->abs_vfid); + + /* mac */ + bnx2x_init_mac_obj(bp, &q->mac_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, mac_rdata), + bnx2x_vf_sp_map(bp, vf, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->macs_pool); + /* vlan */ + bnx2x_init_vlan_obj(bp, &q->vlan_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, vlan_rdata), + bnx2x_vf_sp_map(bp, vf, vlan_rdata), + BNX2X_FILTER_VLAN_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->vlans_pool); + + /* mcast */ + bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, + q->cid, func_id, func_id, + bnx2x_vf_sp(bp, vf, mcast_rdata), + bnx2x_vf_sp_map(bp, vf, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + /* rss */ + bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid, + func_id, func_id, + bnx2x_vf_sp(bp, vf, rss_rdata), + bnx2x_vf_sp_map(bp, vf, rss_rdata), + BNX2X_FILTER_RSS_CONF_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + vf->leading_rss = cl_id; + q->is_leading = true; +} + /* ask the pf to open a queue for the vf */ -int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) +int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool is_leading) { struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q; struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; - struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; + u8 fp_idx = fp->index; u16 tpa_agg_size = 0, flags = 0; int rc; @@ -473,6 +529,9 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) tpa_agg_size = TPA_AGG_SIZE; } + if (is_leading) + flags |= VFPF_QUEUE_FLG_LEADING_RSS; + /* calculate queue flags */ flags |= VFPF_QUEUE_FLG_STATS; flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; @@ -646,6 +705,71 @@ out: return 0; } +/* request pf to config rss table for vf queues */ +int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params) +{ + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss; + int rc = 0; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS, + sizeof(*req)); + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key)); + req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; + req->rss_key_size = T_ETH_RSS_KEY; + req->rss_result_mask = params->rss_result_mask; + + /* flags handled individually for backward/forward compatibility
*/ + if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED)) + req->rss_flags |= VFPF_RSS_MODE_DISABLED; + if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR)) + req->rss_flags |= VFPF_RSS_MODE_REGULAR; + if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH)) + req->rss_flags |= VFPF_RSS_SET_SRCH; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4)) + req->rss_flags |= VFPF_RSS_IPV4; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP)) + req->rss_flags |= VFPF_RSS_IPV4_TCP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP)) + req->rss_flags |= VFPF_RSS_IPV4_UDP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6)) + req->rss_flags |= VFPF_RSS_IPV6; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP)) + req->rss_flags |= VFPF_RSS_IPV6_TCP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP)) + req->rss_flags |= VFPF_RSS_IPV6_UDP; + + DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); + goto out; + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n", + resp->hdr.status); + rc = -EINVAL; + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + int bnx2x_vfpf_set_mcast(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); @@ -948,7 +1072,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, /* fill in pfdev info */ resp->pfdev_info.chip_num = bp->common.chip_id; - resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT); + resp->pfdev_info.db_size = bp->db_size; resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); @@ -1054,8 +1178,13 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, /* record ghost addresses from vf message */ vf->spq_map = init->spq_addr; vf->fw_stat_map = init->stats_addr; + vf->stats_stride = init->stats_stride; vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); + /* set VF multiqueue statistics collection mode */ + if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) + vf->cfg_flags |= VF_CFG_STATS_COALESCE; + /* response */ bnx2x_vf_mbx_resp(bp, vf); } @@ -1080,6 +1209,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, __set_bit(BNX2X_Q_FLG_HC, sp_q_flags); if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS) + __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags); /* outer vlan removal is set according to PF's multi function mode */ if (IS_MF_SD(bp)) @@ -1113,6 +1244,9 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_p; struct bnx2x_queue_setup_params *setup_p; + if (bnx2x_vfq_is_leading(q)) + bnx2x_leading_vfq_init(bp, vf, q); + /* re-init the VF operation context */ memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); setup_p = &vf->op_params.qctor.prep_qsetup; @@ -1552,6 +1686,68 @@ static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_resp(bp, vf); } +static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vf_mbx_resp, + .block = false, + }; + struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss; +
struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; + + if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE || + rss_tlv->rss_key_size != T_ETH_RSS_KEY) { + BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n", + vf->index); + vf->op_rc = -EINVAL; + goto mbx_resp; + } + + /* set vfop params according to rss tlv */ + memcpy(vf_op_params->ind_table, rss_tlv->ind_table, + T_ETH_INDIRECTION_TABLE_SIZE); + memcpy(vf_op_params->rss_key, rss_tlv->rss_key, + sizeof(rss_tlv->rss_key)); + vf_op_params->rss_obj = &vf->rss_conf_obj; + vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; + + /* flags handled individually for backward/forward compatibility */ + if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) + __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) + __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH) + __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4) + __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) + __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) + __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6) + __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) + __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP) + __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags); + + if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) && + rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) || + (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) && + rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) { + BNX2X_ERR("about to hit a FW assert. aborting...\n"); + vf->op_rc = -EINVAL; + goto mbx_resp; + } + + vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd); + +mbx_resp: + if (vf->op_rc) + bnx2x_vf_mbx_resp(bp, vf); +} + /* dispatch request */ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) @@ -1588,6 +1784,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, case CHANNEL_TLV_RELEASE: bnx2x_vf_mbx_release_vf(bp, vf, mbx); break; + case CHANNEL_TLV_UPDATE_RSS: + bnx2x_vf_mbx_update_rss(bp, vf, mbx); + break; } } else { @@ -1607,7 +1806,7 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, /* test whether we can respond to the VF (do we have an address * for it?)
*/ - if (vf->state == VF_ACQUIRED) { + if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { /* mbx_resp uses the op_rc of the VF */ vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index f3ad174..1179fe0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -51,6 +51,7 @@ struct hw_sb_info { #define VFPF_QUEUE_FLG_COS 0x0080 #define VFPF_QUEUE_FLG_HC 0x0100 #define VFPF_QUEUE_FLG_DHC 0x0200 +#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400 #define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0) #define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1) @@ -131,6 +132,27 @@ struct vfpf_q_op_tlv { u8 padding[3]; }; +/* receive side scaling tlv */ +struct vfpf_rss_tlv { + struct vfpf_first_tlv first_tlv; + u32 rss_flags; +#define VFPF_RSS_MODE_DISABLED (1 << 0) +#define VFPF_RSS_MODE_REGULAR (1 << 1) +#define VFPF_RSS_SET_SRCH (1 << 2) +#define VFPF_RSS_IPV4 (1 << 3) +#define VFPF_RSS_IPV4_TCP (1 << 4) +#define VFPF_RSS_IPV4_UDP (1 << 5) +#define VFPF_RSS_IPV6 (1 << 6) +#define VFPF_RSS_IPV6_TCP (1 << 7) +#define VFPF_RSS_IPV6_UDP (1 << 8) + u8 rss_result_mask; + u8 ind_table_size; + u8 rss_key_size; + u8 padding; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + u32 rss_key[T_ETH_RSS_KEY]; /* hash values */ +}; + /* acquire response tlv - carries the allocated resources */ struct pfvf_acquire_resp_tlv { struct pfvf_tlv hdr; @@ -166,12 +188,20 @@ struct pfvf_acquire_resp_tlv { } resc; }; +#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues + * stats will be coalesced on + * the leading RSS queue + */ + /* Init VF */ struct vfpf_init_tlv { struct vfpf_first_tlv first_tlv; aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */ aligned_u64 spq_addr; aligned_u64 stats_addr; + u16 stats_stride; + u32 flags; + u32 padding[2]; }; /* Setup Queue */ @@ -293,13 +323,14 @@ union vfpf_tlvs { struct vfpf_q_op_tlv q_op; struct vfpf_setup_q_tlv setup_q; struct vfpf_set_q_filters_tlv set_q_filters; - struct vfpf_release_tlv release; - struct channel_list_end_tlv list_end; + struct vfpf_release_tlv release; + struct vfpf_rss_tlv update_rss; + struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; }; union pfvf_tlvs { - struct pfvf_general_resp_tlv general_resp; + struct pfvf_general_resp_tlv general_resp; struct pfvf_acquire_resp_tlv acquire_resp; struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; @@ -355,14 +386,18 @@ enum channel_tlvs { CHANNEL_TLV_INIT, CHANNEL_TLV_SETUP_Q, CHANNEL_TLV_SET_Q_FILTERS, + CHANNEL_TLV_ACTIVATE_Q, + CHANNEL_TLV_DEACTIVATE_Q, CHANNEL_TLV_TEARDOWN_Q, CHANNEL_TLV_CLOSE, CHANNEL_TLV_RELEASE, + CHANNEL_TLV_UPDATE_RSS_DEPRECATED, CHANNEL_TLV_PF_RELEASE_VF, CHANNEL_TLV_LIST_END, CHANNEL_TLV_FLR, CHANNEL_TLV_PF_SET_MAC, CHANNEL_TLV_PF_SET_VLAN, + CHANNEL_TLV_UPDATE_RSS, CHANNEL_TLV_MAX }; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index d78d4cf..8142480 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1,6 +1,6 @@ /* cnic.c: Broadcom CNIC core network driver. 
* - * Copyright (c) 2006-2012 Broadcom Corporation + * Copyright (c) 2006-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1184,6 +1184,7 @@ error: static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); int ctx_blk_size = cp->ethdev->ctx_blk_size; int total_mem, blks, i; @@ -1201,7 +1202,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) cp->ctx_blks = blks; cp->ctx_blk_size = ctx_blk_size; - if (!BNX2X_CHIP_IS_57710(cp->chip_id)) + if (!CHIP_IS_E1(bp)) cp->ctx_align = 0; else cp->ctx_align = ctx_blk_size; @@ -1231,6 +1232,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_eth_dev *ethdev = cp->ethdev; u32 start_cid = ethdev->starting_cid; int i, j, n, ret, pages; @@ -1240,7 +1242,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) cp->iscsi_start_cid = start_cid; cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cp->max_cid_space += dev->max_fcoe_conn; cp->fcoe_init_cid = ethdev->fcoe_init_cid; if (!cp->fcoe_init_cid) @@ -1288,7 +1290,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) if (ret) goto error; - if (CNIC_SUPPORTS_FCOE(cp)) { + if (CNIC_SUPPORTS_FCOE(bp)) { ret = cnic_alloc_kcq(dev, &cp->kcq2, true); if (ret) goto error; @@ -1382,6 +1384,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, u32 type, union l5cm_specific_data *l5_data) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct l5cm_spe kwqe; struct kwqe_16 *kwq[1]; u16 type_16; @@ -1389,10 +1392,10 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, kwqe.hdr.conn_and_cmd_data = cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | - BNX2X_HW_CID(cp, cid))); + BNX2X_HW_CID(bp, cid))); type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; - type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & + type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & SPE_HDR_FUNCTION_ID; kwqe.hdr.type = cpu_to_le16(type_16); @@ -1427,13 +1430,34 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, rcu_read_unlock(); } +static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps, + int en_tcp_dack) +{ + struct bnx2x *bp = netdev_priv(dev->netdev); + u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; + u16 tstorm_flags = 0; + + if (time_stamps) { + xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; + tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; + } + if (en_tcp_dack) + tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN; + + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags); + + CNIC_WR16(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags); +} + static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) { struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; int hq_bds, pages; - u32 pfid = cp->pfid; + u32 pfid = bp->pfid; cp->num_iscsi_tasks = req1->num_tasks_per_conn; cp->num_ccells = req1->num_ccells_per_conn; @@ -1506,15 +1530,18 @@ 
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), hq_bds); + cnic_bnx2x_set_tcp_options(dev, + req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE, + req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE); + return 0; } static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) { struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; - struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); - u32 pfid = cp->pfid; + u32 pfid = bp->pfid; struct iscsi_kcqe kcqe; struct kcqe *cqes[1]; @@ -1653,6 +1680,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], u32 num) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct iscsi_kwqe_conn_offload1 *req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0]; struct iscsi_kwqe_conn_offload2 *req2 = @@ -1661,11 +1689,11 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; struct cnic_iscsi *iscsi = ctx->proto.iscsi; u32 cid = ctx->cid; - u32 hw_cid = BNX2X_HW_CID(cp, cid); + u32 hw_cid = BNX2X_HW_CID(bp, cid); struct iscsi_context *ictx; struct regpair context_addr; int i, j, n = 2, n_max; - u8 port = CNIC_PORT(cp); + u8 port = BP_PORT(bp); ctx->ctx_flags = 0; if (!req2->num_additional_wqes) @@ -1719,8 +1747,8 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; ictx->xstorm_st_context.common.ethernet.reserved_vlan_type = ETH_P_8021Q; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && - cp->port_mode == CHIP_2_PORT_MODE) { + if (BNX2X_CHIP_IS_E2_PLUS(bp) && + bp->common.chip_port_mode == CHIP_2_PORT_MODE) { port = 0; } @@ -1841,6 +1869,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], struct iscsi_kwqe_conn_offload1 *req1; struct iscsi_kwqe_conn_offload2 *req2; struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_context *ctx; struct iscsi_kcqe kcqe; struct kcqe *cqes[1]; @@ -1894,7 +1923,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], } kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; - kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid); + kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid); done: cqes[0] = (struct kcqe *) &kcqe; @@ -1930,6 +1959,7 @@ static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; union l5cm_specific_data l5_data; int ret; @@ -1938,7 +1968,7 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) init_waitqueue_head(&ctx->waitq); ctx->wait_cond = 0; memset(&l5_data, 0, sizeof(l5_data)); - hw_cid = BNX2X_HW_CID(cp, ctx->cid); + hw_cid = BNX2X_HW_CID(bp, ctx->cid); ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, hw_cid, NONE_CONNECTION_TYPE, &l5_data); @@ -2035,9 +2065,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, xstorm_buf->pseudo_header_checksum = swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); - if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK)) - tstorm_buf->params |= - L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE; if 
(kwqe3->ka_timeout) { tstorm_buf->ka_enable = 1; tstorm_buf->ka_timeout = kwqe3->ka_timeout; @@ -2049,9 +2076,8 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, static void cnic_init_bnx2x_mac(struct cnic_dev *dev) { - struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); - u32 pfid = cp->pfid; + u32 pfid = bp->pfid; u8 *mac = dev->mac_addr; CNIC_WR8(dev, BAR_XSTRORM_INTMEM + @@ -2084,25 +2110,6 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev) mac[0]); } -static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) -{ - struct cnic_local *cp = dev->cnic_priv; - struct bnx2x *bp = netdev_priv(dev->netdev); - u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; - u16 tstorm_flags = 0; - - if (tcp_ts) { - xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; - tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; - } - - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags); - - CNIC_WR16(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags); -} - static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], u32 num, int *work) { @@ -2176,10 +2183,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); CNIC_WR16(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); - - cnic_bnx2x_set_tcp_timestamp(dev, - kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); + XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id); ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); @@ -2248,11 +2252,12 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe) struct fcoe_stat_ramrod_params *fcoe_stat; union l5cm_specific_data l5_data; struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); int ret; u32 cid; req = (struct fcoe_kwqe_stat *) kwqe; - cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); + cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); if (!fcoe_stat) @@ -2271,6 +2276,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], { int ret; struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); u32 cid; struct fcoe_init_ramrod_params *fcoe_init; struct fcoe_kwqe_init1 *req1; @@ -2315,7 +2321,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS; cp->kcq2.sw_prod_idx = 0; - cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); + cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid, FCOE_CONNECTION_TYPE, &l5_data); *work = 3; @@ -2328,6 +2334,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], int ret = 0; u32 cid = -1, l5_cid; struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct fcoe_kwqe_conn_offload1 *req1; struct fcoe_kwqe_conn_offload2 *req2; struct fcoe_kwqe_conn_offload3 *req3; @@ -2370,7 +2377,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr); if (fctx) { - u32 hw_cid = BNX2X_HW_CID(cp, cid); + u32 hw_cid = BNX2X_HW_CID(bp, cid); u32 val; val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, @@ -2394,7 +2401,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev 
*dev, struct kwqe *wqes[], memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3)); memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4)); - cid = BNX2X_HW_CID(cp, cid); + cid = BNX2X_HW_CID(bp, cid); ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid, FCOE_CONNECTION_TYPE, &l5_data); if (!ret) @@ -2552,13 +2559,14 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) struct fcoe_kwqe_destroy *req; union l5cm_specific_data l5_data; struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); int ret; u32 cid; cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); req = (struct fcoe_kwqe_destroy *) kwqe; - cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); + cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); memset(&l5_data, 0, sizeof(l5_data)); ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid, @@ -2715,7 +2723,7 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], u32 num_wqes) { - struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); int i, work, ret; u32 opcode; struct kwqe *kwqe; @@ -2723,7 +2731,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) return -EAGAIN; /* bnx2 is down */ - if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) + if (!BNX2X_CHIP_IS_E2_PLUS(bp)) return -EINVAL; for (i = 0; i < num_wqes; ) { @@ -3039,8 +3047,8 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance) static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, u16 index, u8 op, u8 update) { - struct cnic_local *cp = dev->cnic_priv; - u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + + struct bnx2x *bp = netdev_priv(dev->netdev); + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 + COMMAND_REG_INT_ACK); struct igu_ack_register igu_ack; @@ -3603,6 +3611,7 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, csk1->rcv_buf = DEF_RCV_BUF; csk1->snd_buf = DEF_SND_BUF; csk1->seed = DEF_SEED; + csk1->tcp_flags = 0; *csk = csk1; return 0; @@ -4020,15 +4029,18 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) cnic_cm_upcall(cp, csk, opcode); break; - case L5CM_RAMROD_CMD_ID_CLOSE: - if (l4kcqe->status != 0) { - netdev_warn(dev->netdev, "RAMROD CLOSE compl with " - "status 0x%x\n", l4kcqe->status); + case L5CM_RAMROD_CMD_ID_CLOSE: { + struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe; + + if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) { + netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n", + l4kcqe->status, l5kcqe->completion_status); opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; /* Fall through */ } else { break; } + } case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: case L4_KCQE_OPCODE_VALUE_RESET_COMP: @@ -4213,13 +4225,12 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) { - struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); - u32 pfid = cp->pfid; - u32 port = CNIC_PORT(cp); + u32 pfid = bp->pfid; + u32 port = BP_PORT(bp); cnic_init_bnx2x_mac(dev); - cnic_bnx2x_set_tcp_timestamp(dev, 1); + cnic_bnx2x_set_tcp_options(dev, 0, 1); CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); @@ -4897,6 +4908,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, struct 
client_init_ramrod_data *data) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_uio_dev *udev = cp->udev; union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring; dma_addr_t buf_map, ring_map = udev->l2_ring_map; @@ -4925,7 +4937,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS; start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) + if (BNX2X_CHIP_IS_E2_PLUS(bp)) pbd_e2->parsing_data = (UNICAST_ADDRESS << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); else @@ -4962,6 +4974,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, struct client_init_ramrod_data *data) { struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_uio_dev *udev = cp->udev; struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + BNX2_PAGE_SIZE); @@ -4970,7 +4983,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; int i; u32 cli = cp->ethdev->iscsi_l2_client_id; - int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); + int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); u32 val; dma_addr_t ring_map = udev->l2_ring_map; @@ -4979,7 +4992,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, data->general.activate_flg = 1; data->general.sp_client_id = cli; data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); - data->general.func_id = cp->pfid; + data->general.func_id = bp->pfid; for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { dma_addr_t buf_map; @@ -5029,13 +5042,13 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); - u32 pfid = cp->pfid; + u32 pfid = bp->pfid; cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); cp->kcq1.sw_prod_idx = 0; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { struct host_hc_status_block_e2 *sb = cp->status_blk.gen; cp->kcq1.hw_prod_idx_ptr = @@ -5051,7 +5064,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) &sb->sb.running_index[SM_RX_ID]; } - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { struct host_hc_status_block_e2 *sb = cp->status_blk.gen; cp->kcq2.io_addr = BAR_USTRORM_INTMEM + @@ -5073,12 +5086,10 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) u32 pfid; dev->stats_addr = ethdev->addr_drv_info_to_mcp; - cp->port_mode = bp->common.chip_port_mode; - cp->pfid = bp->pfid; cp->func = bp->pf_num; func = CNIC_FUNC(cp); - pfid = cp->pfid; + pfid = bp->pfid; ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, cp->iscsi_start_cid, 0); @@ -5086,7 +5097,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) if (ret) return -ENOMEM; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn, cp->fcoe_start_cid, 0); @@ -5168,12 +5179,12 @@ static void cnic_init_rings(struct cnic_dev *dev) rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; barrier(); - cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); + cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); off = BAR_USTRORM_INTMEM + - (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? + (BNX2X_CHIP_IS_E2_PLUS(bp) ? 
USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : - USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); + USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli)); for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); @@ -5271,6 +5282,13 @@ static int cnic_register_netdev(struct cnic_dev *dev) if (err) netdev_err(dev->netdev, "register_cnic failed\n"); + /* Read iSCSI config again. On some bnx2x device, iSCSI config + * can change after firmware is downloaded. + */ + dev->max_iscsi_conn = ethdev->max_iscsi_conn; + if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) + dev->max_iscsi_conn = 0; + return err; } @@ -5353,7 +5371,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) cnic_free_irq(dev); - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { idx_off = offsetof(struct hc_status_block_e2, index_values) + (hc_index * sizeof(u16)); @@ -5370,7 +5388,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) *cp->kcq1.hw_prod_idx_ptr = 0; CNIC_WR(dev, BAR_CSTRORM_INTMEM + - CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); + CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0); CNIC_WR16(dev, cp->kcq1.io_addr, 0); cnic_free_resc(dev); } @@ -5544,7 +5562,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) cdev->max_iscsi_conn = ethdev->max_iscsi_conn; - if (CNIC_SUPPORTS_FCOE(cp)) { + if (CNIC_SUPPORTS_FCOE(bp)) { cdev->max_fcoe_conn = ethdev->max_fcoe_conn; cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges; } @@ -5564,7 +5582,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) cp->stop_cm = cnic_cm_stop_bnx2x_hw; cp->enable_int = cnic_enable_bnx2x_int; cp->disable_int_sync = cnic_disable_bnx2x_int_sync; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cp->ack_int = cnic_ack_bnx2x_e2_msix; cp->arm_int = cnic_arm_bnx2x_e2_msix; } else { @@ -5628,7 +5646,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, dev = cnic_from_netdev(netdev); - if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) { + if (!dev && event == NETDEV_REGISTER) { /* Check for the hot-plug device */ dev = is_cnic_dev(netdev); if (dev) { @@ -5644,7 +5662,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, else if (event == NETDEV_UNREGISTER) cnic_ulp_exit(dev); - if (event == NETDEV_UP || (new_dev && netif_running(netdev))) { + if (event == NETDEV_UP) { if (cnic_register_netdev(dev) != 0) { cnic_put(dev); goto done; @@ -5693,21 +5711,8 @@ static struct notifier_block cnic_netdev_notifier = { static void cnic_release(void) { - struct cnic_dev *dev; struct cnic_uio_dev *udev; - while (!list_empty(&cnic_dev_list)) { - dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); - if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { - cnic_ulp_stop(dev); - cnic_stop_hw(dev); - } - - cnic_ulp_exit(dev); - cnic_unregister_netdev(dev); - list_del_init(&dev->list); - cnic_free_dev(dev); - } while (!list_empty(&cnic_udev_list)) { udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, list); diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 62c6706..0121a5d 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -1,6 +1,6 @@ /* cnic.h: Broadcom CNIC core network driver. 
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 62c6706..0121a5d 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
 /* cnic.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -303,8 +303,6 @@ struct cnic_local {
 
 	u32 chip_id;
 	int func;
-	u32 pfid;
-	u8 port_mode;
 
 	u32 shmem_base;
 
@@ -364,47 +362,7 @@ struct bnx2x_bd_chain_next {
 
 #define BNX2X_FCOE_L5_CID_BASE		MAX_ISCSI_TBL_SZ
 
-#define BNX2X_CHIP_NUM_57710		0x164e
-#define BNX2X_CHIP_NUM_57711		0x164f
-#define BNX2X_CHIP_NUM_57711E		0x1650
-#define BNX2X_CHIP_NUM_57712		0x1662
-#define BNX2X_CHIP_NUM_57712E		0x1663
-#define BNX2X_CHIP_NUM_57713		0x1651
-#define BNX2X_CHIP_NUM_57713E		0x1652
-#define BNX2X_CHIP_NUM_57800		0x168a
-#define BNX2X_CHIP_NUM_57810		0x168e
-#define BNX2X_CHIP_NUM_57840		0x168d
-
-#define BNX2X_CHIP_NUM(x)		(x >> 16)
-#define BNX2X_CHIP_IS_57710(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
-#define BNX2X_CHIP_IS_57711(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
-#define BNX2X_CHIP_IS_57711E(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
-#define BNX2X_CHIP_IS_E1H(x)		\
-	(BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
-#define BNX2X_CHIP_IS_57712(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
-#define BNX2X_CHIP_IS_57712E(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
-#define BNX2X_CHIP_IS_57713(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
-#define BNX2X_CHIP_IS_57713E(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
-#define BNX2X_CHIP_IS_57800(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
-#define BNX2X_CHIP_IS_57810(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
-#define BNX2X_CHIP_IS_57840(x)		\
-	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
-#define BNX2X_CHIP_IS_E2(x)		\
-	(BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
-	 BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
-#define BNX2X_CHIP_IS_E3(x)		\
-	(BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
-	 BNX2X_CHIP_IS_57840(x))
-#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
+#define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
 
 #define BNX2X_RX_DESC_CNT		(BNX2_PAGE_SIZE / \
 					 sizeof(struct eth_rx_bd))
@@ -439,31 +397,26 @@ struct bnx2x_bd_chain_next {
 #define ETH_MAX_RX_CLIENTS_E2		ETH_MAX_RX_CLIENTS_E1H
 #endif
 
-#define CNIC_PORT(cp)			((cp)->pfid & 1)
 #define CNIC_FUNC(cp)			((cp)->func)
-#define CNIC_PATH(cp)			(!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
-					 0 : (CNIC_FUNC(cp) & 1))
-#define CNIC_E1HVN(cp)			((cp)->pfid >> 1)
 
-#define BNX2X_HW_CID(cp, x)		((CNIC_PORT(cp) << 23) | \
-					 (CNIC_E1HVN(cp) << 17) | (x))
+#define BNX2X_HW_CID(bp, x)		((BP_PORT(bp) << 23) | \
+					 (BP_VN(bp) << 17) | (x))
 
 #define BNX2X_SW_CID(x)			(x & 0x1ffff)
 
-#define BNX2X_CL_QZONE_ID(cp, cli)				\
-		(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli :	\
-		 cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+#define BNX2X_CL_QZONE_ID(bp, cli)				\
+		(BNX2X_CHIP_IS_E2_PLUS(bp) ? cli :		\
+		 cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H))
 
 #ifndef MAX_STAT_COUNTER_ID
 #define MAX_STAT_COUNTER_ID					\
-	(BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H :	\
-	 ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
+	(CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H :		\
+	 ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 :\
 	  MAX_STAT_COUNTER_ID_E1))
 #endif
 
-#define CNIC_SUPPORTS_FCOE(cp)					\
-	(BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) &&		\
-	 !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+#define CNIC_SUPPORTS_FCOE(cp)					\
+	(BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp))
 
 #define CNIC_RAMROD_TMO			(HZ / 4)
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index ede3db3..95a8e4b 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -5400,8 +5400,8 @@ struct tstorm_l5cm_tcp_flags {
 	u16 flags;
 #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID		(0xFFF<<0)
 #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT	0
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0		(0x1<<12)
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT	12
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN	(0x1<<12)
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT	12
 #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED	(0x1<<13)
 #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT	13
 #define TSTORM_L5CM_TCP_FLAGS_RSRV1		(0x3<<14)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ec9bb9a..0658b43 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
 /* cnic_if.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
 
 #include "bnx2x/bnx2x_mfw_req.h"
 
-#define CNIC_MODULE_VERSION	"2.5.16"
-#define CNIC_MODULE_RELDATE	"Dec 05, 2012"
+#define CNIC_MODULE_VERSION	"2.5.18"
+#define CNIC_MODULE_RELDATE	"Sept 01, 2013"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -238,8 +238,8 @@ struct cnic_sock {
 	u16 src_port;
 	u16 dst_port;
 	u16 vlan_id;
-	unsigned char old_ha[6];
-	unsigned char ha[6];
+	unsigned char old_ha[ETH_ALEN];
+	unsigned char ha[ETH_ALEN];
 	u32 mtu;
 	u32 cid;
 	u32 l5_cid;
@@ -308,7 +308,7 @@ struct cnic_dev {
 #define CNIC_F_BNX2_CLASS	3
 #define CNIC_F_BNX2X_CLASS	4
 	atomic_t ref_count;
-	u8 mac_addr[6];
+	u8 mac_addr[ETH_ALEN];
 	int max_iscsi_conn;
 	int max_fcoe_conn;
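The cnic_if.h hunks above replace hard-coded 6-byte MAC arrays with ETH_ALEN. A small self-contained sketch of why that matters: the array size and the copy length come from a single symbol and cannot drift apart. In the kernel ETH_ALEN comes from <linux/if_ether.h>; it is spelled out here so the example builds on its own.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6	/* bytes in an Ethernet MAC address */

int main(void)
{
	unsigned char old_ha[ETH_ALEN];
	const unsigned char ha[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };

	memcpy(old_ha, ha, ETH_ALEN);	/* size and length share one definition */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       old_ha[0], old_ha[1], old_ha[2],
	       old_ha[3], old_ha[4], old_ha[5]);
	return 0;
}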
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0da2214..5701f3d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM		3
-#define TG3_MIN_NUM		132
+#define TG3_MIN_NUM		133
 #define DRV_MODULE_VERSION	\
 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"May 21, 2013"
+#define DRV_MODULE_RELDATE	"Jul 29, 2013"
 
 #define RESET_KIND_SHUTDOWN	0
 #define RESET_KIND_INIT		1
@@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
 	return false;
 }
 
+static bool tg3_phy_led_bug(struct tg3 *tp)
+{
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5719:
+		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+		    !tp->pci_fn)
+			return true;
+		return false;
+	}
+
+	return false;
+}
+
 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 {
 	u32 val;
@@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 		}
 		return;
 	} else if (do_low_power) {
-		tg3_writephy(tp, MII_TG3_EXT_CTRL,
-			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+		if (!tg3_phy_led_bug(tp))
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
 
 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
@@ -4226,8 +4240,6 @@ static int tg3_power_down_prepare(struct tg3 *tp)
 
 static void tg3_power_down(struct tg3 *tp)
 {
-	tg3_power_down_prepare(tp);
-
 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
 	pci_set_power_state(tp->pdev, PCI_D3hot);
 }
@@ -6095,10 +6107,12 @@ static u64 tg3_refclk_read(struct tg3 *tp)
 /* tp->lock must be held */
 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
 {
-	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
+	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+
+	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
-	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
+	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
 }
 
 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
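tg3_refclk_write() above switches from writing absolute values into TG3_EAV_REF_CLCK_CTL to a read-modify-write, so bits that other code may have set in the same register (such as the new TSYNC watchdog/GPIO controls) survive a clock update. A self-contained sketch of the same read-modify-write idea against a fake register; the register and bit names here are illustrative only.

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg = 0x00020000;	/* pretend a TSYNC bit (bit 17) is set */

static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; }

#define CTL_STOP   0x00000002u
#define CTL_RESUME 0x00000004u

static void refclk_write_sketch(void)
{
	uint32_t ctl = reg_read();	/* capture unrelated bits first */

	reg_write(ctl | CTL_STOP);	/* stop the clock, TSYNC bits preserved */
	/* ... the new counter value would be loaded here ... */
	reg_write(ctl | CTL_RESUME);	/* resume, TSYNC bits still intact */
}

int main(void)
{
	refclk_write_sketch();
	printf("reg = 0x%08x (bit 17 survived)\n", (unsigned int)fake_reg);
	return 0;
}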
@@ -6214,6 +6228,59 @@ static int tg3_ptp_settime(struct ptp_clock_info *ptp,
 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
 			  struct ptp_clock_request *rq, int on)
 {
+	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+	u32 clock_ctl;
+	int rval = 0;
+
+	switch (rq->type) {
+	case PTP_CLK_REQ_PEROUT:
+		if (rq->perout.index != 0)
+			return -EINVAL;
+
+		tg3_full_lock(tp, 0);
+		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
+
+		if (on) {
+			u64 nsec;
+
+			nsec = rq->perout.start.sec * 1000000000ULL +
+			       rq->perout.start.nsec;
+
+			if (rq->perout.period.sec || rq->perout.period.nsec) {
+				netdev_warn(tp->dev,
+					    "Device supports only a one-shot timesync output, period must be 0\n");
+				rval = -EINVAL;
+				goto err_out;
+			}
+
+			if (nsec & (1ULL << 63)) {
+				netdev_warn(tp->dev,
+					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
+				rval = -EINVAL;
+				goto err_out;
+			}
+
+			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
+			tw32(TG3_EAV_WATCHDOG0_MSB,
+			     TG3_EAV_WATCHDOG0_EN |
+			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
+
+			tw32(TG3_EAV_REF_CLCK_CTL,
+			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
+		} else {
+			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
+			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
+		}
+
+err_out:
+		tg3_full_unlock(tp);
+		return rval;
+
+	default:
+		break;
+	}
+
 	return -EOPNOTSUPP;
 }
 
@@ -6223,7 +6290,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
 	.max_adj	= 250000000,
 	.n_alarm	= 0,
 	.n_ext_ts	= 0,
-	.n_per_out	= 0,
+	.n_per_out	= 1,
 	.pps		= 0,
 	.adjfreq	= tg3_ptp_adjfreq,
 	.adjtime	= tg3_ptp_adjtime,
@@ -8538,10 +8605,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
 		if (!i && tg3_flag(tp, ENABLE_RSS))
 			continue;
 
-		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
-						   TG3_RX_RCB_RING_BYTES(tp),
-						   &tnapi->rx_rcb_mapping,
-						   GFP_KERNEL | __GFP_ZERO);
+		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
+						    TG3_RX_RCB_RING_BYTES(tp),
+						    &tnapi->rx_rcb_mapping,
+						    GFP_KERNEL);
 		if (!tnapi->rx_rcb)
 			goto err_out;
 	}
@@ -8590,10 +8657,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 {
 	int i;
 
-	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
-					  sizeof(struct tg3_hw_stats),
-					  &tp->stats_mapping,
-					  GFP_KERNEL | __GFP_ZERO);
+	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
+					   sizeof(struct tg3_hw_stats),
+					   &tp->stats_mapping, GFP_KERNEL);
 	if (!tp->hw_stats)
 		goto err_out;
 
@@ -8601,10 +8667,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 		struct tg3_napi *tnapi = &tp->napi[i];
 		struct tg3_hw_status *sblk;
 
-		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
-						      TG3_HW_STATUS_SIZE,
-						      &tnapi->status_mapping,
-						      GFP_KERNEL | __GFP_ZERO);
+		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
+						       TG3_HW_STATUS_SIZE,
+						       &tnapi->status_mapping,
+						       GFP_KERNEL);
 		if (!tnapi->hw_status)
 			goto err_out;
 
@@ -10367,6 +10433,9 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
 	if (tg3_flag(tp, 5755_PLUS))
 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
 
+	if (tg3_asic_rev(tp) == ASIC_REV_5762)
+		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
+
 	if (tg3_flag(tp, ENABLE_RSS))
 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
@@ -11502,7 +11571,7 @@ static int tg3_close(struct net_device *dev)
 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
 
-	tg3_power_down(tp);
+	tg3_power_down_prepare(tp);
 
 	tg3_carrier_off(tp);
 
@@ -11724,9 +11793,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 	if (tg3_flag(tp, NO_NVRAM))
 		return -EINVAL;
 
-	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
-		return -EAGAIN;
-
 	offset = eeprom->offset;
 	len = eeprom->len;
 	eeprom->len = 0;
@@ -11784,9 +11850,6 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 	u8 *buf;
 	__be32 start, end;
 
-	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
-		return -EAGAIN;
-
 	if (tg3_flag(tp, NO_NVRAM) ||
 	    eeprom->magic != TG3_EEPROM_MAGIC)
 		return -EINVAL;
@@ -13515,7 +13578,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 			tg3_phy_start(tp);
 	}
 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
-		tg3_power_down(tp);
+		tg3_power_down_prepare(tp);
 
 }
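Several hunks above fold GFP_KERNEL | __GFP_ZERO into dma_zalloc_coherent(), which in this era of the kernel is a thin wrapper that hands back a zeroed buffer from dma_alloc_coherent(). The shape is the classic "zeroing wrapper": one helper owns the zero-fill so callers cannot forget it. A user-space sketch of the same shape, with malloc standing in for the DMA allocator:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *alloc_ring(size_t size)	/* stand-in for dma_alloc_coherent */
{
	return malloc(size);
}

static void *zalloc_ring(size_t size)	/* stand-in for dma_zalloc_coherent */
{
	void *p = alloc_ring(size);

	if (p)
		memset(p, 0, size);	/* the zero-fill lives in one place */
	return p;
}

int main(void)
{
	unsigned char *ring = zalloc_ring(64);

	if (!ring)
		return 1;
	printf("first byte: %u\n", ring[0]);	/* always 0 */
	free(ring);
	return 0;
}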
@@ -15917,7 +15980,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
 	 */
 	if (tg3_flag(tp, 5780_CLASS)) {
 		tg3_flag_set(tp, 40BIT_DMA_BUG);
-		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
+		tp->msi_cap = tp->pdev->msi_cap;
 	} else {
 		struct pci_dev *bridge = NULL;
@@ -17547,11 +17610,6 @@ static int tg3_init_one(struct pci_dev *pdev,
 	    tg3_asic_rev(tp) == ASIC_REV_5762)
 		tg3_flag_set(tp, PTP_CAPABLE);
 
-	if (tg3_flag(tp, 5717_PLUS)) {
-		/* Resume a low-power mode */
-		tg3_frob_aux_power(tp, false);
-	}
-
 	tg3_timer_init(tp);
 
 	tg3_carrier_off(tp);
@@ -17755,6 +17813,23 @@ out:
 
 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
 
+static void tg3_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(dev);
+
+	rtnl_lock();
+	netif_device_detach(dev);
+
+	if (netif_running(dev))
+		dev_close(dev);
+
+	if (system_state == SYSTEM_POWER_OFF)
+		tg3_power_down(tp);
+
+	rtnl_unlock();
+}
+
 /**
  * tg3_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -17914,6 +17989,7 @@ static struct pci_driver tg3_driver = {
 	.remove		= tg3_remove_one,
 	.err_handler	= &tg3_err_handler,
 	.driver.pm	= &tg3_pm_ops,
+	.shutdown	= tg3_shutdown,
 };
 
 module_pci_driver(tg3_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index cd63d11..ddb8be1 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -532,6 +532,7 @@
 #define  RX_MODE_RSS_ITBL_HASH_BITS_7	 0x00700000
 #define  RX_MODE_RSS_ENABLE		 0x00800000
 #define  RX_MODE_IPV6_CSUM_ENABLE	 0x01000000
+#define  RX_MODE_IPV4_FRAG_FIX		 0x02000000
 #define MAC_RX_STATUS			0x0000046c
 #define  RX_STATUS_REMOTE_TX_XOFFED	 0x00000001
 #define  RX_STATUS_XOFF_RCVD		 0x00000002
@@ -1818,12 +1819,21 @@
 #define TG3_EAV_REF_CLCK_CTL		0x00006908
 #define  TG3_EAV_REF_CLCK_CTL_STOP	 0x00000002
 #define  TG3_EAV_REF_CLCK_CTL_RESUME	 0x00000004
+#define  TG3_EAV_CTL_TSYNC_GPIO_MASK	 (0x3 << 16)
+#define  TG3_EAV_CTL_TSYNC_WDOG0	 (1 << 17)
+
+#define TG3_EAV_WATCHDOG0_LSB		0x00006918
+#define TG3_EAV_WATCHDOG0_MSB		0x0000691c
+#define  TG3_EAV_WATCHDOG0_EN		 (1 << 31)
+#define  TG3_EAV_WATCHDOG_MSB_MASK	0x7fffffff
+
 #define TG3_EAV_REF_CLK_CORRECT_CTL	0x00006928
 #define  TG3_EAV_REF_CLK_CORRECT_EN	 (1 << 31)
 #define  TG3_EAV_REF_CLK_CORRECT_NEG	 (1 << 30)
 
 #define TG3_EAV_REF_CLK_CORRECT_MASK	0xffffff
-/* 0x690c --> 0x7000 unused */
+
+/* 0x692c --> 0x7000 unused */
 
 /* NVRAM Control registers */
 #define NVRAM_CMD			0x00007000
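The new tg3_shutdown() hook above powers the NIC down only when the machine is really powering off, not on a reboot, so wake-on-LAN stays armed for a power-off while a warm reboot still finds the device alive. A sketch of that decision, with a simplified stand-in for the kernel's system_state; the enum and helper here are illustrative, not the real definitions.

#include <stdio.h>

enum system_states {	/* simplified stand-in for the kernel enum */
	SYSTEM_RUNNING,
	SYSTEM_RESTART,
	SYSTEM_POWER_OFF,
};

static void shutdown_sketch(enum system_states state)
{
	/* detaching the netdev and closing it would happen unconditionally */
	if (state == SYSTEM_POWER_OFF)
		printf("power-off: put device in D3hot, arm WOL\n");
	else
		printf("reboot: leave device powered for the next kernel\n");
}

int main(void)
{
	shutdown_sketch(SYSTEM_RESTART);
	shutdown_sketch(SYSTEM_POWER_OFF);
	return 0;
}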