author    Linus Torvalds <torvalds@linux-foundation.org>  2008-07-20 17:43:29 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-07-20 17:43:29 -0700
commit    db6d8c7a4027b48d797b369a53f8470aaeed7063 (patch)
tree      e140c104a89abc2154e1f41a7db8ebecbb6fa0b4 /drivers/net/bnx2.c
parent    3a533374283aea50eab3976d8a6d30532175f009 (diff)
parent    fb65a7c091529bfffb1262515252c0d0f6241c5c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (1232 commits)
  iucv: Fix bad merging.
  net_sched: Add size table for qdiscs
  net_sched: Add accessor function for packet length for qdiscs
  net_sched: Add qdisc_enqueue wrapper
  highmem: Export totalhigh_pages.
  ipv6 mcast: Omit redundant address family checks in ip6_mc_source().
  net: Use standard structures for generic socket address structures.
  ipv6 netns: Make several "global" sysctl variables namespace aware.
  netns: Use net_eq() to compare net-namespaces for optimization.
  ipv6: remove unused macros from net/ipv6.h
  ipv6: remove unused parameter from ip6_ra_control
  tcp: fix kernel panic with listening_get_next
  tcp: Remove redundant checks when setting eff_sacks
  tcp: options clean up
  tcp: Fix MD5 signatures for non-linear skbs
  sctp: Update sctp global memory limit allocations.
  sctp: remove unnecessary byteshifting, calculate directly in big-endian
  sctp: Allow only 1 listening socket with SO_REUSEADDR
  sctp: Do not leak memory on multiple listen() calls
  sctp: Support ipv6only AF_INET6 sockets.
  ...
Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--  drivers/net/bnx2.c | 1339
1 file changed, 806 insertions(+), 533 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 367b6d4..5ebde67 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -47,6 +47,7 @@
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>
+#include <linux/log2.h>
#include "bnx2.h"
#include "bnx2_fw.h"
@@ -56,8 +57,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.7.5"
-#define DRV_MODULE_RELDATE "April 29, 2008"
+#define DRV_MODULE_VERSION "1.7.9"
+#define DRV_MODULE_RELDATE "July 18, 2008"
#define RUN_AT(x) (jiffies + (x))
@@ -68,7 +69,7 @@ static char version[] __devinitdata =
"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -87,6 +88,7 @@ typedef enum {
BCM5708S,
BCM5709,
BCM5709S,
+ BCM5716,
} board_t;
/* indexed by board_t, above */
@@ -102,9 +104,10 @@ static struct {
{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
+ { "Broadcom NetXtreme II BCM5716 1000Base-T" },
};
-static struct pci_device_id bnx2_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
@@ -123,6 +126,8 @@ static struct pci_device_id bnx2_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
+ { PCI_VENDOR_ID_BROADCOM, 0x163b,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
{ 0, }
};
@@ -226,7 +231,7 @@ static struct flash_spec flash_5709 = {
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
-static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
+static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
u32 diff;
@@ -235,7 +240,7 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
/* The ring uses 256 indices for 255 entries, one of them
* needs to be skipped.
*/
- diff = bp->tx_prod - bnapi->tx_cons;
+ diff = txr->tx_prod - txr->tx_cons;
if (unlikely(diff >= TX_DESC_CNT)) {
diff &= 0xffff;
if (diff == TX_DESC_CNT)
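The test above guards a subtle case: tx_prod and tx_cons are free-running 16-bit indices, and a 256-index ring page backs only 255 usable entries, so a raw difference of exactly TX_DESC_CNT really means MAX_TX_DESC_CNT descriptors in flight. A self-contained sketch of the full computation (the fold to MAX_TX_DESC_CNT and the final subtraction sit just outside this hunk; constants are redeclared locally for illustration, this is not driver code):

#include <assert.h>
#include <stdint.h>

#define TX_DESC_CNT     256                   /* 256 indices, 255 usable entries */
#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)

static uint32_t tx_avail(uint32_t tx_ring_size, uint16_t tx_prod, uint16_t tx_cons)
{
        uint32_t diff = (uint32_t)(tx_prod - tx_cons);

        if (diff >= TX_DESC_CNT) {
                diff &= 0xffff;               /* fold back to the 16-bit wrap */
                if (diff == TX_DESC_CNT)      /* exactly one full ring apart */
                        diff = MAX_TX_DESC_CNT;
        }
        return tx_ring_size - diff;
}

int main(void)
{
        assert(tx_avail(255, 256, 0) == 0);       /* producer a full ring ahead */
        assert(tx_avail(255, 100, 100) == 255);   /* ring empty */
        assert(tx_avail(255, 5, 65531) == 245);   /* indices wrapped past 65535 */
        return 0;
}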
@@ -289,7 +294,6 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
REG_WR(bp, BNX2_CTX_CTX_CTRL,
offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
for (i = 0; i < 5; i++) {
- u32 val;
val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
break;
@@ -488,7 +492,7 @@ bnx2_netif_start(struct bnx2 *bp)
{
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
- netif_wake_queue(bp->dev);
+ netif_tx_wake_all_queues(bp->dev);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
}
@@ -496,99 +500,162 @@ bnx2_netif_start(struct bnx2 *bp)
}
static void
-bnx2_free_mem(struct bnx2 *bp)
+bnx2_free_tx_mem(struct bnx2 *bp)
{
int i;
- for (i = 0; i < bp->ctx_pages; i++) {
- if (bp->ctx_blk[i]) {
- pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
- bp->ctx_blk[i],
- bp->ctx_blk_mapping[i]);
- bp->ctx_blk[i] = NULL;
+ for (i = 0; i < bp->num_tx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+ if (txr->tx_desc_ring) {
+ pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
+ txr->tx_desc_ring,
+ txr->tx_desc_mapping);
+ txr->tx_desc_ring = NULL;
}
+ kfree(txr->tx_buf_ring);
+ txr->tx_buf_ring = NULL;
}
- if (bp->status_blk) {
- pci_free_consistent(bp->pdev, bp->status_stats_size,
- bp->status_blk, bp->status_blk_mapping);
- bp->status_blk = NULL;
- bp->stats_blk = NULL;
+}
+
+static void
+bnx2_free_rx_mem(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->num_rx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ int j;
+
+ for (j = 0; j < bp->rx_max_ring; j++) {
+ if (rxr->rx_desc_ring[j])
+ pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
+ rxr->rx_desc_ring[j],
+ rxr->rx_desc_mapping[j]);
+ rxr->rx_desc_ring[j] = NULL;
+ }
+ if (rxr->rx_buf_ring)
+ vfree(rxr->rx_buf_ring);
+ rxr->rx_buf_ring = NULL;
+
+ for (j = 0; j < bp->rx_max_pg_ring; j++) {
+ if (rxr->rx_pg_desc_ring[j])
+ pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
+ rxr->rx_pg_desc_ring[j],
+ rxr->rx_pg_desc_mapping[j]);
+ rxr->rx_pg_desc_ring[j] = NULL;
+ }
+ if (rxr->rx_pg_ring)
+ vfree(rxr->rx_pg_ring);
+ rxr->rx_pg_ring = NULL;
}
- if (bp->tx_desc_ring) {
- pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
- bp->tx_desc_ring, bp->tx_desc_mapping);
- bp->tx_desc_ring = NULL;
- }
- kfree(bp->tx_buf_ring);
- bp->tx_buf_ring = NULL;
- for (i = 0; i < bp->rx_max_ring; i++) {
- if (bp->rx_desc_ring[i])
- pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
- bp->rx_desc_ring[i],
- bp->rx_desc_mapping[i]);
- bp->rx_desc_ring[i] = NULL;
- }
- vfree(bp->rx_buf_ring);
- bp->rx_buf_ring = NULL;
- for (i = 0; i < bp->rx_max_pg_ring; i++) {
- if (bp->rx_pg_desc_ring[i])
- pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
- bp->rx_pg_desc_ring[i],
- bp->rx_pg_desc_mapping[i]);
- bp->rx_pg_desc_ring[i] = NULL;
- }
- if (bp->rx_pg_ring)
- vfree(bp->rx_pg_ring);
- bp->rx_pg_ring = NULL;
}
static int
-bnx2_alloc_mem(struct bnx2 *bp)
+bnx2_alloc_tx_mem(struct bnx2 *bp)
{
- int i, status_blk_size;
+ int i;
- bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
- if (bp->tx_buf_ring == NULL)
- return -ENOMEM;
+ for (i = 0; i < bp->num_tx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
- bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
- &bp->tx_desc_mapping);
- if (bp->tx_desc_ring == NULL)
- goto alloc_mem_err;
+ txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
+ if (txr->tx_buf_ring == NULL)
+ return -ENOMEM;
- bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
- if (bp->rx_buf_ring == NULL)
- goto alloc_mem_err;
+ txr->tx_desc_ring =
+ pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
+ &txr->tx_desc_mapping);
+ if (txr->tx_desc_ring == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int
+bnx2_alloc_rx_mem(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->num_rx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ int j;
- memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
+ rxr->rx_buf_ring =
+ vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
+ if (rxr->rx_buf_ring == NULL)
+ return -ENOMEM;
- for (i = 0; i < bp->rx_max_ring; i++) {
- bp->rx_desc_ring[i] =
- pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
- &bp->rx_desc_mapping[i]);
- if (bp->rx_desc_ring[i] == NULL)
- goto alloc_mem_err;
+ memset(rxr->rx_buf_ring, 0,
+ SW_RXBD_RING_SIZE * bp->rx_max_ring);
- }
+ for (j = 0; j < bp->rx_max_ring; j++) {
+ rxr->rx_desc_ring[j] =
+ pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
+ &rxr->rx_desc_mapping[j]);
+ if (rxr->rx_desc_ring[j] == NULL)
+ return -ENOMEM;
- if (bp->rx_pg_ring_size) {
- bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
- bp->rx_max_pg_ring);
- if (bp->rx_pg_ring == NULL)
- goto alloc_mem_err;
+ }
+
+ if (bp->rx_pg_ring_size) {
+ rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
+ bp->rx_max_pg_ring);
+ if (rxr->rx_pg_ring == NULL)
+ return -ENOMEM;
+
+ memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
+ bp->rx_max_pg_ring);
+ }
+
+ for (j = 0; j < bp->rx_max_pg_ring; j++) {
+ rxr->rx_pg_desc_ring[j] =
+ pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
+ &rxr->rx_pg_desc_mapping[j]);
+ if (rxr->rx_pg_desc_ring[j] == NULL)
+ return -ENOMEM;
- memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
- bp->rx_max_pg_ring);
+ }
}
+ return 0;
+}
- for (i = 0; i < bp->rx_max_pg_ring; i++) {
- bp->rx_pg_desc_ring[i] =
- pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
- &bp->rx_pg_desc_mapping[i]);
- if (bp->rx_pg_desc_ring[i] == NULL)
- goto alloc_mem_err;
+static void
+bnx2_free_mem(struct bnx2 *bp)
+{
+ int i;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+ bnx2_free_tx_mem(bp);
+ bnx2_free_rx_mem(bp);
+ for (i = 0; i < bp->ctx_pages; i++) {
+ if (bp->ctx_blk[i]) {
+ pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
+ bp->ctx_blk[i],
+ bp->ctx_blk_mapping[i]);
+ bp->ctx_blk[i] = NULL;
+ }
}
+ if (bnapi->status_blk.msi) {
+ pci_free_consistent(bp->pdev, bp->status_stats_size,
+ bnapi->status_blk.msi,
+ bp->status_blk_mapping);
+ bnapi->status_blk.msi = NULL;
+ bp->stats_blk = NULL;
+ }
+}
+
+static int
+bnx2_alloc_mem(struct bnx2 *bp)
+{
+ int i, status_blk_size, err;
+ struct bnx2_napi *bnapi;
+ void *status_blk;
/* Combine status and statistics blocks into one allocation. */
status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
@@ -598,27 +665,37 @@ bnx2_alloc_mem(struct bnx2 *bp)
bp->status_stats_size = status_blk_size +
sizeof(struct statistics_block);
- bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
- &bp->status_blk_mapping);
- if (bp->status_blk == NULL)
+ status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
+ &bp->status_blk_mapping);
+ if (status_blk == NULL)
goto alloc_mem_err;
- memset(bp->status_blk, 0, bp->status_stats_size);
+ memset(status_blk, 0, bp->status_stats_size);
- bp->bnx2_napi[0].status_blk = bp->status_blk;
+ bnapi = &bp->bnx2_napi[0];
+ bnapi->status_blk.msi = status_blk;
+ bnapi->hw_tx_cons_ptr =
+ &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
+ bnapi->hw_rx_cons_ptr =
+ &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
if (bp->flags & BNX2_FLAG_MSIX_CAP) {
for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
- struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct status_block_msix *sblk;
- bnapi->status_blk_msix = (void *)
- ((unsigned long) bp->status_blk +
- BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ bnapi = &bp->bnx2_napi[i];
+
+ sblk = (void *) (status_blk +
+ BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ bnapi->status_blk.msix = sblk;
+ bnapi->hw_tx_cons_ptr =
+ &sblk->status_tx_quick_consumer_index;
+ bnapi->hw_rx_cons_ptr =
+ &sblk->status_rx_quick_consumer_index;
bnapi->int_num = i << 24;
}
}
- bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
- status_blk_size);
+ bp->stats_blk = status_blk + status_blk_size;
bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
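The single pci_alloc_consistent() above now backs three things: the base (MSI) status block, the per-vector MSI-X status blocks at fixed BNX2_SBLK_MSIX_ALIGN_SIZE strides from the same base, and the statistics block appended after the cache-aligned status area. A sketch of the offset arithmetic with illustrative sizes (the real constants live in bnx2.h):

#include <stdio.h>

#define L1_CACHE_BYTES        64U   /* illustrative cache line size */
#define L1_CACHE_ALIGN(x)     (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))
#define SBLK_MSIX_ALIGN_SIZE  128U  /* stride between per-vector status blocks */
#define MAX_MSIX_VEC          9U    /* illustrative vector count */

int main(void)
{
        /* With MSI-X capable chips, the status area covers one block per vector. */
        unsigned int status_blk_size =
                L1_CACHE_ALIGN(SBLK_MSIX_ALIGN_SIZE * MAX_MSIX_VEC);
        unsigned int i;

        /* Vector i's status block sits at a fixed stride from the base... */
        for (i = 1; i < MAX_MSIX_VEC; i++)
                printf("vector %u status block at offset %u\n",
                       i, i * SBLK_MSIX_ALIGN_SIZE);

        /* ...and the statistics block follows the aligned status area. */
        printf("statistics block at offset %u\n", status_blk_size);
        return 0;
}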
@@ -634,6 +711,15 @@ bnx2_alloc_mem(struct bnx2 *bp)
goto alloc_mem_err;
}
}
+
+ err = bnx2_alloc_rx_mem(bp);
+ if (err)
+ goto alloc_mem_err;
+
+ err = bnx2_alloc_tx_mem(bp);
+ if (err)
+ goto alloc_mem_err;
+
return 0;
alloc_mem_err:
@@ -993,9 +1079,9 @@ bnx2_copper_linkup(struct bnx2 *bp)
}
static void
-bnx2_init_rx_context0(struct bnx2 *bp)
+bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
- u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
+ u32 val, rx_cid_addr = GET_CID_ADDR(cid);
val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
@@ -1028,6 +1114,19 @@ bnx2_init_rx_context0(struct bnx2 *bp)
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
+static void
+bnx2_init_all_rx_contexts(struct bnx2 *bp)
+{
+ int i;
+ u32 cid;
+
+ for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
+ if (i == 1)
+ cid = RX_RSS_CID;
+ bnx2_init_rx_context(bp, cid);
+ }
+}
+
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
@@ -1093,7 +1192,7 @@ bnx2_set_mac_link(struct bnx2 *bp)
REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
if (CHIP_NUM(bp) == CHIP_NUM_5709)
- bnx2_init_rx_context0(bp);
+ bnx2_init_all_rx_contexts(bp);
return 0;
}
@@ -1392,7 +1491,7 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp)
return adv;
}
-static int bnx2_fw_sync(struct bnx2 *, u32, int);
+static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
@@ -1445,7 +1544,7 @@ bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
spin_unlock_bh(&bp->phy_lock);
- bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
spin_lock_bh(&bp->phy_lock);
return 0;
@@ -1875,7 +1974,7 @@ bnx2_setup_phy(struct bnx2 *bp, u8 port)
}
static int
-bnx2_init_5709s_phy(struct bnx2 *bp)
+bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
u32 val;
@@ -1890,7 +1989,8 @@ bnx2_init_5709s_phy(struct bnx2 *bp)
bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
- bnx2_reset_phy(bp);
+ if (reset_phy)
+ bnx2_reset_phy(bp);
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
@@ -1924,11 +2024,12 @@ bnx2_init_5709s_phy(struct bnx2 *bp)
}
static int
-bnx2_init_5708s_phy(struct bnx2 *bp)
+bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
u32 val;
- bnx2_reset_phy(bp);
+ if (reset_phy)
+ bnx2_reset_phy(bp);
bp->mii_up1 = BCM5708S_UP1;
@@ -1981,9 +2082,10 @@ bnx2_init_5708s_phy(struct bnx2 *bp)
}
static int
-bnx2_init_5706s_phy(struct bnx2 *bp)
+bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
- bnx2_reset_phy(bp);
+ if (reset_phy)
+ bnx2_reset_phy(bp);
bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
@@ -2018,11 +2120,12 @@ bnx2_init_5706s_phy(struct bnx2 *bp)
}
static int
-bnx2_init_copper_phy(struct bnx2 *bp)
+bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
u32 val;
- bnx2_reset_phy(bp);
+ if (reset_phy)
+ bnx2_reset_phy(bp);
if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
bnx2_write_phy(bp, 0x18, 0x0c00);
@@ -2070,7 +2173,7 @@ bnx2_init_copper_phy(struct bnx2 *bp)
static int
-bnx2_init_phy(struct bnx2 *bp)
+bnx2_init_phy(struct bnx2 *bp, int reset_phy)
{
u32 val;
int rc = 0;
@@ -2096,14 +2199,14 @@ bnx2_init_phy(struct bnx2 *bp)
if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
if (CHIP_NUM(bp) == CHIP_NUM_5706)
- rc = bnx2_init_5706s_phy(bp);
+ rc = bnx2_init_5706s_phy(bp, reset_phy);
else if (CHIP_NUM(bp) == CHIP_NUM_5708)
- rc = bnx2_init_5708s_phy(bp);
+ rc = bnx2_init_5708s_phy(bp, reset_phy);
else if (CHIP_NUM(bp) == CHIP_NUM_5709)
- rc = bnx2_init_5709s_phy(bp);
+ rc = bnx2_init_5709s_phy(bp, reset_phy);
}
else {
- rc = bnx2_init_copper_phy(bp);
+ rc = bnx2_init_copper_phy(bp, reset_phy);
}
setup_phy:
@@ -2159,7 +2262,7 @@ bnx2_set_phy_loopback(struct bnx2 *bp)
}
static int
-bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
+bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
int i;
u32 val;
@@ -2169,6 +2272,9 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+ if (!ack)
+ return 0;
+
/* wait for an acknowledgement. */
for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
msleep(10);
@@ -2345,28 +2451,27 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
}
static void
-bnx2_set_mac_addr(struct bnx2 *bp)
+bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
{
u32 val;
- u8 *mac_addr = bp->dev->dev_addr;
val = (mac_addr[0] << 8) | mac_addr[1];
- REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
+ REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
+ REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
}
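The new pos argument turns this into a general filter-slot writer: the 6-byte MAC is split across a pair of 32-bit MATCH registers, and each slot occupies 8 bytes of register space (hence pos * 8). A user-space sketch of the packing, with a flat array standing in for the register file (illustration only, not driver code):

#include <stdint.h>
#include <stdio.h>

static uint32_t match_regs[8][2];   /* [slot][MATCH0/MATCH1] stand-in */

static void set_mac_addr(const uint8_t *mac, unsigned int pos)
{
        match_regs[pos][0] = (mac[0] << 8) | mac[1];
        match_regs[pos][1] = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
                             (mac[4] << 8) | mac[5];
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

        set_mac_addr(mac, 0);
        printf("MATCH0=0x%08x MATCH1=0x%08x\n",
               (unsigned)match_regs[0][0], (unsigned)match_regs[0][1]);
        /* prints MATCH0=0x00000010 MATCH1=0x18aabbcc */
        return 0;
}

Later in this patch, bnx2_set_rx_mode() uses the pos argument to program one slot per entry of the device's unicast address list, starting at BNX2_START_UNICAST_ADDRESS_INDEX.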
static inline int
-bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
dma_addr_t mapping;
- struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+ struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
struct rx_bd *rxbd =
- &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
+ &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
struct page *page = alloc_page(GFP_ATOMIC);
if (!page)
@@ -2381,9 +2486,9 @@ bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
}
static void
-bnx2_free_rx_page(struct bnx2 *bp, u16 index)
+bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
- struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+ struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
struct page *page = rx_pg->page;
if (!page)
@@ -2397,12 +2502,12 @@ bnx2_free_rx_page(struct bnx2 *bp, u16 index)
}
static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
struct sk_buff *skb;
- struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
+ struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
dma_addr_t mapping;
- struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
+ struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
unsigned long align;
skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
@@ -2422,7 +2527,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
- bnapi->rx_prod_bseq += bp->rx_buf_use_size;
+ rxr->rx_prod_bseq += bp->rx_buf_use_size;
return 0;
}
@@ -2430,7 +2535,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
static int
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
- struct status_block *sblk = bnapi->status_blk;
+ struct status_block *sblk = bnapi->status_blk.msi;
u32 new_link_state, old_link_state;
int is_set = 1;
@@ -2466,11 +2571,9 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
u16 cons;
- if (bnapi->int_num == 0)
- cons = bnapi->status_blk->status_tx_quick_consumer_index0;
- else
- cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
-
+ /* Tell compiler that status block fields can change. */
+ barrier();
+ cons = *bnapi->hw_tx_cons_ptr;
if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
cons++;
return cons;
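The barrier() is a compiler barrier only: the status block is coherent DMA memory that the chip rewrites at will, and nothing else tells the compiler that the load can't be cached in a register once this helper is inlined into a poll loop (the CPU-side rmb() ordering is handled by the callers). A stand-alone sketch of the pattern, with a GCC-style barrier standing in for the kernel macro:

#include <stdint.h>

/* Emits no instructions, but forgets all register-cached memory values. */
#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

#define MAX_TX_DESC_CNT 255   /* last, unusable index of a 256-entry page */

static uint16_t read_hw_cons(const uint16_t *hw_cons_ptr)
{
        uint16_t cons;

        compiler_barrier();            /* force a fresh load on every call */
        cons = *hw_cons_ptr;
        if ((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT)
                cons++;                /* skip the hole at the page boundary */
        return cons;
}

int main(void)
{
        uint16_t status_block_cons = 255;   /* stands in for the DMA'ed field */

        return read_hw_cons(&status_block_cons) == 256 ? 0 : 1;
}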
@@ -2479,11 +2582,16 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
u16 hw_cons, sw_cons, sw_ring_cons;
- int tx_pkt = 0;
+ int tx_pkt = 0, index;
+ struct netdev_queue *txq;
+
+ index = (bnapi - bp->bnx2_napi);
+ txq = netdev_get_tx_queue(bp->dev, index);
hw_cons = bnx2_get_hw_tx_cons(bnapi);
- sw_cons = bnapi->tx_cons;
+ sw_cons = txr->tx_cons;
while (sw_cons != hw_cons) {
struct sw_bd *tx_buf;
@@ -2492,7 +2600,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_ring_cons = TX_RING_IDX(sw_cons);
- tx_buf = &bp->tx_buf_ring[sw_ring_cons];
+ tx_buf = &txr->tx_buf_ring[sw_ring_cons];
skb = tx_buf->skb;
/* partial BD completions possible with TSO packets */
@@ -2522,7 +2630,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
pci_unmap_page(bp->pdev,
pci_unmap_addr(
- &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
+ &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
@@ -2538,44 +2646,46 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
hw_cons = bnx2_get_hw_tx_cons(bnapi);
}
- bnapi->hw_tx_cons = hw_cons;
- bnapi->tx_cons = sw_cons;
+ txr->hw_tx_cons = hw_cons;
+ txr->tx_cons = sw_cons;
+
/* Need to make the tx_cons update visible to bnx2_start_xmit()
- * before checking for netif_queue_stopped(). Without the
+ * before checking for netif_tx_queue_stopped(). Without the
* memory barrier, there is a small possibility that bnx2_start_xmit()
* will miss it and cause the queue to be stopped forever.
*/
smp_mb();
- if (unlikely(netif_queue_stopped(bp->dev)) &&
- (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
- netif_tx_lock(bp->dev);
- if ((netif_queue_stopped(bp->dev)) &&
- (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
- netif_wake_queue(bp->dev);
- netif_tx_unlock(bp->dev);
+ if (unlikely(netif_tx_queue_stopped(txq)) &&
+ (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if ((netif_tx_queue_stopped(txq)) &&
+ (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
}
+
return tx_pkt;
}
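The smp_mb() above pairs with a matching barrier in bnx2_start_xmit(): the xmit side stops the queue and then re-reads tx_cons; the completion side updates tx_cons and then re-reads the stopped flag. If either side tested before publishing, the final wakeup could be lost and the queue would stall forever. A C11-atomics sketch of the completion side of that handshake (all names are stand-ins, not driver or netdev API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE 255

static atomic_bool queue_stopped;
static atomic_uint tx_prod, tx_cons;

static unsigned int tx_avail(void)
{
        return TX_RING_SIZE -
               ((atomic_load(&tx_prod) - atomic_load(&tx_cons)) & 0xffff);
}

static void tx_completion(unsigned int new_cons, unsigned int wake_thresh)
{
        atomic_store(&tx_cons, new_cons);

        /* Publish tx_cons before testing the stopped flag; the xmit path
         * sets the flag before re-reading tx_cons.  This is the smp_mb(). */
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load(&queue_stopped) && tx_avail() > wake_thresh) {
                /* the driver re-checks both conditions under the tx lock */
                atomic_store(&queue_stopped, false);
                puts("queue woken");            /* netif_tx_wake_queue() */
        }
}

int main(void)
{
        atomic_store(&tx_prod, 300);
        atomic_store(&queue_stopped, true);
        tx_completion(200, 64);   /* 155 descriptors now free: wakes the queue */
        return 0;
}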
static void
-bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
struct sk_buff *skb, int count)
{
struct sw_pg *cons_rx_pg, *prod_rx_pg;
struct rx_bd *cons_bd, *prod_bd;
dma_addr_t mapping;
int i;
- u16 hw_prod = bnapi->rx_pg_prod, prod;
- u16 cons = bnapi->rx_pg_cons;
+ u16 hw_prod = rxr->rx_pg_prod, prod;
+ u16 cons = rxr->rx_pg_cons;
for (i = 0; i < count; i++) {
prod = RX_PG_RING_IDX(hw_prod);
- prod_rx_pg = &bp->rx_pg_ring[prod];
- cons_rx_pg = &bp->rx_pg_ring[cons];
- cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
- prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_rx_pg = &rxr->rx_pg_ring[prod];
+ cons_rx_pg = &rxr->rx_pg_ring[cons];
+ cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
if (i == 0 && skb) {
struct page *page;
@@ -2604,25 +2714,25 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
hw_prod = NEXT_RX_BD(hw_prod);
}
- bnapi->rx_pg_prod = hw_prod;
- bnapi->rx_pg_cons = cons;
+ rxr->rx_pg_prod = hw_prod;
+ rxr->rx_pg_cons = cons;
}
static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
- u16 cons, u16 prod)
+bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+ struct sk_buff *skb, u16 cons, u16 prod)
{
struct sw_bd *cons_rx_buf, *prod_rx_buf;
struct rx_bd *cons_bd, *prod_bd;
- cons_rx_buf = &bp->rx_buf_ring[cons];
- prod_rx_buf = &bp->rx_buf_ring[prod];
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
pci_dma_sync_single_for_device(bp->pdev,
pci_unmap_addr(cons_rx_buf, mapping),
- bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+ BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
- bnapi->rx_prod_bseq += bp->rx_buf_use_size;
+ rxr->rx_prod_bseq += bp->rx_buf_use_size;
prod_rx_buf->skb = skb;
@@ -2632,33 +2742,33 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
pci_unmap_addr_set(prod_rx_buf, mapping,
pci_unmap_addr(cons_rx_buf, mapping));
- cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
- prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
u32 ring_idx)
{
int err;
u16 prod = ring_idx & 0xffff;
- err = bnx2_alloc_rx_skb(bp, bnapi, prod);
+ err = bnx2_alloc_rx_skb(bp, rxr, prod);
if (unlikely(err)) {
- bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
+ bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
if (hdr_len) {
unsigned int raw_len = len + 4;
int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
- bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
+ bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
}
return err;
}
- skb_reserve(skb, bp->rx_offset);
+ skb_reserve(skb, BNX2_RX_OFFSET);
pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
@@ -2668,8 +2778,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
} else {
unsigned int i, frag_len, frag_size, pages;
struct sw_pg *rx_pg;
- u16 pg_cons = bnapi->rx_pg_cons;
- u16 pg_prod = bnapi->rx_pg_prod;
+ u16 pg_cons = rxr->rx_pg_cons;
+ u16 pg_prod = rxr->rx_pg_prod;
frag_size = len + 4 - hdr_len;
pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
@@ -2680,9 +2790,9 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
if (unlikely(frag_len <= 4)) {
unsigned int tail = 4 - frag_len;
- bnapi->rx_pg_cons = pg_cons;
- bnapi->rx_pg_prod = pg_prod;
- bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
+ rxr->rx_pg_cons = pg_cons;
+ rxr->rx_pg_prod = pg_prod;
+ bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
pages - i);
skb->len -= tail;
if (i == 0) {
@@ -2696,7 +2806,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
}
return 0;
}
- rx_pg = &bp->rx_pg_ring[pg_cons];
+ rx_pg = &rxr->rx_pg_ring[pg_cons];
pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
PAGE_SIZE, PCI_DMA_FROMDEVICE);
@@ -2707,11 +2817,12 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
rx_pg->page = NULL;
- err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
+ err = bnx2_alloc_rx_page(bp, rxr,
+ RX_PG_RING_IDX(pg_prod));
if (unlikely(err)) {
- bnapi->rx_pg_cons = pg_cons;
- bnapi->rx_pg_prod = pg_prod;
- bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
+ rxr->rx_pg_cons = pg_cons;
+ rxr->rx_pg_prod = pg_prod;
+ bnx2_reuse_rx_skb_pages(bp, rxr, skb,
pages - i);
return err;
}
@@ -2724,8 +2835,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
pg_prod = NEXT_RX_BD(pg_prod);
pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
}
- bnapi->rx_pg_prod = pg_prod;
- bnapi->rx_pg_cons = pg_cons;
+ rxr->rx_pg_prod = pg_prod;
+ rxr->rx_pg_cons = pg_cons;
}
return 0;
}
@@ -2733,8 +2844,11 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
- u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
+ u16 cons;
+ /* Tell compiler that status block fields can change. */
+ barrier();
+ cons = *bnapi->hw_rx_cons_ptr;
if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
cons++;
return cons;
@@ -2743,13 +2857,14 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
struct l2_fhdr *rx_hdr;
int rx_pkt = 0, pg_ring_used = 0;
hw_cons = bnx2_get_hw_rx_cons(bnapi);
- sw_cons = bnapi->rx_cons;
- sw_prod = bnapi->rx_prod;
+ sw_cons = rxr->rx_cons;
+ sw_prod = rxr->rx_prod;
/* Memory barrier necessary as speculative reads of the rx
* buffer can be ahead of the index in the status block
@@ -2765,7 +2880,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_ring_cons = RX_RING_IDX(sw_cons);
sw_ring_prod = RX_RING_IDX(sw_prod);
- rx_buf = &bp->rx_buf_ring[sw_ring_cons];
+ rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
skb = rx_buf->skb;
rx_buf->skb = NULL;
@@ -2773,7 +2888,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
dma_addr = pci_unmap_addr(rx_buf, mapping);
pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
- bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+ BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
+ PCI_DMA_FROMDEVICE);
rx_hdr = (struct l2_fhdr *) skb->data;
len = rx_hdr->l2_fhdr_pkt_len;
@@ -2785,7 +2901,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
L2_FHDR_ERRORS_TOO_SHORT |
L2_FHDR_ERRORS_GIANT_FRAME)) {
- bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+ bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
sw_ring_prod);
goto next_rx;
}
@@ -2805,22 +2921,23 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
new_skb = netdev_alloc_skb(bp->dev, len + 2);
if (new_skb == NULL) {
- bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+ bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
sw_ring_prod);
goto next_rx;
}
/* aligned copy */
- skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
+ skb_copy_from_linear_data_offset(skb,
+ BNX2_RX_OFFSET - 2,
new_skb->data, len + 2);
skb_reserve(new_skb, 2);
skb_put(new_skb, len);
- bnx2_reuse_rx_skb(bp, bnapi, skb,
+ bnx2_reuse_rx_skb(bp, rxr, skb,
sw_ring_cons, sw_ring_prod);
skb = new_skb;
- } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
+ } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
goto next_rx;
@@ -2869,16 +2986,15 @@ next_rx:
rmb();
}
}
- bnapi->rx_cons = sw_cons;
- bnapi->rx_prod = sw_prod;
+ rxr->rx_cons = sw_cons;
+ rxr->rx_prod = sw_prod;
if (pg_ring_used)
- REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
- bnapi->rx_pg_prod);
+ REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
- REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
+ REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
- REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
+ REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
mmiowb();
@@ -2892,11 +3008,11 @@ next_rx:
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
- struct net_device *dev = dev_instance;
- struct bnx2 *bp = netdev_priv(dev);
- struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ struct bnx2_napi *bnapi = dev_instance;
+ struct bnx2 *bp = bnapi->bp;
+ struct net_device *dev = bp->dev;
- prefetch(bnapi->status_blk);
+ prefetch(bnapi->status_blk.msi);
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
@@ -2913,11 +3029,11 @@ bnx2_msi(int irq, void *dev_instance)
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
- struct net_device *dev = dev_instance;
- struct bnx2 *bp = netdev_priv(dev);
- struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ struct bnx2_napi *bnapi = dev_instance;
+ struct bnx2 *bp = bnapi->bp;
+ struct net_device *dev = bp->dev;
- prefetch(bnapi->status_blk);
+ prefetch(bnapi->status_blk.msi);
/* Return here if interrupt is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
@@ -2931,10 +3047,10 @@ bnx2_msi_1shot(int irq, void *dev_instance)
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
- struct net_device *dev = dev_instance;
- struct bnx2 *bp = netdev_priv(dev);
- struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
- struct status_block *sblk = bnapi->status_blk;
+ struct bnx2_napi *bnapi = dev_instance;
+ struct bnx2 *bp = bnapi->bp;
+ struct net_device *dev = bp->dev;
+ struct status_block *sblk = bnapi->status_blk.msi;
/* When using INTx, it is possible for the interrupt to arrive
* at the CPU before the status block posted prior to the
@@ -2968,21 +3084,16 @@ bnx2_interrupt(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-static irqreturn_t
-bnx2_tx_msix(int irq, void *dev_instance)
+static inline int
+bnx2_has_fast_work(struct bnx2_napi *bnapi)
{
- struct net_device *dev = dev_instance;
- struct bnx2 *bp = netdev_priv(dev);
- struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
-
- prefetch(bnapi->status_blk_msix);
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
- /* Return here if interrupt is disabled. */
- if (unlikely(atomic_read(&bp->intr_sem) != 0))
- return IRQ_HANDLED;
-
- netif_rx_schedule(dev, &bnapi->napi);
- return IRQ_HANDLED;
+ if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
+ (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
+ return 1;
+ return 0;
}
#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
@@ -2991,10 +3102,9 @@ bnx2_tx_msix(int irq, void *dev_instance)
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
- struct status_block *sblk = bnapi->status_blk;
+ struct status_block *sblk = bnapi->status_blk.msi;
- if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
- (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
+ if (bnx2_has_fast_work(bnapi))
return 1;
if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
@@ -3004,33 +3114,9 @@ bnx2_has_work(struct bnx2_napi *bnapi)
return 0;
}
-static int bnx2_tx_poll(struct napi_struct *napi, int budget)
-{
- struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
- struct bnx2 *bp = bnapi->bp;
- int work_done = 0;
- struct status_block_msix *sblk = bnapi->status_blk_msix;
-
- do {
- work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
- if (unlikely(work_done >= budget))
- return work_done;
-
- bnapi->last_status_idx = sblk->status_idx;
- rmb();
- } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
-
- netif_rx_complete(bp->dev, napi);
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- bnapi->last_status_idx);
- return work_done;
-}
-
-static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
- int work_done, int budget)
+static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
- struct status_block *sblk = bnapi->status_blk;
+ struct status_block *sblk = bnapi->status_blk.msi;
u32 status_attn_bits = sblk->status_attn_bits;
u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
@@ -3046,24 +3132,60 @@ static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
REG_RD(bp, BNX2_HC_COMMAND);
}
+}
- if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
+static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
+ int work_done, int budget)
+{
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+ if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
bnx2_tx_int(bp, bnapi, 0);
- if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
+ if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
return work_done;
}
+static int bnx2_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+ struct bnx2 *bp = bnapi->bp;
+ int work_done = 0;
+ struct status_block_msix *sblk = bnapi->status_blk.msix;
+
+ while (1) {
+ work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
+ if (unlikely(work_done >= budget))
+ break;
+
+ bnapi->last_status_idx = sblk->status_idx;
+ /* status idx must be read before checking for more work. */
+ rmb();
+ if (likely(!bnx2_has_fast_work(bnapi))) {
+
+ netif_rx_complete(bp->dev, napi);
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
+ break;
+ }
+ }
+ return work_done;
+}
+
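bnx2_poll_msix() follows the standard NAPI contract: drain up to budget, and only complete (re-enabling the vector via INT_ACK_CMD) after recording the status index and re-checking that no work raced in, because the chip will not interrupt again for events already posted in the current status block. A generic sketch of that loop shape, with stub types standing in for the driver and netdev API:

#include <stdbool.h>

struct ring {
        volatile unsigned int status_blk_idx;   /* written by the NIC via DMA */
        unsigned int last_status_idx;
        unsigned int hw_cons, sw_cons;
};

#define read_barrier() __asm__ __volatile__("" ::: "memory")

static int process_ring(struct ring *r, int quota)
{
        int n = 0;

        while (r->sw_cons != r->hw_cons && n < quota) {
                r->sw_cons++;                   /* reclaim one completion */
                n++;
        }
        return n;
}

static bool ring_has_work(const struct ring *r)
{
        return r->sw_cons != r->hw_cons;
}

static void napi_complete_stub(struct ring *r) { (void)r; }
static void irq_unmask_stub(struct ring *r)    { (void)r; }

static int napi_poll_sketch(struct ring *r, int budget)
{
        int work_done = 0;

        for (;;) {
                work_done += process_ring(r, budget - work_done);
                if (work_done >= budget)
                        return work_done;       /* budget spent: stay scheduled */

                r->last_status_idx = r->status_blk_idx;
                read_barrier();                 /* idx read before the re-check */
                if (!ring_has_work(r)) {
                        napi_complete_stub(r);  /* netif_rx_complete()... */
                        irq_unmask_stub(r);     /* ...then the INT_ACK_CMD write */
                        return work_done;
                }
        }
}

int main(void)
{
        struct ring r = { .status_blk_idx = 1, .hw_cons = 10 };

        return napi_poll_sketch(&r, 64) == 10 ? 0 : 1;
}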
static int bnx2_poll(struct napi_struct *napi, int budget)
{
struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
struct bnx2 *bp = bnapi->bp;
int work_done = 0;
- struct status_block *sblk = bnapi->status_blk;
+ struct status_block *sblk = bnapi->status_blk.msi;
while (1) {
+ bnx2_poll_link(bp, bnapi);
+
work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
if (unlikely(work_done >= budget))
@@ -3106,6 +3228,7 @@ bnx2_set_rx_mode(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
u32 rx_mode, sort_mode;
+ struct dev_addr_list *uc_ptr;
int i;
spin_lock_bh(&bp->phy_lock);
@@ -3161,6 +3284,25 @@ bnx2_set_rx_mode(struct net_device *dev)
sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
}
+ uc_ptr = NULL;
+ if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
+ rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
+ sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+ BNX2_RPM_SORT_USER0_PROM_VLAN;
+ } else if (!(dev->flags & IFF_PROMISC)) {
+ uc_ptr = dev->uc_list;
+
+ /* Add all entries into the match filter list */
+ for (i = 0; i < dev->uc_count; i++) {
+ bnx2_set_mac_addr(bp, uc_ptr->da_addr,
+ i + BNX2_START_UNICAST_ADDRESS_INDEX);
+ sort_mode |= (1 <<
+ (i + BNX2_START_UNICAST_ADDRESS_INDEX));
+ uc_ptr = uc_ptr->next;
+ }
+
+ }
+
if (rx_mode != bp->rx_mode) {
bp->rx_mode = rx_mode;
REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
@@ -3213,7 +3355,7 @@ load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
}
static int
-load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
+load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
{
u32 offset;
u32 val;
@@ -3297,7 +3439,6 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
static int
bnx2_init_cpus(struct bnx2 *bp)
{
- struct cpu_reg cpu_reg;
struct fw_info *fw;
int rc, rv2p_len;
void *text, *rv2p;
@@ -3333,122 +3474,57 @@ bnx2_init_cpus(struct bnx2 *bp)
load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
/* Initialize the RX Processor. */
- cpu_reg.mode = BNX2_RXP_CPU_MODE;
- cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
- cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
- cpu_reg.state = BNX2_RXP_CPU_STATE;
- cpu_reg.state_value_clear = 0xffffff;
- cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
- cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
- cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
- cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
- cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
- cpu_reg.spad_base = BNX2_RXP_SCRATCH;
- cpu_reg.mips_view_base = 0x8000000;
-
if (CHIP_NUM(bp) == CHIP_NUM_5709)
fw = &bnx2_rxp_fw_09;
else
fw = &bnx2_rxp_fw_06;
fw->text = text;
- rc = load_cpu_fw(bp, &cpu_reg, fw);
+ rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
if (rc)
goto init_cpu_err;
/* Initialize the TX Processor. */
- cpu_reg.mode = BNX2_TXP_CPU_MODE;
- cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
- cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
- cpu_reg.state = BNX2_TXP_CPU_STATE;
- cpu_reg.state_value_clear = 0xffffff;
- cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
- cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
- cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
- cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
- cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
- cpu_reg.spad_base = BNX2_TXP_SCRATCH;
- cpu_reg.mips_view_base = 0x8000000;
-
if (CHIP_NUM(bp) == CHIP_NUM_5709)
fw = &bnx2_txp_fw_09;
else
fw = &bnx2_txp_fw_06;
fw->text = text;
- rc = load_cpu_fw(bp, &cpu_reg, fw);
+ rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
if (rc)
goto init_cpu_err;
/* Initialize the TX Patch-up Processor. */
- cpu_reg.mode = BNX2_TPAT_CPU_MODE;
- cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
- cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
- cpu_reg.state = BNX2_TPAT_CPU_STATE;
- cpu_reg.state_value_clear = 0xffffff;
- cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
- cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
- cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
- cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
- cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
- cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
- cpu_reg.mips_view_base = 0x8000000;
-
if (CHIP_NUM(bp) == CHIP_NUM_5709)
fw = &bnx2_tpat_fw_09;
else
fw = &bnx2_tpat_fw_06;
fw->text = text;
- rc = load_cpu_fw(bp, &cpu_reg, fw);
+ rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
if (rc)
goto init_cpu_err;
/* Initialize the Completion Processor. */
- cpu_reg.mode = BNX2_COM_CPU_MODE;
- cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
- cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
- cpu_reg.state = BNX2_COM_CPU_STATE;
- cpu_reg.state_value_clear = 0xffffff;
- cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
- cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
- cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
- cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
- cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
- cpu_reg.spad_base = BNX2_COM_SCRATCH;
- cpu_reg.mips_view_base = 0x8000000;
-
if (CHIP_NUM(bp) == CHIP_NUM_5709)
fw = &bnx2_com_fw_09;
else
fw = &bnx2_com_fw_06;
fw->text = text;
- rc = load_cpu_fw(bp, &cpu_reg, fw);
+ rc = load_cpu_fw(bp, &cpu_reg_com, fw);
if (rc)
goto init_cpu_err;
/* Initialize the Command Processor. */
- cpu_reg.mode = BNX2_CP_CPU_MODE;
- cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
- cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
- cpu_reg.state = BNX2_CP_CPU_STATE;
- cpu_reg.state_value_clear = 0xffffff;
- cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
- cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
- cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
- cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
- cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
- cpu_reg.spad_base = BNX2_CP_SCRATCH;
- cpu_reg.mips_view_base = 0x8000000;
-
if (CHIP_NUM(bp) == CHIP_NUM_5709)
fw = &bnx2_cp_fw_09;
else
fw = &bnx2_cp_fw_06;
fw->text = text;
- rc = load_cpu_fw(bp, &cpu_reg, fw);
+ rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
init_cpu_err:
vfree(text);
@@ -3511,7 +3587,7 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
bp->autoneg = autoneg;
bp->advertising = advertising;
- bnx2_set_mac_addr(bp);
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
val = REG_RD(bp, BNX2_EMAC_MODE);
@@ -3562,7 +3638,8 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
}
if (!(bp->flags & BNX2_FLAG_NO_WOL))
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
+ 1, 0);
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
@@ -4203,35 +4280,43 @@ nvram_write_end:
}
static void
-bnx2_init_remote_phy(struct bnx2 *bp)
+bnx2_init_fw_cap(struct bnx2 *bp)
{
- u32 val;
+ u32 val, sig = 0;
bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
- if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
- return;
+ bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
+
+ if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
+ bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
return;
- if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
+ if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
+ bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
+ sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
+ }
+
+ if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+ (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
+ u32 link;
+
bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
- val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
- if (val & BNX2_LINK_STATUS_SERDES_LINK)
+ link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
+ if (link & BNX2_LINK_STATUS_SERDES_LINK)
bp->phy_port = PORT_FIBRE;
else
bp->phy_port = PORT_TP;
- if (netif_running(bp->dev)) {
- u32 sig;
-
- sig = BNX2_DRV_ACK_CAP_SIGNATURE |
- BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
- bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
- }
+ sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
+ BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
}
+
+ if (netif_running(bp->dev) && sig)
+ bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
static void
@@ -4261,7 +4346,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
udelay(5);
/* Wait for the firmware to tell us it is ok to issue a reset. */
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
/* Deposit a driver reset signature so the firmware knows that
* this is a soft reset. */
@@ -4322,13 +4407,13 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
}
/* Wait for the firmware to finish its initialization. */
- rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
+ rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
if (rc)
return rc;
spin_lock_bh(&bp->phy_lock);
old_port = bp->phy_port;
- bnx2_init_remote_phy(bp);
+ bnx2_init_fw_cap(bp);
if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
old_port != bp->phy_port)
bnx2_set_default_remote_link(bp);
@@ -4412,7 +4497,7 @@ bnx2_init_chip(struct bnx2 *bp)
bnx2_init_nvram(bp);
- bnx2_set_mac_addr(bp);
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
val = REG_RD(bp, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
@@ -4498,15 +4583,25 @@ bnx2_init_chip(struct bnx2 *bp)
BNX2_HC_CONFIG_COLLECT_STATS;
}
- if (bp->flags & BNX2_FLAG_USING_MSIX) {
- u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
- BNX2_HC_SB_CONFIG_1;
-
+ if (bp->irq_nvecs > 1) {
REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
BNX2_HC_MSIX_BIT_VECTOR_VAL);
+ val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
+ }
+
+ if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
+ val |= BNX2_HC_CONFIG_ONE_SHOT;
+
+ REG_WR(bp, BNX2_HC_CONFIG, val);
+
+ for (i = 1; i < bp->irq_nvecs; i++) {
+ u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+ BNX2_HC_SB_CONFIG_1;
+
REG_WR(bp, base,
BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
+ BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
BNX2_HC_SB_CONFIG_1_ONE_SHOT);
REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
@@ -4516,13 +4611,13 @@ bnx2_init_chip(struct bnx2 *bp)
REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
(bp->tx_ticks_int << 16) | bp->tx_ticks);
- val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
- }
+ REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
+ (bp->rx_quick_cons_trip_int << 16) |
+ bp->rx_quick_cons_trip);
- if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
- val |= BNX2_HC_CONFIG_ONE_SHOT;
-
- REG_WR(bp, BNX2_HC_CONFIG, val);
+ REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
+ (bp->rx_ticks_int << 16) | bp->rx_ticks);
+ }
/* Clear internal stats counters. */
REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
@@ -4538,7 +4633,7 @@ bnx2_init_chip(struct bnx2 *bp)
REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
}
rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
- 0);
+ 1, 0);
REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
@@ -4554,23 +4649,27 @@ static void
bnx2_clear_ring_states(struct bnx2 *bp)
{
struct bnx2_napi *bnapi;
+ struct bnx2_tx_ring_info *txr;
+ struct bnx2_rx_ring_info *rxr;
int i;
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
bnapi = &bp->bnx2_napi[i];
+ txr = &bnapi->tx_ring;
+ rxr = &bnapi->rx_ring;
- bnapi->tx_cons = 0;
- bnapi->hw_tx_cons = 0;
- bnapi->rx_prod_bseq = 0;
- bnapi->rx_prod = 0;
- bnapi->rx_cons = 0;
- bnapi->rx_pg_prod = 0;
- bnapi->rx_pg_cons = 0;
+ txr->tx_cons = 0;
+ txr->hw_tx_cons = 0;
+ rxr->rx_prod_bseq = 0;
+ rxr->rx_prod = 0;
+ rxr->rx_cons = 0;
+ rxr->rx_pg_prod = 0;
+ rxr->rx_pg_cons = 0;
}
}
static void
-bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
+bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
u32 val, offset0, offset1, offset2, offset3;
u32 cid_addr = GET_CID_ADDR(cid);
@@ -4592,43 +4691,43 @@ bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
bnx2_ctx_wr(bp, cid_addr, offset1, val);
- val = (u64) bp->tx_desc_mapping >> 32;
+ val = (u64) txr->tx_desc_mapping >> 32;
bnx2_ctx_wr(bp, cid_addr, offset2, val);
- val = (u64) bp->tx_desc_mapping & 0xffffffff;
+ val = (u64) txr->tx_desc_mapping & 0xffffffff;
bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
static void
-bnx2_init_tx_ring(struct bnx2 *bp)
+bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
struct tx_bd *txbd;
u32 cid = TX_CID;
struct bnx2_napi *bnapi;
+ struct bnx2_tx_ring_info *txr;
- bp->tx_vec = 0;
- if (bp->flags & BNX2_FLAG_USING_MSIX) {
- cid = TX_TSS_CID;
- bp->tx_vec = BNX2_TX_VEC;
- REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
- (TX_TSS_CID << 7));
- }
- bnapi = &bp->bnx2_napi[bp->tx_vec];
+ bnapi = &bp->bnx2_napi[ring_num];
+ txr = &bnapi->tx_ring;
+
+ if (ring_num == 0)
+ cid = TX_CID;
+ else
+ cid = TX_TSS_CID + ring_num - 1;
bp->tx_wake_thresh = bp->tx_ring_size / 2;
- txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
+ txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
- txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
- txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
+ txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
+ txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
- bp->tx_prod = 0;
- bp->tx_prod_bseq = 0;
+ txr->tx_prod = 0;
+ txr->tx_prod_bseq = 0;
- bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
- bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
+ txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
+ txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
- bnx2_init_tx_context(bp, cid);
+ bnx2_init_tx_context(bp, cid, txr);
}
static void
@@ -4656,17 +4755,25 @@ bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
}
static void
-bnx2_init_rx_ring(struct bnx2 *bp)
+bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
int i;
u16 prod, ring_prod;
- u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
- struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ u32 cid, rx_cid_addr, val;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
- bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
+ if (ring_num == 0)
+ cid = RX_CID;
+ else
+ cid = RX_RSS_CID + ring_num - 1;
+
+ rx_cid_addr = GET_CID_ADDR(cid);
+
+ bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
bp->rx_buf_use_size, bp->rx_max_ring);
- bnx2_init_rx_context0(bp);
+ bnx2_init_rx_context(bp, cid);
if (CHIP_NUM(bp) == CHIP_NUM_5709) {
val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
@@ -4675,54 +4782,101 @@ bnx2_init_rx_ring(struct bnx2 *bp)
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
if (bp->rx_pg_ring_size) {
- bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
- bp->rx_pg_desc_mapping,
+ bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
+ rxr->rx_pg_desc_mapping,
PAGE_SIZE, bp->rx_max_pg_ring);
val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
- BNX2_L2CTX_RBDC_JUMBO_KEY);
+ BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
- val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
+ val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
- val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
+ val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
if (CHIP_NUM(bp) == CHIP_NUM_5709)
REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
}
- val = (u64) bp->rx_desc_mapping[0] >> 32;
+ val = (u64) rxr->rx_desc_mapping[0] >> 32;
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
- val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
+ val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
- ring_prod = prod = bnapi->rx_pg_prod;
+ ring_prod = prod = rxr->rx_pg_prod;
for (i = 0; i < bp->rx_pg_ring_size; i++) {
- if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
+ if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
break;
prod = NEXT_RX_BD(prod);
ring_prod = RX_PG_RING_IDX(prod);
}
- bnapi->rx_pg_prod = prod;
+ rxr->rx_pg_prod = prod;
- ring_prod = prod = bnapi->rx_prod;
+ ring_prod = prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
+ if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
break;
- }
prod = NEXT_RX_BD(prod);
ring_prod = RX_RING_IDX(prod);
}
- bnapi->rx_prod = prod;
+ rxr->rx_prod = prod;
+
+ rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
+ rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
+ rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
- REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
- bnapi->rx_pg_prod);
- REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
+ REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+ REG_WR16(bp, rxr->rx_bidx_addr, prod);
- REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
+ REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
+}
+
+static void
+bnx2_init_all_rings(struct bnx2 *bp)
+{
+ int i;
+ u32 val;
+
+ bnx2_clear_ring_states(bp);
+
+ REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
+ for (i = 0; i < bp->num_tx_rings; i++)
+ bnx2_init_tx_ring(bp, i);
+
+ if (bp->num_tx_rings > 1)
+ REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
+ (TX_TSS_CID << 7));
+
+ REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
+ bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
+
+ for (i = 0; i < bp->num_rx_rings; i++)
+ bnx2_init_rx_ring(bp, i);
+
+ if (bp->num_rx_rings > 1) {
+ u32 tbl_32;
+ u8 *tbl = (u8 *) &tbl_32;
+
+ bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
+ BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
+
+ for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
+ tbl[i % 4] = i % (bp->num_rx_rings - 1);
+ if ((i % 4) == 3)
+ bnx2_reg_wr_ind(bp,
+ BNX2_RXP_SCRATCH_RSS_TBL + i,
+ cpu_to_be32(tbl_32));
+ }
+
+ val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
+ BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
+
+ REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
+
+ }
}
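The RSS block above packs the indirection table four one-byte ring indices to a 32-bit scratch word, written out big-endian; the RX processor hashes each flow and uses the result to index this table, so entries just cycle over the non-default rings (ring 0 stays the default for unhashed traffic). A user-space sketch of the packing (table size is an example value, not the hardware constant):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htonl() stands in for cpu_to_be32() */

#define RSS_TBL_ENTRIES 128   /* illustrative table size */

static uint32_t rss_tbl_words[RSS_TBL_ENTRIES / 4];

static void fill_rss_table(unsigned int num_rx_rings)
{
        uint32_t tbl_32 = 0;
        uint8_t *tbl = (uint8_t *) &tbl_32;
        unsigned int i;

        for (i = 0; i < RSS_TBL_ENTRIES; i++) {
                /* one ring index per byte, cycling over the RSS rings */
                tbl[i % 4] = i % (num_rx_rings - 1);
                if ((i % 4) == 3)        /* a full 32-bit word: push it out */
                        rss_tbl_words[i / 4] = htonl(tbl_32);
        }
}

int main(void)
{
        fill_rss_table(5);   /* 4 RSS rings: entries cycle 0,1,2,3,0,1,... */
        printf("word 0 = 0x%08x\n", (unsigned)rss_tbl_words[0]);
        /* prints 0x00010203 on a little-endian host */
        return 0;
}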
static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
@@ -4750,12 +4904,12 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
u32 rx_size, rx_space, jumbo_size;
/* 8 for CRC and VLAN */
- rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
+ rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
sizeof(struct skb_shared_info);
- bp->rx_copy_thresh = RX_COPY_THRESH;
+ bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
bp->rx_pg_ring_size = 0;
bp->rx_max_pg_ring = 0;
bp->rx_max_pg_ring_idx = 0;
@@ -4770,14 +4924,14 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
MAX_RX_PG_RINGS);
bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
- rx_size = RX_COPY_THRESH + bp->rx_offset;
+ rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
bp->rx_copy_thresh = 0;
}
bp->rx_buf_use_size = rx_size;
/* hw alignment */
bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
- bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
+ bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
bp->rx_ring_size = size;
bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
@@ -4788,36 +4942,42 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
{
int i;
- if (bp->tx_buf_ring == NULL)
- return;
-
- for (i = 0; i < TX_DESC_CNT; ) {
- struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
- struct sk_buff *skb = tx_buf->skb;
- int j, last;
+ for (i = 0; i < bp->num_tx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+ int j;
- if (skb == NULL) {
- i++;
+ if (txr->tx_buf_ring == NULL)
continue;
- }
- pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ for (j = 0; j < TX_DESC_CNT; ) {
+ struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
+ struct sk_buff *skb = tx_buf->skb;
+ int k, last;
+
+ if (skb == NULL) {
+ j++;
+ continue;
+ }
+
+ pci_unmap_single(bp->pdev,
+ pci_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
- tx_buf->skb = NULL;
+ tx_buf->skb = NULL;
- last = skb_shinfo(skb)->nr_frags;
- for (j = 0; j < last; j++) {
- tx_buf = &bp->tx_buf_ring[i + j + 1];
- pci_unmap_page(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
- skb_shinfo(skb)->frags[j].size,
- PCI_DMA_TODEVICE);
+ last = skb_shinfo(skb)->nr_frags;
+ for (k = 0; k < last; k++) {
+ tx_buf = &txr->tx_buf_ring[j + k + 1];
+ pci_unmap_page(bp->pdev,
+ pci_unmap_addr(tx_buf, mapping),
+ skb_shinfo(skb)->frags[k].size,
+ PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(skb);
+ j += k + 1;
}
- dev_kfree_skb(skb);
- i += j + 1;
}
-
}
static void
@@ -4825,25 +4985,33 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
{
int i;
- if (bp->rx_buf_ring == NULL)
- return;
+ for (i = 0; i < bp->num_rx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ int j;
- for (i = 0; i < bp->rx_max_ring_idx; i++) {
- struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
- struct sk_buff *skb = rx_buf->skb;
+ if (rxr->rx_buf_ring == NULL)
+ return;
- if (skb == NULL)
- continue;
+ for (j = 0; j < bp->rx_max_ring_idx; j++) {
+ struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
+ struct sk_buff *skb = rx_buf->skb;
- pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+ if (skb == NULL)
+ continue;
- rx_buf->skb = NULL;
+ pci_unmap_single(bp->pdev,
+ pci_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
- dev_kfree_skb(skb);
+ rx_buf->skb = NULL;
+
+ dev_kfree_skb(skb);
+ }
+ for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
+ bnx2_free_rx_page(bp, rxr, j);
}
- for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
- bnx2_free_rx_page(bp, i);
}
static void
@@ -4866,14 +5034,12 @@ bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
if ((rc = bnx2_init_chip(bp)) != 0)
return rc;
- bnx2_clear_ring_states(bp);
- bnx2_init_tx_ring(bp);
- bnx2_init_rx_ring(bp);
+ bnx2_init_all_rings(bp);
return 0;
}
static int
-bnx2_init_nic(struct bnx2 *bp)
+bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
int rc;
@@ -4881,7 +5047,7 @@ bnx2_init_nic(struct bnx2 *bp)
return rc;
spin_lock_bh(&bp->phy_lock);
- bnx2_init_phy(bp);
+ bnx2_init_phy(bp, reset_phy);
bnx2_set_link(bp);
if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
bnx2_remote_phy_event(bp);
@@ -5141,11 +5307,13 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
struct l2_fhdr *rx_hdr;
int ret = -ENODEV;
struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
tx_napi = bnapi;
- if (bp->flags & BNX2_FLAG_USING_MSIX)
- tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
+ txr = &tx_napi->tx_ring;
+ rxr = &bnapi->rx_ring;
if (loopback_mode == BNX2_MAC_LOOPBACK) {
bp->loopback = MAC_LOOPBACK;
bnx2_set_mac_loopback(bp);
@@ -5183,7 +5351,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
num_pkts = 0;
- txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
+ txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
txbd->tx_bd_haddr_hi = (u64) map >> 32;
txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
@@ -5191,11 +5359,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
num_pkts++;
- bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
- bp->tx_prod_bseq += pkt_size;
+ txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
+ txr->tx_prod_bseq += pkt_size;
- REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
- REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
+ REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
+ REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
udelay(100);
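
For orientation while reading these loopback hunks: transmitting means filling a buffer descriptor, advancing the ring's software producer index and cumulative byte count, then publishing both through the ring's mailbox registers (the REG_WR16/REG_WR pair above); the udelay() then gives the chip time to DMA the frame and loop it back. A toy model of the producer-side bookkeeping; the ring size and the simplified NEXT_TX_BD below are ours, not the driver's:

#include <stdio.h>

#define RING_SIZE 256				/* invented; power of two */
#define NEXT_TX_BD(x) (((x) + 1) & (RING_SIZE - 1))

struct demo_txr {
	unsigned int tx_prod;		/* next BD slot to fill */
	unsigned int tx_prod_bseq;	/* cumulative bytes queued */
};

static void demo_post_packet(struct demo_txr *txr, unsigned int pkt_size)
{
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;
	/* stand-in for the REG_WR16/REG_WR mailbox doorbell writes */
	printf("doorbell: bidx=%u bseq=%u\n", txr->tx_prod, txr->tx_prod_bseq);
}

int main(void)
{
	struct demo_txr txr = { 0, 0 };

	demo_post_packet(&txr, 64);
	demo_post_packet(&txr, 1514);
	return 0;
}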
@@ -5209,7 +5377,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
+ if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
goto loopback_test_done;
rx_idx = bnx2_get_hw_rx_cons(bnapi);
@@ -5217,11 +5385,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
goto loopback_test_done;
}
- rx_buf = &bp->rx_buf_ring[rx_start_idx];
+ rx_buf = &rxr->rx_buf_ring[rx_start_idx];
rx_skb = rx_buf->skb;
rx_hdr = (struct l2_fhdr *) rx_skb->data;
- skb_reserve(rx_skb, bp->rx_offset);
+ skb_reserve(rx_skb, BNX2_RX_OFFSET);
pci_dma_sync_single_for_cpu(bp->pdev,
pci_unmap_addr(rx_buf, mapping),
@@ -5269,7 +5437,7 @@ bnx2_test_loopback(struct bnx2 *bp)
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
spin_lock_bh(&bp->phy_lock);
- bnx2_init_phy(bp);
+ bnx2_init_phy(bp, 1);
spin_unlock_bh(&bp->phy_lock);
if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
rc |= BNX2_MAC_LOOPBACK_FAILED;
@@ -5531,7 +5699,6 @@ bnx2_restart_timer:
static int
bnx2_request_irq(struct bnx2 *bp)
{
- struct net_device *dev = bp->dev;
unsigned long flags;
struct bnx2_irq *irq;
int rc = 0, i;
@@ -5544,7 +5711,7 @@ bnx2_request_irq(struct bnx2 *bp)
for (i = 0; i < bp->irq_nvecs; i++) {
irq = &bp->irq_tbl[i];
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
- dev);
+ &bp->bnx2_napi[i]);
if (rc)
break;
irq->requested = 1;
@@ -5555,14 +5722,13 @@ bnx2_request_irq(struct bnx2 *bp)
static void
bnx2_free_irq(struct bnx2 *bp)
{
- struct net_device *dev = bp->dev;
struct bnx2_irq *irq;
int i;
for (i = 0; i < bp->irq_nvecs; i++) {
irq = &bp->irq_tbl[i];
if (irq->requested)
- free_irq(irq->vector, dev);
+ free_irq(irq->vector, &bp->bnx2_napi[i]);
irq->requested = 0;
}
if (bp->flags & BNX2_FLAG_USING_MSI)
@@ -5574,7 +5740,7 @@ bnx2_free_irq(struct bnx2 *bp)
}
static void
-bnx2_enable_msix(struct bnx2 *bp)
+bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
int i, rc;
struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
@@ -5587,21 +5753,16 @@ bnx2_enable_msix(struct bnx2 *bp)
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
msix_ent[i].entry = i;
msix_ent[i].vector = 0;
+
+ strcpy(bp->irq_tbl[i].name, bp->dev->name);
+ bp->irq_tbl[i].handler = bnx2_msi_1shot;
}
rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
if (rc != 0)
return;
- bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
- bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
-
- strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
- strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
- strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
- strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
-
- bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
+ bp->irq_nvecs = msix_vecs;
bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
bp->irq_tbl[i].vector = msix_ent[i].vector;
@@ -5610,13 +5771,16 @@ bnx2_enable_msix(struct bnx2 *bp)
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
+ int cpus = num_online_cpus();
+ int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
+
bp->irq_tbl[0].handler = bnx2_interrupt;
strcpy(bp->irq_tbl[0].name, bp->dev->name);
bp->irq_nvecs = 1;
bp->irq_tbl[0].vector = bp->pdev->irq;
- if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
- bnx2_enable_msix(bp);
+ if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
+ bnx2_enable_msix(bp, msix_vecs);
if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
!(bp->flags & BNX2_FLAG_USING_MSIX)) {
@@ -5631,6 +5795,11 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
bp->irq_tbl[0].vector = bp->pdev->irq;
}
}
+
+ bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
+ bp->dev->real_num_tx_queues = bp->num_tx_rings;
+
+ bp->num_rx_rings = bp->irq_nvecs;
}
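
bnx2_setup_int_mode() now requests min(num_online_cpus() + 1, RX_MAX_RINGS) MSI-X vectors and sizes the rings from whatever it gets: every vector drives an rx ring, while the tx ring count is rounded down to a power of two, presumably so queue selection can mask instead of divide (that rationale is our inference, not stated in the patch). A quick standalone check of the arithmetic, with rounddown_pow_of_two() reimplemented here in place of the <linux/log2.h> helper the patch starts including:

#include <stdio.h>

static unsigned int rounddown_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int nvecs;

	for (nvecs = 1; nvecs <= 8; nvecs++)
		printf("%u vectors -> %u tx rings, %u rx rings\n",
		       nvecs, rounddown_pow_of_two(nvecs), nvecs);
	return 0;
}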
/* Called with rtnl_lock */
@@ -5645,29 +5814,19 @@ bnx2_open(struct net_device *dev)
bnx2_set_power_state(bp, PCI_D0);
bnx2_disable_int(bp);
+ bnx2_setup_int_mode(bp, disable_msi);
+ bnx2_napi_enable(bp);
rc = bnx2_alloc_mem(bp);
if (rc)
- return rc;
+ goto open_err;
- bnx2_setup_int_mode(bp, disable_msi);
- bnx2_napi_enable(bp);
rc = bnx2_request_irq(bp);
+ if (rc)
+ goto open_err;
- if (rc) {
- bnx2_napi_disable(bp);
- bnx2_free_mem(bp);
- return rc;
- }
-
- rc = bnx2_init_nic(bp);
-
- if (rc) {
- bnx2_napi_disable(bp);
- bnx2_free_irq(bp);
- bnx2_free_skbs(bp);
- bnx2_free_mem(bp);
- return rc;
- }
+ rc = bnx2_init_nic(bp, 1);
+ if (rc)
+ goto open_err;
mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -5691,17 +5850,14 @@ bnx2_open(struct net_device *dev)
bnx2_setup_int_mode(bp, 1);
- rc = bnx2_init_nic(bp);
+ rc = bnx2_init_nic(bp, 0);
if (!rc)
rc = bnx2_request_irq(bp);
if (rc) {
- bnx2_napi_disable(bp);
- bnx2_free_skbs(bp);
- bnx2_free_mem(bp);
del_timer_sync(&bp->timer);
- return rc;
+ goto open_err;
}
bnx2_enable_int(bp);
}
@@ -5711,9 +5867,16 @@ bnx2_open(struct net_device *dev)
else if (bp->flags & BNX2_FLAG_USING_MSIX)
printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
+
+open_err:
+ bnx2_napi_disable(bp);
+ bnx2_free_skbs(bp);
+ bnx2_free_irq(bp);
+ bnx2_free_mem(bp);
+ return rc;
}
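
The bnx2_open() rework collapses three hand-rolled error paths into a single open_err label that releases everything in one place; each failure site just jumps to it, and the teardown helpers tolerate resources that were never acquired. A minimal userspace sketch of the same idiom, with malloc() standing in for the driver's allocation steps:

#include <stdio.h>
#include <stdlib.h>

static int demo_open(void)
{
	char *mem = NULL, *irq = NULL;
	int rc = -1;

	mem = malloc(64);		/* stand-in for bnx2_alloc_mem() */
	if (!mem)
		goto open_err;
	irq = malloc(64);		/* stand-in for bnx2_request_irq() */
	if (!irq)
		goto open_err;
	printf("opened; resources stay held, as in bnx2_open()\n");
	return 0;

open_err:
	/* one unwind path for every failure site; free(NULL) is a no-op,
	 * just as bnx2_free_irq() skips vectors never requested */
	free(irq);
	free(mem);
	return rc;
}

int main(void)
{
	return demo_open();
}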
static void
@@ -5726,7 +5889,7 @@ bnx2_reset_task(struct work_struct *work)
bnx2_netif_stop(bp);
- bnx2_init_nic(bp);
+ bnx2_init_nic(bp, 1);
atomic_set(&bp->intr_sem, 1);
bnx2_netif_start(bp);
@@ -5752,6 +5915,8 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
bp->vlgrp = vlgrp;
bnx2_set_rx_mode(dev);
+ if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
bnx2_netif_start(bp);
}
@@ -5771,18 +5936,26 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 len, vlan_tag_flags, last_frag, mss;
u16 prod, ring_prod;
int i;
- struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
+ struct bnx2_napi *bnapi;
+ struct bnx2_tx_ring_info *txr;
+ struct netdev_queue *txq;
- if (unlikely(bnx2_tx_avail(bp, bnapi) <
+ /* Determine which tx ring this skb will be placed on */
+ i = skb_get_queue_mapping(skb);
+ bnapi = &bp->bnx2_napi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(dev, i);
+
+ if (unlikely(bnx2_tx_avail(bp, txr) <
(skb_shinfo(skb)->nr_frags + 1))) {
- netif_stop_queue(dev);
+ netif_tx_stop_queue(txq);
printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
dev->name);
return NETDEV_TX_BUSY;
}
len = skb_headlen(skb);
- prod = bp->tx_prod;
+ prod = txr->tx_prod;
ring_prod = TX_RING_IDX(prod);
vlan_tag_flags = 0;
@@ -5844,11 +6017,11 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
- tx_buf = &bp->tx_buf_ring[ring_prod];
+ tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = skb;
pci_unmap_addr_set(tx_buf, mapping, mapping);
- txbd = &bp->tx_desc_ring[ring_prod];
+ txbd = &txr->tx_desc_ring[ring_prod];
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -5862,12 +6035,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
prod = NEXT_TX_BD(prod);
ring_prod = TX_RING_IDX(prod);
- txbd = &bp->tx_desc_ring[ring_prod];
+ txbd = &txr->tx_desc_ring[ring_prod];
len = frag->size;
mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
+ pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
mapping, mapping);
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -5879,20 +6052,20 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
prod = NEXT_TX_BD(prod);
- bp->tx_prod_bseq += skb->len;
+ txr->tx_prod_bseq += skb->len;
- REG_WR16(bp, bp->tx_bidx_addr, prod);
- REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
+ REG_WR16(bp, txr->tx_bidx_addr, prod);
+ REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
mmiowb();
- bp->tx_prod = prod;
+ txr->tx_prod = prod;
dev->trans_start = jiffies;
- if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
- netif_stop_queue(dev);
- if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
- netif_wake_queue(dev);
+ if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
+ netif_tx_stop_queue(txq);
+ if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
+ netif_tx_wake_queue(txq);
}
return NETDEV_TX_OK;
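
With the ring chosen from skb_get_queue_mapping(), flow control at the end of bnx2_start_xmit() also becomes per queue: only the netdev_queue whose ring ran short is stopped, then re-checked against the wake threshold in case a concurrent completion freed descriptors between the two tests. A toy model of that stop/recheck logic (thresholds invented; single-threaded here, so the wake branch only documents the race guard):

#include <stdbool.h>
#include <stdio.h>

#define MAX_FRAGS	18	/* stand-in for MAX_SKB_FRAGS */
#define WAKE_THRESH	32	/* stand-in for bp->tx_wake_thresh */

struct demo_txq {
	int avail;		/* free descriptors in this ring */
	bool stopped;
};

static void demo_after_xmit(struct demo_txq *q, int used)
{
	q->avail -= used;
	if (q->avail <= MAX_FRAGS) {
		q->stopped = true;		/* netif_tx_stop_queue() */
		/* in the driver a concurrent completion may have freed
		 * descriptors between the two checks; re-test and wake */
		if (q->avail > WAKE_THRESH)
			q->stopped = false;	/* netif_tx_wake_queue() */
	}
}

int main(void)
{
	struct demo_txq q = { .avail = 24, .stopped = false };

	demo_after_xmit(&q, 8);
	printf("avail %d, stopped %d\n", q.avail, q.stopped);
	return 0;
}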
@@ -6095,6 +6268,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
!(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
goto err_out_unlock;
+ /* If device is down, we can store the settings only if the user
+ * is setting the currently active port.
+ */
+ if (!netif_running(dev) && cmd->port != bp->phy_port)
+ goto err_out_unlock;
+
if (cmd->autoneg == AUTONEG_ENABLE) {
autoneg |= AUTONEG_SPEED;
@@ -6152,7 +6331,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->req_line_speed = req_line_speed;
bp->req_duplex = req_duplex;
- err = bnx2_setup_phy(bp, cmd->port);
+ err = 0;
+ /* If device is down, the new settings will be picked up when it is
+ * brought up.
+ */
+ if (netif_running(dev))
+ err = bnx2_setup_phy(bp, cmd->port);
err_out_unlock:
spin_unlock_bh(&bp->phy_lock);
@@ -6414,7 +6598,7 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
if (netif_running(bp->dev)) {
bnx2_netif_stop(bp);
- bnx2_init_nic(bp);
+ bnx2_init_nic(bp, 0);
bnx2_netif_start(bp);
}
@@ -6457,7 +6641,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
rc = bnx2_alloc_mem(bp);
if (rc)
return rc;
- bnx2_init_nic(bp);
+ bnx2_init_nic(bp, 0);
bnx2_netif_start(bp);
}
return 0;
@@ -6725,7 +6909,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
}
else {
- bnx2_init_nic(bp);
+ bnx2_init_nic(bp, 1);
bnx2_netif_start(bp);
}
@@ -6951,7 +7135,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev))
- bnx2_set_mac_addr(bp);
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
return 0;
}
@@ -7108,6 +7292,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
}
pci_set_master(pdev);
+ pci_save_state(pdev);
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
@@ -7125,7 +7310,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
INIT_WORK(&bp->reset_task, bnx2_reset_task);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
- mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
+ mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
dev->mem_end = dev->mem_start + mem_len;
dev->irq = pdev->irq;
@@ -7272,7 +7457,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
reg &= BNX2_CONDITION_MFW_RUN_MASK;
if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
reg != BNX2_CONDITION_MFW_RUN_NONE) {
- int i;
u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
bp->fw_version[j++] = ' ';
@@ -7294,8 +7478,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->mac_addr[4] = (u8) (reg >> 8);
bp->mac_addr[5] = (u8) reg;
- bp->rx_offset = sizeof(struct l2_fhdr) + 2;
-
bp->tx_ring_size = MAX_TX_DESC_CNT;
bnx2_set_rx_ring_size(bp, 255);
@@ -7345,8 +7527,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
}
- bnx2_init_remote_phy(bp);
-
} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
CHIP_NUM(bp) == CHIP_NUM_5708)
bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
@@ -7355,6 +7535,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
CHIP_REV(bp) == CHIP_REV_Bx))
bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
+ bnx2_init_fw_cap(bp);
+
if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
(CHIP_ID(bp) == CHIP_ID_5708_B0) ||
(CHIP_ID(bp) == CHIP_ID_5708_B1)) {
@@ -7451,15 +7633,19 @@ static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
int i;
- struct bnx2_napi *bnapi;
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
- bnapi = &bp->bnx2_napi[i];
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ int (*poll)(struct napi_struct *, int);
+
+ if (i == 0)
+ poll = bnx2_poll;
+ else
+ poll = bnx2_poll_msix;
+
+ netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
bnapi->bp = bp;
}
- netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
- netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
- 64);
}
static int __devinit
@@ -7476,7 +7662,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(KERN_INFO "%s", version);
/* dev zeroed in init_etherdev */
- dev = alloc_etherdev(sizeof(*bp));
+ dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
if (!dev)
return -ENOMEM;
@@ -7491,7 +7677,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hard_start_xmit = bnx2_start_xmit;
dev->stop = bnx2_close;
dev->get_stats = bnx2_get_stats;
- dev->set_multicast_list = bnx2_set_rx_mode;
+ dev->set_rx_mode = bnx2_set_rx_mode;
dev->do_ioctl = bnx2_ioctl;
dev->set_mac_address = bnx2_change_mac_addr;
dev->change_mtu = bnx2_change_mtu;
@@ -7612,11 +7798,97 @@ bnx2_resume(struct pci_dev *pdev)
bnx2_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
- bnx2_init_nic(bp);
+ bnx2_init_nic(bp, 1);
bnx2_netif_start(bp);
return 0;
}
+/**
+ * bnx2_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current PCI connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+
+ rtnl_lock();
+ netif_device_detach(dev);
+
+ if (netif_running(dev)) {
+ bnx2_netif_stop(bp);
+ del_timer_sync(&bp->timer);
+ bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
+ }
+
+ pci_disable_device(pdev);
+ rtnl_unlock();
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2_io_slot_reset - called after the PCI bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+
+ rtnl_lock();
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ if (netif_running(dev)) {
+ bnx2_set_power_state(bp, PCI_D0);
+ bnx2_init_nic(bp, 1);
+ }
+
+ rtnl_unlock();
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * bnx2_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void bnx2_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+
+ rtnl_lock();
+ if (netif_running(dev))
+ bnx2_netif_start(bp);
+
+ netif_device_attach(dev);
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers bnx2_err_handler = {
+ .error_detected = bnx2_io_error_detected,
+ .slot_reset = bnx2_io_slot_reset,
+ .resume = bnx2_io_resume,
+};
+
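
These three callbacks plug into the PCI error-recovery core, which drives them as a sequence: error_detected() quiesces the NIC and asks for a slot reset, slot_reset() re-enables the device and restores the config space saved by the pci_save_state() call added earlier in this patch, and resume() restarts traffic. A compressed standalone model of that sequencing; the enum only informally mirrors pci_ers_result_t:

#include <stdio.h>

enum demo_ers { DEMO_NEED_RESET, DEMO_RECOVERED, DEMO_DISCONNECT };

static enum demo_ers demo_error_detected(void)
{
	printf("detach netdev, quiesce nic, disable pci device\n");
	return DEMO_NEED_RESET;		/* ask the core for a slot reset */
}

static enum demo_ers demo_slot_reset(int enable_ok)
{
	if (!enable_ok)			/* pci_enable_device() failed */
		return DEMO_DISCONNECT;
	printf("re-enable device, restore saved config space, re-init nic\n");
	return DEMO_RECOVERED;
}

int main(void)
{
	if (demo_error_detected() == DEMO_NEED_RESET &&
	    demo_slot_reset(1) == DEMO_RECOVERED)
		printf("resume: restart traffic, reattach netdev\n");
	return 0;
}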
static struct pci_driver bnx2_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = bnx2_pci_tbl,
@@ -7624,6 +7896,7 @@ static struct pci_driver bnx2_pci_driver = {
.remove = __devexit_p(bnx2_remove_one),
.suspend = bnx2_suspend,
.resume = bnx2_resume,
+ .err_handler = &bnx2_err_handler,
};
static int __init bnx2_init(void)