Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig                |    1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h          |    4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c      |   24
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h      |   16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c      |   12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c  |   58
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h  |    3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h      |  109
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h     |    2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c     | 1701
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h     |    7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c     |   34
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h      |   14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c       |    5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c    |   35
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c                 |  116
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.h                 |    5
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_defs.h            |    2
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h              |    4
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                  |  534
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h                  |    9
21 files changed, 1506 insertions, 1189 deletions
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f15e72e..4bd416b 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -101,6 +101,7 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
select PHYLIB
+ select HWMON
---help---
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index eac2523..72897c4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.72.51-0"
-#define DRV_MODULE_RELDATE "2012/06/18"
+#define DRV_MODULE_VERSION "1.78.00-0"
+#define DRV_MODULE_RELDATE "2012/09/27"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e8e97a7..30f04a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2285,7 +2285,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Wait for all pending SP commands to complete */
if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
BNX2X_ERR("Timeout waiting for SP elements to complete\n");
- bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
return -EBUSY;
}
@@ -2333,7 +2333,7 @@ load_error0:
}
/* must be called with rtnl_lock */
-int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
int i;
bool global = false;
@@ -2395,7 +2395,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Cleanup the chip if needed */
if (unload_mode != UNLOAD_RECOVERY)
- bnx2x_chip_cleanup(bp, unload_mode);
+ bnx2x_chip_cleanup(bp, unload_mode, keep_link);
else {
/* Send the UNLOAD_REQUEST to the MCP */
bnx2x_send_unload_req(bp, unload_mode);
@@ -2419,7 +2419,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bnx2x_free_irq(bp);
/* Report UNLOAD_DONE to MCP */
- bnx2x_send_unload_done(bp);
+ bnx2x_send_unload_done(bp, false);
}
/*
@@ -3026,8 +3026,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
first_bd = tx_start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
- mac_type);
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS,
+ 0);
/* header nbd */
SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
@@ -3077,13 +3078,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
&pbd_e2->dst_mac_addr_lo,
eth->h_dest);
}
+
+ SET_FLAG(pbd_e2_parsing_data,
+ ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
} else {
+ u16 global_data = 0;
pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM)
hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
+ SET_FLAG(global_data,
+ ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
+ pbd_e1x->global_data |= cpu_to_le16(global_data);
}
/* Setup the data pointer of the first BD of the packet */
@@ -3770,7 +3778,7 @@ int bnx2x_reload_if_running(struct net_device *dev)
if (unlikely(!netif_running(dev)))
return 0;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
return bnx2x_nic_load(bp, LOAD_NORMAL);
}
@@ -3967,7 +3975,7 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(dev);
- bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index dfd86a5..9c5ea6c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -83,8 +83,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
* bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
*
* @bp: driver handle
+ * @keep_link: true iff link should be kept up
*/
-void bnx2x_send_unload_done(struct bnx2x *bp);
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
/**
* bnx2x_config_rss_pf - configure RSS parameters in a PF.
@@ -153,6 +154,14 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
void bnx2x_link_set(struct bnx2x *bp);
/**
+ * bnx2x_force_link_reset - Forces link reset, and puts the PHY
+ * in reset as well.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_force_link_reset(struct bnx2x *bp);
+
+/**
* bnx2x_link_test - query link status.
*
* @bp: driver handle
@@ -312,12 +321,13 @@ void bnx2x_set_num_queues(struct bnx2x *bp);
*
* @bp: driver handle
* @unload_mode: COMMON, PORT, FUNCTION
+ * @keep_link: true iff link should be kept up.
*
* - Cleanup MAC configuration.
* - Closes clients.
* - etc.
*/
-void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);
/**
* bnx2x_acquire_hw_lock - acquire HW lock.
@@ -446,7 +456,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
/* dev_close main block */
-int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
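A minimal illustrative sketch (not part of the patch) of the calling convention for the new keep_link argument, based only on the call sites this patch changes: a tear-down that is immediately followed by a re-load keeps the link up, while a real close/suspend does not. The wrapper names below are hypothetical.

/* Sketch only -- mirrors bnx2x_reload_if_running() and bnx2x_suspend(). */
static int example_reload(struct bnx2x *bp)
{
	/* Down only to come straight back up: ask the MCP to keep the
	 * link so the peer never sees a flap.
	 */
	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

static int example_close(struct bnx2x *bp)
{
	/* Going down for real (close/suspend): let the link drop. */
	return bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
}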
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 8a73374..2245c38 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -91,25 +91,21 @@ static void bnx2x_pfc_set(struct bnx2x *bp)
/*
* Rx COS configuration
* Changing PFC RX configuration.
- * In RX COS0 will always be configured to lossy and COS1 to lossless
+ * In RX COS0 will always be configured to lossless and COS1 to lossy
*/
for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
pri_bit = 1 << i;
- if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
+ if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)))
val |= 1 << (i * 4);
}
pfc_params.pkt_priority_to_cos = val;
/* RX COS0 */
- pfc_params.llfc_low_priority_classes = 0;
+ pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
/* RX COS1 */
- pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
-
- /* BRB configuration */
- pfc_params.cos0_pauseable = false;
- pfc_params.cos1_pauseable = true;
+ pfc_params.llfc_high_priority_classes = 0;
bnx2x_acquire_phy_lock(bp);
bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
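A worked example (not part of the patch) of the pkt_priority_to_cos value built by the loop above, assuming DCBX_PFC_PRI_PAUSE_MASK(bp) evaluates to 0x18, i.e. priorities 3 and 4 are lossless; the mask value is illustrative only.

/* One 4-bit nibble per priority; nibble i selects the RX COS of
 * priority i. Only lossy (non-paused) priorities are sent to COS1.
 */
u32 val = 0, pri_bit;
int i;

for (i = 0; i < 8 /* MAX_PFC_PRIORITIES */; i++) {
	pri_bit = 1 << i;
	if (!(pri_bit & 0x18))		/* lossy priority */
		val |= 1 << (i * 4);	/* nibble i = 1 -> COS1 */
}
/* val == 0x11100111: nibbles 3 and 4 stay 0 (COS0, lossless), every
 * other priority maps to COS1 (lossy) -- the new "COS0 lossless /
 * COS1 lossy" convention described in the comment above.
 */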
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ebf40cd..c65295d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -905,6 +905,7 @@ static int bnx2x_nway_reset(struct net_device *dev)
if (netif_running(dev)) {
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_force_link_reset(bp);
bnx2x_link_set(bp);
}
@@ -1606,7 +1607,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
return 0;
}
-static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
+static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
"register_test (offline) ",
"memory_test (offline) ",
"int_loopback_test (offline)",
@@ -1653,7 +1654,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+ eee_cfg = bp->link_vars.eee_status;
edata->supported =
bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
@@ -1690,7 +1691,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+ eee_cfg = bp->link_vars.eee_status;
if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
@@ -1739,6 +1740,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
/* Restart link to propagate changes */
if (netif_running(dev)) {
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_force_link_reset(bp);
bnx2x_link_set(bp);
}
@@ -2038,8 +2040,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
u16 pkt_prod, bd_prod;
struct sw_tx_bd *tx_buf;
struct eth_tx_start_bd *tx_start_bd;
- struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
- struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
dma_addr_t mapping;
union eth_rx_cqe *cqe;
u8 cqe_fp_flags, cqe_fp_type;
@@ -2131,21 +2131,32 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
SET_FLAG(tx_start_bd->general_data,
- ETH_TX_START_BD_ETH_ADDR_TYPE,
- UNICAST_ADDRESS);
- SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_HDR_NBDS,
1);
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS,
+ 0);
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
- pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
- pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
-
- memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
- memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
-
+ if (CHIP_IS_E1x(bp)) {
+ u16 global_data = 0;
+ struct eth_tx_parse_bd_e1x *pbd_e1x =
+ &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+ memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+ SET_FLAG(global_data,
+ ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+ pbd_e1x->global_data = cpu_to_le16(global_data);
+ } else {
+ u32 parsing_data = 0;
+ struct eth_tx_parse_bd_e2 *pbd_e2 =
+ &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+ memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+ SET_FLAG(parsing_data,
+ ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+ pbd_e2->parsing_data = cpu_to_le32(parsing_data);
+ }
wmb();
txdata->tx_db.data.prod += 2;
@@ -2263,7 +2274,7 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp)
if (!netif_running(bp->dev))
return BNX2X_EXT_LOOPBACK_FAILED;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
if (rc) {
DP(BNX2X_MSG_ETHTOOL,
@@ -2414,7 +2425,7 @@ static void bnx2x_self_test(struct net_device *dev,
link_up = bp->link_vars.link_up;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
rc = bnx2x_nic_load(bp, LOAD_DIAG);
if (rc) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -2446,7 +2457,7 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
}
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
/* restore input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
@@ -2534,7 +2545,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k, offset, start;
+ int i, j, k, start;
char queue_name[MAX_QUEUE_NAME_LEN+1];
switch (stringset) {
@@ -2570,13 +2581,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
start = 0;
else
start = 4;
- for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
- i++, j++) {
- offset = sprintf(buf+32*i, "%s",
- bnx2x_tests_str_arr[j]);
- *(buf+offset) = '\0';
- }
- break;
+ memcpy(buf, bnx2x_tests_str_arr + start,
+ ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
}
}
@@ -2940,7 +2946,7 @@ static int bnx2x_set_channels(struct net_device *dev,
bnx2x_change_num_queues(bp, channels->combined_count);
return 0;
}
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
bnx2x_change_num_queues(bp, channels->combined_count);
return bnx2x_nic_load(bp, LOAD_NORMAL);
}
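The bnx2x_get_strings() hunk above can replace the per-string sprintf() loop with a single memcpy() only because bnx2x_tests_str_arr now has fixed-width rows of ETH_GSTRING_LEN (32) bytes. A standalone sketch (illustrative names, not part of the patch) of why the pointer arithmetic is valid:

#include <string.h>

#define GSTR_LEN 32			/* stands in for ETH_GSTRING_LEN */

static const char tests[][GSTR_LEN] = {	/* rows are NUL-padded to 32 bytes */
	"register_test (offline) ",
	"memory_test (offline) ",
	"int_loopback_test (offline)",
};

static void copy_strings(char *buf, int start, int count)
{
	/* tests + start advances in whole GSTR_LEN-byte rows, so one
	 * memcpy hands over 'count' fixed-width entries.
	 */
	memcpy(buf, tests + start, GSTR_LEN * (size_t)count);
}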
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index bbc66ce..620fe93 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -88,9 +88,6 @@
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[101].base + ((assertListEntry) * IRO[101].m1))
-#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
-#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
- (IRO[108].base)
#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
(IRO[201].base + ((pfId) * IRO[201].m1))
#define TSTORM_FUNC_EN_OFFSET(funcId) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 76b6e65..1870492 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1286,6 +1286,9 @@ struct drv_func_mb {
#define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000
#define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
+ #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002
+
+ #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
@@ -1909,6 +1912,54 @@ struct lldp_local_mib {
};
/***END OF DCBX STRUCTURES DECLARATIONS***/
+/***********************************************************/
+/* Elink section */
+/***********************************************************/
+#define SHMEM_LINK_CONFIG_SIZE 2
+struct shmem_lfa {
+ u32 req_duplex;
+ #define REQ_DUPLEX_PHY0_MASK 0x0000ffff
+ #define REQ_DUPLEX_PHY0_SHIFT 0
+ #define REQ_DUPLEX_PHY1_MASK 0xffff0000
+ #define REQ_DUPLEX_PHY1_SHIFT 16
+ u32 req_flow_ctrl;
+ #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff
+ #define REQ_FLOW_CTRL_PHY0_SHIFT 0
+ #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000
+ #define REQ_FLOW_CTRL_PHY1_SHIFT 16
+ u32 req_line_speed; /* Also determine AutoNeg */
+ #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff
+ #define REQ_LINE_SPD_PHY0_SHIFT 0
+ #define REQ_LINE_SPD_PHY1_MASK 0xffff0000
+ #define REQ_LINE_SPD_PHY1_SHIFT 16
+ u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE];
+ u32 additional_config;
+ #define REQ_FC_AUTO_ADV_MASK 0x0000ffff
+ #define REQ_FC_AUTO_ADV0_SHIFT 0
+ #define NO_LFA_DUE_TO_DCC_MASK 0x00010000
+ u32 lfa_sts;
+ #define LFA_LINK_FLAP_REASON_OFFSET 0
+ #define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+ #define LFA_LINK_DOWN 0x1
+ #define LFA_LOOPBACK_ENABLED 0x2
+ #define LFA_DUPLEX_MISMATCH 0x3
+ #define LFA_MFW_IS_TOO_OLD 0x4
+ #define LFA_LINK_SPEED_MISMATCH 0x5
+ #define LFA_FLOW_CTRL_MISMATCH 0x6
+ #define LFA_SPEED_CAP_MISMATCH 0x7
+ #define LFA_DCC_LFA_DISABLED 0x8
+ #define LFA_EEE_MISMATCH 0x9
+
+ #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+ #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+
+ #define LINK_FLAP_COUNT_OFFSET 16
+ #define LINK_FLAP_COUNT_MASK 0x00ff0000
+
+ #define LFA_FLAGS_MASK 0xff000000
+ #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
+};
+
struct ncsi_oem_fcoe_features {
u32 fcoe_features1;
#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
@@ -2738,8 +2789,8 @@ struct afex_stats {
};
#define BCM_5710_FW_MAJOR_VERSION 7
-#define BCM_5710_FW_MINOR_VERSION 2
-#define BCM_5710_FW_REVISION_VERSION 51
+#define BCM_5710_FW_MINOR_VERSION 8
+#define BCM_5710_FW_REVISION_VERSION 2
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3861,10 +3912,8 @@ struct eth_rss_update_ramrod_data {
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6
-#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7)
-#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
u8 rss_result_mask;
u8 rss_mode;
__le32 __reserved2;
@@ -4080,27 +4129,29 @@ struct eth_tx_start_bd {
#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
-#define ETH_TX_START_BD_RESREVED (0x1<<5)
-#define ETH_TX_START_BD_RESREVED_SHIFT 5
-#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
-#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
+#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
+#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
+#define ETH_TX_START_BD_RESREVED (0x1<<7)
+#define ETH_TX_START_BD_RESREVED_SHIFT 7
};
/*
* Tx parsing BD structure for ETH E1/E1h
*/
struct eth_tx_parse_bd_e1x {
- u8 global_data;
+ __le16 global_data;
#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
-#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
-#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
-#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
-#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
-#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
-#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
-#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4)
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8)
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9)
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9
u8 tcp_flags;
#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
@@ -4119,7 +4170,6 @@ struct eth_tx_parse_bd_e1x {
#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
u8 ip_hlen_w;
- s8 reserved;
__le16 total_hlen_w;
__le16 tcp_pseudo_csum;
__le16 lso_mss;
@@ -4138,14 +4188,16 @@ struct eth_tx_parse_bd_e2 {
__le16 src_mac_addr_mid;
__le16 src_mac_addr_hi;
__le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
-#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
-#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
-#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
-#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16)
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30)
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30
};
/*
@@ -4913,7 +4965,8 @@ struct flow_control_configuration {
*
*/
struct function_start_data {
- __le16 function_mode;
+ u8 function_mode;
+ u8 reserved;
__le16 sd_vlan_tag;
__le16 vif_id;
u8 path_id;
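Both the new shmem_lfa status fields and the relocated ETH_ADDR_TYPE bits above follow the driver's usual MASK/OFFSET (or MASK/_SHIFT) pairing. A minimal sketch of how such fields are read and written (helper names are illustrative, not part of the patch); the open-coded shift-and-mask is roughly what the driver's SET_FLAG() macro does with these definitions:

/* Read the link-flap counter out of shmem_lfa.lfa_sts. */
static inline u32 lfa_link_flap_count(u32 lfa_sts)
{
	/* MASK = 0x00ff0000, OFFSET = 16 -> bits 23:16 */
	return (lfa_sts & LINK_FLAP_COUNT_MASK) >> LINK_FLAP_COUNT_OFFSET;
}

/* Place mac_type into bits 31:30 of the E2 parse-BD accumulator; the
 * accumulated value is converted with cpu_to_le32() only when it is
 * finally stored into pbd_e2->parsing_data.
 */
static inline u32 pbd_e2_set_addr_type(u32 parsing_data, u32 mac_type)
{
	parsing_data &= ~ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE;
	parsing_data |= mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT;
	return parsing_data;
}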
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 559c396..c8f10f0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -566,7 +566,7 @@ static const struct {
u32 e2; /* 57712 */
u32 e3; /* 578xx */
} reg_mask; /* Register mask (all valid bits) */
- char name[7]; /* Block's longest name is 6 characters long
+ char name[8]; /* Block's longest name is 7 characters long
* (name + suffix)
*/
} bnx2x_blocks_parity_data[] = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index b046beb..e2e45ee 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -161,120 +161,6 @@
#define EDC_MODE_LIMITING 0x0044
#define EDC_MODE_PASSIVE_DAC 0x0055
-/* BRB default for class 0 E2 */
-#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR 170
-#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR 250
-#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR 10
-#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR 50
-
-/* BRB thresholds for E2*/
-#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
-#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
-
-#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250
-#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
-
-#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10
-#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90
-
-#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
-#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
-
-/* BRB default for class 0 E3A0 */
-#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR 290
-#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR 410
-#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR 10
-#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR 50
-
-/* BRB thresholds for E3A0 */
-#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
-#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
-
-#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410
-#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
-
-#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10
-#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170
-
-#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
-#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
-
-/* BRB default for E3B0 */
-#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR 330
-#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR 490
-#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR 15
-#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR 55
-
-/* BRB thresholds for E3B0 2 port mode*/
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
-
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
-
-#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
-#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025
-
-#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50
-#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025
-
-/* only for E3B0*/
-#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025
-#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025
-
-/* Lossy +Lossless GUARANTIED == GUART */
-#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284
-/* Lossless +Lossless*/
-#define PFC_E3B0_2P_PAUSE_LB_GUART 236
-/* Lossy +Lossy*/
-#define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342
-
-/* Lossy +Lossless*/
-#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284
-/* Lossless +Lossless*/
-#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236
-/* Lossy +Lossy*/
-#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336
-#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80
-
-#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0
-#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0
-
-/* BRB thresholds for E3B0 4 port mode */
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
-
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
-
-#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
-#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304
-
-#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
-#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
-
-/* only for E3B0*/
-#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
-#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
-#define PFC_E3B0_4P_LB_GUART 120
-
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
-
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
-
-/* Pause defines*/
-#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR 330
-#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR 490
-#define DEFAULT_E3B0_LB_GUART 40
-
-#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART 40
-#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST 0
-
-#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART 40
-#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST 0
-
/* ETS defines*/
#define DCBX_INVALID_COS (0xFF)
@@ -321,6 +207,127 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
return val;
}
+/*
+ * bnx2x_check_lfa - This function checks whether link reinitialization is
+ * required, or whether the link flap can be avoided.
+ *
+ * @params: link parameters
+ * Returns 0 if the Link Flap Avoidance conditions are met; otherwise, returns
+ * the failed condition code.
+ */
+static int bnx2x_check_lfa(struct link_params *params)
+{
+ u32 link_status, cfg_idx, lfa_mask, cfg_size;
+ u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
+ u32 saved_val, req_val, eee_status;
+ struct bnx2x *bp = params->bp;
+
+ additional_config =
+ REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config));
+
+ /* NOTE: must be first condition checked -
+ * to verify DCC bit is cleared in any case!
+ */
+ if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
+ DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n");
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config),
+ additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
+ return LFA_DCC_LFA_DISABLED;
+ }
+
+ /* Verify that link is up */
+ link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status));
+ if (!(link_status & LINK_STATUS_LINK_UP))
+ return LFA_LINK_DOWN;
+
+ /* Verify that loopback mode is not set */
+ if (params->loopback_mode)
+ return LFA_LOOPBACK_ENABLED;
+
+ /* Verify that MFW supports LFA */
+ if (!params->lfa_base)
+ return LFA_MFW_IS_TOO_OLD;
+
+ if (params->num_phys == 3) {
+ cfg_size = 2;
+ lfa_mask = 0xffffffff;
+ } else {
+ cfg_size = 1;
+ lfa_mask = 0xffff;
+ }
+
+ /* Compare Duplex */
+ saved_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_duplex));
+ req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_DUPLEX_MISMATCH;
+ }
+ /* Compare Flow Control */
+ saved_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_flow_ctrl));
+ req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_FLOW_CTRL_MISMATCH;
+ }
+ /* Compare Link Speed */
+ saved_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_line_speed));
+ req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_LINK_SPEED_MISMATCH;
+ }
+
+ for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
+ cur_speed_cap_mask = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa,
+ speed_cap_mask[cfg_idx]));
+
+ if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
+ DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n",
+ cur_speed_cap_mask,
+ params->speed_cap_mask[cfg_idx]);
+ return LFA_SPEED_CAP_MISMATCH;
+ }
+ }
+
+ cur_req_fc_auto_adv =
+ REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config)) &
+ REQ_FC_AUTO_ADV_MASK;
+
+ if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
+ DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n",
+ cur_req_fc_auto_adv, params->req_fc_auto_adv);
+ return LFA_FLOW_CTRL_MISMATCH;
+ }
+
+ eee_status = REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]));
+
+ if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
+ (params->eee_mode & EEE_MODE_ENABLE_LPI)) ||
+ ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
+ (params->eee_mode & EEE_MODE_ADV_LPI))) {
+ DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode,
+ eee_status);
+ return LFA_EEE_MISMATCH;
+ }
+
+ /* LFA conditions are met */
+ return 0;
+}
/******************************************************************/
/* EPIO/GPIO section */
/******************************************************************/
@@ -1307,93 +1314,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
}
/******************************************************************/
-/* EEE section */
-/******************************************************************/
-static u8 bnx2x_eee_has_cap(struct link_params *params)
-{
- struct bnx2x *bp = params->bp;
-
- if (REG_RD(bp, params->shmem2_base) <=
- offsetof(struct shmem2_region, eee_status[params->port]))
- return 0;
-
- return 1;
-}
-
-static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
-{
- switch (nvram_mode) {
- case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
- *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
- break;
- case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
- *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
- break;
- case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
- *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
- break;
- default:
- *idle_timer = 0;
- break;
- }
-
- return 0;
-}
-
-static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
-{
- switch (idle_timer) {
- case EEE_MODE_NVRAM_BALANCED_TIME:
- *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
- break;
- case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
- *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
- break;
- case EEE_MODE_NVRAM_LATENCY_TIME:
- *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
- break;
- default:
- *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
- break;
- }
-
- return 0;
-}
-
-static u32 bnx2x_eee_calc_timer(struct link_params *params)
-{
- u32 eee_mode, eee_idle;
- struct bnx2x *bp = params->bp;
-
- if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
- if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
- /* time value in eee_mode --> used directly*/
- eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
- } else {
- /* hsi value in eee_mode --> time */
- if (bnx2x_eee_nvram_to_time(params->eee_mode &
- EEE_MODE_NVRAM_MASK,
- &eee_idle))
- return 0;
- }
- } else {
- /* hsi values in nvram --> time*/
- eee_mode = ((REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_feature_config[params->port].
- eee_power_mode)) &
- PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
- PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
-
- if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
- return 0;
- }
-
- return eee_idle;
-}
-
-
-/******************************************************************/
/* PFC section */
/******************************************************************/
static void bnx2x_update_pfc_xmac(struct link_params *params,
@@ -1606,16 +1526,23 @@ static void bnx2x_set_xumac_nig(struct link_params *params,
NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
}
-static void bnx2x_umac_disable(struct link_params *params)
+static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en)
{
u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ u32 val;
struct bnx2x *bp = params->bp;
if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
return;
-
+ val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
+ if (en)
+ val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA);
+ else
+ val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA);
/* Disable RX and TX */
- REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, 0);
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
}
static void bnx2x_umac_enable(struct link_params *params,
@@ -1671,6 +1598,16 @@ static void bnx2x_umac_enable(struct link_params *params,
REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
udelay(50);
+ /* Configure UMAC for EEE */
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+ DP(NETIF_MSG_LINK, "configured UMAC for EEE\n");
+ REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL,
+ UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
+ REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
+ } else {
+ REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
+ }
+
/* Set MAC address for source TX Pause/PFC frames (under SW reset) */
REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
((params->mac_addr[2] << 24) |
@@ -1766,11 +1703,12 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
}
-static void bnx2x_xmac_disable(struct link_params *params)
+static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en)
{
u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ u32 val;
if (REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -1784,7 +1722,12 @@ static void bnx2x_xmac_disable(struct link_params *params)
REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
(pfc_ctrl | (1<<1)));
DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
- REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
+ val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
+ if (en)
+ val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+ else
+ val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
}
}
@@ -2087,391 +2030,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
-/* PFC BRB internal port configuration params */
-struct bnx2x_pfc_brb_threshold_val {
- u32 pause_xoff;
- u32 pause_xon;
- u32 full_xoff;
- u32 full_xon;
-};
-
-struct bnx2x_pfc_brb_e3b0_val {
- u32 per_class_guaranty_mode;
- u32 lb_guarantied_hyst;
- u32 full_lb_xoff_th;
- u32 full_lb_xon_threshold;
- u32 lb_guarantied;
- u32 mac_0_class_t_guarantied;
- u32 mac_0_class_t_guarantied_hyst;
- u32 mac_1_class_t_guarantied;
- u32 mac_1_class_t_guarantied_hyst;
-};
-
-struct bnx2x_pfc_brb_th_val {
- struct bnx2x_pfc_brb_threshold_val pauseable_th;
- struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
- struct bnx2x_pfc_brb_threshold_val default_class0;
- struct bnx2x_pfc_brb_threshold_val default_class1;
-
-};
-static int bnx2x_pfc_brb_get_config_params(
- struct link_params *params,
- struct bnx2x_pfc_brb_th_val *config_val)
-{
- struct bnx2x *bp = params->bp;
- DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
-
- config_val->default_class1.pause_xoff = 0;
- config_val->default_class1.pause_xon = 0;
- config_val->default_class1.full_xoff = 0;
- config_val->default_class1.full_xon = 0;
-
- if (CHIP_IS_E2(bp)) {
- /* Class0 defaults */
- config_val->default_class0.pause_xoff =
- DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
- config_val->default_class0.pause_xon =
- DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
- config_val->default_class0.full_xoff =
- DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
- config_val->default_class0.full_xon =
- DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
- /* Pause able*/
- config_val->pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
- /* Non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- } else if (CHIP_IS_E3A0(bp)) {
- /* Class0 defaults */
- config_val->default_class0.pause_xoff =
- DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
- config_val->default_class0.pause_xon =
- DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
- config_val->default_class0.full_xoff =
- DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
- config_val->default_class0.full_xon =
- DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
- /* Pause able */
- config_val->pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
- /* Non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- } else if (CHIP_IS_E3B0(bp)) {
- /* Class0 defaults */
- config_val->default_class0.pause_xoff =
- DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
- config_val->default_class0.pause_xon =
- DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
- config_val->default_class0.full_xoff =
- DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
- config_val->default_class0.full_xon =
- DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
-
- if (params->phy[INT_PHY].flags &
- FLAGS_4_PORT_MODE) {
- config_val->pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* Non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- } else {
- config_val->pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* Non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- }
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static void bnx2x_pfc_brb_get_e3b0_config_params(
- struct link_params *params,
- struct bnx2x_pfc_brb_e3b0_val
- *e3b0_val,
- struct bnx2x_nig_brb_pfc_port_params *pfc_params,
- const u8 pfc_enabled)
-{
- if (pfc_enabled && pfc_params) {
- e3b0_val->per_class_guaranty_mode = 1;
- e3b0_val->lb_guarantied_hyst = 80;
-
- if (params->phy[INT_PHY].flags &
- FLAGS_4_PORT_MODE) {
- e3b0_val->full_lb_xoff_th =
- PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
- e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
- e3b0_val->lb_guarantied =
- PFC_E3B0_4P_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
- e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
- e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
- e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
- } else {
- e3b0_val->full_lb_xoff_th =
- PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
- e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
- e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
- e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
- e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
-
- if (pfc_params->cos0_pauseable !=
- pfc_params->cos1_pauseable) {
- /* Nonpauseable= Lossy + pauseable = Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
- } else if (pfc_params->cos0_pauseable) {
- /* Lossless +Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
- } else {
- /* Lossy +Lossy*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_NON_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
- }
- }
- } else {
- e3b0_val->per_class_guaranty_mode = 0;
- e3b0_val->lb_guarantied_hyst = 0;
- e3b0_val->full_lb_xoff_th =
- DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
- e3b0_val->full_lb_xon_threshold =
- DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
- e3b0_val->lb_guarantied =
- DEFAULT_E3B0_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
- e3b0_val->mac_0_class_t_guarantied_hyst =
- DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
- e3b0_val->mac_1_class_t_guarantied =
- DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
- e3b0_val->mac_1_class_t_guarantied_hyst =
- DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
- }
-}
-static int bnx2x_update_pfc_brb(struct link_params *params,
- struct link_vars *vars,
- struct bnx2x_nig_brb_pfc_port_params
- *pfc_params)
-{
- struct bnx2x *bp = params->bp;
- struct bnx2x_pfc_brb_th_val config_val = { {0} };
- struct bnx2x_pfc_brb_threshold_val *reg_th_config =
- &config_val.pauseable_th;
- struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
- const int set_pfc = params->feature_config_flags &
- FEATURE_CONFIG_PFC_ENABLED;
- const u8 pfc_enabled = (set_pfc && pfc_params);
- int bnx2x_status = 0;
- u8 port = params->port;
-
- /* default - pause configuration */
- reg_th_config = &config_val.pauseable_th;
- bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
- if (bnx2x_status)
- return bnx2x_status;
-
- if (pfc_enabled) {
- /* First COS */
- if (pfc_params->cos0_pauseable)
- reg_th_config = &config_val.pauseable_th;
- else
- reg_th_config = &config_val.non_pauseable_th;
- } else
- reg_th_config = &config_val.default_class0;
- /* The number of free blocks below which the pause signal to class 0
- * of MAC #n is asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
- BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
- reg_th_config->pause_xoff);
- /* The number of free blocks above which the pause signal to class 0
- * of MAC #n is de-asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
- BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
- /* The number of free blocks below which the full signal to class 0
- * of MAC #n is asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
- BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
- /* The number of free blocks above which the full signal to class 0
- * of MAC #n is de-asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
- BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
-
- if (pfc_enabled) {
- /* Second COS */
- if (pfc_params->cos1_pauseable)
- reg_th_config = &config_val.pauseable_th;
- else
- reg_th_config = &config_val.non_pauseable_th;
- } else
- reg_th_config = &config_val.default_class1;
- /* The number of free blocks below which the pause signal to
- * class 1 of MAC #n is asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
- reg_th_config->pause_xoff);
-
- /* The number of free blocks above which the pause signal to
- * class 1 of MAC #n is de-asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
- reg_th_config->pause_xon);
- /* The number of free blocks below which the full signal to
- * class 1 of MAC #n is asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
- BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
- reg_th_config->full_xoff);
- /* The number of free blocks above which the full signal to
- * class 1 of MAC #n is de-asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
- BRB1_REG_FULL_1_XON_THRESHOLD_0,
- reg_th_config->full_xon);
-
- if (CHIP_IS_E3B0(bp)) {
- bnx2x_pfc_brb_get_e3b0_config_params(
- params,
- &e3b0_val,
- pfc_params,
- pfc_enabled);
-
- REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
- e3b0_val.per_class_guaranty_mode);
-
- /* The hysteresis on the guarantied buffer space for the Lb
- * port before signaling XON.
- */
- REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
- e3b0_val.lb_guarantied_hyst);
-
- /* The number of free blocks below which the full signal to the
- * LB port is asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
- e3b0_val.full_lb_xoff_th);
- /* The number of free blocks above which the full signal to the
- * LB port is de-asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
- e3b0_val.full_lb_xon_threshold);
- /* The number of blocks guarantied for the MAC #n port. n=0,1
- */
-
- /* The number of blocks guarantied for the LB port. */
- REG_WR(bp, BRB1_REG_LB_GUARANTIED,
- e3b0_val.lb_guarantied);
-
- /* The number of blocks guarantied for the MAC #n port. */
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
- 2 * e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
- 2 * e3b0_val.mac_1_class_t_guarantied);
- /* The number of blocks guarantied for class #t in MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- /* The hysteresis on the guarantied buffer space for class in
- * MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
-
- /* The number of blocks guarantied for class #t in MAC1.t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- /* The hysteresis on the guarantied buffer space for class #t
- * in MAC1. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
- }
-
- return bnx2x_status;
-}
-
/******************************************************************************
* Description:
* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
@@ -2529,16 +2087,6 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
port_mb[params->port].link_status), link_status);
}
-static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
-{
- struct bnx2x *bp = params->bp;
-
- if (bnx2x_eee_has_cap(params))
- REG_WR(bp, params->shmem2_base +
- offsetof(struct shmem2_region,
- eee_status[params->port]), eee_status);
-}
-
static void bnx2x_update_pfc_nig(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2658,11 +2206,6 @@ int bnx2x_update_pfc(struct link_params *params,
/* Update NIG params */
bnx2x_update_pfc_nig(params, vars, pfc_params);
- /* Update BRB params */
- bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
- if (bnx2x_status)
- return bnx2x_status;
-
if (!vars->link_up)
return bnx2x_status;
@@ -2827,16 +2370,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
static int bnx2x_bmac_enable(struct link_params *params,
struct link_vars *vars,
- u8 is_lb)
+ u8 is_lb, u8 reset_bmac)
{
int rc = 0;
u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 val;
/* Reset and unreset the BigMac */
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- usleep_range(1000, 2000);
+ if (reset_bmac) {
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ usleep_range(1000, 2000);
+ }
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -2868,37 +2413,28 @@ static int bnx2x_bmac_enable(struct link_params *params,
return rc;
}
-static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
+static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en)
{
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
+ if (CHIP_IS_E2(bp))
+ bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
+ else
+ bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
/* Only if the bmac is out of reset */
if (REG_RD(bp, MISC_REG_RESET_REG_2) &
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
nig_bmac_enable) {
-
- if (CHIP_IS_E2(bp)) {
- /* Clear Rx Enable bit in BMAC_CONTROL register */
- REG_RD_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
- wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
- } else {
- /* Clear Rx Enable bit in BMAC_CONTROL register */
- REG_RD_DMAE(bp, bmac_addr +
- BIGMAC_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ /* Clear Rx Enable bit in BMAC_CONTROL register */
+ REG_RD_DMAE(bp, bmac_addr, wb_data, 2);
+ if (en)
+ wb_data[0] |= BMAC_CONTROL_RX_ENABLE;
+ else
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC_REGISTER_BMAC_CONTROL,
- wb_data, 2);
- }
+ REG_WR_DMAE(bp, bmac_addr, wb_data, 2);
usleep_range(1000, 2000);
}
}
@@ -3233,6 +2769,245 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
EMAC_MDIO_STATUS_10MB);
return rc;
}
+
+/******************************************************************/
+/* EEE section */
+/******************************************************************/
+static u8 bnx2x_eee_has_cap(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (REG_RD(bp, params->shmem2_base) <=
+ offsetof(struct shmem2_region, eee_status[params->port]))
+ return 0;
+
+ return 1;
+}
+
+static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
+{
+ switch (nvram_mode) {
+ case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+ *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+ *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+ *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
+ break;
+ default:
+ *idle_timer = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
+{
+ switch (idle_timer) {
+ case EEE_MODE_NVRAM_BALANCED_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+ break;
+ case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+ break;
+ case EEE_MODE_NVRAM_LATENCY_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+ break;
+ default:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+ break;
+ }
+
+ return 0;
+}
+
+static u32 bnx2x_eee_calc_timer(struct link_params *params)
+{
+ u32 eee_mode, eee_idle;
+ struct bnx2x *bp = params->bp;
+
+ if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* time value in eee_mode --> used directly*/
+ eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
+ } else {
+ /* hsi value in eee_mode --> time */
+ if (bnx2x_eee_nvram_to_time(params->eee_mode &
+ EEE_MODE_NVRAM_MASK,
+ &eee_idle))
+ return 0;
+ }
+ } else {
+ /* hsi values in nvram --> time*/
+ eee_mode = ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+ if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
+ return 0;
+ }
+
+ return eee_idle;
+}
+
+static int bnx2x_eee_set_timers(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 eee_idle = 0, eee_mode;
+ struct bnx2x *bp = params->bp;
+
+ eee_idle = bnx2x_eee_calc_timer(params);
+
+ if (eee_idle) {
+ REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+ eee_idle);
+ } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
+ (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
+ (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
+ DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
+ return -EINVAL;
+ }
+
+ vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* eee_idle in 1u --> eee_status in 16u */
+ eee_idle >>= 4;
+ vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+ SHMEM_EEE_TIME_OUTPUT_BIT;
+ } else {
+ if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
+ return -EINVAL;
+ vars->eee_status |= eee_mode;
+ }
+
+ return 0;
+}
+
+static int bnx2x_eee_initial_config(struct link_params *params,
+ struct link_vars *vars, u8 mode)
+{
+ vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
+
+ /* Propagate params' bits --> vars (for migration exposure) */
+ if (params->eee_mode & EEE_MODE_ENABLE_LPI)
+ vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+ if (params->eee_mode & EEE_MODE_ADV_LPI)
+ vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+ return bnx2x_eee_set_timers(params, vars);
+}
+
+static int bnx2x_eee_disable(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+
+ /* Make Certain LPI is disabled */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+ return 0;
+}
+
+static int bnx2x_eee_advertise(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars, u8 modes)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0;
+
+ /* Mask events preventing LPI generation */
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
+ if (modes & SHMEM_EEE_10G_ADV) {
+ DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
+ val |= 0x8;
+ }
+ if (modes & SHMEM_EEE_1G_ADV) {
+ DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n");
+ val |= 0x4;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+ vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+ return 0;
+}
+
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (bnx2x_eee_has_cap(params))
+ REG_WR(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]), eee_status);
+}
+
+static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 adv = 0, lp = 0;
+ u32 lp_adv = 0;
+ u8 neg = 0;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
+
+ if (lp & 0x2) {
+ lp_adv |= SHMEM_EEE_100M_ADV;
+ if (adv & 0x2) {
+ if (vars->line_speed == SPEED_100)
+ neg = 1;
+ DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n");
+ }
+ }
+ if (lp & 0x14) {
+ lp_adv |= SHMEM_EEE_1G_ADV;
+ if (adv & 0x14) {
+ if (vars->line_speed == SPEED_1000)
+ neg = 1;
+ DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n");
+ }
+ }
+ if (lp & 0x68) {
+ lp_adv |= SHMEM_EEE_10G_ADV;
+ if (adv & 0x68) {
+ if (vars->line_speed == SPEED_10000)
+ neg = 1;
+ DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n");
+ }
+ }
+
+ vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+ vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+ if (neg) {
+ DP(NETIF_MSG_LINK, "EEE is active\n");
+ vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+ }
+
+}
+
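The masks in bnx2x_eee_an_resolve() follow the clause 45 EEE advertisement layout (AN registers 7.60/7.61): bit 1 is 100BASE-TX, bits 2 and 4 are the 1G variants (hence 0x14), and bits 3, 5 and 6 the 10G variants (hence 0x68). A short sketch of the same grouping; the SHMEM_* values below are illustrative stand-ins for the driver's real definitions:

#include <stdint.h>
#include <stdio.h>

#define SHMEM_EEE_100M_ADV 0x1		/* illustrative stand-ins */
#define SHMEM_EEE_1G_ADV   0x2
#define SHMEM_EEE_10G_ADV  0x4

/* Group raw EEE advertisement bits (MDIO AN reg 7.60/7.61 layout)
 * into coarse 100M/1G/10G capability flags.
 */
static uint32_t eee_adv_to_groups(uint16_t adv)
{
	uint32_t groups = 0;

	if (adv & 0x02)		/* 100BASE-TX */
		groups |= SHMEM_EEE_100M_ADV;
	if (adv & 0x14)		/* 1000BASE-T / 1000BASE-KX */
		groups |= SHMEM_EEE_1G_ADV;
	if (adv & 0x68)		/* 10GBASE-T / -KX4 / -KR */
		groups |= SHMEM_EEE_10G_ADV;
	return groups;
}

int main(void)
{
	/* Link partner advertising 1000BASE-T and 10GBASE-T EEE */
	printf("groups=0x%x\n", (unsigned)eee_adv_to_groups(0x0c));
	return 0;
}
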
/******************************************************************/
/* BSC access functions from E3 */
/******************************************************************/
@@ -3754,6 +3529,19 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
* init configuration, and set/clear SGMII flag. Internal
* phy init is done purely in phy_init stage.
*/
+
+static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
+}
+
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
@@ -4013,13 +3801,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
- /* Enable LPI pass through */
- DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_EEE_COMBO_CONTROL0,
- 0x7c);
- bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
+ bnx2x_warpcore_set_lpi_passthrough(phy, params);
/* 10G XFI Full Duplex */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4116,6 +3898,8 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
+ bnx2x_warpcore_set_lpi_passthrough(phy, params);
+
if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
/* SGMII Autoneg */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4409,7 +4193,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
"serdes_net_if = 0x%x\n",
vars->line_speed, serdes_net_if);
bnx2x_set_aer_mmd(params, phy);
-
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
vars->phy_flags |= PHY_XGXS_FLAG;
if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
(phy->req_line_speed &&
@@ -4718,6 +4502,10 @@ void bnx2x_link_status_update(struct link_params *params,
vars->link_status = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
port_mb[port].link_status));
+ if (bnx2x_eee_has_cap(params))
+ vars->eee_status = REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]));
vars->phy_flags = PHY_XGXS_FLAG;
bnx2x_sync_link(params, vars);
@@ -6530,25 +6318,21 @@ static int bnx2x_update_link_down(struct link_params *params,
usleep_range(10000, 20000);
/* Reset BigMac/Xmac */
if (CHIP_IS_E1x(bp) ||
- CHIP_IS_E2(bp)) {
- bnx2x_bmac_rx_disable(bp, params->port);
- REG_WR(bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- }
+ CHIP_IS_E2(bp))
+ bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
if (CHIP_IS_E3(bp)) {
/* Prevent LPI Generation by chip */
REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
0);
- REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
0);
vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
SHMEM_EEE_ACTIVE_BIT);
bnx2x_update_mng_eee(params, vars->eee_status);
- bnx2x_xmac_disable(params);
- bnx2x_umac_disable(params);
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
}
return 0;
@@ -6600,7 +6384,7 @@ static int bnx2x_update_link_up(struct link_params *params,
if ((CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp))) {
if (link_10g) {
- if (bnx2x_bmac_enable(params, vars, 0) ==
+ if (bnx2x_bmac_enable(params, vars, 0, 1) ==
-ESRCH) {
DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
vars->link_up = 0;
@@ -7207,6 +6991,22 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params,
msleep(500);
}
+static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ switch (action) {
+ case PHY_INIT:
+ /* Enable LASI */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
+ break;
+ }
+}
+
static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -7227,12 +7027,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- /* Enable LASI */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
-
+ bnx2x_8073_specific_func(phy, params, PHY_INIT);
bnx2x_8073_set_pause_cl37(params, phy, vars);
bnx2x_cl45_read(bp, phy,
@@ -8267,7 +8062,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
u32 action)
{
struct bnx2x *bp = params->bp;
-
+ u16 val;
switch (action) {
case DISABLE_TX:
bnx2x_sfp_set_transmitter(params, phy, 0);
@@ -8276,6 +8071,40 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
bnx2x_sfp_set_transmitter(params, phy, 1);
break;
+ case PHY_INIT:
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ (1<<2) | (1<<5));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006);
+ /* Make MOD_ABS give interrupt on change */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
+ val |= (1<<12);
+ if (phy->flags & FLAGS_NOC)
+ val |= (3<<5);
+ /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+ * status, which reflects SFP+ module over-current
+ */
+ if (!(phy->flags & FLAGS_NOC))
+ val &= 0xff8f; /* Reset bits 4-6 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ val);
+
+ /* Set 2-wire transfer rate of the SFP+ module EEPROM
+ * to 100 kHz, since some DACs (direct attached cables) do
+ * not work at 400 kHz.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ 0xa001);
+ break;
default:
DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
action);
@@ -9058,28 +8887,15 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
u32 tx_en_mode;
- u16 tmp1, val, mod_abs, tmp2;
- u16 rx_alarm_ctrl_val;
- u16 lasi_ctrl_val;
+ u16 tmp1, mod_abs, tmp2;
struct bnx2x *bp = params->bp;
/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
bnx2x_wait_reset_complete(bp, phy, params);
- rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
- /* Should be 0x6 to enable XS on Tx side. */
- lasi_ctrl_val = 0x0006;
DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
- /* Enable LASI */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
- rx_alarm_ctrl_val);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
- 0);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
+ bnx2x_8727_specific_func(phy, params, PHY_INIT);
/* Initially configure MOD_ABS to interrupt when module is
* presence( bit 8)
*/
@@ -9095,25 +8911,9 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
-
/* Enable/Disable PHY transmitter output */
bnx2x_set_disable_pmd_transmit(params, phy, 0);
- /* Make MOD_ABS give interrupt on change */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
- &val);
- val |= (1<<12);
- if (phy->flags & FLAGS_NOC)
- val |= (3<<5);
-
- /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
- * status which reflect SFP+ module over-current
- */
- if (!(phy->flags & FLAGS_NOC))
- val &= 0xff8f; /* Reset bits 4-6 */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
-
bnx2x_8727_power_module(bp, phy, 1);
bnx2x_cl45_read(bp, phy,
@@ -9123,13 +8923,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
bnx2x_8727_config_speed(phy, params);
- /* Set 2-wire transfer rate of SFP+ module EEPROM
- * to 100Khz since some DACs(direct attached cables) do
- * not work at 400Khz.
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
- 0xa001);
+
/* Set TX PreEmphasis if needed */
if ((params->feature_config_flags &
@@ -9558,6 +9352,29 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
0xFFFB, 0xFFFD);
}
+static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ switch (action) {
+ case PHY_INIT:
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+ }
+ /* This phy uses the NIG latch mechanism since link indication
+ * arrives through its LED4 and not via its LASI signal, so we
+ * get a steady signal instead of clear on read
+ */
+ bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+ bnx2x_848xx_set_led(bp, phy);
+ break;
+ }
+}
+
static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -9565,22 +9382,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
- if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
- /* Save spirom version */
- bnx2x_save_848xx_spirom_version(phy, bp, params->port);
- }
- /* This phy uses the NIG latch mechanism since link indication
- * arrives through its LED4 and not via its LASI signal, so we
- * get steady signal instead of clear on read
- */
- bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
- 1 << NIG_LATCH_BC_ENABLE_MI_INT);
-
+ bnx2x_848xx_specific_func(phy, params, PHY_INIT);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
- bnx2x_848xx_set_led(bp, phy);
-
/* set 1000 speed advertisement */
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
@@ -9887,39 +9692,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_8483x_eee_timers(struct link_params *params,
- struct link_vars *vars)
-{
- u32 eee_idle = 0, eee_mode;
- struct bnx2x *bp = params->bp;
-
- eee_idle = bnx2x_eee_calc_timer(params);
-
- if (eee_idle) {
- REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
- eee_idle);
- } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
- (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
- (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
- DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
- return -EINVAL;
- }
-
- vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
- if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
- /* eee_idle in 1u --> eee_status in 16u */
- eee_idle >>= 4;
- vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
- SHMEM_EEE_TIME_OUTPUT_BIT;
- } else {
- if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
- return -EINVAL;
- vars->eee_status |= eee_mode;
- }
-
- return 0;
-}
-
static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -9930,10 +9702,6 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
- /* Make Certain LPI is disabled */
- REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
- REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
-
/* Prevent Phy from working in EEE and advertising it */
rc = bnx2x_84833_cmd_hdlr(phy, params,
PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
@@ -9942,10 +9710,7 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
return rc;
}
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0);
- vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
-
- return 0;
+ return bnx2x_eee_disable(phy, params, vars);
}
static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
@@ -9956,8 +9721,6 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 cmd_args = 1;
- DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
-
rc = bnx2x_84833_cmd_hdlr(phy, params,
PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
if (rc) {
@@ -9965,15 +9728,7 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
return rc;
}
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8);
-
- /* Mask events preventing LPI generation */
- REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
-
- vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
- vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
-
- return 0;
+ return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV);
}
#define PHY84833_CONSTANT_LATENCY 1193
@@ -10105,22 +9860,10 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_84833_TOP_CFG_FW_REV, &val);
/* Configure EEE support */
- if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) {
- phy->flags |= FLAGS_EEE_10GBT;
- vars->eee_status |= SHMEM_EEE_10G_ADV <<
- SHMEM_EEE_SUPPORTED_SHIFT;
- /* Propogate params' bits --> vars (for migration exposure) */
- if (params->eee_mode & EEE_MODE_ENABLE_LPI)
- vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
- else
- vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
-
- if (params->eee_mode & EEE_MODE_ADV_LPI)
- vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
- else
- vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
-
- rc = bnx2x_8483x_eee_timers(params, vars);
+ if ((val >= MDIO_84833_TOP_CFG_FW_EEE) &&
+ (val != MDIO_84833_TOP_CFG_FW_NO_EEE) &&
+ bnx2x_eee_has_cap(params)) {
+ rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV);
if (rc) {
DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
bnx2x_8483x_disable_eee(phy, params, vars);
@@ -10139,7 +9882,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
return rc;
}
} else {
- phy->flags &= ~FLAGS_EEE_10GBT;
vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
}
@@ -10278,29 +10020,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
- u32 eee_shmem = 0;
-
- bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
- MDIO_AN_REG_EEE_ADV, &val1);
- bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
- MDIO_AN_REG_LP_EEE_ADV, &val2);
- if ((val1 & val2) & 0x8) {
- DP(NETIF_MSG_LINK, "EEE negotiated\n");
- vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
- }
-
- if (val2 & 0x12)
- eee_shmem |= SHMEM_EEE_100M_ADV;
- if (val2 & 0x4)
- eee_shmem |= SHMEM_EEE_1G_ADV;
- if (val2 & 0x68)
- eee_shmem |= SHMEM_EEE_10G_ADV;
-
- vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
- vars->eee_status |= (eee_shmem <<
- SHMEM_EEE_LP_ADV_STATUS_SHIFT);
- }
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ bnx2x_eee_an_resolve(phy, params, vars);
}
return link_up;
@@ -10569,6 +10290,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
/******************************************************************/
/* 54618SE PHY SECTION */
/******************************************************************/
+static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ u16 temp;
+ switch (action) {
+ case PHY_INIT:
+ /* Configure LED4: set to INTR (0x6). */
+ /* Accessing shadow register 0xe. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL2);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= ~(0xf << 4);
+ temp |= (0x6 << 4);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ /* Configure INTR based on link status change. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_INTR_MASK,
+ ~MDIO_REG_INTR_MASK_LINK_STATUS);
+ break;
+ }
+}
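
The PHY_INIT branch above is the usual GPHY shadow-register read-modify-write: select shadow 0xe, read the current value, replace only the LED4 selector nibble, and write it back with the write-enable bit set. A standalone sketch of that access pattern; the mdio_read/mdio_write helpers and the register values are stand-ins for bnx2x_cl22_read/write and the MDIO_REG_GPHY_* constants:

#include <stdint.h>
#include <stdio.h>

#define GPHY_SHADOW_REG      0x1c		/* shadow access register */
#define GPHY_SHADOW_WR_ENA   (1u << 15)		/* commit bit (illustrative) */
#define GPHY_SHADOW_LED_SEL2 0x0e00u		/* shadow selector (illustrative) */

/* Fake MDIO backend so the sketch runs standalone; the driver would go
 * through bnx2x_cl22_read/write instead.
 */
static uint16_t fake_regs[32];
static uint16_t mdio_read(int reg)		{ return fake_regs[reg]; }
static void mdio_write(int reg, uint16_t v)	{ fake_regs[reg] = v; }

/* Set LED4 mode to "interrupt" (0x6) without disturbing other fields. */
static void led4_set_intr(void)
{
	uint16_t val;

	mdio_write(GPHY_SHADOW_REG, GPHY_SHADOW_LED_SEL2); /* pick shadow 0xe */
	val = mdio_read(GPHY_SHADOW_REG);
	val &= ~(0xfu << 4);			/* clear the LED4 selector */
	val |= (0x6u << 4);			/* 0x6 = INTR */
	mdio_write(GPHY_SHADOW_REG, GPHY_SHADOW_WR_ENA | val);
}

int main(void)
{
	led4_set_intr();
	printf("shadow reg now 0x%04x\n", mdio_read(GPHY_SHADOW_REG));
	return 0;
}
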
+
static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -10606,24 +10356,8 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
/* Wait for GPHY to reset */
msleep(50);
- /* Configure LED4: set to INTR (0x6). */
- /* Accessing shadow register 0xe. */
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_SHADOW,
- MDIO_REG_GPHY_SHADOW_LED_SEL2);
- bnx2x_cl22_read(bp, phy,
- MDIO_REG_GPHY_SHADOW,
- &temp);
- temp &= ~(0xf << 4);
- temp |= (0x6 << 4);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_SHADOW,
- MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
- /* Configure INTR based on link status change. */
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_INTR_MASK,
- ~MDIO_REG_INTR_MASK_LINK_STATUS);
+ bnx2x_54618se_specific_func(phy, params, PHY_INIT);
/* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
bnx2x_cl22_write(bp, phy,
MDIO_REG_GPHY_SHADOW,
@@ -10728,28 +10462,52 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Setting 10M force\n");
}
- /* Check if we should turn on Auto-GrEEEn */
- bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp);
- if (temp == MDIO_REG_GPHY_ID_54618SE) {
- if (params->feature_config_flags &
- FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
- temp = 6;
- DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+ if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) {
+ int rc;
+
+ bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS,
+ MDIO_REG_GPHY_EXP_ACCESS_TOP |
+ MDIO_REG_GPHY_EXP_TOP_2K_BUF);
+ bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp);
+ temp &= 0xfffe;
+ bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp);
+
+ rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+ bnx2x_eee_disable(phy, params, vars);
+ } else if ((params->eee_mode & EEE_MODE_ADV_LPI) &&
+ (phy->req_duplex == DUPLEX_FULL) &&
+ (bnx2x_eee_calc_timer(params) ||
+ !(params->eee_mode & EEE_MODE_ENABLE_LPI))) {
+ /* Need to advertise EEE only when requested,
+ * and either no LPI assertion was requested,
+ * or it was requested and a valid timer was set.
+ * Note also that full duplex is required for EEE.
+ */
+ bnx2x_eee_advertise(phy, params, vars,
+ SHMEM_EEE_1G_ADV);
} else {
- temp = 0;
- DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n");
+ DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n");
+ bnx2x_eee_disable(phy, params, vars);
+ }
+ } else {
+ vars->eee_status &= ~SHMEM_EEE_1G_ADV <<
+ SHMEM_EEE_SUPPORTED_SHIFT;
+
+ if (phy->flags & FLAGS_EEE) {
+ /* Handle legacy auto-grEEEn */
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+ temp = 6;
+ DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+ } else {
+ temp = 0;
+ DP(NETIF_MSG_LINK, "Don't Adv. EEE\n");
+ }
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_EEE_ADV, temp);
}
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_DATA_REG,
- MDIO_REG_GPHY_EEE_ADV);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_ADDR_REG,
- (0x1 << 14) | MDIO_AN_DEVAD);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_DATA_REG,
- temp);
}
bnx2x_cl22_write(bp, phy,
@@ -10896,29 +10654,6 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
vars->line_speed);
- /* Report whether EEE is resolved. */
- bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
- if (val == MDIO_REG_GPHY_ID_54618SE) {
- if (vars->link_status &
- LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
- val = 0;
- else {
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_ADDR_REG,
- MDIO_AN_DEVAD);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_DATA_REG,
- MDIO_REG_GPHY_EEE_RESOLVED);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_ADDR_REG,
- (0x1 << 14) | MDIO_AN_DEVAD);
- bnx2x_cl22_read(bp, phy,
- MDIO_REG_GPHY_CL45_DATA_REG,
- &val);
- }
- DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
- }
-
bnx2x_ext_phy_resolve_fc(phy, params, vars);
if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
@@ -10948,6 +10683,10 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
if (val & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+ if ((phy->flags & FLAGS_EEE) &&
+ bnx2x_eee_has_cap(params))
+ bnx2x_eee_an_resolve(phy, params, vars);
}
}
return link_up;
@@ -11353,7 +11092,7 @@ static struct bnx2x_phy phy_8073 = {
.format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
.hw_reset = (hw_reset_t)NULL,
.set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
};
static struct bnx2x_phy phy_8705 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
@@ -11546,7 +11285,7 @@ static struct bnx2x_phy phy_84823 = {
.format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
.hw_reset = (hw_reset_t)NULL,
.set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
};
static struct bnx2x_phy phy_84833 = {
@@ -11555,8 +11294,7 @@ static struct bnx2x_phy phy_84833 = {
.def_md_devad = 0,
.flags = (FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL |
- FLAGS_TX_ERROR_CHECK |
- FLAGS_EEE_10GBT),
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -11582,7 +11320,7 @@ static struct bnx2x_phy phy_84833 = {
.format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
.hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
.set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
};
static struct bnx2x_phy phy_54618se = {
@@ -11616,7 +11354,7 @@ static struct bnx2x_phy phy_54618se = {
.format_fw_ver = (format_fw_ver_t)NULL,
.hw_reset = (hw_reset_t)NULL,
.set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func
};
/*****************************************************************/
/* */
@@ -11862,6 +11600,8 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
*phy = phy_54618se;
+ if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+ phy->flags |= FLAGS_EEE;
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
*phy = phy_7101;
@@ -12141,7 +11881,7 @@ void bnx2x_init_bmac_loopback(struct link_params *params,
bnx2x_xgxs_deassert(params);
/* set bmac loopback */
- bnx2x_bmac_enable(params, vars, 1);
+ bnx2x_bmac_enable(params, vars, 1, 1);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
@@ -12233,7 +11973,7 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
if (USES_WARPCORE(bp))
bnx2x_xmac_enable(params, vars, 0);
else
- bnx2x_bmac_enable(params, vars, 0);
+ bnx2x_bmac_enable(params, vars, 0, 1);
}
if (params->loopback_mode == LOOPBACK_XGXS) {
@@ -12258,8 +11998,161 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
}
+static void bnx2x_set_rx_filter(struct link_params *params, u8 en)
+{
+ struct bnx2x *bp = params->bp;
+ u8 val = en * 0x1F;
+
+ /* Open the gate from the NIG to the BRB */
+ if (!CHIP_IS_E1x(bp))
+ val |= en * 0x20;
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
+
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4,
+ en*0x3);
+ }
+
+ REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), en);
+}
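
bnx2x_set_rx_filter() builds its NIG-to-BRB drive mask by multiplying the bit group by en (0 or 1), so a single expression either opens or closes all the filters; non-E1x chips carry one extra bit. A tiny sketch of that mask construction, with the 0x1f/0x20 values taken from the code above:

#include <stdint.h>
#include <stdio.h>

/* Build the NIG->BRB drive mask: five filter bits on all chips, plus a
 * sixth bit on non-E1x chips.  'en' is 0 or 1, so multiplying by the mask
 * sets or clears the whole group at once.
 */
static uint8_t nig_brb_drv_mask(int en, int is_e1x)
{
	uint8_t val = en * 0x1f;

	if (!is_e1x)
		val |= en * 0x20;
	return val;
}

int main(void)
{
	printf("E2 enable:  0x%02x\n", nig_brb_drv_mask(1, 0));
	printf("E1x enable: 0x%02x\n", nig_brb_drv_mask(1, 1));
	printf("disable:    0x%02x\n", nig_brb_drv_mask(0, 0));
	return 0;
}
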
+static int bnx2x_avoid_link_flap(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 phy_idx;
+ u32 dont_clear_stat, lfa_sts;
+ struct bnx2x *bp = params->bp;
+
+ /* Sync the link parameters */
+ bnx2x_link_status_update(params, vars);
+
+ /*
+ * The module verification was already done by the previous link owner,
+ * so this call is meant only to get a warning message.
+ */
+
+ for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) {
+ struct bnx2x_phy *phy = &params->phy[phy_idx];
+ if (phy->phy_specific_func) {
+ DP(NETIF_MSG_LINK, "Calling PHY specific func\n");
+ phy->phy_specific_func(phy, params, PHY_INIT);
+ }
+ if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) ||
+ (phy->media_type == ETH_PHY_SFP_1G_FIBER) ||
+ (phy->media_type == ETH_PHY_DA_TWINAX))
+ bnx2x_verify_sfp_module(phy, params);
+ }
+ lfa_sts = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa,
+ lfa_sts));
+
+ dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT;
+
+ /* Re-enable the NIG/MAC */
+ if (CHIP_IS_E3(bp)) {
+ if (!dont_clear_stat) {
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+ params->port));
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+ params->port));
+ }
+ if (vars->line_speed < SPEED_10000)
+ bnx2x_umac_enable(params, vars, 0);
+ else
+ bnx2x_xmac_enable(params, vars, 0);
+ } else {
+ if (vars->line_speed < SPEED_10000)
+ bnx2x_emac_enable(params, vars, 0);
+ else
+ bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat);
+ }
+
+ /* Increment LFA count */
+ lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) |
+ (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >>
+ LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff)
+ << LINK_FLAP_AVOIDANCE_COUNT_OFFSET));
+ /* Clear link flap reason */
+ lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+
+ /* Disable NIG DRAIN */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+ /* Enable interrupts */
+ bnx2x_link_int_enable(params);
+ return 0;
+}
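
The lfa_sts update above is a read-modify-write of an 8-bit counter embedded in the status word: extract the field, increment it modulo 256, and splice it back without disturbing the neighbouring bits. A compact sketch of that pattern, using an illustrative mask/offset pair in place of LINK_FLAP_AVOIDANCE_COUNT_MASK/OFFSET:

#include <stdint.h>
#include <stdio.h>

#define LFA_COUNT_MASK   0x00ff0000u	/* illustrative field layout */
#define LFA_COUNT_OFFSET 16

/* Increment an 8-bit counter embedded in a status word, wrapping at 256,
 * without touching the other bits.
 */
static uint32_t lfa_count_inc(uint32_t sts)
{
	uint32_t cnt = (sts & LFA_COUNT_MASK) >> LFA_COUNT_OFFSET;

	cnt = (cnt + 1) & 0xff;
	return (sts & ~LFA_COUNT_MASK) | (cnt << LFA_COUNT_OFFSET);
}

int main(void)
{
	uint32_t sts = 0x00ff00a5;	/* counter at 0xff, other bits set */

	printf("0x%08x -> 0x%08x\n", (unsigned)sts, (unsigned)lfa_count_inc(sts));
	return 0;
}
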
+
+static void bnx2x_cannot_avoid_link_flap(struct link_params *params,
+ struct link_vars *vars,
+ int lfa_status)
+{
+ u32 lfa_sts, cfg_idx, tmp_val;
+ struct bnx2x *bp = params->bp;
+
+ bnx2x_link_reset(params, vars, 1);
+
+ if (!params->lfa_base)
+ return;
+ /* Store the new link parameters */
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_duplex),
+ params->req_duplex[0] | (params->req_duplex[1] << 16));
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_flow_ctrl),
+ params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16));
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_line_speed),
+ params->req_line_speed[0] | (params->req_line_speed[1] << 16));
+
+ for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) {
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa,
+ speed_cap_mask[cfg_idx]),
+ params->speed_cap_mask[cfg_idx]);
+ }
+
+ tmp_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config));
+ tmp_val &= ~REQ_FC_AUTO_ADV_MASK;
+ tmp_val |= params->req_fc_auto_adv;
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config), tmp_val);
+
+ lfa_sts = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts));
+
+ /* Clear the "Don't Clear Statistics" bit, and set reason */
+ lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT;
+
+ /* Set link flap reason */
+ lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+ lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) <<
+ LFA_LINK_FLAP_REASON_OFFSET);
+
+ /* Increment link flap counter */
+ lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) |
+ (((((lfa_sts & LINK_FLAP_COUNT_MASK) >>
+ LINK_FLAP_COUNT_OFFSET) + 1) & 0xff)
+ << LINK_FLAP_COUNT_OFFSET));
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+ /* Proceed with regular link initialization */
+}
+
int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
{
+ int lfa_status;
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Phy Initialization started\n");
DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
@@ -12274,6 +12167,19 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
vars->mac_type = MAC_TYPE_NONE;
vars->phy_flags = 0;
+ /* Driver opens NIG-BRB filters */
+ bnx2x_set_rx_filter(params, 1);
+ /* Check if link flap can be avoided */
+ lfa_status = bnx2x_check_lfa(params);
+
+ if (lfa_status == 0) {
+ DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n");
+ return bnx2x_avoid_link_flap(params, vars);
+ }
+
+ DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_sta=0x%x\n",
+ lfa_status);
+ bnx2x_cannot_avoid_link_flap(params, vars, lfa_status);
/* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
@@ -12356,13 +12262,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
}
- /* Stop BigMac rx */
- if (!CHIP_IS_E3(bp))
- bnx2x_bmac_rx_disable(bp, port);
- else {
- bnx2x_xmac_disable(params);
- bnx2x_umac_disable(params);
- }
+ if (!CHIP_IS_E3(bp)) {
+ bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
+ } else {
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
+ }
/* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -12420,6 +12325,56 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
vars->phy_flags = 0;
return 0;
}
+int bnx2x_lfa_reset(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 0;
+ vars->phy_flags = 0;
+ if (!params->lfa_base)
+ return bnx2x_link_reset(params, vars, 1);
+ /*
+ * Activate NIG drain so that during this time the device won't send
+ * anything while it is unable to respond.
+ */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
+
+ /*
+ * Gracefully close the gate from the BMAC to the NIG so that no half
+ * packets are passed.
+ */
+ if (!CHIP_IS_E3(bp))
+ bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
+ if (CHIP_IS_E3(bp)) {
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
+ }
+ /* Wait 10ms for the pipe to clean up */
+ usleep_range(10000, 20000);
+
+ /* Clean the NIG-BRB using the network filters in a way that will
+ * not cut a packet in the middle.
+ */
+ bnx2x_set_rx_filter(params, 0);
+
+ /*
+ * Re-open the gate between the BMAC and the NIG, after verifying the
+ * gate to the BRB is closed; otherwise packets may arrive at the
+ * firmware before the driver has initialized it. The target is to
+ * achieve minimum management protocol downtime.
+ */
+ if (!CHIP_IS_E3(bp))
+ bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1);
+
+ if (CHIP_IS_E3(bp)) {
+ bnx2x_set_xmac_rxtx(params, 1);
+ bnx2x_set_umac_rxtx(params, 1);
+ }
+ /* Disable NIG drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+ return 0;
+}
/****************************************************************************/
/* Common function */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 51cac81..9165b89 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -155,7 +155,7 @@ struct bnx2x_phy {
#define FLAGS_DUMMY_READ (1<<9)
#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
#define FLAGS_TX_ERROR_CHECK (1<<12)
-#define FLAGS_EEE_10GBT (1<<13)
+#define FLAGS_EEE (1<<13)
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
@@ -216,6 +216,7 @@ struct bnx2x_phy {
phy_specific_func_t phy_specific_func;
#define DISABLE_TX 1
#define ENABLE_TX 2
+#define PHY_INIT 3
};
/* Inputs parameters to the CLC */
@@ -304,6 +305,8 @@ struct link_params {
struct bnx2x *bp;
u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
req_flow_ctrl is set to AUTO */
+ u16 rsrv1;
+ u32 lfa_base;
};
/* Output parameters */
@@ -356,7 +359,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
to 0 */
int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
u8 reset_ext_phy);
-
+int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars);
/* bnx2x_link_update should be called upon link interrupt */
int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e11485c..f7ed122 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2166,7 +2166,6 @@ void bnx2x_link_set(struct bnx2x *bp)
{
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
- bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
@@ -2179,12 +2178,19 @@ static void bnx2x__link_reset(struct bnx2x *bp)
{
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
- bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
} else
BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
+void bnx2x_force_link_reset(struct bnx2x *bp)
+{
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_release_phy_lock(bp);
+}
+
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
u8 rc = 0;
@@ -6751,7 +6757,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
u32 low, high;
u32 val;
- bnx2x__link_reset(bp);
DP(NETIF_MSG_HW, "starting port init port %d\n", port);
@@ -8244,12 +8249,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
* bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
*
* @bp: driver handle
+ * @keep_link: true iff link should be kept up
*/
-void bnx2x_send_unload_done(struct bnx2x *bp)
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
{
+ u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
+
/* Report UNLOAD_DONE to MCP */
if (!BP_NOMCP(bp))
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
}
static int bnx2x_func_wait_started(struct bnx2x *bp)
@@ -8318,7 +8326,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
return 0;
}
-void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
{
int port = BP_PORT(bp);
int i, rc = 0;
@@ -8440,7 +8448,7 @@ unload_error:
/* Report UNLOAD_DONE to MCP */
- bnx2x_send_unload_done(bp);
+ bnx2x_send_unload_done(bp, keep_link);
}
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
@@ -8852,7 +8860,8 @@ int bnx2x_leader_reset(struct bnx2x *bp)
* driver is owner of the HW
*/
if (!global && !BP_NOMCP(bp)) {
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+ DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
rc = -EAGAIN;
@@ -8958,7 +8967,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
/* Stop the driver */
/* If interface has been removed - break */
- if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+ if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
return;
bp->recovery_state = BNX2X_RECOVERY_WAIT;
@@ -9124,7 +9133,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bp->sp_rtnl_state = 0;
smp_mb();
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
bnx2x_nic_load(bp, LOAD_NORMAL);
goto sp_rtnl_exit;
@@ -9310,7 +9319,8 @@ static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
{
- u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
+ DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
if (!rc) {
BNX2X_ERR("MCP response failure, aborting\n");
return -EBUSY;
@@ -11000,7 +11010,7 @@ static int bnx2x_close(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
/* Unload the driver, release IRQs */
- bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
/* Power off */
bnx2x_set_power_state(bp, PCI_D3hot);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 28a0bcf..1b1999d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -4949,6 +4949,10 @@
#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0)
#define UMAC_REG_COMMAND_CONFIG 0x8
+/* [RW 16] This is the duration for which the MAC must wait to go back to
+ * ACTIVE state from LPI state when it receives a packet for transmission.
+ * The decrement unit is 1 micro-second. */
+#define UMAC_REG_EEE_WAKE_TIMER 0x6c
/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
* to bit 17 of the MAC address etc. */
#define UMAC_REG_MAC_ADDR0 0xc
@@ -4958,6 +4962,8 @@
/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
* logic to check frames. */
#define UMAC_REG_MAXFR 0x14
+#define UMAC_REG_UMAC_EEE_CTRL 0x64
+#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN (0x1<<3)
/* [RW 8] The event id for aggregated interrupt 0 */
#define USDM_REG_AGG_INT_EVENT_0 0xc4038
#define USDM_REG_AGG_INT_EVENT_1 0xc403c
@@ -6992,6 +6998,7 @@ Theotherbitsarereservedandshouldbezero*/
/* BCM84833 only */
#define MDIO_84833_TOP_CFG_FW_REV 0x400f
#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
+#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
#define MDIO_84833_SUPER_ISOLATE 0x8000
/* These are mailbox register set used by 84833. */
@@ -7160,10 +7167,11 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_REG_GPHY_ID_54618SE 0x5cd5
#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
-#define MDIO_REG_GPHY_EEE_ADV 0x3c
-#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
-#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15
+#define MDIO_REG_GPHY_EXP_ACCESS 0x17
+#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00
+#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40
#define MDIO_REG_GPHY_AUX_STATUS 0x19
#define MDIO_REG_INTR_STATUS 0x1a
#define MDIO_REG_INTR_MASK 0x1b
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 62f754b..71971a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -229,8 +229,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
*/
list_add_tail(&spacer.link, &o->pending_comp);
mb();
- list_del(&elem->link);
- list_add_tail(&elem->link, &o->pending_comp);
+ list_move_tail(&elem->link, &o->pending_comp);
list_del(&spacer.link);
} else
break;
@@ -5620,7 +5619,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->function_mode = cpu_to_le16(start_params->mf_mode);
+ rdata->function_mode = (u8)start_params->mf_mode;
rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index a1d0446..348ed02 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -39,14 +39,39 @@ static inline long bnx2x_hilo(u32 *hiref)
#endif
}
-static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
- u16 res = sizeof(struct host_port_stats) >> 2;
+ u16 res = 0;
- /* if PFC stats are not supported by the MFW, don't DMA them */
- if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
- res -= (sizeof(u32)*4) >> 2;
+ /* 'newest' convention - shmem2 contains the size of the port stats */
+ if (SHMEM2_HAS(bp, sizeof_port_stats)) {
+ u32 size = SHMEM2_RD(bp, sizeof_port_stats);
+ if (size)
+ res = size;
+ /* prevent newer BC from causing buffer overflow */
+ if (res > sizeof(struct host_port_stats))
+ res = sizeof(struct host_port_stats);
+ }
+
+ /* Older convention - all BCs support the port stats' fields up until
+ * the 'not_used' field
+ */
+ if (!res) {
+ res = offsetof(struct host_port_stats, not_used) + 4;
+
+ /* if PFC stats are supported by the MFW, DMA them as well */
+ if (bp->flags & BC_SUPPORTS_PFC_STATS) {
+ res += offsetof(struct host_port_stats,
+ pfc_frames_rx_lo) -
+ offsetof(struct host_port_stats,
+ pfc_frames_tx_hi) + 4;
+ }
+ }
+
+ res >>= 2;
+
+ WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
return res;
}
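
The new bnx2x_get_port_stats_dma_len() returns a length in 32-bit words (the final res >>= 2): it prefers the size advertised by newer bootcode in shmem2, clamped to the host structure, and otherwise falls back to an offsetof()-based length that optionally appends the four PFC counters. A standalone sketch of that decision logic; struct port_stats is a hypothetical stand-in for struct host_port_stats:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct host_port_stats. */
struct port_stats {
	uint32_t legacy[40];
	uint32_t not_used;
	uint32_t pfc_frames_tx_hi;
	uint32_t pfc_frames_tx_lo;
	uint32_t pfc_frames_rx_hi;
	uint32_t pfc_frames_rx_lo;
};

/* Return the DMA length in 32-bit words.
 * shmem_size: size advertised by newer firmware (0 if unavailable).
 * has_pfc:    whether the firmware supports the PFC counters.
 */
static uint32_t port_stats_dma_len(uint32_t shmem_size, int has_pfc)
{
	uint32_t res;

	if (shmem_size) {
		res = shmem_size;
		if (res > sizeof(struct port_stats))	/* clamp newer FW */
			res = sizeof(struct port_stats);
	} else {
		/* Everything up to and including 'not_used' ... */
		res = offsetof(struct port_stats, not_used) + 4;
		/* ... plus the four PFC counters when supported. */
		if (has_pfc)
			res += offsetof(struct port_stats, pfc_frames_rx_lo) -
			       offsetof(struct port_stats, pfc_frames_tx_hi) + 4;
	}
	return res >> 2;			/* bytes -> dwords */
}

int main(void)
{
	printf("%u dwords with PFC, %u without\n",
	       (unsigned)port_stats_dma_len(0, 1),
	       (unsigned)port_stats_dma_len(0, 0));
	return 0;
}
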
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 3b4fc61..cc8434f 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -823,10 +823,8 @@ static void cnic_free_context(struct cnic_dev *dev)
}
}
-static void __cnic_free_uio(struct cnic_uio_dev *udev)
+static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
- uio_unregister_device(&udev->cnic_uinfo);
-
if (udev->l2_buf) {
dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
udev->l2_buf, udev->l2_buf_map);
@@ -839,6 +837,14 @@ static void __cnic_free_uio(struct cnic_uio_dev *udev)
udev->l2_ring = NULL;
}
+}
+
+static void __cnic_free_uio(struct cnic_uio_dev *udev)
+{
+ uio_unregister_device(&udev->cnic_uinfo);
+
+ __cnic_free_uio_rings(udev);
+
pci_dev_put(udev->pdev);
kfree(udev);
}
@@ -862,6 +868,8 @@ static void cnic_free_resc(struct cnic_dev *dev)
if (udev) {
udev->dev = NULL;
cp->udev = NULL;
+ if (udev->uio_dev == -1)
+ __cnic_free_uio_rings(udev);
}
cnic_free_context(dev);
@@ -996,6 +1004,34 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
return 0;
}
+static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
+{
+ struct cnic_local *cp = udev->dev->cnic_priv;
+
+ if (udev->l2_ring)
+ return 0;
+
+ udev->l2_ring_size = pages * BCM_PAGE_SIZE;
+ udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
+ &udev->l2_ring_map,
+ GFP_KERNEL | __GFP_COMP);
+ if (!udev->l2_ring)
+ return -ENOMEM;
+
+ udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+ udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+ udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
+ &udev->l2_buf_map,
+ GFP_KERNEL | __GFP_COMP);
+ if (!udev->l2_buf) {
+ __cnic_free_uio_rings(udev);
+ return -ENOMEM;
+ }
+
+ return 0;
+
+}
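
__cnic_alloc_uio_rings() is written to be idempotent and all-or-nothing: if the ring is already allocated it returns immediately, and if the second allocation fails it rolls the first one back via __cnic_free_uio_rings(). A small userspace sketch of the same shape, with malloc/free standing in for dma_alloc_coherent/dma_free_coherent:

#include <stdlib.h>
#include <stdio.h>

/* Simplified stand-in for struct cnic_uio_dev. */
struct uio_rings {
	void *l2_ring;
	void *l2_buf;
};

/* Idempotent two-stage allocation: succeed as a whole or leave the
 * structure untouched (malloc/free stand in for the DMA helpers).
 */
static int uio_rings_alloc(struct uio_rings *r, size_t ring_sz, size_t buf_sz)
{
	if (r->l2_ring)			/* already allocated: nothing to do */
		return 0;

	r->l2_ring = malloc(ring_sz);
	if (!r->l2_ring)
		return -1;

	r->l2_buf = malloc(buf_sz);
	if (!r->l2_buf) {		/* roll back the first allocation */
		free(r->l2_ring);
		r->l2_ring = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct uio_rings r = { 0 };

	printf("first:  %d\n", uio_rings_alloc(&r, 4096, 65536));
	printf("second: %d (no-op, already allocated)\n",
	       uio_rings_alloc(&r, 4096, 65536));
	free(r.l2_buf);
	free(r.l2_ring);
	return 0;
}
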
+
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -1005,6 +1041,11 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
list_for_each_entry(udev, &cnic_udev_list, list) {
if (udev->pdev == dev->pcidev) {
udev->dev = dev;
+ if (__cnic_alloc_uio_rings(udev, pages)) {
+ udev->dev = NULL;
+ read_unlock(&cnic_dev_lock);
+ return -ENOMEM;
+ }
cp->udev = udev;
read_unlock(&cnic_dev_lock);
return 0;
@@ -1020,20 +1061,9 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
udev->dev = dev;
udev->pdev = dev->pcidev;
- udev->l2_ring_size = pages * BCM_PAGE_SIZE;
- udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
- &udev->l2_ring_map,
- GFP_KERNEL | __GFP_COMP);
- if (!udev->l2_ring)
- goto err_udev;
- udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
- udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
- udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
- &udev->l2_buf_map,
- GFP_KERNEL | __GFP_COMP);
- if (!udev->l2_buf)
- goto err_dma;
+ if (__cnic_alloc_uio_rings(udev, pages))
+ goto err_udev;
write_lock(&cnic_dev_lock);
list_add(&udev->list, &cnic_udev_list);
@@ -1044,9 +1074,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
cp->udev = udev;
return 0;
- err_dma:
- dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
- udev->l2_ring, udev->l2_ring_map);
+
err_udev:
kfree(udev);
return -ENOMEM;
@@ -1260,7 +1288,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
if (ret)
goto error;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (CNIC_SUPPORTS_FCOE(cp)) {
ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
if (ret)
goto error;
@@ -1275,6 +1303,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
if (ret)
goto error;
+ if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+ return 0;
+
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
cp->l2_rx_ring_size = 15;
@@ -3050,6 +3081,22 @@ static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
IGU_INT_DISABLE, 0);
}
+static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
+ IGU_INT_ENABLE, 1);
+}
+
+static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
+ IGU_INT_ENABLE, 1);
+}
+
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
u32 last_status = *info->status_idx_ptr;
@@ -3086,9 +3133,8 @@ static void cnic_service_bnx2x_bh(unsigned long data)
CNIC_WR16(dev, cp->kcq1.io_addr,
cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
- cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
- status_idx, IGU_INT_ENABLE, 1);
+ if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
+ cp->arm_int(dev, status_idx);
break;
}
@@ -4845,6 +4891,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
buf_map = udev->l2_buf_map;
for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
struct eth_tx_start_bd *start_bd = &txbd->start_bd;
+ struct eth_tx_parse_bd_e1x *pbd_e1x =
+ &((txbd + 1)->parse_bd_e1x);
+ struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
@@ -4854,10 +4903,15 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
start_bd->nbytes = cpu_to_le16(0x10);
start_bd->nbd = cpu_to_le16(3);
start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- start_bd->general_data = (UNICAST_ADDRESS <<
- ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+ start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ pbd_e2->parsing_data = (UNICAST_ADDRESS <<
+ ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
+ else
+ pbd_e1x->global_data = (UNICAST_ADDRESS <<
+ ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
}
val = (u64) ring_map >> 32;
@@ -5308,7 +5362,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
/* Need to wait for the ring shutdown event to complete
* before clearing the CNIC_UP flag.
*/
- while (cp->udev->uio_dev != -1 && i < 15) {
+ while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
msleep(100);
i++;
}
@@ -5473,8 +5527,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
- !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+ if (CNIC_SUPPORTS_FCOE(cp))
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
@@ -5492,10 +5545,13 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cp->stop_cm = cnic_cm_stop_bnx2x_hw;
cp->enable_int = cnic_enable_bnx2x_int;
cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
cp->ack_int = cnic_ack_bnx2x_e2_msix;
- else
+ cp->arm_int = cnic_arm_bnx2x_e2_msix;
+ } else {
cp->ack_int = cnic_ack_bnx2x_msix;
+ cp->arm_int = cnic_arm_bnx2x_msix;
+ }
cp->close_conn = cnic_close_bnx2x_conn;
return cdev;
}
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 3032809..148604c 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -334,6 +334,7 @@ struct cnic_local {
void (*enable_int)(struct cnic_dev *);
void (*disable_int_sync)(struct cnic_dev *);
void (*ack_int)(struct cnic_dev *);
+ void (*arm_int)(struct cnic_dev *, u32 index);
void (*close_conn)(struct cnic_sock *, u32 opcode);
};
@@ -474,6 +475,10 @@ struct bnx2x_bd_chain_next {
MAX_STAT_COUNTER_ID_E1))
#endif
+#define CNIC_SUPPORTS_FCOE(cp) \
+ (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \
+ !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+
#define CNIC_RAMROD_TMO (HZ / 4)
#endif
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 382c98b..ede3db3 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -896,7 +896,7 @@ struct tstorm_tcp_tcp_ag_context_section {
u32 snd_nxt;
u32 rtt_seq;
u32 rtt_time;
- u32 __reserved66;
+ u32 wnd_right_edge_local;
u32 wnd_right_edge;
u32 tcp_agg_vars1;
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 5cb8888..865095a 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.12"
-#define CNIC_MODULE_RELDATE "June 29, 2012"
+#define CNIC_MODULE_VERSION "2.5.14"
+#define CNIC_MODULE_RELDATE "Sep 30, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 388d322..46280ba 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -44,10 +44,8 @@
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
-#if IS_ENABLED(CONFIG_HWMON)
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#endif
#include <net/checksum.h>
#include <net/ip.h>
@@ -92,10 +90,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 124
+#define TG3_MIN_NUM 125
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "March 21, 2012"
+#define DRV_MODULE_RELDATE "September 26, 2012"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -6263,7 +6261,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
tp->rx_refill = false;
- for (i = 1; i < tp->irq_cnt; i++)
+ for (i = 1; i <= tp->rxq_cnt; i++)
err |= tg3_rx_prodring_xfer(tp, dpr,
&tp->napi[i].prodring);
@@ -7592,15 +7590,11 @@ static int tg3_init_rings(struct tg3 *tp)
return 0;
}
-/*
- * Must not be invoked with interrupt sources disabled and
- * the hardware shutdown down.
- */
-static void tg3_free_consistent(struct tg3 *tp)
+static void tg3_mem_tx_release(struct tg3 *tp)
{
int i;
- for (i = 0; i < tp->irq_cnt; i++) {
+ for (i = 0; i < tp->irq_max; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
if (tnapi->tx_ring) {
@@ -7611,17 +7605,114 @@ static void tg3_free_consistent(struct tg3 *tp)
kfree(tnapi->tx_buffers);
tnapi->tx_buffers = NULL;
+ }
+}
- if (tnapi->rx_rcb) {
- dma_free_coherent(&tp->pdev->dev,
- TG3_RX_RCB_RING_BYTES(tp),
- tnapi->rx_rcb,
- tnapi->rx_rcb_mapping);
- tnapi->rx_rcb = NULL;
- }
+static int tg3_mem_tx_acquire(struct tg3 *tp)
+{
+ int i;
+ struct tg3_napi *tnapi = &tp->napi[0];
+
+ /* If multivector TSS is enabled, vector 0 does not handle
+ * tx interrupts. Don't allocate any resources for it.
+ */
+ if (tg3_flag(tp, ENABLE_TSS))
+ tnapi++;
+
+ for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
+ tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
+ TG3_TX_RING_SIZE, GFP_KERNEL);
+ if (!tnapi->tx_buffers)
+ goto err_out;
+
+ tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_TX_RING_BYTES,
+ &tnapi->tx_desc_mapping,
+ GFP_KERNEL);
+ if (!tnapi->tx_ring)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ tg3_mem_tx_release(tp);
+ return -ENOMEM;
+}
+
+static void tg3_mem_rx_release(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_max; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
tg3_rx_prodring_fini(tp, &tnapi->prodring);
+ if (!tnapi->rx_rcb)
+ continue;
+
+ dma_free_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ tnapi->rx_rcb,
+ tnapi->rx_rcb_mapping);
+ tnapi->rx_rcb = NULL;
+ }
+}
+
+static int tg3_mem_rx_acquire(struct tg3 *tp)
+{
+ unsigned int i, limit;
+
+ limit = tp->rxq_cnt;
+
+ /* If RSS is enabled, we need a (dummy) producer ring
+ * set on vector zero. This is the true hw prodring.
+ */
+ if (tg3_flag(tp, ENABLE_RSS))
+ limit++;
+
+ for (i = 0; i < limit; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+ goto err_out;
+
+ /* If multivector RSS is enabled, vector 0
+ * does not handle rx or tx interrupts.
+ * Don't allocate any resources for it.
+ */
+ if (!i && tg3_flag(tp, ENABLE_RSS))
+ continue;
+
+ tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
+ if (!tnapi->rx_rcb)
+ goto err_out;
+
+ memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+ }
+
+ return 0;
+
+err_out:
+ tg3_mem_rx_release(tp);
+ return -ENOMEM;
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
if (tnapi->hw_status) {
dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
tnapi->hw_status,
@@ -7630,6 +7721,9 @@ static void tg3_free_consistent(struct tg3 *tp)
}
}
+ tg3_mem_rx_release(tp);
+ tg3_mem_tx_release(tp);
+
if (tp->hw_stats) {
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
@@ -7668,72 +7762,38 @@ static int tg3_alloc_consistent(struct tg3 *tp)
memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
sblk = tnapi->hw_status;
- if (tg3_rx_prodring_init(tp, &tnapi->prodring))
- goto err_out;
+ if (tg3_flag(tp, ENABLE_RSS)) {
+ u16 *prodptr = NULL;
- /* If multivector TSS is enabled, vector 0 does not handle
- * tx interrupts. Don't allocate any resources for it.
- */
- if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
- (i && tg3_flag(tp, ENABLE_TSS))) {
- tnapi->tx_buffers = kzalloc(
- sizeof(struct tg3_tx_ring_info) *
- TG3_TX_RING_SIZE, GFP_KERNEL);
- if (!tnapi->tx_buffers)
- goto err_out;
-
- tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
- TG3_TX_RING_BYTES,
- &tnapi->tx_desc_mapping,
- GFP_KERNEL);
- if (!tnapi->tx_ring)
- goto err_out;
- }
-
- /*
- * When RSS is enabled, the status block format changes
- * slightly. The "rx_jumbo_consumer", "reserved",
- * and "rx_mini_consumer" members get mapped to the
- * other three rx return ring producer indexes.
- */
- switch (i) {
- default:
- if (tg3_flag(tp, ENABLE_RSS)) {
- tnapi->rx_rcb_prod_idx = NULL;
+ /*
+ * When RSS is enabled, the status block format changes
+ * slightly. The "rx_jumbo_consumer", "reserved",
+ * and "rx_mini_consumer" members get mapped to the
+ * other three rx return ring producer indexes.
+ */
+ switch (i) {
+ case 1:
+ prodptr = &sblk->idx[0].rx_producer;
+ break;
+ case 2:
+ prodptr = &sblk->rx_jumbo_consumer;
+ break;
+ case 3:
+ prodptr = &sblk->reserved;
+ break;
+ case 4:
+ prodptr = &sblk->rx_mini_consumer;
break;
}
- /* Fall through */
- case 1:
+ tnapi->rx_rcb_prod_idx = prodptr;
+ } else {
tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
- break;
- case 2:
- tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
- break;
- case 3:
- tnapi->rx_rcb_prod_idx = &sblk->reserved;
- break;
- case 4:
- tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
- break;
}
-
- /*
- * If multivector RSS is enabled, vector 0 does not handle
- * rx or tx interrupts. Don't allocate any resources for it.
- */
- if (!i && tg3_flag(tp, ENABLE_RSS))
- continue;
-
- tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
- TG3_RX_RCB_RING_BYTES(tp),
- &tnapi->rx_rcb_mapping,
- GFP_KERNEL);
- if (!tnapi->rx_rcb)
- goto err_out;
-
- memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
}
+ if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
+ goto err_out;
+
return 0;
err_out:
@@ -8247,9 +8307,10 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
nic_addr);
}
-static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+
+static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
- int i;
+ int i = 0;
if (!tg3_flag(tp, ENABLE_TSS)) {
tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
@@ -8259,31 +8320,43 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
tw32(HOSTCC_TXCOL_TICKS, 0);
tw32(HOSTCC_TXMAX_FRAMES, 0);
tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+
+ for (; i < tp->txq_cnt; i++) {
+ u32 reg;
+
+ reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_coalesce_usecs);
+ reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames);
+ reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames_irq);
+ }
}
+ for (; i < tp->irq_max - 1; i++) {
+ tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+ }
+}
+
+static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+ int i = 0;
+ u32 limit = tp->rxq_cnt;
+
if (!tg3_flag(tp, ENABLE_RSS)) {
tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+ limit--;
} else {
tw32(HOSTCC_RXCOL_TICKS, 0);
tw32(HOSTCC_RXMAX_FRAMES, 0);
tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
}
- if (!tg3_flag(tp, 5705_PLUS)) {
- u32 val = ec->stats_block_coalesce_usecs;
-
- tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
- tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
-
- if (!netif_carrier_ok(tp->dev))
- val = 0;
-
- tw32(HOSTCC_STAT_COAL_TICKS, val);
- }
-
- for (i = 0; i < tp->irq_cnt - 1; i++) {
+ for (; i < limit; i++) {
u32 reg;
reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
@@ -8292,27 +8365,30 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
tw32(reg, ec->rx_max_coalesced_frames);
reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
tw32(reg, ec->rx_max_coalesced_frames_irq);
-
- if (tg3_flag(tp, ENABLE_TSS)) {
- reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
- tw32(reg, ec->tx_coalesce_usecs);
- reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
- tw32(reg, ec->tx_max_coalesced_frames);
- reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
- tw32(reg, ec->tx_max_coalesced_frames_irq);
- }
}
for (; i < tp->irq_max - 1; i++) {
tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+ }
+}
- if (tg3_flag(tp, ENABLE_TSS)) {
- tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
- tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
- tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
- }
+static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+ tg3_coal_tx_init(tp, ec);
+ tg3_coal_rx_init(tp, ec);
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ u32 val = ec->stats_block_coalesce_usecs;
+
+ tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+ tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+
+ if (!netif_carrier_ok(tp->dev))
+ val = 0;
+
+ tw32(HOSTCC_STAT_COAL_TICKS, val);
}
}
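For context, __tg3_set_coalesce() is reached from the driver's ethtool .set_coalesce handler; a sketch of that path, assuming the existing tg3_set_coalesce() wrapper and with its parameter range checks elided, looks like:

static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	/* ... validation of the requested coalescing values elided ... */

	memcpy(&tp->coal, ec, sizeof(*ec));

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}

	return 0;
}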
@@ -8570,13 +8646,12 @@ static void __tg3_set_rx_mode(struct net_device *dev)
}
}
-static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
int i;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
- tp->rss_ind_tbl[i] =
- ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+ tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
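The helper spreads the table slots round robin over qcnt queues; ethtool_rxfh_indir_default() in <linux/ethtool.h> is essentially:

/* Default RSS indirection: slot i maps to queue i modulo the ring count. */
static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
{
	return index % n_rx_rings;
}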
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
@@ -8598,7 +8673,7 @@ static void tg3_rss_check_indir_tbl(struct tg3 *tp)
}
if (i != TG3_RSS_INDIR_TBL_SIZE)
- tg3_rss_init_dflt_indir_tbl(tp);
+ tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
@@ -9495,7 +9570,6 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
return tg3_reset_hw(tp, reset_phy);
}
-#if IS_ENABLED(CONFIG_HWMON)
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
int i;
@@ -9548,22 +9622,17 @@ static const struct attribute_group tg3_group = {
.attrs = tg3_attributes,
};
-#endif
-
static void tg3_hwmon_close(struct tg3 *tp)
{
-#if IS_ENABLED(CONFIG_HWMON)
if (tp->hwmon_dev) {
hwmon_device_unregister(tp->hwmon_dev);
tp->hwmon_dev = NULL;
sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
}
-#endif
}
static void tg3_hwmon_open(struct tg3 *tp)
{
-#if IS_ENABLED(CONFIG_HWMON)
int i, err;
u32 size = 0;
struct pci_dev *pdev = tp->pdev;
@@ -9595,7 +9664,6 @@ static void tg3_hwmon_open(struct tg3 *tp)
dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
}
-#endif
}
@@ -10119,21 +10187,43 @@ static int tg3_request_firmware(struct tg3 *tp)
return 0;
}
-static bool tg3_enable_msix(struct tg3 *tp)
+static u32 tg3_irq_count(struct tg3 *tp)
{
- int i, rc;
- struct msix_entry msix_ent[tp->irq_max];
+ u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
- tp->irq_cnt = netif_get_num_default_rss_queues();
- if (tp->irq_cnt > 1) {
+ if (irq_cnt > 1) {
/* We want as many rx rings enabled as there are cpus.
* In multiqueue MSI-X mode, the first MSI-X vector
* only deals with link interrupts, etc, so we add
* one to the number of vectors we are requesting.
*/
- tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
+ irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
}
+ return irq_cnt;
+}
+
+static bool tg3_enable_msix(struct tg3 *tp)
+{
+ int i, rc;
+ struct msix_entry msix_ent[tp->irq_max];
+
+ tp->txq_cnt = tp->txq_req;
+ tp->rxq_cnt = tp->rxq_req;
+ if (!tp->rxq_cnt)
+ tp->rxq_cnt = netif_get_num_default_rss_queues();
+ if (tp->rxq_cnt > tp->rxq_max)
+ tp->rxq_cnt = tp->rxq_max;
+
+ /* Disable multiple TX rings by default. Simple round-robin hardware
+ * scheduling of the TX rings can cause starvation of rings with
+ * small packets when other rings have TSO or jumbo packets.
+ */
+ if (!tp->txq_req)
+ tp->txq_cnt = 1;
+
+ tp->irq_cnt = tg3_irq_count(tp);
+
for (i = 0; i < tp->irq_max; i++) {
msix_ent[i].entry = i;
msix_ent[i].vector = 0;
@@ -10148,27 +10238,28 @@ static bool tg3_enable_msix(struct tg3 *tp)
netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
tp->irq_cnt, rc);
tp->irq_cnt = rc;
+ tp->rxq_cnt = max(rc - 1, 1);
+ if (tp->txq_cnt)
+ tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
}
for (i = 0; i < tp->irq_max; i++)
tp->napi[i].irq_vec = msix_ent[i].vector;
- netif_set_real_num_tx_queues(tp->dev, 1);
- rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
- if (netif_set_real_num_rx_queues(tp->dev, rc)) {
+ if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
pci_disable_msix(tp->pdev);
return false;
}
- if (tp->irq_cnt > 1) {
- tg3_flag_set(tp, ENABLE_RSS);
+ if (tp->irq_cnt == 1)
+ return true;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
- tg3_flag_set(tp, ENABLE_TSS);
- netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
- }
- }
+ tg3_flag_set(tp, ENABLE_RSS);
+
+ if (tp->txq_cnt > 1)
+ tg3_flag_set(tp, ENABLE_TSS);
+
+ netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
return true;
}
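A worked example of how the queue and vector counts come out of tg3_enable_msix(), assuming a machine where netif_get_num_default_rss_queues() reports 8 and the device caps rxq_max at 4:

/*
 * rxq_cnt = min(8, rxq_max) = 4, txq_cnt = 1 (no explicit request)
 * irq_cnt = min(max(4, 1) + 1, irq_max) = min(5, 5) = 5
 *
 * Vector 0 handles link and other non-queue interrupts;
 * vectors 1 through 4 service the four RX rings.
 */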
@@ -10202,6 +10293,11 @@ defcfg:
if (!tg3_flag(tp, USING_MSIX)) {
tp->irq_cnt = 1;
tp->napi[0].irq_vec = tp->pdev->irq;
+ }
+
+ if (tp->irq_cnt == 1) {
+ tp->txq_cnt = 1;
+ tp->rxq_cnt = 1;
netif_set_real_num_tx_queues(tp->dev, 1);
netif_set_real_num_rx_queues(tp->dev, 1);
}
@@ -10219,38 +10315,11 @@ static void tg3_ints_fini(struct tg3 *tp)
tg3_flag_clear(tp, ENABLE_TSS);
}
-static int tg3_open(struct net_device *dev)
+static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
{
- struct tg3 *tp = netdev_priv(dev);
+ struct net_device *dev = tp->dev;
int i, err;
- if (tp->fw_needed) {
- err = tg3_request_firmware(tp);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
- if (err)
- return err;
- } else if (err) {
- netdev_warn(tp->dev, "TSO capability disabled\n");
- tg3_flag_clear(tp, TSO_CAPABLE);
- } else if (!tg3_flag(tp, TSO_CAPABLE)) {
- netdev_notice(tp->dev, "TSO capability restored\n");
- tg3_flag_set(tp, TSO_CAPABLE);
- }
- }
-
- netif_carrier_off(tp->dev);
-
- err = tg3_power_up(tp);
- if (err)
- return err;
-
- tg3_full_lock(tp, 0);
-
- tg3_disable_ints(tp);
- tg3_flag_clear(tp, INIT_COMPLETE);
-
- tg3_full_unlock(tp);
-
/*
* Setup interrupts first so we know how
* many NAPI resources to allocate
@@ -10284,7 +10353,7 @@ static int tg3_open(struct net_device *dev)
tg3_full_lock(tp, 0);
- err = tg3_init_hw(tp, 1);
+ err = tg3_init_hw(tp, reset_phy);
if (err) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
@@ -10295,7 +10364,7 @@ static int tg3_open(struct net_device *dev)
if (err)
goto err_out3;
- if (tg3_flag(tp, USING_MSI)) {
+ if (test_irq && tg3_flag(tp, USING_MSI)) {
err = tg3_test_msi(tp);
if (err) {
@@ -10351,20 +10420,18 @@ err_out2:
err_out1:
tg3_ints_fini(tp);
- tg3_frob_aux_power(tp, false);
- pci_set_power_state(tp->pdev, PCI_D3hot);
+
return err;
}
-static int tg3_close(struct net_device *dev)
+static void tg3_stop(struct tg3 *tp)
{
int i;
- struct tg3 *tp = netdev_priv(dev);
tg3_napi_disable(tp);
tg3_reset_task_cancel(tp);
- netif_tx_stop_all_queues(dev);
+ netif_tx_disable(tp->dev);
tg3_timer_stop(tp);
@@ -10389,13 +10456,60 @@ static int tg3_close(struct net_device *dev)
tg3_ints_fini(tp);
- /* Clear stats across close / open calls */
- memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
- memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
-
tg3_napi_fini(tp);
tg3_free_consistent(tp);
+}
+
+static int tg3_open(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (tp->fw_needed) {
+ err = tg3_request_firmware(tp);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (err)
+ return err;
+ } else if (err) {
+ netdev_warn(tp->dev, "TSO capability disabled\n");
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ } else if (!tg3_flag(tp, TSO_CAPABLE)) {
+ netdev_notice(tp->dev, "TSO capability restored\n");
+ tg3_flag_set(tp, TSO_CAPABLE);
+ }
+ }
+
+ netif_carrier_off(tp->dev);
+
+ err = tg3_power_up(tp);
+ if (err)
+ return err;
+
+ tg3_full_lock(tp, 0);
+
+ tg3_disable_ints(tp);
+ tg3_flag_clear(tp, INIT_COMPLETE);
+
+ tg3_full_unlock(tp);
+
+ err = tg3_start(tp, true, true);
+ if (err) {
+ tg3_frob_aux_power(tp, false);
+ pci_set_power_state(tp->pdev, PCI_D3hot);
+ }
+ return err;
+}
+
+static int tg3_close(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ tg3_stop(tp);
+
+ /* Clear stats across close / open calls */
+ memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+ memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
tg3_power_down(tp);
@@ -11185,11 +11299,11 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
if (netif_running(tp->dev))
- info->data = tp->irq_cnt;
+ info->data = tp->rxq_cnt;
else {
info->data = num_online_cpus();
- if (info->data > TG3_IRQ_MAX_VECS_RSS)
- info->data = TG3_IRQ_MAX_VECS_RSS;
+ if (info->data > TG3_RSS_MAX_NUM_QS)
+ info->data = TG3_RSS_MAX_NUM_QS;
}
/* The first interrupt vector only
@@ -11246,6 +11360,58 @@ static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
return 0;
}
+static void tg3_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 deflt_qs = netif_get_num_default_rss_queues();
+
+ channel->max_rx = tp->rxq_max;
+ channel->max_tx = tp->txq_max;
+
+ if (netif_running(dev)) {
+ channel->rx_count = tp->rxq_cnt;
+ channel->tx_count = tp->txq_cnt;
+ } else {
+ if (tp->rxq_req)
+ channel->rx_count = tp->rxq_req;
+ else
+ channel->rx_count = min(deflt_qs, tp->rxq_max);
+
+ if (tp->txq_req)
+ channel->tx_count = tp->txq_req;
+ else
+ channel->tx_count = min(deflt_qs, tp->txq_max);
+ }
+}
+
+static int tg3_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ return -EOPNOTSUPP;
+
+ if (channel->rx_count > tp->rxq_max ||
+ channel->tx_count > tp->txq_max)
+ return -EINVAL;
+
+ tp->rxq_req = channel->rx_count;
+ tp->txq_req = channel->tx_count;
+
+ if (!netif_running(dev))
+ return 0;
+
+ tg3_stop(tp);
+
+ netif_carrier_off(dev);
+
+ tg3_start(tp, true, false);
+
+ return 0;
+}
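For reference, these callbacks operate on struct ethtool_channels as defined in <linux/ethtool.h>; the fields tg3 reads and fills are annotated below:

struct ethtool_channels {
	__u32 cmd;
	__u32 max_rx;		/* filled from tp->rxq_max */
	__u32 max_tx;		/* filled from tp->txq_max */
	__u32 max_other;
	__u32 max_combined;
	__u32 rx_count;		/* current or requested RX queue count */
	__u32 tx_count;		/* current or requested TX queue count */
	__u32 other_count;
	__u32 combined_count;
};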
+
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
@@ -12494,6 +12660,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_rxfh_indir_size = tg3_get_rxfh_indir_size,
.get_rxfh_indir = tg3_get_rxfh_indir,
.set_rxfh_indir = tg3_set_rxfh_indir,
+ .get_channels = tg3_get_channels,
+ .set_channels = tg3_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
};
@@ -14510,10 +14678,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, 57765_PLUS)) {
tg3_flag_set(tp, SUPPORT_MSIX);
tp->irq_max = TG3_IRQ_MAX_VECS;
- tg3_rss_init_dflt_indir_tbl(tp);
}
}
+ tp->txq_max = 1;
+ tp->rxq_max = 1;
+ if (tp->irq_max > 1) {
+ tp->rxq_max = TG3_RSS_MAX_NUM_QS;
+ tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tp->txq_max = tp->irq_max - 1;
+ }
+
if (tg3_flag(tp, 5755_PLUS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tg3_flag_set(tp, SHORT_DMA_BUG);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 6d52cb2..d9308c32 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2860,7 +2860,8 @@ struct tg3_rx_prodring_set {
dma_addr_t rx_jmb_mapping;
};
-#define TG3_IRQ_MAX_VECS_RSS 5
+#define TG3_RSS_MAX_NUM_QS 4
+#define TG3_IRQ_MAX_VECS_RSS (TG3_RSS_MAX_NUM_QS + 1)
#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS
struct tg3_napi {
@@ -3037,6 +3038,9 @@ struct tg3 {
void (*write32_tx_mbox) (struct tg3 *, u32,
u32);
u32 dma_limit;
+ u32 txq_req;
+ u32 txq_cnt;
+ u32 txq_max;
/* begin "rx thread" cacheline section */
struct tg3_napi napi[TG3_IRQ_MAX_VECS];
@@ -3051,6 +3055,9 @@ struct tg3 {
u32 rx_std_max_post;
u32 rx_offset;
u32 rx_pkt_map_sz;
+ u32 rxq_req;
+ u32 rxq_cnt;
+ u32 rxq_max;
bool rx_refill;