author    jfv <jfv@FreeBSD.org>    2010-09-28 00:13:15 +0000
committer jfv <jfv@FreeBSD.org>    2010-09-28 00:13:15 +0000
commit    6e3a6af4dca11cb0ff57310539129574a52697ec (patch)
tree      21280311b19d0a44f429c7a43c6e9ff933e22698 /sys/dev/e1000
parent    0d99f2e43e7ec663501399402dc10f4d73b985d4 (diff)
Update code from Intel:
- Sync shared code with Intel internal
- New client chipset support added
- em driver: fixes to 82574, limit queues to 1 but use MSIX
- em driver: large changes in TX checksum offload and TSO code, thanks to yongari
- some small changes for watchdog issues
- igb driver: local timer watchdog code was missing locking; this and a couple of other watchdog-related fixes
- bug in rx discard found by Andrew Boyer: check for null pointer

MFC: a week
Diffstat (limited to 'sys/dev/e1000')
-rw-r--r--  sys/dev/e1000/e1000_82571.c   | 264
-rw-r--r--  sys/dev/e1000/e1000_82571.h   |   8
-rw-r--r--  sys/dev/e1000/e1000_82575.c   | 160
-rw-r--r--  sys/dev/e1000/e1000_82575.h   |   1
-rw-r--r--  sys/dev/e1000/e1000_api.c     |  40
-rw-r--r--  sys/dev/e1000/e1000_api.h     |   3
-rw-r--r--  sys/dev/e1000/e1000_defines.h |  37
-rw-r--r--  sys/dev/e1000/e1000_hw.h      |  69
-rw-r--r--  sys/dev/e1000/e1000_ich8lan.c | 831
-rw-r--r--  sys/dev/e1000/e1000_ich8lan.h |  40
-rw-r--r--  sys/dev/e1000/e1000_mac.c     |  10
-rw-r--r--  sys/dev/e1000/e1000_nvm.c     | 186
-rw-r--r--  sys/dev/e1000/e1000_nvm.h     |   5
-rw-r--r--  sys/dev/e1000/e1000_phy.c     |  22
-rw-r--r--  sys/dev/e1000/e1000_phy.h     |   2
-rw-r--r--  sys/dev/e1000/e1000_regs.h    |   6
-rw-r--r--  sys/dev/e1000/if_em.c         | 607
-rw-r--r--  sys/dev/e1000/if_em.h         |   5
-rw-r--r--  sys/dev/e1000/if_igb.c        |  37
-rw-r--r--  sys/dev/e1000/if_igb.h        |  20
-rw-r--r--  sys/dev/e1000/if_lem.c        |   5
21 files changed, 1822 insertions, 536 deletions
diff --git a/sys/dev/e1000/e1000_82571.c b/sys/dev/e1000/e1000_82571.c
index afeb1a0..3554dbe 100644
--- a/sys/dev/e1000/e1000_82571.c
+++ b/sys/dev/e1000/e1000_82571.c
@@ -78,6 +78,10 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw);
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
@@ -91,6 +95,7 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
+ struct e1000_dev_spec_82571 *dev_spec = &hw->dev_spec._82571;
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_init_phy_params_82571");
@@ -104,9 +109,7 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->reset_delay_us = 100;
- phy->ops.acquire = e1000_get_hw_semaphore_82571;
phy->ops.check_reset_block = e1000_check_reset_block_generic;
- phy->ops.release = e1000_put_hw_semaphore_82571;
phy->ops.reset = e1000_phy_hw_reset_generic;
phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82571;
phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
@@ -124,6 +127,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
phy->ops.read_reg = e1000_read_phy_reg_igp;
phy->ops.write_reg = e1000_write_phy_reg_igp;
+ phy->ops.acquire = e1000_get_hw_semaphore_82571;
+ phy->ops.release = e1000_put_hw_semaphore_82571;
/* This uses above function pointers */
ret_val = e1000_get_phy_id_82571(hw);
@@ -145,6 +150,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_m88;
phy->ops.read_reg = e1000_read_phy_reg_m88;
phy->ops.write_reg = e1000_write_phy_reg_m88;
+ phy->ops.acquire = e1000_get_hw_semaphore_82571;
+ phy->ops.release = e1000_put_hw_semaphore_82571;
/* This uses above function pointers */
ret_val = e1000_get_phy_id_82571(hw);
@@ -158,6 +165,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
break;
case e1000_82574:
case e1000_82583:
+ E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
+
phy->type = e1000_phy_bm;
phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
phy->ops.get_info = e1000_get_phy_info_m88;
@@ -167,6 +176,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_m88;
phy->ops.read_reg = e1000_read_phy_reg_bm2;
phy->ops.write_reg = e1000_write_phy_reg_bm2;
+ phy->ops.acquire = e1000_get_hw_semaphore_82574;
+ phy->ops.release = e1000_put_hw_semaphore_82574;
/* This uses above function pointers */
ret_val = e1000_get_phy_id_82571(hw);
@@ -250,9 +261,18 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
}
/* Function Pointers */
- nvm->ops.acquire = e1000_acquire_nvm_82571;
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+ nvm->ops.release = e1000_put_hw_semaphore_82574;
+ break;
+ default:
+ nvm->ops.acquire = e1000_acquire_nvm_82571;
+ nvm->ops.release = e1000_release_nvm_82571;
+ break;
+ }
nvm->ops.read = e1000_read_nvm_eerd;
- nvm->ops.release = e1000_release_nvm_82571;
nvm->ops.update = e1000_update_nvm_checksum_82571;
nvm->ops.validate = e1000_validate_nvm_checksum_82571;
nvm->ops.valid_led_default = e1000_valid_led_default_82571;
@@ -578,6 +598,96 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
}
/**
+ * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+ s32 ret_val = E1000_SUCCESS;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_82573");
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ do {
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+ break;
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+ msec_delay(2);
+ i++;
+ } while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+ if (i == MDIO_OWNERSHIP_TIMEOUT) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82573(hw);
+ DEBUGOUT("Driver can't access the PHY\n");
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used during reset.
+ *
+ **/
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_put_hw_semaphore_82573");
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+}
+
+/**
+ * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_get_hw_semaphore_82574");
+
+ E1000_MUTEX_LOCK(&hw->dev_spec._82571.swflag_mutex);
+ return e1000_get_hw_semaphore_82573(hw);
+}
+
+/**
+ * e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_put_hw_semaphore_82574");
+
+ e1000_put_hw_semaphore_82573(hw);
+ E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex);
+}
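
For context, the new 82574/82583 path funnels both PHY and NVM access through this mutex-wrapped hardware semaphore. Below is a minimal usage sketch, not part of the diff; it assumes the shared-code headers and the locked-context MDIC reader e1000_read_phy_reg_mdic() from e1000_phy.c:

static s32 example_locked_phy_read(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;

	/* On 82574/82583, acquire resolves to e1000_get_hw_semaphore_82574:
	 * take the software mutex, then the EXTCNF_CTRL MDIO ownership bit. */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Raw register access is now serialized against the NVM path and
	 * against the MDIO ownership taken in e1000_reset_hw_82571(). */
	ret_val = e1000_read_phy_reg_mdic(hw, offset, data);

	/* Release in reverse order: ownership bit, then mutex. */
	hw->phy.ops.release(hw);
	return ret_val;
}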
+
+/**
* e1000_acquire_nvm_82571 - Request for access to the EEPROM
* @hw: pointer to the HW structure
*
@@ -598,8 +708,6 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82573:
- case e1000_82574:
- case e1000_82583:
break;
default:
ret_val = e1000_acquire_nvm_generic(hw);
@@ -926,9 +1034,8 @@ out:
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
- u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
+ u32 ctrl, ctrl_ext, icr;
s32 ret_val;
- u16 i = 0;
DEBUGFUNC("e1000_reset_hw_82571");
@@ -955,33 +1062,33 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_82573:
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ break;
case e1000_82574:
case e1000_82583:
- extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
- do {
- E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
- extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
-
- if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
- break;
-
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
- msec_delay(2);
- i++;
- } while (i < MDIO_OWNERSHIP_TIMEOUT);
+ ret_val = e1000_get_hw_semaphore_82574(hw);
break;
default:
break;
}
+ if (ret_val)
+ DEBUGOUT("Cannot acquire MDIO ownership\n");
ctrl = E1000_READ_REG(hw, E1000_CTRL);
DEBUGOUT("Issuing a global reset to MAC\n");
E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ /* Must release MDIO ownership and mutex after MAC reset. */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ e1000_put_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+
if (hw->nvm.type == e1000_nvm_flash_hw) {
usec_delay(10);
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
@@ -1015,12 +1122,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
icr = E1000_READ_REG(hw, E1000_ICR);
- /* Install any alternate MAC address into RAR0 */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- goto out;
+ if (hw->mac.type == e1000_82571) {
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
- e1000_set_laa_state_82571(hw, TRUE);
+ e1000_set_laa_state_82571(hw, TRUE);
+ }
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1330,6 +1439,42 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
return E1000_SUCCESS;
}
+/**
+ * e1000_check_phy_82574 - check 82574 phy hung state
+ * @hw: pointer to the HW structure
+ *
+ * Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+ u16 status_1kbt = 0;
+ u16 receive_errors = 0;
+ bool phy_hung = FALSE;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_check_phy_82574");
+
+ /*
+	 * Read the PHY Receive Error counter first; if it is at max (all F's),
+	 * then read the Base1000T status register. If both are at max, the PHY
+	 * is hung.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, E1000_RECEIVE_ERROR_COUNTER,
+ &receive_errors);
+ if (ret_val)
+ goto out;
+ if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+ ret_val = hw->phy.ops.read_reg(hw, E1000_BASE1000T_STATUS,
+ &status_1kbt);
+ if (ret_val)
+ goto out;
+ if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+ E1000_IDLE_ERROR_COUNT_MASK)
+ phy_hung = TRUE;
+ }
+out:
+ return phy_hung;
+}
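
The driver side of this commit's watchdog fixes polls the helper above from its periodic timer. A hedged sketch of that pattern follows; the caller and recovery hook are hypothetical, not the actual if_em.c code:

struct adapter;                                /* driver softc (assumed) */
extern void example_reinit(struct adapter *);  /* hypothetical recovery hook */

static void example_phy_watchdog(struct e1000_hw *hw, struct adapter *sc)
{
	/* Both the PHY receive-error counter and the Base1000T idle-error
	 * count pegged at maximum means the PHY is hung: reinit the port
	 * rather than waiting for a transmit watchdog to fire. */
	if (hw->mac.type == e1000_82574 && e1000_check_phy_82574(hw))
		example_reinit(sc);
}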
+
/**
* e1000_setup_link_82571 - Setup flow control and link settings
@@ -1460,6 +1605,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
u32 rxcw;
u32 ctrl;
u32 status;
+ u32 txcw;
+ u32 i;
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_check_for_serdes_link_82571");
@@ -1482,8 +1629,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = FALSE;
DEBUGOUT("AN_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = TRUE;
}
- break;
+ break;
case e1000_serdes_link_forced_up:
/*
@@ -1491,8 +1640,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
+ * If the partner code word is null, stop forcing
+ * and restart auto negotiation.
*/
- if (rxcw & E1000_RXCW_C) {
+ if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
/* Enable autoneg, and unforce link up */
E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
E1000_WRITE_REG(hw, E1000_CTRL,
@@ -1501,6 +1652,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = FALSE;
DEBUGOUT("FORCED_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = TRUE;
}
break;
@@ -1559,6 +1712,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
(ctrl & ~E1000_CTRL_SLU));
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = FALSE;
DEBUGOUT("DOWN -> AN_PROG\n");
break;
}
@@ -1569,16 +1723,32 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
DEBUGOUT("ANYSTATE -> DOWN\n");
} else {
/*
- * We have sync, and can tolerate one invalid (IV)
- * codeword before declaring link down, so reread
- * to look again.
+ * Check several times, if Sync and Config
+ * both are consistently 1 then simply ignore
+ * the Invalid bit and restart Autoneg
*/
- usec_delay(10);
- rxcw = E1000_READ_REG(hw, E1000_RXCW);
- if (rxcw & E1000_RXCW_IV) {
- mac->serdes_link_state = e1000_serdes_link_down;
+ for (i = 0; i < AN_RETRY_COUNT; i++) {
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+ if ((rxcw & E1000_RXCW_IV) &&
+ !((rxcw & E1000_RXCW_SYNCH) &&
+ (rxcw & E1000_RXCW_C))) {
+ mac->serdes_has_link = FALSE;
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ DEBUGOUT("ANYSTATE -> DOWN\n");
+ break;
+ }
+ }
+
+ if (i == AN_RETRY_COUNT) {
+ txcw = E1000_READ_REG(hw, E1000_TXCW);
+ txcw |= E1000_TXCW_ANE;
+ E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = FALSE;
- DEBUGOUT("ANYSTATE -> DOWN\n");
+ DEBUGOUT("ANYSTATE -> AN_PROG\n");
}
}
}
@@ -1736,14 +1906,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
DEBUGFUNC("e1000_read_mac_addr_82571");
- /*
- * If there's an alternate MAC address place it in RAR0
- * so that it will override the Si installed default perm
- * address.
- */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- goto out;
+ if (hw->mac.type == e1000_82571) {
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+ }
ret_val = e1000_read_mac_addr_generic(hw);
diff --git a/sys/dev/e1000/e1000_82571.h b/sys/dev/e1000/e1000_82571.h
index 5e66793..c76f16f 100644
--- a/sys/dev/e1000/e1000_82571.h
+++ b/sys/dev/e1000/e1000_82571.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2008, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -42,6 +42,7 @@
(ID_LED_DEF1_DEF2))
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
/* Intr Throttling - RW */
#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n)))
@@ -53,6 +54,11 @@
#define E1000_RXCFGL 0x0B634 /* TimeSync Rx EtherType & Msg Type Reg - RW */
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
+bool e1000_check_phy_82574(struct e1000_hw *hw);
bool e1000_get_laa_state_82571(struct e1000_hw *hw);
void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state);
diff --git a/sys/dev/e1000/e1000_82575.c b/sys/dev/e1000/e1000_82575.c
index 65b3ce4..bc04b45 100644
--- a/sys/dev/e1000/e1000_82575.c
+++ b/sys/dev/e1000/e1000_82575.c
@@ -85,6 +85,7 @@ static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] =
{ 36, 72, 144, 1, 2, 4, 8, 16,
@@ -92,6 +93,37 @@ static const u16 e1000_82580_rxpbs_table[] =
#define E1000_82580_RXPBS_TABLE_SIZE \
(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
+
+/**
+ * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ * @hw: pointer to the HW structure
+ *
+ * Called to determine if the I2C pins are being used for I2C or as an
+ * external MDIO interface since the two options are mutually exclusive.
+ **/
+static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+{
+ u32 reg = 0;
+ bool ext_mdio = FALSE;
+
+ DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ reg = E1000_READ_REG(hw, E1000_MDIC);
+ ext_mdio = !!(reg & E1000_MDIC_DEST);
+ break;
+ case e1000_82580:
+ reg = E1000_READ_REG(hw, E1000_MDICNFG);
+ ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
+ break;
+ default:
+ break;
+ }
+ return ext_mdio;
+}
+
/**
* e1000_init_phy_params_82575 - Init PHY func ptrs.
* @hw: pointer to the HW structure
@@ -100,6 +132,7 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext;
DEBUGFUNC("e1000_init_phy_params_82575");
@@ -120,16 +153,26 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
phy->ops.release = e1000_release_phy_82575;
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
if (e1000_sgmii_active_82575(hw)) {
phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ } else {
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ e1000_reset_mdicnfg_82580(hw);
+
+ if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
} else if (hw->mac.type >= e1000_82580) {
- phy->ops.reset = e1000_phy_hw_reset_generic;
phy->ops.read_reg = e1000_read_phy_reg_82580;
phy->ops.write_reg = e1000_write_phy_reg_82580;
} else {
- phy->ops.reset = e1000_phy_hw_reset_generic;
phy->ops.read_reg = e1000_read_phy_reg_igp;
phy->ops.write_reg = e1000_write_phy_reg_igp;
}
@@ -256,27 +299,15 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
case E1000_CTRL_EXT_LINK_MODE_SGMII:
dev_spec->sgmii_active = TRUE;
- ctrl_ext |= E1000_CTRL_I2C_ENA;
break;
case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
hw->phy.media_type = e1000_media_type_internal_serdes;
- ctrl_ext |= E1000_CTRL_I2C_ENA;
break;
default:
- ctrl_ext &= ~E1000_CTRL_I2C_ENA;
break;
}
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
-
- /*
- * if using i2c make certain the MDICNFG register is cleared to prevent
- * communications from being misrouted to the mdic registers
- */
- if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
- E1000_WRITE_REG(hw, E1000_MDICNFG, 0);
-
/* Set mta register count */
mac->mta_reg_count = 128;
/* Set uta register count */
@@ -367,6 +398,7 @@ void e1000_init_function_pointers_82575(struct e1000_hw *hw)
hw->mac.ops.init_params = e1000_init_mac_params_82575;
hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
hw->phy.ops.init_params = e1000_init_phy_params_82575;
+ hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
}
/**
@@ -492,6 +524,7 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
u16 phy_id;
u32 ctrl_ext;
+ u32 mdic;
DEBUGFUNC("e1000_get_phy_id_82575");
@@ -508,6 +541,28 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
goto out;
}
+ if (e1000_sgmii_uses_mdio_82575(hw)) {
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ mdic &= E1000_MDIC_PHY_MASK;
+ phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+ break;
+ case e1000_82580:
+ mdic = E1000_READ_REG(hw, E1000_MDICNFG);
+ mdic &= E1000_MDICNFG_PHY_MASK;
+ phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ break;
+ }
+ ret_val = e1000_get_phy_id(hw);
+ goto out;
+ }
+
/* Power on sgmii phy if it is disabled */
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
E1000_WRITE_REG(hw, E1000_CTRL_EXT,
@@ -1243,6 +1298,7 @@ static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
/* disable PCS autoneg and support parallel detect only */
pcs_autoneg = FALSE;
+ /* fall through to default case */
default:
/*
* non-SGMII modes only supports a speed of 1000/Full for the
@@ -1638,6 +1694,39 @@ out:
return ret_val;
}
+
+/**
+ * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ * @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ * enables/disables L2 switch anti-spoofing functionality.
+ **/
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+{
+ u32 dtxswc;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
+ if (enable) {
+ dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ /* The PF can spoof - it has to in order to
+ * support emulation mode NICs */
+ dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ } else {
+ dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ }
+ E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
+ break;
+ default:
+ break;
+ }
+}
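
The XOR above re-clears the PF's own check bits after the blanket enable. A worked example of the bit math (illustrative; it assumes the shared-code values E1000_DTXSWC_MAC_SPOOF_MASK == 0x000000FF, E1000_DTXSWC_VLAN_SPOOF_MASK == 0x0000FF00 and MAX_NUM_VFS == 8, so pool n's MAC check is bit n and its VLAN check is bit n + 8):

/* enable == TRUE, pf == 0:
 *   dtxswc |= 0x0000FFFF;            enable MAC+VLAN checks, all 16 bits
 *   dtxswc ^= (1 << 0) | (1 << 8);   flip the PF's two bits back off
 *   => 0x0000FEFE                    VFs are checked, the PF may spoof
 */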
+
/**
* e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
* @hw: pointer to the hardware struct
@@ -1739,6 +1828,45 @@ out:
}
/**
+ * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ * @hw: pointer to the HW structure
+ *
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * the values found in the EEPROM. This addresses an issue in which these
+ * bits are not restored from EEPROM after reset.
+ **/
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 mdicnfg;
+ u16 nvm_data;
+
+ DEBUGFUNC("e1000_reset_mdicnfg_82580");
+
+ if (hw->mac.type != e1000_82580)
+ goto out;
+ if (!e1000_sgmii_active_82575(hw))
+ goto out;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+ if (nvm_data & NVM_WORD24_EXT_MDIO)
+ mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+ if (nvm_data & NVM_WORD24_COM_MDIO)
+ mdicnfg |= E1000_MDICNFG_COM_MDIO;
+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+out:
+ return ret_val;
+}
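
For reference, with NVM_82580_LAN_FUNC_OFFSET(a), shown in the e1000_defines.h hunk below as (a ? (0x40 + (0x40 * a)) : 0), the read above resolves per LAN function like so (illustrative):

/* func 0 -> word NVM_INIT_CONTROL3_PORT_A + 0x000
 * func 1 -> word NVM_INIT_CONTROL3_PORT_A + 0x080
 * func 2 -> word NVM_INIT_CONTROL3_PORT_A + 0x0C0
 * func 3 -> word NVM_INIT_CONTROL3_PORT_A + 0x100
 *
 * nvm_data & NVM_WORD24_EXT_MDIO (0x0004) -> set MDICNFG.EXT_MDIO
 * nvm_data & NVM_WORD24_COM_MDIO (0x0008) -> set MDICNFG.COM_MDIO
 */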
+
+/**
* e1000_reset_hw_82580 - Reset hardware
* @hw: pointer to the HW structure
*
@@ -1814,6 +1942,10 @@ static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
icr = E1000_READ_REG(hw, E1000_ICR);
+ ret_val = e1000_reset_mdicnfg_82580(hw);
+ if (ret_val)
+ DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
+
/* Install any alternate MAC address into RAR0 */
ret_val = e1000_check_alt_mac_addr_generic(hw);
diff --git a/sys/dev/e1000/e1000_82575.h b/sys/dev/e1000/e1000_82575.h
index d64538c..eb05262 100644
--- a/sys/dev/e1000/e1000_82575.h
+++ b/sys/dev/e1000/e1000_82575.h
@@ -458,6 +458,7 @@ struct e1000_adv_tx_context_desc {
/* RX packet buffer size defines */
#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
enum e1000_promisc_type {
e1000_promisc_disabled = 0, /* all promisc modes disabled */
diff --git a/sys/dev/e1000/e1000_api.c b/sys/dev/e1000/e1000_api.c
index 8a49c2b..3ae023c 100644
--- a/sys/dev/e1000/e1000_api.c
+++ b/sys/dev/e1000/e1000_api.c
@@ -276,6 +276,8 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
break;
case E1000_DEV_ID_ICH10_D_BM_LM:
case E1000_DEV_ID_ICH10_D_BM_LF:
+ case E1000_DEV_ID_ICH10_D_BM_V:
+ case E1000_DEV_ID_ICH10_HANKSVILLE:
mac->type = e1000_ich10lan;
break;
case E1000_DEV_ID_PCH_D_HV_DM:
@@ -284,6 +286,10 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_PCH_M_HV_LC:
mac->type = e1000_pchlan;
break;
+ case E1000_DEV_ID_PCH2_LV_LM:
+ case E1000_DEV_ID_PCH2_LV_V:
+ mac->type = e1000_pch2lan;
+ break;
case E1000_DEV_ID_82575EB_COPPER:
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82575GB_QUAD_COPPER:
@@ -294,6 +300,7 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82576_FIBER:
case E1000_DEV_ID_82576_SERDES:
case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
case E1000_DEV_ID_82576_NS:
case E1000_DEV_ID_82576_NS_SERDES:
case E1000_DEV_ID_82576_SERDES_QUAD:
@@ -304,6 +311,7 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82580_SERDES:
case E1000_DEV_ID_82580_SGMII:
case E1000_DEV_ID_82580_COPPER_DUAL:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
mac->type = e1000_82580;
break;
case E1000_DEV_ID_82576_VF:
@@ -396,6 +404,7 @@ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
case e1000_ich9lan:
case e1000_ich10lan:
case e1000_pchlan:
+ case e1000_pch2lan:
e1000_init_function_pointers_ich8lan(hw);
break;
case e1000_82575:
@@ -1131,6 +1140,37 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
}
/**
+ * e1000_read_pba_string - Read device part number string
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ * e1000_read_pba_length - Read device part number string length
+ * @hw: pointer to the HW structure
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number length from the EEPROM and
+ * stores the value in pba_num_size.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
+{
+ return e1000_read_pba_length_generic(hw, pba_num_size);
+}
+
+/**
* e1000_read_pba_num - Read device part number
* @hw: pointer to the HW structure
* @pba_num: pointer to device part number
diff --git a/sys/dev/e1000/e1000_api.h b/sys/dev/e1000/e1000_api.h
index 9fc7469..36ba712 100644
--- a/sys/dev/e1000/e1000_api.h
+++ b/sys/dev/e1000/e1000_api.h
@@ -97,6 +97,9 @@ void e1000_power_up_phy(struct e1000_hw *hw);
void e1000_power_down_phy(struct e1000_hw *hw);
s32 e1000_read_mac_addr(struct e1000_hw *hw);
s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num);
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
void e1000_reload_nvm(struct e1000_hw *hw);
s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
diff --git a/sys/dev/e1000/e1000_defines.h b/sys/dev/e1000/e1000_defines.h
index 7ac5d8b..5d09ad9 100644
--- a/sys/dev/e1000/e1000_defines.h
+++ b/sys/dev/e1000/e1000_defines.h
@@ -49,6 +49,8 @@
#define E1000_WUC_LSCWO 0x00000020 /* Link Status wake up override */
#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
+#define E1000_WUC_FLX6_PHY 0x4000 /* Flexible Filter 6 Enable */
+#define E1000_WUC_FLX7_PHY 0x8000 /* Flexible Filter 7 Enable */
/* Wake Up Filter Control */
#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@@ -73,6 +75,8 @@
#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
#define E1000_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
#define E1000_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define E1000_WUFC_FLX6 0x00400000 /* Flexible Filter 6 Enable */
+#define E1000_WUFC_FLX7 0x00800000 /* Flexible Filter 7 Enable */
#define E1000_WUFC_ALL_FILTERS_PHY_4 0x0000F0FF /*Mask for all wakeup filters*/
#define E1000_WUFC_FLX_OFFSET_PHY 12 /* Offset to the Flexible Filters bits */
#define E1000_WUFC_FLX_FILTERS_PHY_4 0x0000F000 /*Mask for 4 flexible filters*/
@@ -80,9 +84,11 @@
#define E1000_WUFC_FLX_FILTERS_PHY_6 0x0000F600 /*Mask for 6 flexible filters*/
#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
#define E1000_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask for all 6 wakeup filters*/
+#define E1000_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask for all 8 wakeup filters*/
#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
#define E1000_WUFC_FLX_FILTERS 0x000F0000 /*Mask for the 4 flexible filters */
#define E1000_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flexible filters */
+#define E1000_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flexible filters */
/*
* For 82576 to utilize Extended filter masks in addition to
* existing (filter) masks
@@ -109,10 +115,15 @@
#define E1000_WUS_FLX3 E1000_WUFC_FLX3
#define E1000_WUS_FLX4 E1000_WUFC_FLX4
#define E1000_WUS_FLX5 E1000_WUFC_FLX5
+#define E1000_WUS_FLX6 E1000_WUFC_FLX6
+#define E1000_WUS_FLX7 E1000_WUFC_FLX7
#define E1000_WUS_FLX4_PHY E1000_WUFC_FLX4_PHY
#define E1000_WUS_FLX5_PHY E1000_WUFC_FLX5_PHY
+#define E1000_WUS_FLX6_PHY 0x0400
+#define E1000_WUS_FLX7_PHY 0x0800
#define E1000_WUS_FLX_FILTERS E1000_WUFC_FLX_FILTERS
#define E1000_WUS_FLX_FILTERS_6 E1000_WUFC_FLX_FILTERS_6
+#define E1000_WUS_FLX_FILTERS_8 E1000_WUFC_FLX_FILTERS_8
#define E1000_WUS_FLX_FILTERS_PHY_6 E1000_WUFC_FLX_FILTERS_PHY_6
/* Wake Up Packet Length */
@@ -122,6 +133,8 @@
#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
/* Six Flexible Filters are supported */
#define E1000_FLEXIBLE_FILTER_COUNT_MAX_6 6
+/* Eight Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX_8 8
/* Two Extended Flexible Filters are supported (82576) */
#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
#define E1000_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
@@ -132,6 +145,7 @@
#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
#define E1000_FFLT_SIZE_6 E1000_FLEXIBLE_FILTER_COUNT_MAX_6
+#define E1000_FFLT_SIZE_8 E1000_FLEXIBLE_FILTER_COUNT_MAX_8
#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
@@ -710,6 +724,7 @@
#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
@@ -1013,6 +1028,9 @@
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
#define E1000_ERR_MBX 15
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
/* Loop limit on how long we wait for auto-negotiation to complete */
#define FIBER_LINK_UP_LIMIT 50
@@ -1106,6 +1124,11 @@
#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
+#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK 0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT 21
+
/* PCI Express Control */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
@@ -1301,6 +1324,10 @@
#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
+/* Mask bits for fields in Word 0x24 of the NVM */
+#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */
+
/* Mask bits for fields in Word 0x0f of the NVM */
#define NVM_WORD0F_PAUSE_MASK 0x3000
#define NVM_WORD0F_PAUSE 0x1000
@@ -1312,12 +1339,19 @@
/* Mask bits for fields in Word 0x1a of the NVM */
#define NVM_WORD1A_ASPM_MASK 0x000C
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM 0x0800
+
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH 11
+
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
#define NVM_MAC_ADDR_OFFSET 0
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
+#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_RESERVED_WORD 0xFFFF
#define NVM_PHY_CLASS_A 0x8000
#define NVM_SERDES_AMPLITUDE_MASK 0x000F
@@ -1422,6 +1456,7 @@
#define BME1000_E_PHY_ID_R2 0x01410CB1
#define I82577_E_PHY_ID 0x01540050
#define I82578_E_PHY_ID 0x004DD040
+#define I82579_E_PHY_ID 0x01540090
#define I82580_I_PHY_ID 0x015403A0
#define IGP04E1000_E_PHY_ID 0x02A80391
#define M88_VENDOR 0x0141
@@ -1622,6 +1657,7 @@
#define E1000_MDIC_READY 0x10000000
#define E1000_MDIC_INT_EN 0x20000000
#define E1000_MDIC_ERROR 0x40000000
+#define E1000_MDIC_DEST 0x80000000
/* SerDes Control */
#define E1000_GEN_CTL_READY 0x80000000
@@ -1683,4 +1719,5 @@
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
on DMA coal */
+
#endif /* _E1000_DEFINES_H_ */
diff --git a/sys/dev/e1000/e1000_hw.h b/sys/dev/e1000/e1000_hw.h
index b494074..ca9c97d 100644
--- a/sys/dev/e1000/e1000_hw.h
+++ b/sys/dev/e1000/e1000_hw.h
@@ -120,17 +120,22 @@ struct e1000_hw;
#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
+#define E1000_DEV_ID_ICH10_HANKSVILLE 0xF0FE
#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
+#define E1000_DEV_ID_PCH2_LV_LM 0x1502
+#define E1000_DEV_ID_PCH2_LV_V 0x1503
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
#define E1000_DEV_ID_82576_NS 0x150A
#define E1000_DEV_ID_82576_NS_SERDES 0x1518
#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
@@ -144,6 +149,7 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
#define E1000_REVISION_0 0
#define E1000_REVISION_1 1
#define E1000_REVISION_2 2
@@ -184,6 +190,7 @@ enum e1000_mac_type {
e1000_ich9lan,
e1000_ich10lan,
e1000_pchlan,
+ e1000_pch2lan,
e1000_82575,
e1000_82576,
e1000_82580,
@@ -228,6 +235,7 @@ enum e1000_phy_type {
e1000_phy_bm,
e1000_phy_82578,
e1000_phy_82577,
+ e1000_phy_82579,
e1000_phy_82580,
e1000_phy_vf,
};
@@ -316,6 +324,9 @@ enum e1000_serdes_link_state {
e1000_serdes_link_forced_up
};
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
/* Receive Descriptor */
struct e1000_rx_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
@@ -799,6 +810,34 @@ struct e1000_fc_info {
enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
};
+struct e1000_mbx_operations {
+ s32 (*init_params)(struct e1000_hw *hw);
+ s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct e1000_hw *, u16);
+ s32 (*check_for_ack)(struct e1000_hw *, u16);
+ s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct e1000_mbx_info {
+ struct e1000_mbx_operations ops;
+ struct e1000_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u16 size;
+};
+
struct e1000_dev_spec_82541 {
enum e1000_dsp_config dsp_config;
enum e1000_ffe_config ffe_config;
@@ -819,6 +858,7 @@ struct e1000_dev_spec_82543 {
struct e1000_dev_spec_82571 {
bool laa_is_present;
u32 smb_counter;
+ E1000_MUTEX swflag_mutex;
};
struct e1000_dev_spec_80003es2lan {
@@ -838,33 +878,7 @@ struct e1000_dev_spec_ich8lan {
E1000_MUTEX nvm_mutex;
E1000_MUTEX swflag_mutex;
bool nvm_k1_enabled;
-};
-
-struct e1000_mbx_operations {
- s32 (*init_params)(struct e1000_hw *hw);
- s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
- s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
- s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
- s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
- s32 (*check_for_msg)(struct e1000_hw *, u16);
- s32 (*check_for_ack)(struct e1000_hw *, u16);
- s32 (*check_for_rst)(struct e1000_hw *, u16);
-};
-
-struct e1000_mbx_stats {
- u32 msgs_tx;
- u32 msgs_rx;
- u32 acks;
- u32 reqs;
- u32 rsts;
-};
-
-struct e1000_mbx_info {
- struct e1000_mbx_operations ops;
- struct e1000_mbx_stats stats;
- u32 timeout;
- u32 usec_delay;
- u16 size;
+ bool eee_disable;
};
struct e1000_dev_spec_82575 {
@@ -877,7 +891,6 @@ struct e1000_dev_spec_vf {
u32 v2p_mailbox;
};
-
struct e1000_hw {
void *back;
diff --git a/sys/dev/e1000/e1000_ich8lan.c b/sys/dev/e1000/e1000_ich8lan.c
index 6b3c25d..fdd5172 100644
--- a/sys/dev/e1000/e1000_ich8lan.c
+++ b/sys/dev/e1000/e1000_ich8lan.c
@@ -58,6 +58,8 @@
* 82577LC Gigabit Network Connection
* 82578DM Gigabit Network Connection
* 82578DC Gigabit Network Connection
+ * 82579LM Gigabit Network Connection
+ * 82579V Gigabit Network Connection
*/
#include "e1000_api.h"
@@ -71,6 +73,8 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
@@ -126,6 +130,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
@@ -177,7 +183,7 @@ union ich8_hws_flash_regacc {
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- u32 ctrl;
+ u32 ctrl, fwsm;
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_init_phy_params_pchlan");
@@ -200,15 +206,14 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- if ((hw->mac.type == e1000_pchlan) &&
- (!(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))) {
-
- /*
- * The MAC-PHY interconnect may still be in SMBus mode
- * after Sx->S0. Toggle the LANPHYPC Value bit to force
- * the interconnect to PCIe mode, but only if there is no
- * firmware present otherwise firmware will have done it.
- */
+ /*
+ * The MAC-PHY interconnect may still be in SMBus mode
+ * after Sx->S0. If the manageability engine (ME) is
+ * disabled, then toggle the LANPHYPC Value bit to force
+ * the interconnect to PCIe mode.
+ */
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
ctrl = E1000_READ_REG(hw, E1000_CTRL);
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -217,6 +222,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
msec_delay(50);
+
+ /*
+ * Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
}
/*
@@ -229,13 +241,25 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
if (ret_val)
goto out;
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+ msec_delay(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
+ }
+
phy->id = e1000_phy_unknown;
- ret_val = e1000_get_phy_id(hw);
- if (ret_val)
- goto out;
- if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+ switch (hw->mac.type) {
+ default:
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+ break;
+ /* fall-through */
+ case e1000_pch2lan:
/*
- * In case the PHY needs to be in mdio slow mode (eg. 82577),
+ * In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
ret_val = e1000_set_mdio_slow_mode_hv(hw);
@@ -244,11 +268,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
ret_val = e1000_get_phy_id(hw);
if (ret_val)
goto out;
+ break;
}
phy->type = e1000_get_phy_type_from_id(phy->id);
switch (phy->type) {
case e1000_phy_82577:
+ case e1000_phy_82579:
phy->ops.check_polarity = e1000_check_polarity_82577;
phy->ops.force_speed_duplex =
e1000_phy_force_speed_duplex_82577;
@@ -485,8 +511,6 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
/* check for link */
mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
- /* check management mode */
- mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
/* link info */
mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
/* multicast address update */
@@ -499,6 +523,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_ich8lan:
case e1000_ich9lan:
case e1000_ich10lan:
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* blink LED */
@@ -511,10 +537,16 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.led_on = e1000_led_on_ich8lan;
mac->ops.led_off = e1000_led_off_ich8lan;
break;
+ case e1000_pch2lan:
+ mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
+ mac->ops.rar_set = e1000_rar_set_pch2lan;
+ /* fall-through */
case e1000_pchlan:
/* save PCH revision_id */
e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
hw->revision_id = (u8)(pci_cfg &= 0x000F);
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_pchlan;
/* setup LED */
@@ -533,10 +565,46 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_ich8lan)
e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
+ /* Gate automatic PHY configuration by hardware on managed 82579 */
+ if ((mac->type == e1000_pch2lan) &&
+ (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
+
return E1000_SUCCESS;
}
/**
+ * e1000_set_eee_pchlan - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE based on setting in dev_spec structure. The bits in
+ * the LPI Control register will remain set only if/when link is up.
+ **/
+static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_reg;
+
+ DEBUGFUNC("e1000_set_eee_pchlan");
+
+ if (hw->phy.type != e1000_phy_82579)
+ goto out;
+
+ ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
+ if (ret_val)
+ goto out;
+
+ if (hw->dev_spec.ich8lan.eee_disable)
+ phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
+ else
+ phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
+
+ ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
+out:
+ return ret_val;
+}
+
+/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
*
@@ -589,12 +657,23 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
goto out;
}
+ if (hw->mac.type == e1000_pch2lan) {
+ ret_val = e1000_k1_workaround_lv(hw);
+ if (ret_val)
+ goto out;
+ }
+
/*
* Check if there was DownShift, must be checked
* immediately after link-up
*/
e1000_check_downshift_generic(hw);
+ /* Enable/Disable EEE after link up */
+ ret_val = e1000_set_eee_pchlan(hw);
+ if (ret_val)
+ goto out;
+
/*
* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
@@ -644,6 +723,7 @@ void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
break;
case e1000_pchlan:
+ case e1000_pch2lan:
hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
break;
default:
@@ -767,7 +847,7 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
* e1000_check_mng_mode_ich8lan - Checks management mode
* @hw: pointer to the HW structure
*
- * This checks if the adapter has manageability enabled.
+ * This checks if the adapter has any manageability enabled.
* This is a function pointer entry point only called by read/write
* routines for the PHY and NVM parts.
**/
@@ -779,8 +859,86 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
fwsm = E1000_READ_REG(hw, E1000_FWSM);
- return (fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_check_mng_mode_pchlan - Checks management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has iAMT enabled.
+ * This is a function pointer entry point only called by read/write
+ * routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ DEBUGFUNC("e1000_check_mng_mode_pchlan");
+
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_rar_set_pch2lan - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr. For 82579, RAR[0] is the base address register that is to
+ * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
+ * Use SHRA[0-3] in place of those reserved for ME.
+ **/
+static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ DEBUGFUNC("e1000_rar_set_pch2lan");
+
+ /*
+ * HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] |
+ ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ if (index == 0) {
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
+ return;
+ }
+
+ if (index < hw->mac.rar_entry_count) {
+ E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
+ E1000_WRITE_FLUSH(hw);
+
+ /* verify the register updates */
+ if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
+ (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
+ return;
+
+ DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
+ (index - 1), E1000_READ_REG(hw, E1000_FWSM));
+ }
+
+ DEBUGOUT1("Failed to write receive address at index %d\n", index);
}
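
The byte order here is easy to get wrong: addr[] is in network order (addr[0] transmitted first) while RAL/RAH and SHRAL/SHRAH expect little endian. A worked example with the hypothetical address 00:1b:21:aa:bb:cc:

/* rar_low  = 0x00 | (0x1b << 8) | (0x21 << 16) | (0xaa << 24) = 0xaa211b00
 * rar_high = 0xbb | (0xcc << 8)                               = 0x0000ccbb
 * rar_high |= E1000_RAH_AV;   address is nonzero, so mark it valid
 */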
/**
@@ -807,6 +965,34 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ * @hw: pointer to the HW structure
+ *
+ * Assumes semaphore already acquired.
+ *
+ **/
+static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ u32 strap = E1000_READ_REG(hw, E1000_STRAP);
+ s32 ret_val = E1000_SUCCESS;
+
+ strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~HV_SMB_ADDR_MASK;
+ phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+ phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+
+out:
+ return ret_val;
+}
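
A field-extraction example for the strap handling above (illustrative; it assumes the shared-code values E1000_STRAP_SMBUS_ADDRESS_MASK == 0x00FE0000 and E1000_STRAP_SMBUS_ADDRESS_SHIFT == 17):

/* strap = 0x00c80000 (example STRAP readout)
 * strap &= 0x00FE0000    -> 0x00c80000
 * strap >> 17            -> 0x64, the 7-bit SMBus address
 * phy_data = (phy_data & ~HV_SMB_ADDR_MASK) | 0x64
 *            | HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
 */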
+
+/**
* e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
* @hw: pointer to the HW structure
*
@@ -820,13 +1006,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
- if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
- !(hw->mac.type == e1000_pchlan))
- return ret_val;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
+ DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
/*
* Initialize the PHY from the NVM on ICH platforms. This
@@ -835,27 +1015,42 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
* Therefore, after each PHY reset, we will load the
* configuration data out of the NVM manually.
*/
- if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
- (hw->device_id == E1000_DEV_ID_ICH8_IGP_M) ||
- (hw->mac.type == e1000_pchlan))
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ if (phy->type != e1000_phy_igp_3)
+ return ret_val;
+
+ if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
+ (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ break;
+ }
+ /* Fall-thru */
+ case e1000_pchlan:
+ case e1000_pch2lan:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
- else
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ break;
+ default:
+ return ret_val;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
data = E1000_READ_REG(hw, E1000_FEXTNVM);
if (!(data & sw_cfg_mask))
goto out;
- /* Wait for basic configuration completes before proceeding */
- e1000_lan_init_done_ich8lan(hw);
-
/*
* Make sure HW does not configure LCD from PHY
* extended configuration before SW configuration
*/
data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
- if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
- goto out;
+ if (!(hw->mac.type == e1000_pch2lan)) {
+ if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ goto out;
+ }
cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
@@ -866,20 +1061,16 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
- if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
- (hw->mac.type == e1000_pchlan)) {
+ if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+ (hw->mac.type == e1000_pchlan)) ||
+ (hw->mac.type == e1000_pch2lan)) {
/*
* HW configures the SMBus address and LEDs when the
* OEM and LCD Write Enable bits are set in the NVM.
* When both NVM bits are cleared, SW will configure
* them instead.
*/
- data = E1000_READ_REG(hw, E1000_STRAP);
- data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
- reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
- reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
- ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
- reg_data);
+ ret_val = e1000_write_smbus_addr(hw);
if (ret_val)
goto out;
@@ -1026,6 +1217,8 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
u32 reg = 0;
u16 kmrn_reg = 0;
+ DEBUGFUNC("e1000_configure_k1_ich8lan");
+
ret_val = e1000_read_kmrn_reg_locked(hw,
E1000_KMRNCTRLSTA_K1_CONFIG,
&kmrn_reg);
@@ -1076,16 +1269,20 @@ s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
u32 mac_reg;
u16 oem_reg;
- if (hw->mac.type != e1000_pchlan)
+ DEBUGFUNC("e1000_oem_bits_config_ich8lan");
+
+ if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
return ret_val;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
- if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
- goto out;
+ if (!(hw->mac.type == e1000_pch2lan)) {
+ mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+ goto out;
+ }
mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
@@ -1130,6 +1327,8 @@ out:
**/
s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
+ DEBUGFUNC("e1000_hv_phy_powerdown_workaround_ich8lan");
+
if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
return E1000_SUCCESS;
@@ -1145,6 +1344,8 @@ static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
s32 ret_val;
u16 data;
+ DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
+
ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
if (ret_val)
return ret_val;
@@ -1165,6 +1366,8 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
u16 phy_data;
+ DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
+
if (hw->mac.type != e1000_pchlan)
goto out;
@@ -1294,6 +1497,336 @@ out:
}
/**
+ * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
+ * @hw: pointer to the HW structure
+ **/
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+ u16 i;
+
+ DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
+
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
+ for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
+ hw->phy.ops.write_reg(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
+ mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
+ hw->phy.ops.write_reg(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
+ }
+}
+
+static u32 e1000_calc_rx_da_crc(u8 mac[])
+{
+ u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
+ u32 i, j, mask, crc;
+
+ DEBUGFUNC("e1000_calc_rx_da_crc");
+
+ crc = 0xffffffff;
+ for (i = 0; i < 6; i++) {
+ crc = crc ^ mac[i];
+ for (j = 8; j > 0; j--) {
+ mask = (crc & 1) * (-1);
+ crc = (crc >> 1) ^ (poly & mask);
+ }
+ }
+ return ~crc;
+}
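
The helper above is the standard bit-serial, reflected CRC-32 (polynomial 0xEDB88320) over the six destination-address bytes, with 0xFFFFFFFF init and a final complement. A self-contained sketch for experimenting with the values written to E1000_PCH_RAICC follows; the example address is hypothetical and stdint types stand in for the driver's u8/u32:

#include <stdint.h>
#include <stdio.h>

static uint32_t rx_da_crc(const uint8_t mac[6])
{
	uint32_t crc = 0xFFFFFFFFu;

	for (int i = 0; i < 6; i++) {
		crc ^= mac[i];
		for (int j = 0; j < 8; j++) {
			/* mask is all-ones when the LSB is set, else zero */
			uint32_t mask = -(crc & 1u);
			crc = (crc >> 1) ^ (0xEDB88320u & mask);
		}
	}
	return ~crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	printf("RAICC seed = 0x%08x\n", rx_da_crc(mac));
	return 0;
}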
+
+/**
+ * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
+ * with 82579 PHY
+ * @hw: pointer to the HW structure
+ * @enable: flag to enable/disable workaround when enabling/disabling jumbos
+ **/
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_reg, data;
+ u32 mac_reg;
+ u16 i;
+
+ DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* disable Rx path while enabling/disabling workaround */
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+ if (ret_val)
+ goto out;
+
+ if (enable) {
+ /*
+ * Write Rx addresses (rar_entry_count for RAL/H, +4 for
+ * SHRAL/H) and initial CRC values to the MAC
+ */
+ for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ u8 mac_addr[ETH_ADDR_LEN] = {0};
+ u32 addr_high, addr_low;
+
+ addr_high = E1000_READ_REG(hw, E1000_RAH(i));
+ if (!(addr_high & E1000_RAH_AV))
+ continue;
+ addr_low = E1000_READ_REG(hw, E1000_RAL(i));
+ mac_addr[0] = (addr_low & 0xFF);
+ mac_addr[1] = ((addr_low >> 8) & 0xFF);
+ mac_addr[2] = ((addr_low >> 16) & 0xFF);
+ mac_addr[3] = ((addr_low >> 24) & 0xFF);
+ mac_addr[4] = (addr_high & 0xFF);
+ mac_addr[5] = ((addr_high >> 8) & 0xFF);
+
+ E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
+ e1000_calc_rx_da_crc(mac_addr));
+ }
+
+ /* Write Rx addresses to the PHY */
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+ /* Enable jumbo frame workaround in the MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
+ mac_reg &= ~(1 << 14);
+ mac_reg |= (7 << 15);
+ E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
+
+ mac_reg = E1000_READ_REG(hw, E1000_RCTL);
+ mac_reg |= E1000_RCTL_SECRC;
+ E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
+
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ goto out;
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data | (1 << 0));
+ if (ret_val)
+ goto out;
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ goto out;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* Enable jumbo frame workaround in the PHY */
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ data |= (0x37 << 5);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ goto out;
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
+ data &= ~(1 << 13);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ goto out;
+ hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x1A << 2);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xFE00);
+ if (ret_val)
+ goto out;
+ hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
+ ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | (1 << 10));
+ if (ret_val)
+ goto out;
+ } else {
+ /* Write MAC register values back to h/w defaults */
+ mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
+ mac_reg &= ~(0xF << 14);
+ E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
+
+ mac_reg = E1000_READ_REG(hw, E1000_RCTL);
+ mac_reg &= ~E1000_RCTL_SECRC;
+ E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
+
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ goto out;
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data & ~(1 << 0));
+ if (ret_val)
+ goto out;
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ goto out;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* Write PHY register values back to h/w defaults */
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ goto out;
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
+ data |= (1 << 13);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ goto out;
+ hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x8 << 2);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
+ if (ret_val)
+ goto out;
+ hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
+ ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & ~(1 << 10));
+ if (ret_val)
+ goto out;
+ }
+
+ /* re-enable Rx path after enabling/disabling workaround */
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+
+out:
+ return ret_val;
+}
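
A hedged sketch of how a driver might drive the routine above when the MTU changes; the helper name and the 1518-byte standard-frame cutoff are illustrative assumptions, not the driver's actual call site:

    /*
     * Hypothetical call site: enable the 82579 jumbo workaround when
     * the configured maximum frame size exceeds a standard frame,
     * disable it when it does not.
     */
    static void
    example_set_jumbo_mode(struct e1000_hw *hw, u32 max_frame_size)
    {
            if (hw->mac.type != e1000_pch2lan)
                    return;
            (void) e1000_lv_jumbo_workaround_ich8lan(hw,
                (max_frame_size > 1518) ? TRUE : FALSE);
    }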
+
+/**
+ * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
+ * done after every PHY reset.
+ **/
+static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* Set MDIO slow mode before any other MDIO access */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_k1_workaround_lv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * Workaround to set the K1 beacon duration for 82579 parts
+ **/
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 status_reg = 0;
+ u32 mac_reg;
+
+ DEBUGFUNC("e1000_k1_workaround_lv");
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+ ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
+ if (ret_val)
+ goto out;
+
+ if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+ == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+
+ if (status_reg & HV_M_STATUS_SPEED_1000)
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+ else
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+
+ E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ * @hw: pointer to the HW structure
+ * @gate: boolean set to TRUE to gate, FALSE to un-gate
+ *
+ * Gate/ungate the automatic PHY configuration via hardware; perform
+ * the configuration via software instead.
+ **/
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
+
+ if (hw->mac.type != e1000_pch2lan)
+ return;
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+
+ if (gate)
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ else
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+ return;
+}
+
+/**
+ * e1000_hv_phy_tuning_workaround_ich8lan - A PHY tuning workaround
+ * needed for Nahum3 + Hanksville testing, requested by the HW team
+ **/
+static s32 e1000_hv_phy_tuning_workaround_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_hv_phy_tuning_workaround_ich8lan");
+
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29, 0x66C0);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E, 0xFFFF);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_lan_init_done_ich8lan - Check for PHY config completion
* @hw: pointer to the HW structure
*
@@ -1328,25 +1861,20 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
}
/**
- * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
* @hw: pointer to the HW structure
- *
- * Resets the PHY
- * This is a function pointer entry point called by drivers
- * or other shared routines.
**/
-static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u16 reg;
- DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
+ DEBUGFUNC("e1000_post_phy_reset_ich8lan");
- ret_val = e1000_phy_hw_reset_generic(hw);
- if (ret_val)
+ if (hw->phy.ops.check_reset_block(hw))
goto out;
- /* Allow time for h/w to get to a quiescent state after reset */
+ /* Allow time for h/w to get to quiescent state after reset */
msec_delay(10);
/* Perform any necessary post-reset workarounds */
@@ -1356,12 +1884,23 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto out;
break;
+ case e1000_pch2lan:
+ ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ goto out;
+ break;
default:
break;
}
+ if (hw->device_id == E1000_DEV_ID_ICH10_HANKSVILLE) {
+ ret_val = e1000_hv_phy_tuning_workaround_ich8lan(hw);
+ if (ret_val)
+ goto out;
+ }
+
/* Dummy read to clear the phy wakeup bit after lcd reset */
- if (hw->mac.type == e1000_pchlan)
+ if (hw->mac.type >= e1000_pchlan)
hw->phy.ops.read_reg(hw, BM_WUC, &reg);
/* Configure the LCD with the extended configuration region in NVM */
@@ -1372,6 +1911,42 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
/* Configure the LCD with the OEM bits in NVM */
ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ msec_delay(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY
+ * This is a function pointer entry point called by drivers
+ * or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
+
+ /* Gate automatic PHY configuration by hardware on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
+
+ ret_val = e1000_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+
out:
return ret_val;
}
@@ -1621,6 +2196,8 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
u8 sig_byte = 0;
s32 ret_val = E1000_SUCCESS;
+ DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
+
switch (hw->mac.type) {
case e1000_ich8lan:
case e1000_ich9lan:
@@ -2670,20 +3247,21 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = E1000_READ_REG(hw, E1000_CTRL);
- if (!hw->phy.ops.check_reset_block(hw) && !hw->phy.reset_disable) {
- /* Clear PHY Reset Asserted bit */
- if (hw->mac.type >= e1000_pchlan) {
- u32 status = E1000_READ_REG(hw, E1000_STATUS);
- E1000_WRITE_REG(hw, E1000_STATUS, status &
- ~E1000_STATUS_PHYRA);
- }
-
+ if (!hw->phy.ops.check_reset_block(hw)) {
/*
- * PHY HW reset requires MAC CORE reset at the same
+ * Full-chip reset requires MAC and PHY reset at the same
* time to make sure the interface between MAC and the
* external PHY is reset.
*/
ctrl |= E1000_CTRL_PHY_RST;
+
+ /*
+ * Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
}
ret_val = e1000_acquire_swflag_ich8lan(hw);
DEBUGOUT("Issuing a global reset to ich8lan\n");
@@ -2693,44 +3271,16 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
- /* Perform any necessary post-reset workarounds */
- switch (hw->mac.type) {
- case e1000_pchlan:
- ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+ if (ctrl & E1000_CTRL_PHY_RST) {
+ ret_val = hw->phy.ops.get_cfg_done(hw);
if (ret_val)
goto out;
- break;
- default:
- break;
- }
-
- if (ctrl & E1000_CTRL_PHY_RST)
- ret_val = hw->phy.ops.get_cfg_done(hw);
- if (hw->mac.type >= e1000_ich10lan) {
- e1000_lan_init_done_ich8lan(hw);
- } else {
- ret_val = e1000_get_auto_rd_done_generic(hw);
- if (ret_val) {
- /*
- * When auto config read does not complete, do not
- * return with an error. This can happen in situations
- * where there is no eeprom and prevents getting link.
- */
- DEBUGOUT("Auto Read Done did not complete\n");
- }
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+ if (ret_val)
+ goto out;
}
- /* Dummy read to clear the phy wakeup bit after lcd reset */
- if (hw->mac.type == e1000_pchlan)
- hw->phy.ops.read_reg(hw, BM_WUC, &reg);
- ret_val = e1000_sw_lcd_config_ich8lan(hw);
- if (ret_val)
- goto out;
-
- ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
- if (ret_val)
- goto out;
/*
* For PCH, this write will make sure that any noise
* will be detected as a CRC error and be dropped rather than show up
@@ -2948,7 +3498,10 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
(hw->phy.type == e1000_phy_82577)) {
+ E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
+
ret_val = hw->phy.ops.write_reg(hw,
PHY_REG(BM_PORT_CTRL_PAGE, 27),
hw->fc.pause_time);
@@ -3017,6 +3570,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
goto out;
break;
case e1000_phy_82577:
+ case e1000_phy_82579:
ret_val = e1000_copper_link_setup_82577(hw);
if (ret_val)
goto out;
@@ -3300,21 +3854,21 @@ out:
void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
{
u32 phy_ctrl;
+ s32 ret_val;
- switch (hw->mac.type) {
- case e1000_ich8lan:
- case e1000_ich9lan:
- case e1000_ich10lan:
- case e1000_pchlan:
- phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
- phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
- E1000_PHY_CTRL_GBE_DISABLE;
- E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+ DEBUGFUNC("e1000_disable_gig_wol_ich8lan");
- if (hw->mac.type == e1000_pchlan)
- e1000_phy_hw_reset_ich8lan(hw);
- default:
- break;
+ phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (hw->mac.type >= e1000_pchlan) {
+ e1000_oem_bits_config_ich8lan(hw, FALSE);
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ e1000_write_smbus_addr(hw);
+ hw->phy.ops.release(hw);
}
return;
@@ -3469,31 +4023,49 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
}
/**
- * e1000_get_cfg_done_ich8lan - Read config done bit
+ * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
* @hw: pointer to the HW structure
*
- * Read the management control register for the config done bit for
- * completion status. NOTE: silicon which is EEPROM-less will fail trying
- * to read the config done bit, so an error is *ONLY* logged and returns
- * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
- * would not be able to be reset or change link.
+ * Read appropriate register for the config done bit for completion status
+ * and configure the PHY through s/w for EEPROM-less parts.
+ *
+ * NOTE: some silicon which is EEPROM-less will fail trying to read the
+ * config done bit, so only an error is logged and execution continues.
+ * If we were to return with an error, EEPROM-less silicon would not be
+ * able to be reset or to change link.
**/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u32 bank = 0;
+ u32 status;
- if (hw->mac.type >= e1000_pchlan) {
- u32 status = E1000_READ_REG(hw, E1000_STATUS);
+ DEBUGFUNC("e1000_get_cfg_done_ich8lan");
- if (status & E1000_STATUS_PHYRA)
- E1000_WRITE_REG(hw, E1000_STATUS, status &
- ~E1000_STATUS_PHYRA);
- else
- DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
+ e1000_get_cfg_done_generic(hw);
+
+ /* Wait for indication from h/w that it has completed basic config */
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /*
+ * When the auto config read does not complete, do not return
+ * with an error. This can happen when there is no eeprom, and
+ * an error there would prevent getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ ret_val = E1000_SUCCESS;
+ }
}
- e1000_get_cfg_done_generic(hw);
+ /* Clear PHY Reset Asserted bit */
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_PHYRA)
+ E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
+ else
+ DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
if (hw->mac.type <= e1000_ich9lan) {
@@ -3560,6 +4132,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
/* Clear PHY statistics registers */
if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
(hw->phy.type == e1000_phy_82577)) {
hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
diff --git a/sys/dev/e1000/e1000_ich8lan.h b/sys/dev/e1000/e1000_ich8lan.h
index f26b7b9..2935af5 100644
--- a/sys/dev/e1000/e1000_ich8lan.h
+++ b/sys/dev/e1000/e1000_ich8lan.h
@@ -70,6 +70,23 @@
#define E1000_ICH_MNG_IAMT_MODE 0x2
+#define E1000_FWSM_PROXY_MODE 0x00000008 /* FW is in proxy mode */
+
+/* Shared Receive Address Registers */
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_SHRAH_AV 0x80000000 /* Addr Valid bit */
+#define E1000_SHRAH_MAV 0x40000000 /* Multicast Addr Valid bit */
+
+#define E1000_H2ME 0x05B50 /* Host to ME */
+#define E1000_H2ME_LSECREQ 0x00000001 /* Linksec Request */
+#define E1000_H2ME_LSECA 0x00000002 /* Linksec Active */
+#define E1000_H2ME_LSECSF 0x00000004 /* Linksec Failed */
+#define E1000_H2ME_LSECD 0x00000008 /* Linksec Disabled */
+#define E1000_H2ME_SLCAPD 0x00000010 /* Start LCAPD */
+#define E1000_H2ME_IPV4_ARP_EN 0x00000020 /* Arp Offload enable bit */
+#define E1000_H2ME_IPV6_NS_EN 0x00000040 /* NS Offload enable bit */
+
#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_OFF1_ON2 << 4) | \
@@ -85,9 +102,14 @@
#define E1000_FEXTNVM_SW_CONFIG 1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M */
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
+#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
#define PHY_PAGE_SHIFT 5
#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
@@ -112,6 +134,14 @@
#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+#define BM_IPAV (BM_PHY_REG(BM_WUC_PAGE, 64))
+#define BM_IP4AT_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 82 + ((_i) * 2)))
+#define BM_IP4AT_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 83 + ((_i) * 2)))
+
+#define BM_SHRAL_LOWER(_i) (BM_PHY_REG(BM_WUC_PAGE, 44 + ((_i) * 4)))
+#define BM_SHRAL_UPPER(_i) (BM_PHY_REG(BM_WUC_PAGE, 45 + ((_i) * 4)))
+#define BM_SHRAH_LOWER(_i) (BM_PHY_REG(BM_WUC_PAGE, 46 + ((_i) * 4)))
+#define BM_SHRAH_UPPER(_i) (BM_PHY_REG(BM_WUC_PAGE, 47 + ((_i) * 4)))
#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
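
For orientation, the PHY_REG() encoding used by the register names above and throughout the workarounds packs the page number above a 5-bit register index (PHY_PAGE_SHIFT == 5). Two worked values:

    /*
     * PHY_REG(page, reg) == (page << 5) | reg, so for example:
     *
     *   PHY_REG(769, 20) == (769 << 5) | 20 == 24628
     *   PHY_REG(776, 23) == (776 << 5) | 23 == 24855
     */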
@@ -147,6 +177,7 @@
/* SMBus Address Phy Register */
#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
#define HV_SMB_ADDR_PEC_EN 0x0200
#define HV_SMB_ADDR_VALID 0x0080
@@ -172,6 +203,10 @@
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+
/*
* Additional interrupts need to be handled for ICH family:
* DSW = The FW changed the status of the DISSW bit in FWSM
@@ -195,6 +230,9 @@
#define E1000_RXDEXT_LINKSEC_ERROR_REPLAY_ERROR 0x40000000
#define E1000_RXDEXT_LINKSEC_ERROR_BAD_SIG 0x60000000
+/* Receive Address Initial CRC Calculation */
+#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
+
void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
@@ -203,4 +241,6 @@ void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw);
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_config);
s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
#endif
diff --git a/sys/dev/e1000/e1000_mac.c b/sys/dev/e1000/e1000_mac.c
index d4d2bec..1216671 100644
--- a/sys/dev/e1000/e1000_mac.c
+++ b/sys/dev/e1000/e1000_mac.c
@@ -395,6 +395,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
DEBUGFUNC("e1000_check_alt_mac_addr_generic");
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
+ if (ret_val)
+ goto out;
+
+ /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
+ if (!((nvm_data & NVM_COMPAT_LOM) ||
+ (hw->device_id == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
+ (hw->device_id == E1000_DEV_ID_82571EB_SERDES_QUAD)))
+ goto out;
+
ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
&nvm_alt_mac_addr_offset);
if (ret_val) {
diff --git a/sys/dev/e1000/e1000_nvm.c b/sys/dev/e1000/e1000_nvm.c
index 83557ef..05c7931 100644
--- a/sys/dev/e1000/e1000_nvm.c
+++ b/sys/dev/e1000/e1000_nvm.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -775,6 +775,184 @@ out:
}
/**
+ * e1000_read_pba_string_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ DEBUGFUNC("e1000_read_pba_string_generic");
+
+ if (pba_num == NULL) {
+ DEBUGOUT("PBA string buffer was null\n");
+ ret_val = E1000_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ /*
+ * if nvm_data is not the pointer guard, the PBA must be in legacy
+ * format, which means pba_ptr is actually our second data word for
+ * the PBA number and we can decode it into an ASCII string
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ DEBUGOUT("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (nvm_data >> 12) & 0xF;
+ pba_num[1] = (nvm_data >> 8) & 0xF;
+ pba_num[2] = (nvm_data >> 4) & 0xF;
+ pba_num[3] = nvm_data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ ret_val = E1000_ERR_NVM_PBA_SECTION;
+ goto out;
+ }
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ DEBUGOUT("PBA string buffer too small\n");
+ ret_val = E1000_ERR_NO_SPACE;
+ goto out;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+ pba_num[offset * 2] = (u8)(nvm_data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+out:
+ return ret_val;
+}
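
A standalone sketch of the legacy decode path above, using hypothetical NVM words (0x1234 and 0xABCD) rather than values read from hardware; it prints "1234AB-0CD":

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t nvm_data = 0x1234, pba_ptr = 0xABCD; /* hypothetical words */
            char pba[11];
            int i;

            pba[0] = (nvm_data >> 12) & 0xF;
            pba[1] = (nvm_data >> 8) & 0xF;
            pba[2] = (nvm_data >> 4) & 0xF;
            pba[3] = nvm_data & 0xF;
            pba[4] = (pba_ptr >> 12) & 0xF;
            pba[5] = (pba_ptr >> 8) & 0xF;
            pba[6] = '-';
            pba[7] = 0;
            pba[8] = (pba_ptr >> 4) & 0xF;
            pba[9] = pba_ptr & 0xF;
            pba[10] = '\0';

            /* convert every nibble but the '-' to a hex character */
            for (i = 0; i < 10; i++) {
                    if (pba[i] == '-')
                            continue;
                    pba[i] += (pba[i] < 0xA) ? '0' : 'A' - 0xA;
            }
            printf("%s\n", pba);
            return 0;
    }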
+
+/**
+ * e1000_read_pba_length_generic - Read device part number length
+ * @hw: pointer to the HW structure
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number length from the EEPROM and
+ * stores the value in pba_num_size.
+ **/
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 length;
+
+ DEBUGFUNC("e1000_read_pba_length_generic");
+
+ if (pba_num_size == NULL) {
+ DEBUGOUT("PBA buffer size was null\n");
+ ret_val = E1000_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ /* if data is not ptr guard the PBA must be in legacy format */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ *pba_num_size = 11;
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ ret_val = E1000_ERR_NVM_PBA_SECTION;
+ goto out;
+ }
+
+ /*
+ * Convert from length in u16 values to u8 chars, add 1 for the
+ * NUL terminator, and subtract 2 because the length field itself
+ * is included in the count.
+ */
+ *pba_num_size = ((u32)length * 2) - 1;
+
+out:
+ return ret_val;
+}
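
A minimal caller sketch for the two routines together, assuming an already-initialized hw pointer; the wrapper name and buffer handling are illustrative:

    /*
     * Hypothetical wrapper: size the request with
     * e1000_read_pba_length_generic(), then fetch the string.
     */
    static s32
    example_get_pba(struct e1000_hw *hw, u8 *buf, u32 buflen)
    {
            u32 size;
            s32 ret;

            ret = e1000_read_pba_length_generic(hw, &size);
            if (ret != E1000_SUCCESS)
                    return ret;
            if (size > buflen)
                    return E1000_ERR_NO_SPACE;
            return e1000_read_pba_string_generic(hw, buf, size);
    }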
+
+/**
* e1000_read_pba_num_generic - Read device part number
* @hw: pointer to the HW structure
* @pba_num: pointer to device part number
@@ -793,6 +971,10 @@ s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
if (ret_val) {
DEBUGOUT("NVM Read Error\n");
goto out;
+ } else if (nvm_data == NVM_PBA_PTR_GUARD) {
+ DEBUGOUT("NVM Not Supported\n");
+ ret_val = E1000_NOT_IMPLEMENTED;
+ goto out;
}
*pba_num = (u32)(nvm_data << 16);
@@ -880,7 +1062,7 @@ out:
**/
s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
{
- s32 ret_val;
+ s32 ret_val;
u16 checksum = 0;
u16 i, nvm_data;
diff --git a/sys/dev/e1000/e1000_nvm.h b/sys/dev/e1000/e1000_nvm.h
index 974c407..36ee444 100644
--- a/sys/dev/e1000/e1000_nvm.h
+++ b/sys/dev/e1000/e1000_nvm.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,9 @@ s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);
s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index 24ca36f..fb66623 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -281,6 +281,13 @@ s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
goto out;
}
*data = (u16) mdic;
+
+ /*
+ * Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usec_delay(100);
out:
return ret_val;
@@ -345,6 +352,13 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
goto out;
}
+ /*
+ * Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usec_delay(100);
+
out:
return ret_val;
}
@@ -868,9 +882,10 @@ s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
if (ret_val)
goto out;
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
/* For BM PHY this bit is downshift enable */
- if (phy->type != e1000_phy_bm)
- phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ if (phy->type == e1000_phy_bm)
+ phy_data &= ~M88E1000_PSCR_ASSERT_CRS_ON_TX;
/*
* Options:
@@ -2618,6 +2633,9 @@ enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
case I82577_E_PHY_ID:
phy_type = e1000_phy_82577;
break;
+ case I82579_E_PHY_ID:
+ phy_type = e1000_phy_82579;
+ break;
case I82580_I_PHY_ID:
phy_type = e1000_phy_82580;
break;
diff --git a/sys/dev/e1000/e1000_phy.h b/sys/dev/e1000/e1000_phy.h
index 692cbaa..c42ef6c 100644
--- a/sys/dev/e1000/e1000_phy.h
+++ b/sys/dev/e1000/e1000_phy.h
@@ -230,12 +230,14 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
+#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
diff --git a/sys/dev/e1000/e1000_regs.h b/sys/dev/e1000/e1000_regs.h
index 2cb4397..38ccff3 100644
--- a/sys/dev/e1000/e1000_regs.h
+++ b/sys/dev/e1000/e1000_regs.h
@@ -53,6 +53,7 @@
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
#define E1000_FEXT 0x0002C /* Future Extended - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
@@ -186,6 +187,8 @@
(0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
(0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
@@ -457,6 +460,7 @@
#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
@@ -539,4 +543,6 @@
/* PCIe Parity Status Register */
#define E1000_PCIEERRSTS 0x05BA8
+
+
#endif
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index c8e675e..af6100a 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -93,7 +93,7 @@ int em_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char em_driver_version[] = "7.0.6";
+char em_driver_version[] = "7.0.8";
/*********************************************************************
@@ -165,6 +165,7 @@ static em_vendor_info_t em_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_ICH10_D_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_PCH_M_HV_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_PCH_M_HV_LC, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_PCH_D_HV_DM, PCI_ANY_ID, PCI_ANY_ID, 0},
@@ -237,9 +238,10 @@ static bool em_rxeof(struct rx_ring *, int, int *);
static int em_fixup_rx(struct rx_ring *);
#endif
static void em_receive_checksum(struct e1000_rx_desc *, struct mbuf *);
-static void em_transmit_checksum_setup(struct tx_ring *, struct mbuf *,
- u32 *, u32 *);
-static bool em_tso_setup(struct tx_ring *, struct mbuf *, u32 *, u32 *);
+static void em_transmit_checksum_setup(struct tx_ring *, struct mbuf *, int,
+ struct ip *, u32 *, u32 *);
+static void em_tso_setup(struct tx_ring *, struct mbuf *, int, struct ip *,
+ struct tcphdr *, u32 *, u32 *);
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
@@ -346,16 +348,8 @@ TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
static int em_debug_sbp = FALSE;
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
-/* Local controls for MSI/MSIX */
-#ifdef EM_MULTIQUEUE
static int em_enable_msix = TRUE;
-static int em_msix_queues = 2; /* for 82574, can be 1 or 2 */
-#else
-static int em_enable_msix = FALSE;
-static int em_msix_queues = 0; /* disable */
-#endif
TUNABLE_INT("hw.em.enable_msix", &em_enable_msix);
-TUNABLE_INT("hw.em.msix_queues", &em_msix_queues);
/* How many packets rxeof tries to clean at a time */
static int em_rx_process_limit = 100;
@@ -870,21 +864,18 @@ em_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
}
/*
-** Multiqueue capable stack interface, this is not
-** yet truely multiqueue, but that is coming...
+** Multiqueue capable stack interface
*/
static int
em_mq_start(struct ifnet *ifp, struct mbuf *m)
{
struct adapter *adapter = ifp->if_softc;
struct tx_ring *txr;
- int i, error = 0;
+ int i = 0, error = 0;
/* Which queue to use */
if ((m->m_flags & M_FLOWID) != 0)
i = m->m_pkthdr.flowid % adapter->num_queues;
- else
- i = curcpu % adapter->num_queues;
txr = &adapter->tx_rings[i];
@@ -1467,8 +1458,7 @@ em_handle_que(void *context, int pending)
more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
EM_TX_LOCK(txr);
- if (em_txeof(txr))
- more = TRUE;
+ em_txeof(txr);
#ifdef EM_MULTIQUEUE
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
@@ -1476,6 +1466,7 @@ em_handle_que(void *context, int pending)
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp, txr);
#endif
+ em_txeof(txr);
EM_TX_UNLOCK(txr);
if (more) {
taskqueue_enqueue(adapter->tq, &adapter->que_task);
@@ -1580,11 +1571,8 @@ em_handle_tx(void *context, int pending)
struct adapter *adapter = txr->adapter;
struct ifnet *ifp = adapter->ifp;
- if (!EM_TX_TRYLOCK(txr))
- return;
-
+ EM_TX_LOCK(txr);
em_txeof(txr);
-
#ifdef EM_MULTIQUEUE
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
@@ -1592,6 +1580,7 @@ em_handle_tx(void *context, int pending)
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp, txr);
#endif
+ em_txeof(txr);
E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
EM_TX_UNLOCK(txr);
}
@@ -1745,13 +1734,18 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
struct em_buffer *tx_buffer, *tx_buffer_mapped;
struct e1000_tx_desc *ctxd = NULL;
struct mbuf *m_head;
+ struct ether_header *eh;
+ struct ip *ip = NULL;
+ struct tcphdr *tp = NULL;
u32 txd_upper, txd_lower, txd_used, txd_saved;
+ int ip_off, poff;
int nsegs, i, j, first, last = 0;
int error, do_tso, tso_desc = 0;
m_head = *m_headp;
txd_upper = txd_lower = txd_used = txd_saved = 0;
do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
+ ip_off = poff = 0;
/*
** When doing checksum offload, it is critical to
@@ -1767,15 +1761,100 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
}
/*
- * TSO workaround:
- * If an mbuf is only header we need
- * to pull 4 bytes of data into it.
+	 * Intel recommends that the entire IP/TCP header reside in a single
+	 * buffer. If multiple descriptors are used to describe the IP and
+	 * TCP header, each descriptor should describe one or more
+	 * complete headers; descriptors referencing only parts of headers
+	 * are not supported. If all layer headers are not coalesced into
+	 * a single buffer, each buffer should not cross a 4KB boundary,
+	 * or be larger than the maximum read request size.
+	 * The controller also requires modifying the IP/TCP header to make
+	 * TSO work, so we first get a writable mbuf chain, then coalesce
+	 * the ethernet/IP/TCP header into a single buffer to meet the
+	 * controller's requirement. This also simplifies IP/TCP/UDP
+	 * checksum offloading, which has similar restrictions.
*/
- if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
- m_head = m_pullup(m_head, M_TSO_LEN + 4);
- *m_headp = m_head;
- if (m_head == NULL)
+ if (do_tso || m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
+ if (do_tso || (m_head->m_next != NULL &&
+ m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)) {
+ if (M_WRITABLE(*m_headp) == 0) {
+ m_head = m_dup(*m_headp, M_DONTWAIT);
+ m_freem(*m_headp);
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ *m_headp = m_head;
+ }
+ }
+ /*
+ * XXX
+ * Assume IPv4, we don't have TSO/checksum offload support
+ * for IPv6 yet.
+ */
+ ip_off = sizeof(struct ether_header);
+ m_head = m_pullup(m_head, ip_off);
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ eh = mtod(m_head, struct ether_header *);
+ if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
+ ip_off = sizeof(struct ether_vlan_header);
+ m_head = m_pullup(m_head, ip_off);
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ }
+ m_head = m_pullup(m_head, ip_off + sizeof(struct ip));
+ if (m_head == NULL) {
+ *m_headp = NULL;
return (ENOBUFS);
+ }
+ ip = (struct ip *)(mtod(m_head, char *) + ip_off);
+ poff = ip_off + (ip->ip_hl << 2);
+ m_head = m_pullup(m_head, poff + sizeof(struct tcphdr));
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ if (do_tso) {
+ tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
+ /*
+ * TSO workaround:
+			 * pull 4 more bytes of payload into the header mbuf.
+ */
+ m_head = m_pullup(m_head, poff + (tp->th_off << 2) + 4);
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ ip->ip_len = 0;
+ ip->ip_sum = 0;
+ /*
+			 * The pseudo TCP checksum does not include the TCP
+			 * payload length, so the driver must recompute the
+			 * checksum here as the hardware expects to see it.
+			 * This follows Microsoft's Large Send specification.
+ */
+ tp->th_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+ } else if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
+ tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
+ m_head = m_pullup(m_head, poff + (tp->th_off << 2));
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
+ m_head = m_pullup(m_head, poff + sizeof(struct udphdr));
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ }
+ *m_headp = m_head;
}
/*
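
The value written into th_sum above is the one's complement sum of the pseudo header with a zero payload length; the hardware adds the per-segment payload length during TSO. A standalone sketch equivalent to that in_pseudo() call, assuming all inputs are already in network byte order:

    #include <stdint.h>

    /* Folded one's complement sum of src addr, dst addr and protocol. */
    static uint16_t
    pseudo_hdr_sum(uint32_t src, uint32_t dst, uint16_t proto_nbo)
    {
            uint64_t sum;

            sum = (uint64_t)(src & 0xFFFF) + (src >> 16) +
                  (dst & 0xFFFF) + (dst >> 16) + proto_nbo;
            while (sum >> 16)
                    sum = (sum & 0xFFFF) + (sum >> 16);
            return (uint16_t)sum;
    }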
@@ -1852,16 +1931,15 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
/* Do hardware assists */
#if __FreeBSD_version >= 700000
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
- error = em_tso_setup(txr, m_head, &txd_upper, &txd_lower);
- if (error != TRUE)
- return (ENXIO); /* something foobar */
+ em_tso_setup(txr, m_head, ip_off, ip, tp, &txd_upper,
+ &txd_lower);
/* we need to make a final sentinel transmit desc */
tso_desc = TRUE;
} else
#endif
if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
- em_transmit_checksum_setup(txr, m_head,
- &txd_upper, &txd_lower);
+ em_transmit_checksum_setup(txr, m_head,
+ ip_off, ip, &txd_upper, &txd_lower);
i = txr->next_avail_desc;
@@ -1946,6 +2024,8 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
*/
tx_buffer = &txr->tx_buffers[first];
tx_buffer->next_eop = last;
+ /* Update the watchdog time early and often */
+ txr->watchdog_time = ticks;
/*
* Advance the Transmit Descriptor Tail (TDT), this tells the E1000
@@ -2087,6 +2167,14 @@ em_local_timer(void *arg)
if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+ /*
+	** If flow control has paused us since we last checked,
+	** it invalidates the watchdog timing, so don't run it.
+ */
+ if (adapter->pause_frames) {
+ adapter->pause_frames = 0;
+ goto out;
+ }
/*
** Check for time since any descriptor was cleaned
*/
@@ -2100,11 +2188,18 @@ em_local_timer(void *arg)
goto hung;
EM_TX_UNLOCK(txr);
}
-
+out:
callout_reset(&adapter->timer, hz, em_local_timer, adapter);
return;
hung:
device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
+ device_printf(adapter->dev,
+ "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
+ E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)),
+ E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me)));
+	device_printf(adapter->dev, "TX(%d) desc avail = %d, "
+	    "Next TX to Clean = %d\n",
+ txr->me, txr->tx_avail, txr->next_to_clean);
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
adapter->watchdog_events++;
EM_TX_UNLOCK(txr);
@@ -2118,6 +2213,7 @@ em_update_link_status(struct adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct ifnet *ifp = adapter->ifp;
device_t dev = adapter->dev;
+ struct tx_ring *txr = adapter->tx_rings;
u32 link_check = 0;
/* Get the cached link value or read phy for real */
@@ -2175,8 +2271,8 @@ em_update_link_status(struct adapter *adapter)
device_printf(dev, "Link is Down\n");
adapter->link_active = 0;
/* Link down, disable watchdog */
- // JFV change later
- //adapter->watchdog_check = FALSE;
+ for (int i = 0; i < adapter->num_queues; i++, txr++)
+ txr->watchdog_check = FALSE;
if_link_state_change(ifp, LINK_STATE_DOWN);
}
}
@@ -2378,6 +2474,9 @@ em_allocate_msix(struct adapter *adapter)
device_printf(dev, "Failed to register RX handler");
return (error);
}
+#if __FreeBSD_version >= 800504
+ bus_describe_intr(dev, rxr->res, rxr->tag, "rx %d", i);
+#endif
rxr->msix = vector++; /* NOTE increment vector for TX */
TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr);
rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT,
@@ -2409,6 +2508,9 @@ em_allocate_msix(struct adapter *adapter)
device_printf(dev, "Failed to register TX handler");
return (error);
}
+#if __FreeBSD_version >= 800504
+ bus_describe_intr(dev, txr->res, txr->tag, "tx %d", i);
+#endif
txr->msix = vector++; /* Increment vector for next pass */
TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr);
txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT,
@@ -2443,6 +2545,9 @@ em_allocate_msix(struct adapter *adapter)
device_printf(dev, "Failed to register LINK handler");
return (error);
}
+#if __FreeBSD_version >= 800504
+ bus_describe_intr(dev, adapter->res, adapter->tag, "link");
+#endif
adapter->linkvec = vector;
adapter->ivars |= (8 | vector) << 16;
adapter->ivars |= 0x80000000;
@@ -2524,7 +2629,12 @@ em_setup_msix(struct adapter *adapter)
int val = 0;
- /* Setup MSI/X for Hartwell */
+ /*
+ ** Setup MSI/X for Hartwell: tests have shown
+ ** use of two queues to be unstable, and to
+ ** provide no great gain anyway, so we simply
+	** separate the interrupts and use a single queue.
+ */
if ((adapter->hw.mac.type == e1000_82574) &&
(em_enable_msix == TRUE)) {
/* Map the MSIX BAR */
@@ -2538,21 +2648,16 @@ em_setup_msix(struct adapter *adapter)
goto msi;
}
val = pci_msix_count(dev);
- if (val != 5) {
+ if (val < 3) {
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem);
adapter->msix_mem = NULL;
device_printf(adapter->dev,
- "MSIX vectors wrong, using MSI \n");
+ "MSIX: insufficient vectors, using MSI\n");
goto msi;
}
- if (em_msix_queues == 2) {
- val = 5;
- adapter->num_queues = 2;
- } else {
- val = 3;
- adapter->num_queues = 1;
- }
+ val = 3;
+ adapter->num_queues = 1;
if (pci_alloc_msix(dev, &val) == 0) {
device_printf(adapter->dev,
"Using MSIX interrupts "
@@ -3069,6 +3174,13 @@ em_setup_transmit_ring(struct tx_ring *txr)
/* Set number of descriptors available */
txr->tx_avail = adapter->num_tx_desc;
+ /* Clear checksum offload context. */
+ txr->last_hw_offload = 0;
+ txr->last_hw_ipcss = 0;
+ txr->last_hw_ipcso = 0;
+ txr->last_hw_tucss = 0;
+ txr->last_hw_tucso = 0;
+
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
EM_TX_UNLOCK(txr);
@@ -3263,146 +3375,138 @@ em_free_transmit_buffers(struct tx_ring *txr)
/*********************************************************************
- *
- * The offload context needs to be set when we transfer the first
- * packet of a particular protocol (TCP/UDP). This routine has been
- * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
- *
- * Added back the old method of keeping the current context type
- * and not setting if unnecessary, as this is reported to be a
- * big performance win. -jfv
+ * The offload context is protocol specific (TCP/UDP) and thus
+ * only needs to be set when the protocol changes. A context
+ * change is a performance detriment, though, and the offload
+ * might be better left disabled. The reason lies in the way
+ * the controller supports pipelined requests from the
+ * Tx data DMA. Up to four requests can be pipelined, and they may
+ * belong to the same packet or to multiple packets. However, all
+ * requests for one packet are issued before a request is issued
+ * for a subsequent packet, and if a request for the next packet
+ * requires a context change, that request will be stalled
+ * until the previous request completes. This means setting up
+ * a new context effectively disables pipelined Tx data DMA,
+ * which in turn greatly slows down performance when sending
+ * small frames.
**********************************************************************/
static void
-em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp,
- u32 *txd_upper, u32 *txd_lower)
+em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
+ struct ip *ip, u32 *txd_upper, u32 *txd_lower)
{
struct adapter *adapter = txr->adapter;
struct e1000_context_desc *TXD = NULL;
- struct em_buffer *tx_buffer;
- struct ether_vlan_header *eh;
- struct ip *ip = NULL;
- struct ip6_hdr *ip6;
- int cur, ehdrlen;
- u32 cmd, hdr_len, ip_hlen;
- u16 etype;
- u8 ipproto;
-
-
- cmd = hdr_len = ipproto = 0;
- *txd_upper = *txd_lower = 0;
- cur = txr->next_avail_desc;
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present,
- * helpful for QinQ too.
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- etype = ntohs(eh->evl_proto);
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- } else {
- etype = ntohs(eh->evl_encap_proto);
- ehdrlen = ETHER_HDR_LEN;
- }
-
- /*
- * We only support TCP/UDP for IPv4 and IPv6 for the moment.
- * TODO: Support SCTP too when it hits the tree.
- */
- switch (etype) {
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + ehdrlen);
- ip_hlen = ip->ip_hl << 2;
-
- /* Setup of IP header checksum. */
- if (mp->m_pkthdr.csum_flags & CSUM_IP) {
- /*
- * Start offset for header checksum calculation.
- * End offset for header checksum calculation.
- * Offset of place to put the checksum.
- */
- TXD = (struct e1000_context_desc *)
- &txr->tx_base[cur];
- TXD->lower_setup.ip_fields.ipcss = ehdrlen;
- TXD->lower_setup.ip_fields.ipcse =
- htole16(ehdrlen + ip_hlen);
- TXD->lower_setup.ip_fields.ipcso =
- ehdrlen + offsetof(struct ip, ip_sum);
- cmd |= E1000_TXD_CMD_IP;
- *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
- }
-
- hdr_len = ehdrlen + ip_hlen;
- ipproto = ip->ip_p;
- break;
-
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
-
- /* IPv6 doesn't have a header checksum. */
+ struct em_buffer *tx_buffer;
+ int cur, hdr_len;
+ u32 cmd = 0;
+ u16 offload = 0;
+ u8 ipcso, ipcss, tucso, tucss;
- hdr_len = ehdrlen + ip_hlen;
- ipproto = ip6->ip6_nxt;
- break;
+ ipcss = ipcso = tucss = tucso = 0;
+ hdr_len = ip_off + (ip->ip_hl << 2);
+ cur = txr->next_avail_desc;
- default:
- return;
+ /* Setup of IP header checksum. */
+ if (mp->m_pkthdr.csum_flags & CSUM_IP) {
+ *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+ offload |= CSUM_IP;
+ ipcss = ip_off;
+ ipcso = ip_off + offsetof(struct ip, ip_sum);
+ /*
+ * Start offset for header checksum calculation.
+ * End offset for header checksum calculation.
+ * Offset of place to put the checksum.
+ */
+ TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
+ TXD->lower_setup.ip_fields.ipcss = ipcss;
+ TXD->lower_setup.ip_fields.ipcse = htole16(hdr_len);
+ TXD->lower_setup.ip_fields.ipcso = ipcso;
+ cmd |= E1000_TXD_CMD_IP;
}
- switch (ipproto) {
- case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
- *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
- *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
- /* no need for context if already set */
- if (txr->last_hw_offload == CSUM_TCP)
- return;
- txr->last_hw_offload = CSUM_TCP;
- /*
- * Start offset for payload checksum calculation.
- * End offset for payload checksum calculation.
- * Offset of place to put the checksum.
- */
- TXD = (struct e1000_context_desc *)
- &txr->tx_base[cur];
- TXD->upper_setup.tcp_fields.tucss = hdr_len;
- TXD->upper_setup.tcp_fields.tucse = htole16(0);
- TXD->upper_setup.tcp_fields.tucso =
- hdr_len + offsetof(struct tcphdr, th_sum);
- cmd |= E1000_TXD_CMD_TCP;
- }
- break;
- case IPPROTO_UDP:
- {
- if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
- *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
- *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
- /* no need for context if already set */
- if (txr->last_hw_offload == CSUM_UDP)
- return;
- txr->last_hw_offload = CSUM_UDP;
- /*
- * Start offset for header checksum calculation.
- * End offset for header checksum calculation.
- * Offset of place to put the checksum.
- */
- TXD = (struct e1000_context_desc *)
- &txr->tx_base[cur];
- TXD->upper_setup.tcp_fields.tucss = hdr_len;
- TXD->upper_setup.tcp_fields.tucse = htole16(0);
- TXD->upper_setup.tcp_fields.tucso =
- hdr_len + offsetof(struct udphdr, uh_sum);
- }
- /* Fall Thru */
- }
- default:
- break;
- }
+ if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
+ *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ offload |= CSUM_TCP;
+ tucss = hdr_len;
+ tucso = hdr_len + offsetof(struct tcphdr, th_sum);
+ /*
+		 * Setting up a new checksum offload context for every frame
+		 * takes a lot of processing time for the hardware. This also
+		 * reduces performance a lot for small frames, so avoid it if
+		 * the driver can reuse a previously configured checksum
+		 * offload context.
+ */
+ if (txr->last_hw_offload == offload) {
+ if (offload & CSUM_IP) {
+ if (txr->last_hw_ipcss == ipcss &&
+ txr->last_hw_ipcso == ipcso &&
+ txr->last_hw_tucss == tucss &&
+ txr->last_hw_tucso == tucso)
+ return;
+ } else {
+ if (txr->last_hw_tucss == tucss &&
+ txr->last_hw_tucso == tucso)
+ return;
+ }
+ }
+ txr->last_hw_offload = offload;
+ txr->last_hw_tucss = tucss;
+ txr->last_hw_tucso = tucso;
+ /*
+ * Start offset for payload checksum calculation.
+ * End offset for payload checksum calculation.
+ * Offset of place to put the checksum.
+ */
+ TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
+ TXD->upper_setup.tcp_fields.tucss = hdr_len;
+ TXD->upper_setup.tcp_fields.tucse = htole16(0);
+ TXD->upper_setup.tcp_fields.tucso = tucso;
+ cmd |= E1000_TXD_CMD_TCP;
+ } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
+ *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ tucss = hdr_len;
+ tucso = hdr_len + offsetof(struct udphdr, uh_sum);
+ /*
+		 * Setting up a new checksum offload context for every frame
+		 * takes a lot of processing time for the hardware. This also
+		 * reduces performance a lot for small frames, so avoid it if
+		 * the driver can reuse a previously configured checksum
+		 * offload context.
+ */
+ if (txr->last_hw_offload == offload) {
+ if (offload & CSUM_IP) {
+ if (txr->last_hw_ipcss == ipcss &&
+ txr->last_hw_ipcso == ipcso &&
+ txr->last_hw_tucss == tucss &&
+ txr->last_hw_tucso == tucso)
+ return;
+ } else {
+ if (txr->last_hw_tucss == tucss &&
+ txr->last_hw_tucso == tucso)
+ return;
+ }
+ }
+ txr->last_hw_offload = offload;
+ txr->last_hw_tucss = tucss;
+ txr->last_hw_tucso = tucso;
+ /*
+ * Start offset for header checksum calculation.
+ * End offset for header checksum calculation.
+ * Offset of place to put the checksum.
+ */
+ TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
+ TXD->upper_setup.tcp_fields.tucss = tucss;
+ TXD->upper_setup.tcp_fields.tucse = htole16(0);
+ TXD->upper_setup.tcp_fields.tucso = tucso;
+ }
+
+ if (offload & CSUM_IP) {
+ txr->last_hw_ipcss = ipcss;
+ txr->last_hw_ipcso = ipcso;
+ }
- if (TXD == NULL)
- return;
TXD->tcp_seg_setup.data = htole32(0);
TXD->cmd_and_length =
htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
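
To make the offset bookkeeping above concrete, here are the values the context descriptor receives for a plain untagged IPv4/TCP frame with a 20-byte IP header (illustrative numbers only):

    /*
     * ip_off = 14 (ethernet header), ip->ip_hl == 5:
     *
     *   hdr_len = 14 + 20      = 34
     *   ipcss   = 14                 IP checksum coverage starts
     *   ipcse   = 34                 IP checksum coverage ends
     *   ipcso   = 14 + 10      = 24  offsetof(struct ip, ip_sum)
     *   tucss   = 34                 TCP checksum coverage starts
     *   tucso   = 34 + 16      = 50  offsetof(struct tcphdr, th_sum)
     *   tucse   = 0                  checksum runs to end of packet
     */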
@@ -3423,124 +3527,52 @@ em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp,
* Setup work for hardware segmentation offload (TSO)
*
**********************************************************************/
-static bool
-em_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *txd_upper,
- u32 *txd_lower)
+static void
+em_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
+ struct ip *ip, struct tcphdr *tp, u32 *txd_upper, u32 *txd_lower)
{
struct adapter *adapter = txr->adapter;
struct e1000_context_desc *TXD;
struct em_buffer *tx_buffer;
- struct ether_vlan_header *eh;
- struct ip *ip;
- struct ip6_hdr *ip6;
- struct tcphdr *th;
- int cur, ehdrlen, hdr_len, ip_hlen, isip6;
- u16 etype;
-
- /*
- * This function could/should be extended to support IP/IPv6
- * fragmentation as well. But as they say, one step at a time.
- */
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present,
- * helpful for QinQ too.
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- etype = ntohs(eh->evl_proto);
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- } else {
- etype = ntohs(eh->evl_encap_proto);
- ehdrlen = ETHER_HDR_LEN;
- }
-
- /* Ensure we have at least the IP+TCP header in the first mbuf. */
- if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
- return FALSE; /* -1 */
+ int cur, hdr_len;
/*
- * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
- * TODO: Support SCTP too when it hits the tree.
+	 * In theory we can reuse the same TSO context if and only if
+	 * the frame is the same type (IP/TCP) and has the same MSS.
+	 * However, checking whether a frame has the same IP/TCP
+	 * structure is hard, so just ignore that and always establish
+	 * a new TSO context.
*/
- switch (etype) {
- case ETHERTYPE_IP:
- isip6 = 0;
- ip = (struct ip *)(mp->m_data + ehdrlen);
- if (ip->ip_p != IPPROTO_TCP)
- return FALSE; /* 0 */
- ip->ip_len = 0;
- ip->ip_sum = 0;
- ip_hlen = ip->ip_hl << 2;
- if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
- return FALSE; /* -1 */
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
-#if 1
- th->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP));
-#else
- th->th_sum = mp->m_pkthdr.csum_data;
-#endif
- break;
- case ETHERTYPE_IPV6:
- isip6 = 1;
- return FALSE; /* Not supported yet. */
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- if (ip6->ip6_nxt != IPPROTO_TCP)
- return FALSE; /* 0 */
- ip6->ip6_plen = 0;
- ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
- if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
- return FALSE; /* -1 */
- th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
-#if 0
- th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
- htons(IPPROTO_TCP)); /* XXX: function notyet. */
-#else
- th->th_sum = mp->m_pkthdr.csum_data;
-#endif
- break;
- default:
- return FALSE;
- }
- hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);
-
+ hdr_len = ip_off + (ip->ip_hl << 2) + (tp->th_off << 2);
*txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
E1000_TXD_DTYP_D | /* Data descr type */
E1000_TXD_CMD_TSE); /* Do TSE on this packet */
/* IP and/or TCP header checksum calculation and insertion. */
- *txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
- E1000_TXD_POPTS_TXSM) << 8;
+ *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
cur = txr->next_avail_desc;
tx_buffer = &txr->tx_buffers[cur];
TXD = (struct e1000_context_desc *) &txr->tx_base[cur];
- /* IPv6 doesn't have a header checksum. */
- if (!isip6) {
- /*
- * Start offset for header checksum calculation.
- * End offset for header checksum calculation.
- * Offset of place put the checksum.
- */
- TXD->lower_setup.ip_fields.ipcss = ehdrlen;
- TXD->lower_setup.ip_fields.ipcse =
- htole16(ehdrlen + ip_hlen - 1);
- TXD->lower_setup.ip_fields.ipcso =
- ehdrlen + offsetof(struct ip, ip_sum);
- }
+ /*
+ * Start offset for header checksum calculation.
+ * End offset for header checksum calculation.
+	 * Offset of place to put the checksum.
+ */
+ TXD->lower_setup.ip_fields.ipcss = ip_off;
+ TXD->lower_setup.ip_fields.ipcse =
+ htole16(ip_off + (ip->ip_hl << 2) - 1);
+ TXD->lower_setup.ip_fields.ipcso = ip_off + offsetof(struct ip, ip_sum);
/*
* Start offset for payload checksum calculation.
* End offset for payload checksum calculation.
	 * Offset of the place to put the checksum.
*/
- TXD->upper_setup.tcp_fields.tucss =
- ehdrlen + ip_hlen;
+ TXD->upper_setup.tcp_fields.tucss = ip_off + (ip->ip_hl << 2);
TXD->upper_setup.tcp_fields.tucse = 0;
TXD->upper_setup.tcp_fields.tucso =
- ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
+ ip_off + (ip->ip_hl << 2) + offsetof(struct tcphdr, th_sum);
/*
* Payload size per packet w/o any headers.
* Length of all headers up to payload.
@@ -3551,7 +3583,7 @@ em_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *txd_upper,
TXD->cmd_and_length = htole32(adapter->txd_cmd |
E1000_TXD_CMD_DEXT | /* Extended descr */
E1000_TXD_CMD_TSE | /* TSE context */
- (isip6 ? 0 : E1000_TXD_CMD_IP) |
+ E1000_TXD_CMD_IP | /* Do IP csum */
E1000_TXD_CMD_TCP | /* Do TCP checksum */
(mp->m_pkthdr.len - (hdr_len))); /* Total len */
@@ -3564,8 +3596,6 @@ em_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *txd_upper,
txr->tx_avail--;
txr->next_avail_desc = cur;
txr->tx_tso = TRUE;
-
- return TRUE;
}
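
[Editor's note] The new comment in em_tso_setup() above chooses to always re-establish a TSO context rather than try to detect a reusable one; the plain checksum-offload path, by contrast, caches the last offsets in the new last_hw_ipcss/ipcso/tucss/tucso ring fields (see the if_em.h hunk below). A minimal stand-alone sketch of that cache-and-compare idea follows; struct ctx_cache and need_new_context() are hypothetical names for illustration, not driver API:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the per-ring fields the if_em.h
	 * hunk adds (last_hw_ipcss/ipcso/tucss/tucso). */
	struct ctx_cache {
		uint8_t	ipcss, ipcso, tucss, tucso;
		int	valid;
	};

	/* Return nonzero when a fresh context descriptor must be
	 * written, updating the cache as a side effect. */
	static int
	need_new_context(struct ctx_cache *c, uint8_t ipcss, uint8_t ipcso,
	    uint8_t tucss, uint8_t tucso)
	{
		if (c->valid && c->ipcss == ipcss && c->ipcso == ipcso &&
		    c->tucss == tucss && c->tucso == tucso)
			return (0);	/* hardware already holds this context */
		c->ipcss = ipcss; c->ipcso = ipcso;
		c->tucss = tucss; c->tucso = tucso;
		c->valid = 1;
		return (1);	/* spend one descriptor on a new context */
	}

	int
	main(void)
	{
		struct ctx_cache c = { 0 };

		printf("%d\n", need_new_context(&c, 14, 24, 34, 50)); /* 1: first use */
		printf("%d\n", need_new_context(&c, 14, 24, 34, 50)); /* 0: reusable */
		printf("%d\n", need_new_context(&c, 18, 28, 38, 54)); /* 1: changed */
		return (0);
	}

Skipping the context descriptor when nothing changed saves one descriptor per packet; the commit takes that route only for the non-TSO case, where the comparison is cheap.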
@@ -3580,7 +3610,7 @@ static bool
em_txeof(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
- int first, last, done, num_avail;
+ int first, last, done;
struct em_buffer *tx_buffer;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct ifnet *ifp = adapter->ifp;
@@ -3590,7 +3620,6 @@ em_txeof(struct tx_ring *txr)
if (txr->tx_avail == adapter->num_tx_desc)
return (FALSE);
- num_avail = txr->tx_avail;
first = txr->next_to_clean;
tx_desc = &txr->tx_base[first];
tx_buffer = &txr->tx_buffers[first];
@@ -3616,16 +3645,14 @@ em_txeof(struct tx_ring *txr)
tx_desc->upper.data = 0;
tx_desc->lower.data = 0;
tx_desc->buffer_addr = 0;
- ++num_avail;
+ ++txr->tx_avail;
if (tx_buffer->m_head) {
- ifp->if_opackets++;
bus_dmamap_sync(txr->txtag,
tx_buffer->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txr->txtag,
tx_buffer->map);
-
m_freem(tx_buffer->m_head);
tx_buffer->m_head = NULL;
}
@@ -3638,6 +3665,7 @@ em_txeof(struct tx_ring *txr)
tx_buffer = &txr->tx_buffers[first];
tx_desc = &txr->tx_base[first];
}
+ ++ifp->if_opackets;
/* See if we can continue to the next packet */
last = tx_buffer->next_eop;
if (last != -1) {
@@ -3654,20 +3682,18 @@ em_txeof(struct tx_ring *txr)
txr->next_to_clean = first;
/*
- * If we have enough room, clear IFF_DRV_OACTIVE to
- * tell the stack that it is OK to send packets.
- * If there are no pending descriptors, clear the watchdog.
+ * If we have enough room, clear IFF_DRV_OACTIVE
+ * to tell the stack that it is OK to send packets.
*/
- if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
+ if (txr->tx_avail > EM_TX_CLEANUP_THRESHOLD) {
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- if (num_avail == adapter->num_tx_desc) {
+ /* Disable watchdog if all clean */
+ if (txr->tx_avail == adapter->num_tx_desc) {
txr->watchdog_check = FALSE;
- txr->tx_avail = num_avail;
return (FALSE);
}
}
- txr->tx_avail = num_avail;
return (TRUE);
}
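
[Editor's note] The em_txeof() rewrite above drops the local num_avail and updates txr->tx_avail in place, clearing IFF_DRV_OACTIVE once a cleanup threshold is crossed and disabling the watchdog when the ring is fully clean. A toy model of that bookkeeping, with NUM_TX_DESC and CLEAN_THRESH as made-up stand-ins for the driver's constants:

	#include <stdbool.h>
	#include <stdio.h>

	#define	NUM_TX_DESC	256
	#define	CLEAN_THRESH	(NUM_TX_DESC / 8)	/* stand-in for EM_TX_CLEANUP_THRESHOLD */

	struct ring {
		int	tx_avail;	/* descriptors free for new work */
		bool	oactive;	/* models IFF_DRV_OACTIVE */
		bool	watchdog_check;
	};

	/* One cleanup pass that reclaimed 'n' descriptors; returns true
	 * while work may remain, false once the ring is fully clean. */
	static bool
	txeof(struct ring *r, int n)
	{
		r->tx_avail += n;	/* the driver increments per descriptor */
		if (r->tx_avail > CLEAN_THRESH) {
			r->oactive = false;	/* stack may transmit again */
			if (r->tx_avail == NUM_TX_DESC) {
				r->watchdog_check = false;	/* nothing pending */
				return (false);
			}
		}
		return (true);
	}

	int
	main(void)
	{
		struct ring r = { .tx_avail = 0, .oactive = true,
		    .watchdog_check = true };

		while (txeof(&r, 64))
			printf("avail %d oactive %d\n", r.tx_avail, r.oactive);
		printf("clean: watchdog_check %d\n", r.watchdog_check);
		return (0);
	}

Updating tx_avail in place also removes the window in the old code where the shared field lagged behind the local counter until the final write-back.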
@@ -4827,7 +4853,12 @@ em_update_stats_counters(struct adapter *adapter)
adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
- adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
+ /*
+ ** For watchdog management we need to know if we have been
+ ** paused during the last interval, so capture that here.
+ */
+ adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
+ adapter->stats.xoffrxc += adapter->pause_frames;
adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index 229cc0c..fec34ac 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -284,6 +284,10 @@ struct tx_ring {
volatile u16 tx_avail;
u32 tx_tso; /* last tx was tso */
u16 last_hw_offload;
+ u8 last_hw_ipcso;
+ u8 last_hw_ipcss;
+ u8 last_hw_tucso;
+ u8 last_hw_tucss;
#if __FreeBSD_version >= 800000
struct buf_ring *br;
#endif
@@ -355,6 +359,7 @@ struct adapter {
int if_flags;
int max_frame_size;
int min_frame_size;
+ int pause_frames;
struct mtx core_mtx;
int em_insert_vlan_header;
u32 ims;
diff --git a/sys/dev/e1000/if_igb.c b/sys/dev/e1000/if_igb.c
index 04b29ae..ee7ba71 100644
--- a/sys/dev/e1000/if_igb.c
+++ b/sys/dev/e1000/if_igb.c
@@ -99,7 +99,7 @@ int igb_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char igb_driver_version[] = "version - 2.0.1";
+char igb_driver_version[] = "version - 2.0.4";
/*********************************************************************
@@ -128,6 +128,8 @@ static igb_vendor_info_t igb_vendor_info_array[] =
PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82576_QUAD_COPPER,
PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2,
+ PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82576_VF, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82580_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82580_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
@@ -1933,16 +1935,31 @@ igb_local_timer(void *arg)
igb_update_link_status(adapter);
igb_update_stats_counters(adapter);
+ /*
+	** If flow control has paused us since the last check,
+	** it invalidates the watchdog timing, so don't run it.
+ */
+ if (adapter->pause_frames) {
+ adapter->pause_frames = 0;
+ goto out;
+ }
+
/*
** Watchdog: check for time since any descriptor was cleaned
*/
for (int i = 0; i < adapter->num_queues; i++, txr++) {
- if (txr->watchdog_check == FALSE)
+ IGB_TX_LOCK(txr);
+ if ((txr->watchdog_check == FALSE) ||
+ (txr->tx_avail == adapter->num_tx_desc)) {
+ IGB_TX_UNLOCK(txr);
continue;
+ }
if ((ticks - txr->watchdog_time) > IGB_WATCHDOG)
goto timeout;
+ IGB_TX_UNLOCK(txr);
}
+out:
callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
return;
@@ -1956,6 +1973,7 @@ timeout:
txr->me, txr->tx_avail, txr->next_to_clean);
adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
adapter->watchdog_events++;
+ IGB_TX_UNLOCK(txr);
igb_init_locked(adapter);
}
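
[Editor's note] The igb_local_timer() hunks above latch E1000_XOFFRXC into adapter->pause_frames and skip the per-queue timeout check for any interval in which pause frames arrived, since flow control legitimately stalls transmission. A toy version of that decision, with WATCHDOG_TICKS a made-up stand-in for IGB_WATCHDOG:

	#include <stdbool.h>
	#include <stdio.h>

	#define	WATCHDOG_TICKS	5	/* stand-in for IGB_WATCHDOG */

	struct nic {
		int	pause_frames;		/* latched from XOFFRXC each tick */
		int	ticks_since_clean;	/* ticks since a descriptor was cleaned */
	};

	/* True only when the ring looks genuinely hung: stale for too
	 * long and not explainable by received pause frames. */
	static bool
	watchdog_expired(struct nic *n)
	{
		if (n->pause_frames) {
			n->pause_frames = 0;	/* consume; timing is invalid */
			return (false);
		}
		return (n->ticks_since_clean > WATCHDOG_TICKS);
	}

	int
	main(void)
	{
		struct nic n = { .pause_frames = 3, .ticks_since_clean = 9 };

		printf("%d\n", watchdog_expired(&n));	/* 0: paused, allow time */
		printf("%d\n", watchdog_expired(&n));	/* 1: still stale, no pause */
		return (0);
	}

The same latch is added to em_update_stats_counters() and igb_update_stats_counters(), so both drivers stop treating a flow-control pause as a hung transmit ring.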
@@ -4192,9 +4210,11 @@ igb_rx_discard(struct rx_ring *rxr, int i)
mp = rbuf->m_pack;
/* Reuse loaded DMA map and just update mbuf chain */
- mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- mh->m_next = NULL;
+	if (mh) { /* with no header split this would be NULL */
+ mh->m_len = MHLEN;
+ mh->m_flags |= M_PKTHDR;
+ mh->m_next = NULL;
+ }
mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
mp->m_data = mp->m_ext.ext_buf;
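
[Editor's note] The igb_rx_discard() hunk above adds the guard on mh, which is only allocated when header split is in use — the null-pointer case found by Andrew Boyer per the commit log. A minimal stand-alone illustration of the same guard, with toy stand-in types (struct mbuf here is not the real one):

	#include <stddef.h>
	#include <stdio.h>

	struct mbuf { int m_len; };	/* toy; the real struct mbuf is far richer */

	struct rxbuf {
		struct mbuf	*m_head;	/* header mbuf: NULL without hdr split */
		struct mbuf	*m_pack;	/* payload mbuf: always present */
	};

	static void
	rx_discard(struct rxbuf *rb, int mbuf_sz)
	{
		if (rb->m_head != NULL)	/* the guard the fix adds */
			rb->m_head->m_len = 0;
		rb->m_pack->m_len = mbuf_sz;
	}

	int
	main(void)
	{
		struct mbuf pack = { .m_len = 1500 };
		struct rxbuf rb = { .m_head = NULL, .m_pack = &pack };

		rx_discard(&rb, 2048);	/* must not dereference m_head */
		printf("pack len %d\n", pack.m_len);
		return (0);
	}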
@@ -4805,7 +4825,12 @@ igb_update_stats_counters(struct adapter *adapter)
stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
- stats->xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC);
+ /*
+ ** For watchdog management we need to know if we have been
+ ** paused during the last interval, so capture that here.
+ */
+ adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
+ stats->xoffrxc += adapter->pause_frames;
stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
diff --git a/sys/dev/e1000/if_igb.h b/sys/dev/e1000/if_igb.h
index 28f5d32..059a8b9 100644
--- a/sys/dev/e1000/if_igb.h
+++ b/sys/dev/e1000/if_igb.h
@@ -203,14 +203,6 @@
/* PCI Config defines */
#define IGB_MSIX_BAR 3
-/*
-** This is the total number of MSIX vectors you wish
-** to use, it also controls the size of resources.
-** The 82575 has a total of 10, 82576 has 25. Set this
-** to the real amount you need to streamline data storage.
-*/
-#define IGB_MSIX_VEC 6 /* MSIX vectors configured */
-
/* Defines for printing debug information */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
@@ -247,12 +239,6 @@
#define IGB_INTS_PER_SEC 8000
#define IGB_DEFAULT_ITR 1000000000/(IGB_INTS_PER_SEC * 256)
-
-/* Header split codes for get_buf */
-#define IGB_CLEAN_HEADER 0x01
-#define IGB_CLEAN_PAYLOAD 0x02
-#define IGB_CLEAN_BOTH (IGB_CLEAN_HEADER | IGB_CLEAN_PAYLOAD)
-
#define IGB_LINK_ITR 2000
/* Precision Time Sync (IEEE 1588) defines */
@@ -385,6 +371,7 @@ struct adapter {
int if_flags;
int max_frame_size;
int min_frame_size;
+ int pause_frames;
struct mtx core_mtx;
int igb_insert_vlan_header;
u16 num_queues;
@@ -412,6 +399,9 @@ struct adapter {
struct tx_ring *tx_rings;
u16 num_tx_desc;
+ /* Multicast array pointer */
+ u8 *mta;
+
/*
* Receive rings
*/
@@ -422,8 +412,6 @@ struct adapter {
u32 rx_mbuf_sz;
u32 rx_mask;
- /* Multicast array memory */
- u8 *mta;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_defrag_failed;
diff --git a/sys/dev/e1000/if_lem.c b/sys/dev/e1000/if_lem.c
index 30f3d7b..846d6bf 100644
--- a/sys/dev/e1000/if_lem.c
+++ b/sys/dev/e1000/if_lem.c
@@ -86,7 +86,7 @@
/*********************************************************************
* Legacy Em Driver version:
*********************************************************************/
-char lem_driver_version[] = "1.0.1";
+char lem_driver_version[] = "1.0.2";
/*********************************************************************
@@ -1753,6 +1753,7 @@ lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
*/
tx_buffer = &adapter->tx_buffer_area[first];
tx_buffer->next_eop = last;
+ adapter->watchdog_time = ticks;
/*
* Advance the Transmit Descriptor Tail (TDT), this tells the E1000
@@ -2010,8 +2011,6 @@ lem_local_timer(void *arg)
EM_CORE_LOCK_ASSERT(adapter);
- taskqueue_enqueue(adapter->tq,
- &adapter->rxtx_task);
lem_update_link_status(adapter);
lem_update_stats_counters(adapter);