author     jfv <jfv@FreeBSD.org>    2010-01-26 22:32:22 +0000
committer  jfv <jfv@FreeBSD.org>    2010-01-26 22:32:22 +0000
commit     54091abe67852e91ca4e6f545a6885b5d4bed87c (patch)
tree       eb911cd9a67d97f75d734c5a30c583fe1d98da03 /sys/dev
parent     3967eef496321857d82af9f980626ad7b504f70d (diff)
Update the 1G drivers; sync the shared code with Intel.

igb now has a notion of a queue: a single interrupt services an RX/TX ring pair, which reduces the total interrupt count seen on a system. Both em and igb gain a new watchdog method. igb also picks up stability fixes from Pyun Yong-Hyeon that have improved stability, thank you :) I wish to MFC this for 7.3 ASAP, so please test if able.
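For readers unfamiliar with the queue model the message describes, here is a minimal standalone sketch of the idea; these are not the actual if_igb.c structures, and every sketch_* name is invented for illustration. Each queue pairs one TX ring and one RX ring behind a single MSI-X vector, so servicing both halves of the pair costs one interrupt:

/*
 * Hypothetical sketch of the per-queue pairing described above:
 * one MSI-X vector drives both rings of a queue.  All names are
 * illustrative, not taken from the driver.
 */
#include <stdint.h>

struct sketch_tx_ring { uint32_t next_to_clean; };
struct sketch_rx_ring { uint32_t next_to_check; };

struct sketch_igb_queue {
	struct sketch_tx_ring txr;   /* transmit half of the pair */
	struct sketch_rx_ring rxr;   /* receive half of the pair */
	int                   msix;  /* the queue's single MSI-X vector */
	uint64_t              irqs;  /* interrupts taken on this queue */
};

static void sketch_txeof(struct sketch_tx_ring *txr) { (void)txr; /* reclaim completed transmits */ }
static void sketch_rxeof(struct sketch_rx_ring *rxr) { (void)rxr; /* harvest received frames */ }

/* One handler per queue: both rings are serviced from one vector. */
static void
sketch_queue_intr(void *arg)
{
	struct sketch_igb_queue *q = arg;

	q->irqs++;
	sketch_txeof(&q->txr);
	sketch_rxeof(&q->rxr);
}

Relative to a layout with separate RX and TX handlers per ring, this roughly halves the number of queue vectors an adapter consumes.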
Diffstat (limited to 'sys/dev')
 -rw-r--r--  sys/dev/e1000/LICENSE              |    2
 -rw-r--r--  sys/dev/e1000/e1000_80003es2lan.c  |  115
 -rw-r--r--  sys/dev/e1000/e1000_82540.c        |    4
 -rw-r--r--  sys/dev/e1000/e1000_82541.c        |    4
 -rw-r--r--  sys/dev/e1000/e1000_82542.c        |    4
 -rw-r--r--  sys/dev/e1000/e1000_82543.c        |   44
 -rw-r--r--  sys/dev/e1000/e1000_82571.c        |    4
 -rw-r--r--  sys/dev/e1000/e1000_82575.c        |  144
 -rw-r--r--  sys/dev/e1000/e1000_82575.h        |    6
 -rw-r--r--  sys/dev/e1000/e1000_api.c          |   33
 -rw-r--r--  sys/dev/e1000/e1000_api.h          |    4
 -rw-r--r--  sys/dev/e1000/e1000_defines.h      |    3
 -rw-r--r--  sys/dev/e1000/e1000_hw.h           |    7
 -rw-r--r--  sys/dev/e1000/e1000_ich8lan.c      |  331
 -rw-r--r--  sys/dev/e1000/e1000_ich8lan.h      |    6
 -rw-r--r--  sys/dev/e1000/e1000_mac.c          |   85
 -rw-r--r--  sys/dev/e1000/e1000_mac.h          |    4
 -rw-r--r--  sys/dev/e1000/e1000_manage.c       |   37
 -rw-r--r--  sys/dev/e1000/e1000_osdep.h        |    6
 -rw-r--r--  sys/dev/e1000/e1000_phy.c          |  112
 -rw-r--r--  sys/dev/e1000/e1000_phy.h          |    3
 -rw-r--r--  sys/dev/e1000/e1000_regs.h         |    3
 -rw-r--r--  sys/dev/e1000/if_em.c              |   12
 -rw-r--r--  sys/dev/e1000/if_em.h              |  221
 -rw-r--r--  sys/dev/e1000/if_igb.c             | 1773
 -rw-r--r--  sys/dev/e1000/if_igb.h             |  107
26 files changed, 1368 insertions(+), 1706 deletions(-)
diff --git a/sys/dev/e1000/LICENSE b/sys/dev/e1000/LICENSE
index f271dae..f70a7cb 100644
--- a/sys/dev/e1000/LICENSE
+++ b/sys/dev/e1000/LICENSE
@@ -1,6 +1,6 @@
$FreeBSD$
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_80003es2lan.c b/sys/dev/e1000/e1000_80003es2lan.c
index cee3164..db32f8c 100644
--- a/sys/dev/e1000/e1000_80003es2lan.c
+++ b/sys/dev/e1000/e1000_80003es2lan.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -256,8 +256,6 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
- /* setting MTA */
- mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_80003es2lan;
/* ID LED init */
@@ -1048,72 +1046,73 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_copper_link_setup_gg82563_80003es2lan");
- if (!phy->reset_disable) {
- ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
- &data);
- if (ret_val)
- goto out;
+ if (phy->reset_disable)
+ goto skip_reset;
- data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
- /* Use 25MHz for both link down and 1000Base-T for Tx clock. */
- data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+ &data);
+ if (ret_val)
+ goto out;
- ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
- data);
- if (ret_val)
- goto out;
+ data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ /* Use 25MHz for both link down and 1000Base-T for Tx clock. */
+ data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
- /*
- * Options:
- * MDI/MDI-X = 0 (default)
- * 0 - Auto for all speeds
- * 1 - MDI mode
- * 2 - MDI-X mode
- * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
- */
- ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL, &data);
- if (ret_val)
- goto out;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+ data);
+ if (ret_val)
+ goto out;
- data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+ /*
+ * Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL, &data);
+ if (ret_val)
+ goto out;
- switch (phy->mdix) {
- case 1:
- data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
- break;
- case 2:
- data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
- break;
- case 0:
- default:
- data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
- break;
- }
+ data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
- /*
- * Options:
- * disable_polarity_correction = 0 (default)
- * Automatic Correction for Reversed Cable Polarity
- * 0 - Disabled
- * 1 - Enabled
- */
- data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
- if (phy->disable_polarity_correction)
- data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+ switch (phy->mdix) {
+ case 1:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+ break;
+ case 2:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+ break;
+ case 0:
+ default:
+ data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ break;
+ }
- ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, data);
- if (ret_val)
- goto out;
+ /*
+ * Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+ if (phy->disable_polarity_correction)
+ data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
- /* SW Reset the PHY so all changes take effect */
- ret_val = hw->phy.ops.commit(hw);
- if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
- goto out;
- }
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, data);
+ if (ret_val)
+ goto out;
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ goto out;
}
+skip_reset:
/* Bypass Rx and Tx FIFO's */
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
diff --git a/sys/dev/e1000/e1000_82540.c b/sys/dev/e1000/e1000_82540.c
index 14dcbb3..80a58777 100644
--- a/sys/dev/e1000/e1000_82540.c
+++ b/sys/dev/e1000/e1000_82540.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -228,8 +228,6 @@ static s32 e1000_init_mac_params_82540(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
- /* setting MTA */
- mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82540;
/* ID LED init */
diff --git a/sys/dev/e1000/e1000_82541.c b/sys/dev/e1000/e1000_82541.c
index c58a602..fd8d8eb 100644
--- a/sys/dev/e1000/e1000_82541.c
+++ b/sys/dev/e1000/e1000_82541.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -260,8 +260,6 @@ static s32 e1000_init_mac_params_82541(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
- /* setting MTA */
- mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82541;
/* ID LED init */
diff --git a/sys/dev/e1000/e1000_82542.c b/sys/dev/e1000/e1000_82542.c
index 46ef66a..282814b 100644
--- a/sys/dev/e1000/e1000_82542.c
+++ b/sys/dev/e1000/e1000_82542.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -134,8 +134,6 @@ static s32 e1000_init_mac_params_82542(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
- /* setting MTA */
- mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82542;
/* set RAR */
diff --git a/sys/dev/e1000/e1000_82543.c b/sys/dev/e1000/e1000_82543.c
index 41c9f8f..4bb0cbd 100644
--- a/sys/dev/e1000/e1000_82543.c
+++ b/sys/dev/e1000/e1000_82543.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2008, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -63,7 +63,6 @@ static s32 e1000_led_on_82543(struct e1000_hw *hw);
static s32 e1000_led_off_82543(struct e1000_hw *hw);
static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset,
u32 value);
-static void e1000_mta_set_82543(struct e1000_hw *hw, u32 hash_value);
static void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw);
static s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw);
static bool e1000_init_phy_disabled_82543(struct e1000_hw *hw);
@@ -246,8 +245,6 @@ static s32 e1000_init_mac_params_82543(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_82543;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
- /* setting MTA */
- mac->ops.mta_set = e1000_mta_set_82543;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82543;
/* turn on/off LED */
@@ -1481,45 +1478,6 @@ static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, u32 value)
}
/**
- * e1000_mta_set_82543 - Set multicast filter table address
- * @hw: pointer to the HW structure
- * @hash_value: determines the MTA register and bit to set
- *
- * The multicast table address is a register array of 32-bit registers.
- * The hash_value is used to determine what register the bit is in, the
- * current value is read, the new bit is OR'd in and the new value is
- * written back into the register.
- **/
-static void e1000_mta_set_82543(struct e1000_hw *hw, u32 hash_value)
-{
- u32 hash_bit, hash_reg, mta, temp;
-
- DEBUGFUNC("e1000_mta_set_82543");
-
- hash_reg = (hash_value >> 5);
-
- /*
- * If we are on an 82544 and we are trying to write an odd offset
- * in the MTA, save off the previous entry before writing and
- * restore the old value after writing.
- */
- if ((hw->mac.type == e1000_82544) && (hash_reg & 1)) {
- hash_reg &= (hw->mac.mta_reg_count - 1);
- hash_bit = hash_value & 0x1F;
- mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
- mta |= (1 << hash_bit);
- temp = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg - 1);
-
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
- E1000_WRITE_FLUSH(hw);
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg - 1, temp);
- E1000_WRITE_FLUSH(hw);
- } else {
- e1000_mta_set_generic(hw, hash_value);
- }
-}
-
-/**
* e1000_led_on_82543 - Turn on SW controllable LED
* @hw: pointer to the HW structure
*
diff --git a/sys/dev/e1000/e1000_82571.c b/sys/dev/e1000/e1000_82571.c
index be59b3b..96a3b2f 100644
--- a/sys/dev/e1000/e1000_82571.c
+++ b/sys/dev/e1000/e1000_82571.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -336,8 +336,6 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_82571;
- /* setting MTA */
- mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82571;
/* ID LED init */
diff --git a/sys/dev/e1000/e1000_82575.c b/sys/dev/e1000/e1000_82575.c
index 5c877c2..4227556 100644
--- a/sys/dev/e1000/e1000_82575.c
+++ b/sys/dev/e1000/e1000_82575.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -80,8 +80,10 @@ static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
+static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] =
@@ -122,8 +124,7 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
- } else if ((hw->mac.type == e1000_82580) ||
- (hw->mac.type == e1000_82580er)) {
+ } else if (hw->mac.type >= e1000_82580) {
phy->ops.reset = e1000_phy_hw_reset_generic;
phy->ops.read_reg = e1000_read_phy_reg_82580;
phy->ops.write_reg = e1000_write_phy_reg_82580;
@@ -273,8 +274,7 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
* if using i2c make certain the MDICNFG register is cleared to prevent
* communications from being misrouted to the mdic registers
*/
- if ((ctrl_ext & E1000_CTRL_I2C_ENA) &&
- ((hw->mac.type == e1000_82580) || (hw->mac.type == e1000_82580er)))
+ if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
E1000_WRITE_REG(hw, E1000_MDICNFG, 0);
/* Set mta register count */
@@ -285,7 +285,7 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
if (mac->type == e1000_82576)
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
- if ((mac->type == e1000_82580) || (mac->type == e1000_82580er))
+ if (mac->type == e1000_82580)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
/* Set if part includes ASF firmware */
mac->asf_firmware_present = TRUE;
@@ -299,7 +299,7 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
/* bus type/speed/width */
mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
/* reset */
- if ((mac->type == e1000_82580) || (mac->type == e1000_82580er))
+ if (mac->type >= e1000_82580)
mac->ops.reset_hw = e1000_reset_hw_82580;
else
mac->ops.reset_hw = e1000_reset_hw_82575;
@@ -314,20 +314,22 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
: e1000_setup_serdes_link_82575;
/* physical interface shutdown */
mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
+ /* physical interface power up */
+ mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
/* check for link */
mac->ops.check_for_link = e1000_check_for_link_82575;
/* receive address register setting */
mac->ops.rar_set = e1000_rar_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
+ /* configure collision distance */
+ mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
/* writing VFTA */
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
- /* setting MTA */
- mac->ops.mta_set = e1000_mta_set_generic;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* blink LED */
@@ -888,6 +890,35 @@ static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
}
/**
+ * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ * @hw: pointer to the HW structure
+ **/
+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_power_up_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return;
+
+ /* Enable PCS to turn on link */
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_PCS_EN;
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+ /* Power up the laser */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+}
+
+/**
* e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
* @hw: pointer to the HW structure
* @speed: stores the current speed
@@ -954,28 +985,14 @@ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
u32 reg;
- u16 eeprom_data = 0;
+
+ DEBUGFUNC("e1000_shutdown_serdes_link_82575");
if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
!e1000_sgmii_active_82575(hw))
return;
- if (hw->bus.func == E1000_FUNC_0)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if ((hw->mac.type == e1000_82580) ||
- (hw->mac.type == e1000_82580er))
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
- NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
- &eeprom_data);
- else if (hw->bus.func == E1000_FUNC_1)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
-
- /*
- * If APM is not enabled in the EEPROM and management interface is
- * not enabled, then power down.
- */
- if (!(eeprom_data & E1000_NVM_APME_82575) &&
- !e1000_enable_mng_pass_thru(hw)) {
+ if (!e1000_enable_mng_pass_thru(hw)) {
/* Disable PCS to turn off link */
reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
reg &= ~E1000_PCS_CFG_PCS_EN;
@@ -1205,16 +1222,10 @@ static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
ctrl_reg |= E1000_CTRL_SLU;
- if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
- /* set both sw defined pins */
+ /* set both sw defined pins on 82575/82576*/
+ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
- /* Set switch control to serdes energy detect */
- reg = E1000_READ_REG(hw, E1000_CONNSW);
- reg |= E1000_CONNSW_ENRGSRC;
- E1000_WRITE_REG(hw, E1000_CONNSW, reg);
- }
-
reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
/* default pcs_autoneg to the same setting as mac autoneg */
@@ -1268,10 +1279,7 @@ static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
- reg |= E1000_PCS_LCTL_FSD | /* Force Speed */
- E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
- E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
-
+ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
@@ -1396,6 +1404,28 @@ out:
}
/**
+ * e1000_config_collision_dist_82575 - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
+{
+ u32 tctl_ext;
+
+ DEBUGFUNC("e1000_config_collision_dist_82575");
+
+ tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
+
+ tctl_ext &= ~E1000_TCTL_EXT_COLD;
+ tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
+
+ E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
* e1000_power_down_phy_copper_82575 - Remove link during PHY power down
* @hw: pointer to the HW structure
*
@@ -1656,7 +1686,6 @@ void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
**/
static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
- u32 mdicnfg = 0;
s32 ret_val;
DEBUGFUNC("e1000_read_phy_reg_82580");
@@ -1665,15 +1694,6 @@ static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
if (ret_val)
goto out;
- /*
- * We config the phy address in MDICNFG register now. Same bits
- * as before. The values in MDIC can be written but will be
- * ignored. This allows us to call the old function after
- * configuring the PHY address in the new register
- */
- mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
- E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
-
ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
@@ -1692,7 +1712,6 @@ out:
**/
static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
- u32 mdicnfg = 0;
s32 ret_val;
DEBUGFUNC("e1000_write_phy_reg_82580");
@@ -1701,15 +1720,6 @@ static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
if (ret_val)
goto out;
- /*
- * We config the phy address in MDICNFG register now. Same bits
- * as before. The values in MDIC can be written but will be
- * ignored. This allows us to call the old function after
- * configuring the PHY address in the new register
- */
- mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
- E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
-
ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
@@ -1717,6 +1727,7 @@ static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
out:
return ret_val;
}
+
/**
* e1000_reset_hw_82580 - Reset hardware
* @hw: pointer to the HW structure
@@ -1822,20 +1833,3 @@ u16 e1000_rxpbs_adjust_82580(u32 data)
return ret_val;
}
-/**
- * e1000_erfuse_check_82580 - ER Fuse check
- * @hw: pointer to the HW structure
- *
- * This function returns the status of the ER Fuse
- **/
-s32 e1000_erfuse_check_82580(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- s32 ufuse_reg;
-
- ufuse_reg = E1000_READ_REG(hw, E1000_UFUSE);
- if ((ufuse_reg & E1000_ERFUSE) == E1000_ERFUSE)
- ret_val = E1000_ERFUSE_FAILURE;
-
- return ret_val;
-}
diff --git a/sys/dev/e1000/e1000_82575.h b/sys/dev/e1000/e1000_82575.h
index f22a963f..582f4a4 100644
--- a/sys/dev/e1000/e1000_82575.h
+++ b/sys/dev/e1000/e1000_82575.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -443,6 +443,9 @@ struct e1000_adv_tx_context_desc {
#define E1000_RPLOLR_STRVLAN 0x40000000
#define E1000_RPLOLR_STRCRC 0x80000000
+#define E1000_TCTL_EXT_COLD 0x000FFC00
+#define E1000_TCTL_EXT_COLD_SHIFT 10
+
#define E1000_DTXCTL_8023LL 0x0004
#define E1000_DTXCTL_VLAN_ADDED 0x0008
#define E1000_DTXCTL_OOS_ENABLE 0x0010
@@ -456,5 +459,4 @@ struct e1000_adv_tx_context_desc {
void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
u16 e1000_rxpbs_adjust_82580(u32 data);
-s32 e1000_erfuse_check_82580(struct e1000_hw *);
#endif /* _E1000_82575_H_ */
diff --git a/sys/dev/e1000/e1000_api.c b/sys/dev/e1000/e1000_api.c
index 154eff9..bf9fa2a 100644
--- a/sys/dev/e1000/e1000_api.c
+++ b/sys/dev/e1000/e1000_api.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -281,10 +281,6 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82580_COPPER_DUAL:
mac->type = e1000_82580;
break;
- case E1000_DEV_ID_82580_ER:
- case E1000_DEV_ID_82580_ER_DUAL:
- mac->type = e1000_82580er;
- break;
default:
/* Should never have loaded on this device */
ret_val = -E1000_ERR_MAC_INIT;
@@ -376,7 +372,6 @@ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
case e1000_82575:
case e1000_82576:
case e1000_82580:
- case e1000_82580er:
e1000_init_function_pointers_82575(hw);
break;
default:
@@ -760,20 +755,6 @@ s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
}
/**
- * e1000_mta_set - Sets multicast table bit
- * @hw: pointer to the HW structure
- * @hash_value: Multicast hash value.
- *
- * This sets the bit in the multicast table corresponding to the
- * hash value. This is a function pointer entry point called by drivers.
- **/
-void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
-{
- if (hw->mac.ops.mta_set)
- hw->mac.ops.mta_set(hw, hash_value);
-}
-
-/**
* e1000_hash_mc_addr - Determines address location in multicast table
* @hw: pointer to the HW structure
* @mc_addr: Multicast address to hash.
@@ -1252,6 +1233,18 @@ void e1000_power_down_phy(struct e1000_hw *hw)
}
/**
+ * e1000_power_up_fiber_serdes_link - Power up serdes link
+ * @hw: pointer to the HW structure
+ *
+ * Power on the optics and PCS.
+ **/
+void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.power_up_serdes)
+ hw->mac.ops.power_up_serdes(hw);
+}
+
+/**
* e1000_shutdown_fiber_serdes_link - Remove link during power down
* @hw: pointer to the HW structure
*
diff --git a/sys/dev/e1000/e1000_api.h b/sys/dev/e1000/e1000_api.h
index b492e57..b7bc14c 100644
--- a/sys/dev/e1000/e1000_api.h
+++ b/sys/dev/e1000/e1000_api.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,7 @@ extern void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw);
extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
+extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
s32 e1000_set_mac_type(struct e1000_hw *hw);
@@ -67,7 +68,6 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
s32 e1000_disable_pcie_master(struct e1000_hw *hw);
void e1000_config_collision_dist(struct e1000_hw *hw);
void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
-void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
void e1000_update_mc_addr_list(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count);
diff --git a/sys/dev/e1000/e1000_defines.h b/sys/dev/e1000/e1000_defines.h
index 4636506..4c80ca03 100644
--- a/sys/dev/e1000/e1000_defines.h
+++ b/sys/dev/e1000/e1000_defines.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -1004,7 +1004,6 @@
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
#define E1000_ERR_MBX 15
-#define E1000_ERFUSE_FAILURE 16
/* Loop limit on how long we wait for auto-negotiation to complete */
#define FIBER_LINK_UP_LIMIT 50
diff --git a/sys/dev/e1000/e1000_hw.h b/sys/dev/e1000/e1000_hw.h
index b01c5d1..fd30173 100644
--- a/sys/dev/e1000/e1000_hw.h
+++ b/sys/dev/e1000/e1000_hw.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -142,8 +142,6 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
-#define E1000_DEV_ID_82580_ER 0x151D
-#define E1000_DEV_ID_82580_ER_DUAL 0x151E
#define E1000_REVISION_0 0
#define E1000_REVISION_1 1
#define E1000_REVISION_2 2
@@ -187,7 +185,6 @@ enum e1000_mac_type {
e1000_82575,
e1000_82576,
e1000_82580,
- e1000_82580er,
e1000_num_macs /* List is 1-based, so subtract 1 for TRUE count. */
};
@@ -603,11 +600,11 @@ struct e1000_mac_operations {
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
void (*shutdown_serdes)(struct e1000_hw *);
+ void (*power_up_serdes)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
- void (*mta_set)(struct e1000_hw *, u32);
void (*config_collision_dist)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8*, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
diff --git a/sys/dev/e1000/e1000_ich8lan.c b/sys/dev/e1000/e1000_ich8lan.c
index 09c08eb..e2483b9 100644
--- a/sys/dev/e1000/e1000_ich8lan.c
+++ b/sys/dev/e1000/e1000_ich8lan.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -125,6 +125,7 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
@@ -199,7 +200,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->id = e1000_phy_unknown;
- e1000_get_phy_id(hw);
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+ /*
+ * In case the PHY needs to be in mdio slow mode (eg. 82577),
+ * set slow mode and try to get the PHY id again.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ }
phy->type = e1000_get_phy_type_from_id(phy->id);
switch (phy->type) {
@@ -221,6 +236,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
break;
}
+out:
return ret_val;
}
@@ -442,8 +458,6 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
- /* setting MTA */
- mac->ops.mta_set = e1000_mta_set_generic;
/* clear hardware counters */
mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
@@ -464,6 +478,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.led_on = e1000_led_on_ich8lan;
mac->ops.led_off = e1000_led_off_ich8lan;
break;
+#if defined(NAHUM4) || defined(NAHUM5)
case e1000_pchlan:
/* save PCH revision_id */
e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
@@ -478,6 +493,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.led_on = e1000_led_on_pchlan;
mac->ops.led_off = e1000_led_off_pchlan;
break;
+#endif /* defined(NAHUM4) || defined(NAHUM5) */
default:
break;
}
@@ -596,9 +612,11 @@ void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
case e1000_ich10lan:
hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
break;
+#if defined(NAHUM4) || defined(NAHUM5)
case e1000_pchlan:
hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
break;
+#endif /* defined(NAHUM4) || defined(NAHUM5) */
default:
break;
}
@@ -767,9 +785,13 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
- s32 ret_val;
+ s32 ret_val = E1000_SUCCESS;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
+ if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
+ !(hw->mac.type == e1000_pchlan))
+ return ret_val;
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
@@ -781,95 +803,92 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
* Therefore, after each PHY reset, we will load the
* configuration data out of the NVM manually.
*/
- if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
- (hw->mac.type == e1000_pchlan)) {
- /* Check if SW needs to configure the PHY */
- if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
- (hw->device_id == E1000_DEV_ID_ICH8_IGP_M) ||
- (hw->mac.type == e1000_pchlan))
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
- else
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+ (hw->device_id == E1000_DEV_ID_ICH8_IGP_M) ||
+ (hw->mac.type == e1000_pchlan))
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+ else
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
- data = E1000_READ_REG(hw, E1000_FEXTNVM);
- if (!(data & sw_cfg_mask))
- goto out;
+ data = E1000_READ_REG(hw, E1000_FEXTNVM);
+ if (!(data & sw_cfg_mask))
+ goto out;
- /* Wait for basic configuration completes before proceeding */
- e1000_lan_init_done_ich8lan(hw);
+ /* Wait for basic configuration completes before proceeding */
+ e1000_lan_init_done_ich8lan(hw);
+
+ /*
+ * Make sure HW does not configure LCD from PHY
+ * extended configuration before SW configuration
+ */
+ data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ goto out;
+ cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
+ cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+ cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+ if (!cnf_size)
+ goto out;
+
+ cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+ cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+#if defined(NAHUM4) || defined(NAHUM5)
+ if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+ (hw->mac.type == e1000_pchlan)) {
/*
- * Make sure HW does not configure LCD from PHY
- * extended configuration before SW configuration
+ * HW configures the SMBus address and LEDs when the
+ * OEM and LCD Write Enable bits are set in the NVM.
+ * When both NVM bits are cleared, SW will configure
+ * them instead.
*/
- data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
- if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ data = E1000_READ_REG(hw, E1000_STRAP);
+ data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+ reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
+ reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
+ reg_data);
+ if (ret_val)
goto out;
- cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
- cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
- cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
- if (!cnf_size)
+ data = E1000_READ_REG(hw, E1000_LEDCTL);
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
+ (u16)data);
+ if (ret_val)
goto out;
+ }
- cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
- cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
-
- if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
- (hw->mac.type == e1000_pchlan)) {
- /*
- * HW configures the SMBus address and LEDs when the
- * OEM and LCD Write Enable bits are set in the NVM.
- * When both NVM bits are cleared, SW will configure
- * them instead.
- */
- data = E1000_READ_REG(hw, E1000_STRAP);
- data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
- reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
- reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
- ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
- reg_data);
- if (ret_val)
- goto out;
-
- data = E1000_READ_REG(hw, E1000_LEDCTL);
- ret_val = e1000_write_phy_reg_hv_locked(hw,
- HV_LED_CONFIG,
- (u16)data);
- if (ret_val)
- goto out;
- }
-
- /* Configure LCD from extended configuration region. */
+#endif /* defined(NAHUM4) || defined(NAHUM5) */
+ /* Configure LCD from extended configuration region. */
- /* cnf_base_addr is in DWORD */
- word_addr = (u16)(cnf_base_addr << 1);
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
- for (i = 0; i < cnf_size; i++) {
- ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
- &reg_data);
- if (ret_val)
- goto out;
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
+ &reg_data);
+ if (ret_val)
+ goto out;
- ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
- 1, &reg_addr);
- if (ret_val)
- goto out;
+ ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
+ 1, &reg_addr);
+ if (ret_val)
+ goto out;
- /* Save off the PHY page for future writes. */
- if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
- phy_page = reg_data;
- continue;
- }
+ /* Save off the PHY page for future writes. */
+ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+ phy_page = reg_data;
+ continue;
+ }
- reg_addr &= PHY_REG_MASK;
- reg_addr |= phy_page;
+ reg_addr &= PHY_REG_MASK;
+ reg_addr |= phy_page;
- ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
- reg_data);
- if (ret_val)
- goto out;
- }
+ ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
+ reg_data);
+ if (ret_val)
+ goto out;
}
out:
@@ -1088,16 +1107,44 @@ s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+
+ ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= HV_KMRN_MDIO_SLOW;
+
+ ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
+
+ return ret_val;
+}
+
+/**
* e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
* done after every PHY reset.
**/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
if (hw->mac.type != e1000_pchlan)
goto out;
+ /* Set MDIO slow mode before any other MDIO access */
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
/* Hanksville M Phy init for IEEE. */
if ((hw->revision_id == 2) &&
(hw->phy.type == e1000_phy_82577) &&
@@ -1186,16 +1233,32 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
hw->phy.addr = 1;
ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release(hw);
if (ret_val)
goto out;
- hw->phy.ops.release(hw);
/*
* Configure the K1 Si workaround during phy reset assuming there is
* link so that it disables K1 if link is in 1Gbps.
*/
ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
+ if (ret_val)
+ goto out;
+ /* Workaround for link disconnects on a busy hub in half duplex */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ &phy_data);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ phy_data & 0x00FF);
+release:
+ hw->phy.ops.release(hw);
out:
return ret_val;
}
@@ -1256,10 +1319,15 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
/* Allow time for h/w to get to a quiescent state after reset */
msec_delay(10);
- if (hw->mac.type == e1000_pchlan) {
+ /* Perform any necessary post-reset workarounds */
+ switch (hw->mac.type) {
+ case e1000_pchlan:
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
goto out;
+ break;
+ default:
+ break;
}
/* Dummy read to clear the phy wakeup bit after lcd reset */
@@ -1272,8 +1340,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
goto out;
/* Configure the LCD with the OEM bits in NVM */
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
+ ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
out:
return ret_val;
@@ -1972,18 +2039,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
new_bank_offset = nvm->flash_bank_size;
old_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
} else {
old_bank_offset = nvm->flash_bank_size;
new_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
}
for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
@@ -2038,8 +2101,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
if (ret_val) {
DEBUGOUT("Flash commit failed.\n");
- nvm->ops.release(hw);
- goto out;
+ goto release;
}
/*
@@ -2050,19 +2112,15 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
data &= 0xBFFF;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset * 2 + 1,
(u8)(data >> 8));
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
/*
* And invalidate the previously valid segment by setting
@@ -2072,10 +2130,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
/* Great! Everything worked, we can now clear the cached entries. */
for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
@@ -2083,14 +2139,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
dev_spec->shadow_ram[i].value = 0xFFFF;
}
+release:
nvm->ops.release(hw);
/*
* Reload the EEPROM, or else modifications will not appear
* until after the next adapter reset.
*/
- nvm->ops.reload(hw);
- msec_delay(10);
+ if (!ret_val) {
+ nvm->ops.reload(hw);
+ msec_delay(10);
+ }
out:
if (ret_val)
@@ -2604,6 +2663,17 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
+ /* Perform any necessary post-reset workarounds */
+ switch (hw->mac.type) {
+ case e1000_pchlan:
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ goto out;
+ break;
+ default:
+ break;
+ }
+
if (ctrl & E1000_CTRL_PHY_RST)
ret_val = hw->phy.ops.get_cfg_done(hw);
@@ -2620,19 +2690,23 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
DEBUGOUT("Auto Read Done did not complete\n");
}
}
+#if defined(NAHUM4) || defined(NAHUM5)
/* Dummy read to clear the phy wakeup bit after lcd reset */
+#if defined(NAHUM4) && defined(NAHUM5)
+ if ((hw->mac.type == e1000_pchlan) || (hw->mac.type == e1000_pch2lan))
+#else
if (hw->mac.type == e1000_pchlan)
+#endif
hw->phy.ops.read_reg(hw, BM_WUC, &reg);
+#endif /* defined(NAHUM4) || defined(NAHUM5) */
ret_val = e1000_sw_lcd_config_ich8lan(hw);
if (ret_val)
goto out;
- if (hw->mac.type == e1000_pchlan) {
- ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
- if (ret_val)
- goto out;
- }
+ ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
+ if (ret_val)
+ goto out;
/*
* For PCH, this write will make sure that any noise
* will be detected as a CRC error and be dropped rather than show up
@@ -2648,9 +2722,6 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
kab |= E1000_KABGTXD_BGSQLBIAS;
E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
out:
return ret_val;
}
@@ -3225,17 +3296,14 @@ void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
-
DEBUGFUNC("e1000_cleanup_led_ich8lan");
if (hw->phy.type == e1000_phy_ife)
- ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
- 0);
- else
- E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+ return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ 0);
- return ret_val;
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+ return E1000_SUCCESS;
}
/**
@@ -3246,17 +3314,14 @@ static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
-
DEBUGFUNC("e1000_led_on_ich8lan");
if (hw->phy.type == e1000_phy_ife)
- ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
- else
- E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
- return ret_val;
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+ return E1000_SUCCESS;
}
/**
@@ -3267,18 +3332,14 @@ static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
-
DEBUGFUNC("e1000_led_off_ich8lan");
if (hw->phy.type == e1000_phy_ife)
- ret_val = hw->phy.ops.write_reg(hw,
- IFE_PHY_SPECIAL_CONTROL_LED,
+ return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
- else
- E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
- return ret_val;
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+ return E1000_SUCCESS;
}
/**
diff --git a/sys/dev/e1000/e1000_ich8lan.h b/sys/dev/e1000/e1000_ich8lan.h
index 33398c4..cc8ba16 100644
--- a/sys/dev/e1000/e1000_ich8lan.h
+++ b/sys/dev/e1000/e1000_ich8lan.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -163,6 +163,10 @@
#define LCD_CFG_PHY_ADDR_BIT 0x0020 /* Phy address bit from LCD Config word */
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
/*
diff --git a/sys/dev/e1000/e1000_mac.c b/sys/dev/e1000/e1000_mac.c
index f311565..3c525c1 100644
--- a/sys/dev/e1000/e1000_mac.c
+++ b/sys/dev/e1000/e1000_mac.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -78,7 +78,6 @@ void e1000_init_mac_ops_generic(struct e1000_hw *hw)
mac->ops.update_mc_addr_list = e1000_null_update_mc;
mac->ops.clear_vfta = e1000_null_mac_generic;
mac->ops.write_vfta = e1000_null_write_vfta;
- mac->ops.mta_set = e1000_null_mta_set;
mac->ops.rar_set = e1000_rar_set_generic;
mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
}
@@ -144,16 +143,6 @@ void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b)
}
/**
- * e1000_null_set_mta - No-op function, return void
- * @hw: pointer to the HW structure
- **/
-void e1000_null_mta_set(struct e1000_hw *hw, u32 a)
-{
- DEBUGFUNC("e1000_null_mta_set");
- return;
-}
-
-/**
* e1000_null_rar_set - No-op function, return void
* @hw: pointer to the HW structure
**/
@@ -482,42 +471,6 @@ void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
}
/**
- * e1000_mta_set_generic - Set multicast filter table address
- * @hw: pointer to the HW structure
- * @hash_value: determines the MTA register and bit to set
- *
- * The multicast table address is a register array of 32-bit registers.
- * The hash_value is used to determine what register the bit is in, the
- * current value is read, the new bit is OR'd in and the new value is
- * written back into the register.
- **/
-void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value)
-{
- u32 hash_bit, hash_reg, mta;
-
- DEBUGFUNC("e1000_mta_set_generic");
- /*
- * The MTA is a register array of 32-bit registers. It is
- * treated like an array of (32*mta_reg_count) bits. We want to
- * set bit BitArray[hash_value]. So we figure out what register
- * the bit is in, read it, OR in the new bit, then write
- * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
- * mask to bits 31:5 of the hash value which gives us the
- * register we're modifying. The hash bit within that register
- * is determined by the lower 5 bits of the hash value.
- */
- hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
- hash_bit = hash_value & 0x1F;
-
- mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
-
- mta |= (1 << hash_bit);
-
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
- E1000_WRITE_FLUSH(hw);
-}
-
-/**
* e1000_update_mc_addr_list_generic - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
@@ -560,8 +513,7 @@ void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
* @mc_addr: pointer to a multicast address
*
* Generates a multicast address hash value which is used to determine
- * the multicast filter table array address and new table value. See
- * e1000_mta_set_generic()
+ * the multicast filter table array address and new table value.
**/
u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
{
@@ -774,7 +726,7 @@ s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
- e1000_config_collision_dist_generic(hw);
+ mac->ops.config_collision_dist(hw);
/*
* Configure Flow Control now that Auto-Neg has completed.
@@ -1047,6 +999,7 @@ out:
**/
s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
{
+ struct e1000_mac_info *mac = &hw->mac;
u32 ctrl;
s32 ret_val = E1000_SUCCESS;
@@ -1057,7 +1010,7 @@ s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
/* Take the link out of reset */
ctrl &= ~E1000_CTRL_LRST;
- e1000_config_collision_dist_generic(hw);
+ mac->ops.config_collision_dist(hw);
ret_val = e1000_commit_fc_settings_generic(hw);
if (ret_val)
@@ -1097,8 +1050,7 @@ out:
* @hw: pointer to the HW structure
*
* Configures the collision distance to the default value and is used
- * during link setup. Currently no func pointer exists and all
- * implementations are handled in the generic version of this function.
+ * during link setup.
**/
void e1000_config_collision_dist_generic(struct e1000_hw *hw)
{
@@ -1152,7 +1104,7 @@ s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
* link up if we detect a signal. This will allow us to
* communicate with non-autonegotiating link partners.
*/
- ret_val = hw->mac.ops.check_for_link(hw);
+ ret_val = mac->ops.check_for_link(hw);
if (ret_val) {
DEBUGOUT("Error while checking for link\n");
goto out;
@@ -1209,7 +1161,7 @@ s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
* Rx Flow control is enabled and Tx Flow control is disabled
* by a software over-ride. Since there really isn't a way to
* advertise that we are capable of Rx Pause ONLY, we will
- * advertise that we support both symmetric and asymmetric RX
+ * advertise that we support both symmetric and asymmetric Rx
* PAUSE. Later, we will disable the adapter's ability to send
* PAUSE frames.
*/
@@ -1253,7 +1205,6 @@ out:
**/
s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
u32 fcrtl = 0, fcrth = 0;
DEBUGFUNC("e1000_set_fc_watermarks_generic");
@@ -1280,7 +1231,7 @@ s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
- return ret_val;
+ return E1000_SUCCESS;
}
/**
@@ -1519,7 +1470,7 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
} else {
hw->fc.current_mode = e1000_fc_rx_pause;
DEBUGOUT("Flow Control = "
- "RX PAUSE frames only.\r\n");
+ "Rx PAUSE frames only.\r\n");
}
}
/*
@@ -1535,7 +1486,7 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_tx_pause;
- DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\r\n");
}
/*
* For transmitting PAUSE frames ONLY.
@@ -1550,7 +1501,7 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\r\n");
} else {
/*
* Per the IEEE spec, at this point flow control
@@ -1892,19 +1843,10 @@ out:
**/
s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
-
DEBUGFUNC("e1000_cleanup_led_generic");
- if (hw->mac.ops.cleanup_led != e1000_cleanup_led_generic) {
- ret_val = -E1000_ERR_CONFIG;
- goto out;
- }
-
E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
-
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
@@ -2063,7 +2005,6 @@ s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
if (!timeout) {
DEBUGOUT("Master requests are pending.\n");
ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
- goto out;
}
out:
diff --git a/sys/dev/e1000/e1000_mac.h b/sys/dev/e1000/e1000_mac.h
index b7a5b2c..348d660 100644
--- a/sys/dev/e1000/e1000_mac.h
+++ b/sys/dev/e1000/e1000_mac.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,6 @@ s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
bool e1000_null_mng_mode(struct e1000_hw *hw);
void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
-void e1000_null_mta_set(struct e1000_hw *hw, u32 a);
void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
s32 e1000_blink_led_generic(struct e1000_hw *hw);
s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
@@ -87,7 +86,6 @@ void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
void e1000_clear_vfta_generic(struct e1000_hw *hw);
void e1000_config_collision_dist_generic(struct e1000_hw *hw);
void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
-void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value);
void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
diff --git a/sys/dev/e1000/e1000_manage.c b/sys/dev/e1000/e1000_manage.c
index aa0a0d4..2cd85b3 100644
--- a/sys/dev/e1000/e1000_manage.c
+++ b/sys/dev/e1000/e1000_manage.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -74,7 +74,7 @@ s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
{
u32 hicr;
s32 ret_val = E1000_SUCCESS;
- u8 i;
+ u8 i;
DEBUGFUNC("e1000_mng_enable_host_if_generic");
@@ -112,11 +112,10 @@ out:
**/
bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
{
- u32 fwsm;
+ u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
DEBUGFUNC("e1000_check_mng_mode_generic");
- fwsm = E1000_READ_REG(hw, E1000_FWSM);
return (fwsm & E1000_FWSM_MODE_MASK) ==
(E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
@@ -136,13 +135,14 @@ bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
u32 offset;
s32 ret_val, hdr_csum, csum;
u8 i, len;
- bool tx_filter = TRUE;
DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
+ hw->mac.tx_pkt_filtering = TRUE;
+
/* No manageability, no filtering */
if (!hw->mac.ops.check_mng_mode(hw)) {
- tx_filter = FALSE;
+ hw->mac.tx_pkt_filtering = FALSE;
goto out;
}
@@ -152,7 +152,7 @@ bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
*/
ret_val = hw->mac.ops.mng_enable_host_if(hw);
if (ret_val != E1000_SUCCESS) {
- tx_filter = FALSE;
+ hw->mac.tx_pkt_filtering = FALSE;
goto out;
}
@@ -171,18 +171,19 @@ bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
* the cookie area isn't considered valid, in which case we
* take the safe route of assuming Tx filtering is enabled.
*/
- if (hdr_csum != csum)
- goto out;
- if (hdr->signature != E1000_IAMT_SIGNATURE)
+ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+ hw->mac.tx_pkt_filtering = TRUE;
goto out;
+ }
/* Cookie area is valid, make the final check for filtering. */
- if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
- tx_filter = FALSE;
+ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
+ hw->mac.tx_pkt_filtering = FALSE;
+ goto out;
+ }
out:
- hw->mac.tx_pkt_filtering = tx_filter;
- return tx_filter;
+ return hw->mac.tx_pkt_filtering;
}
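
The rework above replaces the function-local tx_filter flag with the
cached hw->mac.tx_pkt_filtering, which now defaults to TRUE so every
early exit leaves a defined value behind. A minimal standalone sketch
of the resulting control flow, using a simplified stand-in type rather
than the shared-code structs:

    #include <stdbool.h>

    /* Simplified stand-in for struct e1000_mac_info; illustration only. */
    struct mac_state { bool tx_pkt_filtering; };

    static bool
    tx_pkt_filtering_flow(struct mac_state *mac, bool mng_mode,
            bool host_if_ok, bool cookie_valid, bool parsing_set)
    {
            mac->tx_pkt_filtering = true;           /* safe default */
            if (!mng_mode || !host_if_ok) {         /* no mgmt, or host i/f busy */
                    mac->tx_pkt_filtering = false;
                    return (mac->tx_pkt_filtering);
            }
            if (cookie_valid && !parsing_set)       /* cookie says no parsing */
                    mac->tx_pkt_filtering = false;
            /* invalid cookie: keep the safe TRUE default */
            return (mac->tx_pkt_filtering);
    }
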
/**
@@ -342,10 +343,11 @@ out:
}
/**
- * e1000_enable_mng_pass_thru - Enable processing of ARP's
+ * e1000_enable_mng_pass_thru - Check if management passthrough is needed
* @hw: pointer to the HW structure
*
- * Verifies the hardware needs to allow ARPs to be processed by the host.
+ * Verifies the hardware needs to leave interface enabled so that frames can
+ * be directed to and from the management interface.
**/
bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
{
@@ -360,8 +362,7 @@ bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
manc = E1000_READ_REG(hw, E1000_MANC);
- if (!(manc & E1000_MANC_RCV_TCO_EN) ||
- !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
goto out;
if (hw->mac.arc_subsystem_valid) {
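
The relaxed test requests pass-through whenever TCO receive is
enabled, no longer also requiring the MAC address filter bit. A
standalone sketch of the new predicate; the bit value is copied from
the shared-code defines and should be treated as illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define E1000_MANC_RCV_TCO_EN  0x00020000  /* from e1000_defines.h */

    static bool
    mng_pass_thru_needed(uint32_t manc)
    {
            /* the old code also tested E1000_MANC_EN_MAC_ADDR_FILTER */
            return ((manc & E1000_MANC_RCV_TCO_EN) != 0);
    }
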
diff --git a/sys/dev/e1000/e1000_osdep.h b/sys/dev/e1000/e1000_osdep.h
index bfcd4bf..a56b93f 100644
--- a/sys/dev/e1000/e1000_osdep.h
+++ b/sys/dev/e1000/e1000_osdep.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -39,8 +39,6 @@
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
@@ -82,7 +80,7 @@
/* Mutex used in the shared code */
#define E1000_MUTEX struct mtx
#define E1000_MUTEX_INIT(mutex) mtx_init((mutex), #mutex, \
- "E1000 Shared Lock", MTX_DEF)
+ MTX_NETWORK_LOCK, MTX_DEF)
#define E1000_MUTEX_DESTROY(mutex) mtx_destroy(mutex)
#define E1000_MUTEX_LOCK(mutex) mtx_lock(mutex)
#define E1000_MUTEX_TRYLOCK(mutex) mtx_trylock(mutex)
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index 51504e2..dbc422a 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -191,32 +191,9 @@ s32 e1000_get_phy_id(struct e1000_hw *hw)
if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
goto out;
- /*
- * If the PHY ID is still unknown, we may have an 82577
- * without link. We will try again after setting Slow MDIC
- * mode. No harm in trying again in this case since the PHY
- * ID is unknown at this point anyway.
- */
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- goto out;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, TRUE);
- if (ret_val)
- goto out;
- phy->ops.release(hw);
-
retry_count++;
}
out:
- /* Revert to MDIO fast mode, if applicable */
- if (retry_count) {
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- return ret_val;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, FALSE);
- phy->ops.release(hw);
- }
-
return ret_val;
}
@@ -262,6 +239,11 @@ s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
DEBUGFUNC("e1000_read_phy_reg_mdic");
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
/*
* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
@@ -320,6 +302,11 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
DEBUGFUNC("e1000_write_phy_reg_mdic");
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
/*
* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
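
The added guard protects the REGADD field of the MDIC register, which
is only 5 bits wide; MAX_PHY_REG_ADDRESS is 0x1F in the shared code,
so a larger offset would otherwise silently alias another PHY
register. A standalone rendition of the check:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PHY_REG_ADDRESS 0x1F        /* 5-bit MDIC REGADD field */

    static int
    mdic_offset_ok(uint32_t offset)
    {
            if (offset > MAX_PHY_REG_ADDRESS) {
                    printf("PHY Address %u is out of range\n",
                        (unsigned)offset);
                    return (0);     /* caller returns -E1000_ERR_PARAM */
            }
            return (1);
    }
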
@@ -822,18 +809,17 @@ s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
**/
s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
{
- struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
DEBUGFUNC("e1000_copper_link_setup_82577");
- if (phy->reset_disable) {
+ if (hw->phy.reset_disable) {
ret_val = E1000_SUCCESS;
goto out;
}
- if (phy->type == e1000_phy_82580) {
+ if (hw->phy.type == e1000_phy_82580) {
ret_val = hw->phy.ops.reset(hw);
if (ret_val) {
DEBUGOUT("Error resetting the PHY.\n");
@@ -842,7 +828,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
}
/* Enable CRS on TX. This must be set for half-duplex operation. */
- ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+ ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data);
if (ret_val)
goto out;
@@ -851,7 +837,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
/* Enable downshift */
phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
- ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
+ ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data);
out:
return ret_val;
@@ -877,7 +863,7 @@ s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
goto out;
}
- /* Enable CRS on TX. This must be set for half-duplex operation. */
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
goto out;
@@ -3058,38 +3044,6 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
}
/**
- * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
- * @hw: pointer to the HW structure
- * @slow: TRUE for slow mode, FALSE for normal mode
- *
- * Assumes semaphore already acquired.
- **/
-s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
-{
- s32 ret_val = E1000_SUCCESS;
- u16 data = 0;
-
- /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
- hw->phy.addr = 1;
- ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
- (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
- if (ret_val)
- goto out;
-
- ret_val = e1000_write_phy_reg_mdic(hw, BM_CS_CTRL1,
- (0x2180 | (slow << 10)));
- if (ret_val)
- goto out;
-
- /* dummy read when reverting to fast mode - throw away result */
- if (!slow)
- ret_val = e1000_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
-
-out:
- return ret_val;
-}
-
-/**
* __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
@@ -3106,9 +3060,8 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = FALSE;
- DEBUGFUNC("e1000_read_phy_reg_hv");
+ DEBUGFUNC("__e1000_read_phy_reg_hv");
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -3116,16 +3069,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, TRUE);
- if (ret_val)
- goto out;
-
- in_slow_mode = TRUE;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -3162,10 +3105,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, FALSE);
-
if (!locked)
hw->phy.ops.release(hw);
@@ -3217,9 +3156,8 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = FALSE;
- DEBUGFUNC("e1000_write_phy_reg_hv");
+ DEBUGFUNC("__e1000_write_phy_reg_hv");
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -3227,16 +3165,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, TRUE);
- if (ret_val)
- goto out;
-
- in_slow_mode = TRUE;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -3290,10 +3218,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, FALSE);
-
if (!locked)
hw->phy.ops.release(hw);
diff --git a/sys/dev/e1000/e1000_phy.h b/sys/dev/e1000/e1000_phy.h
index 01e91d6..692cbaa 100644
--- a/sys/dev/e1000/e1000_phy.h
+++ b/sys/dev/e1000/e1000_phy.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -96,7 +96,6 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
s32 e1000_check_polarity_82577(struct e1000_hw *hw);
diff --git a/sys/dev/e1000/e1000_regs.h b/sys/dev/e1000/e1000_regs.h
index 9b0bb91..56418a6 100644
--- a/sys/dev/e1000/e1000_regs.h
+++ b/sys/dev/e1000/e1000_regs.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -518,5 +518,4 @@
/* PCIe Parity Status Register */
#define E1000_PCIEERRSTS 0x05BA8
-#define E1000_ERFUSE 0x00000400
#endif
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index f0d24ac..5df665a 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -2487,8 +2487,10 @@ em_local_timer(void *arg)
EM_CORE_LOCK_ASSERT(adapter);
+#ifndef DEVICE_POLLING
taskqueue_enqueue(adapter->tq,
&adapter->rxtx_task);
+#endif
em_update_link_status(adapter);
em_update_stats_counters(adapter);
@@ -3132,10 +3134,10 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities |= IFCAP_POLLING;
#endif
- /* Enable All WOL methods by default */
+ /* Limit WOL to MAGIC; it is unclear the others are used */
if (adapter->wol) {
- ifp->if_capabilities |= IFCAP_WOL;
- ifp->if_capenable |= IFCAP_WOL;
+ ifp->if_capabilities |= IFCAP_WOL_MAGIC;
+ ifp->if_capenable |= IFCAP_WOL_MAGIC;
}
/*
@@ -4346,7 +4348,7 @@ em_free_receive_structures(struct adapter *adapter)
static int
em_rxeof(struct adapter *adapter, int count)
{
- struct ifnet *ifp = adapter->ifp;
+ struct ifnet *ifp = adapter->ifp;
struct mbuf *mp;
u8 status, accept_frame = 0, eop = 0;
u16 len, desc_len, prev_len_adj;
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index 5019e0a..4909b9f 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -54,7 +54,7 @@
#define EM_MIN_TXD 80
#define EM_MAX_TXD_82543 256
#define EM_MAX_TXD 4096
-#define EM_DEFAULT_TXD EM_MAX_TXD_82543
+#define EM_DEFAULT_TXD 1024
/*
* EM_RXD - Maximum number of receive Descriptors
@@ -72,7 +72,7 @@
#define EM_MIN_RXD 80
#define EM_MAX_RXD_82543 256
#define EM_MAX_RXD 4096
-#define EM_DEFAULT_RXD EM_MAX_RXD_82543
+#define EM_DEFAULT_RXD 1024
/*
* EM_TIDV - Transmit Interrupt Delay Value
@@ -135,9 +135,9 @@
#define EM_RADV 64
/*
- * This parameter controls the max duration of transmit watchdog.
+ * This parameter controls the duration of the transmit watchdog.
*/
-#define EM_WATCHDOG (5 * hz)
+#define EM_WATCHDOG (10 * hz)
/*
* This parameter controls when the driver calls the routine to reclaim
@@ -240,6 +240,7 @@
#define ETH_ZLEN 60
#define ETH_ADDR_LEN 6
#define CSUM_OFFLOAD 7 /* Offload bits in mbuf flag */
+#define M_TSO_LEN 66
/*
* 82574 has a nonstandard address for EIAC
@@ -282,131 +283,162 @@ struct em_int_delay_info {
int value; /* Current value in usecs */
};
-/* Our adapter structure */
-struct adapter {
- struct ifnet *ifp;
+/*
+** Driver queue struct: this is the interrupt container
+** for the associated tx and rx ring.
+*/
+struct em_queue {
+ struct adapter *adapter;
+ u32 msix; /* This queue's MSIX vector */
+ u32 eims; /* This queue's EIMS bit */
+ u32 eitr_setting;
+ struct resource *res;
+ void *tag;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ struct task que_task;
+ struct taskqueue *tq;
+ u64 irqs;
+};
+
+/*
+ * Transmit ring: one per queue
+ */
+struct tx_ring {
+ struct adapter *adapter;
+ u32 me;
+ struct mtx tx_mtx;
+ char mtx_name[16];
+ struct em_dma_alloc txdma;
+ struct e1000_tx_desc *tx_base;
+ u32 next_avail_desc;
+ u32 next_to_clean;
+ volatile u16 tx_avail;
+ struct em_tx_buffer *tx_buffers;
#if __FreeBSD_version >= 800000
- struct buf_ring *br;
+ struct buf_ring *br;
#endif
+ bus_dma_tag_t txtag;
+
+ u32 bytes;
+ u32 packets;
+
+ bool watchdog_check;
+ int watchdog_time;
+ u64 no_desc_avail;
+ u64 tx_packets;
+};
+
+/*
+ * Receive ring: one per queue
+ */
+struct rx_ring {
+ struct adapter *adapter;
+ u32 me;
+ struct em_dma_alloc rxdma;
+ union e1000_adv_rx_desc *rx_base;
+ struct lro_ctrl lro;
+ bool lro_enabled;
+ bool hdr_split;
+ bool discard;
+ struct mtx rx_mtx;
+ char mtx_name[16];
+ u32 last_cleaned;
+ u32 next_to_check;
+ struct em_rx_buf *rx_buffers;
+ bus_dma_tag_t rx_htag; /* dma tag for rx head */
+ bus_dmamap_t rx_hspare_map;
+ bus_dma_tag_t rx_ptag; /* dma tag for rx packet */
+ bus_dmamap_t rx_pspare_map;
+ /*
+ * First/last mbuf pointers, for
+ * collecting multisegment RX packets.
+ */
+ struct mbuf *fmp;
+ struct mbuf *lmp;
+
+ /* Temporary stats used by AIM */
+ u32 bytes;
+ u32 packets;
+
+ /* Soft stats */
+ u64 rx_split_packets;
+ u64 rx_discarded;
+ u64 rx_packets;
+ u64 rx_bytes;
+};
+
+struct adapter {
+ struct ifnet *ifp;
struct e1000_hw hw;
- /* FreeBSD operating-system-specific structures. */
struct e1000_osdep osdep;
struct device *dev;
- struct resource *memory;
- struct resource *flash;
- struct resource *msix;
-
- struct resource *ioport;
- int io_rid;
+ struct resource *pci_mem;
+ struct resource *msix_mem;
+ struct resource *res;
+ void *tag;
+ u32 eims_mask;
- /* 82574 may use 3 int vectors */
- struct resource *res[3];
- void *tag[3];
- int rid[3];
+ int linkvec;
+ int link_mask;
+ int link_irq;
struct ifmedia media;
struct callout timer;
- struct callout tx_fifo_timer;
- bool watchdog_check;
- int watchdog_time;
- int msi;
+ int msix; /* total vectors allocated */
int if_flags;
int max_frame_size;
int min_frame_size;
struct mtx core_mtx;
- struct mtx tx_mtx;
- struct mtx rx_mtx;
int em_insert_vlan_header;
-
- /* Task for FAST handling */
- struct task link_task;
struct task rxtx_task;
- struct task rx_task;
- struct task tx_task;
struct taskqueue *tq; /* private task queue */
+ u16 num_queues;
-#if __FreeBSD_version >= 700029
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
- u32 num_vlans;
-#endif
+ u32 num_vlans;
/* Management and WOL features */
- u32 wol;
- bool has_manage;
- bool has_amt;
+ int wol;
+ int has_manage;
/* Info about the board itself */
- uint8_t link_active;
- uint16_t link_speed;
- uint16_t link_duplex;
- uint32_t smartspeed;
- struct em_int_delay_info tx_int_delay;
- struct em_int_delay_info tx_abs_int_delay;
- struct em_int_delay_info rx_int_delay;
- struct em_int_delay_info rx_abs_int_delay;
+ u8 link_active;
+ u16 link_speed;
+ u16 link_duplex;
+ u32 smartspeed;
+
+ /* Interface queues */
+ struct em_queue *queues;
/*
- * Transmit definitions
- *
- * We have an array of num_tx_desc descriptors (handled
- * by the controller) paired with an array of tx_buffers
- * (at tx_buffer_area).
- * The index of the next available descriptor is next_avail_tx_desc.
- * The number of remaining tx_desc is num_tx_desc_avail.
+ * Transmit rings
*/
- struct em_dma_alloc txdma; /* bus_dma glue for tx desc */
- struct e1000_tx_desc *tx_desc_base;
- uint32_t next_avail_tx_desc;
- uint32_t next_tx_to_clean;
- volatile uint16_t num_tx_desc_avail;
- uint16_t num_tx_desc;
- uint16_t last_hw_offload;
- uint32_t txd_cmd;
- struct em_buffer *tx_buffer_area;
- bus_dma_tag_t txtag; /* dma tag for tx */
- uint32_t tx_tso; /* last tx was tso */
+ struct tx_ring *tx_rings;
+ u16 num_tx_desc;
/*
- * Receive definitions
- *
- * we have an array of num_rx_desc rx_desc (handled by the
- * controller), and paired with an array of rx_buffers
- * (at rx_buffer_area).
- * The next pair to check on receive is at offset next_rx_desc_to_check
+ * Receive rings
*/
- struct em_dma_alloc rxdma; /* bus_dma glue for rx desc */
- struct e1000_rx_desc *rx_desc_base;
- uint32_t next_rx_desc_to_check;
- uint32_t rx_buffer_len;
- uint16_t num_rx_desc;
+ struct rx_ring *rx_rings;
+ bool rx_hdr_split;
+ u16 num_rx_desc;
int rx_process_limit;
- struct em_buffer *rx_buffer_area;
- bus_dma_tag_t rxtag;
- bus_dmamap_t rx_sparemap;
-
- /*
- * First/last mbuf pointers, for
- * collecting multisegment RX packets.
- */
- struct mbuf *fmp;
- struct mbuf *lmp;
+ u32 rx_mbuf_sz;
+ u32 rx_mask;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
- unsigned long mbuf_alloc_failed;
- unsigned long mbuf_cluster_failed;
- unsigned long no_tx_desc_avail1;
- unsigned long no_tx_desc_avail2;
+ unsigned long mbuf_defrag_failed;
+ unsigned long mbuf_header_failed;
+ unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long rx_overruns;
- unsigned long rx_irq;
- unsigned long tx_irq;
- unsigned long link_irq;
/* 82547 workaround */
uint32_t tx_fifo_size;
@@ -416,10 +448,9 @@ struct adapter {
uint64_t tx_fifo_wrk_cnt;
uint32_t tx_head_addr;
- /* For 82544 PCIX Workaround */
- boolean_t pcix_82544;
- boolean_t in_detach;
-
+ /* For 82544 PCIX Workaround */
+ boolean_t pcix_82544;
+ boolean_t in_detach;
struct e1000_hw_stats stats;
};
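
The header now mirrors igb's layout: adapter->queues, tx_rings and
rx_rings are parallel arrays of num_queues entries, and each em_queue
pairs one TX ring with one RX ring behind a single interrupt vector.
A minimal sketch of the wiring with simplified stand-in types, not the
driver structs:

    /* Simplified stand-ins; illustration only. */
    struct tx_ring  { int me; };
    struct rx_ring  { int me; };
    struct em_queue { struct tx_ring *txr; struct rx_ring *rxr; int msix; };

    static void
    wire_queues(struct em_queue *queues, struct tx_ring *txr,
            struct rx_ring *rxr, int num_queues)
    {
            for (int i = 0; i < num_queues; i++) {
                    queues[i].txr = &txr[i];    /* one TX ring per queue */
                    queues[i].rxr = &rxr[i];    /* one RX ring per queue */
                    queues[i].msix = i;         /* link uses the next vector */
            }
    }
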
diff --git a/sys/dev/e1000/if_igb.c b/sys/dev/e1000/if_igb.c
index 844d330..517521d 100644
--- a/sys/dev/e1000/if_igb.c
+++ b/sys/dev/e1000/if_igb.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -101,7 +101,7 @@ int igb_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char igb_driver_version[] = "version - 1.8.4";
+char igb_driver_version[] = "version - 1.9.1";
/*********************************************************************
@@ -196,13 +196,20 @@ static int igb_setup_receive_ring(struct rx_ring *);
static void igb_initialize_receive_units(struct adapter *);
static void igb_free_receive_structures(struct adapter *);
static void igb_free_receive_buffers(struct rx_ring *);
+static void igb_free_receive_ring(struct rx_ring *);
static void igb_enable_intr(struct adapter *);
static void igb_disable_intr(struct adapter *);
static void igb_update_stats_counters(struct adapter *);
static bool igb_txeof(struct tx_ring *);
+
+static __inline void igb_rx_discard(struct rx_ring *,
+ union e1000_adv_rx_desc *, int);
+static __inline void igb_rx_input(struct rx_ring *,
+ struct ifnet *, struct mbuf *, u32);
+
static bool igb_rxeof(struct rx_ring *, int);
-static void igb_rx_checksum(u32, struct mbuf *, bool);
+static void igb_rx_checksum(u32, struct mbuf *, u32);
static int igb_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void igb_set_promisc(struct adapter *);
@@ -210,7 +217,7 @@ static void igb_disable_promisc(struct adapter *);
static void igb_set_multi(struct adapter *);
static void igb_print_hw_stats(struct adapter *);
static void igb_update_link_status(struct adapter *);
-static int igb_get_buf(struct rx_ring *, int, int);
+static int igb_get_buf(struct rx_ring *, int, u8);
static void igb_register_vlan(void *, struct ifnet *, u16);
static void igb_unregister_vlan(void *, struct ifnet *, u16);
@@ -236,17 +243,12 @@ static int igb_irq_fast(void *);
static void igb_add_rx_process_limit(struct adapter *, const char *,
const char *, int *, int);
static void igb_handle_rxtx(void *context, int pending);
-static void igb_handle_tx(void *context, int pending);
-static void igb_handle_rx(void *context, int pending);
+static void igb_handle_que(void *context, int pending);
/* These are MSIX only irq handlers */
-static void igb_msix_rx(void *);
-static void igb_msix_tx(void *);
+static void igb_msix_que(void *);
static void igb_msix_link(void *);
-/* Adaptive Interrupt Moderation */
-static void igb_update_aim(struct rx_ring *);
-
/*********************************************************************
* FreeBSD Device Interface Entry Points
*********************************************************************/
@@ -282,25 +284,14 @@ TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
/*
-** These parameters are used in Adaptive
-** Interrupt Moderation. The value is set
-** into EITR and controls the interrupt
-** frequency. A variable static scheme can
-** be created by changing the assigned value
-** of igb_ave_latency to the desired value,
-** and then set igb_enable_aim to FALSE.
-** This will result in all EITR registers
-** getting set to that value statically.
+** AIM: Adaptive Interrupt Moderation
+** which means that the interrupt rate
+** is varied over time based on the
+** traffic for that interrupt vector
*/
static int igb_enable_aim = TRUE;
TUNABLE_INT("hw.igb.enable_aim", &igb_enable_aim);
-static int igb_low_latency = IGB_LOW_LATENCY;
-TUNABLE_INT("hw.igb.low_latency", &igb_low_latency);
-static int igb_ave_latency = IGB_AVE_LATENCY;
-TUNABLE_INT("hw.igb.ave_latency", &igb_ave_latency);
-static int igb_bulk_latency = IGB_BULK_LATENCY;
-TUNABLE_INT("hw.igb.bulk_latency", &igb_bulk_latency);
-
+
/*
* MSIX should be the default for best performance,
* but this allows it to be forced off for testing.
@@ -310,17 +301,16 @@ TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix);
/*
* Header split has seemed to be beneficial in
- * all circumstances tested, so its on by default
- * however this variable will allow it to be disabled
- * for some debug purposes.
+ * many circumstances tested; however, there
+ * have been some stability issues, so the
+ * default is off.
*/
-static bool igb_header_split = TRUE;
+static bool igb_header_split = FALSE;
TUNABLE_INT("hw.igb.hdr_split", &igb_header_split);
/*
-** This will autoconfigure based on the number
-** of CPUs if left at 0. Only a matched pair of
-** TX and RX rings are allowed.
+** This will autoconfigure based on
+** the number of CPUs if left at 0.
*/
static int igb_num_queues = 0;
TUNABLE_INT("hw.igb.num_queues", &igb_num_queues);
@@ -437,21 +427,6 @@ igb_attach(device_t dev)
OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
&igb_enable_aim, 1, "Interrupt Moderation");
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "low_latency", CTLTYPE_INT|CTLFLAG_RW,
- &igb_low_latency, 1, "Low Latency");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "ave_latency", CTLTYPE_INT|CTLFLAG_RW,
- &igb_ave_latency, 1, "Average Latency");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "bulk_latency", CTLTYPE_INT|CTLFLAG_RW,
- &igb_bulk_latency, 1, "Bulk Latency");
-
callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
/* Determine hardware and mac info */
@@ -578,22 +553,6 @@ igb_attach(device_t dev)
/* Now get a good starting state */
igb_reset(adapter);
-#ifdef IGB_IEEE1588
- /*
- ** Setup the timer: IEEE 1588 support
- */
- adapter->cycles.read = igb_read_clock;
- adapter->cycles.mask = (u64)-1;
- adapter->cycles.mult = 1;
- adapter->cycles.shift = IGB_TSYNC_SHIFT;
- E1000_WRITE_REG(&adapter->hw, E1000_TIMINCA, (1<<24) |
- IGB_TSYNC_CYCLE_TIME * IGB_TSYNC_SHIFT);
- E1000_WRITE_REG(&adapter->hw, E1000_SYSTIML, 0x00000000);
- E1000_WRITE_REG(&adapter->hw, E1000_SYSTIMH, 0xFF800000);
-
- // JFV - this is not complete yet
-#endif
-
/* Initialize statistics */
igb_update_stats_counters(adapter);
@@ -860,54 +819,44 @@ igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
struct adapter *adapter = txr->adapter;
struct mbuf *next;
- int err = 0;
-
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- err = drbr_enqueue(ifp, txr->br, m);
- return (err);
- }
+ int err = 0, enq;
- if (m == NULL) /* Called by tasklet */
- goto process;
-
- /* If nothing queued go right to xmit */
- if (drbr_empty(ifp, txr->br)) {
- if (igb_xmit(txr, &m)) {
- if (m && (err = drbr_enqueue(ifp, txr->br, m)) != 0)
- return (err);
- } else {
- /* Success, update stats */
- drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, m);
- /* Set the watchdog */
- txr->watchdog_check = TRUE;
- }
-
- } else if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
- return (err);
+ IGB_TX_LOCK_ASSERT(txr);
-process:
- if (drbr_empty(ifp, txr->br))
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING || adapter->link_active == 0) {
+ if (m != NULL)
+ err = drbr_enqueue(ifp, txr->br, m);
return (err);
+ }
+ enq = 0;
+ if (m == NULL)
+ next = drbr_dequeue(ifp, txr->br);
+ else
+ next = m;
/* Process the queue */
- while (TRUE) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ while (next != NULL) {
+ if ((err = igb_xmit(txr, &next)) != 0) {
+ if (next != NULL)
+ err = drbr_enqueue(ifp, txr->br, next);
break;
- next = drbr_dequeue(ifp, txr->br);
- if (next == NULL)
+ }
+ enq++;
+ drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
+ ETHER_BPF_MTAP(ifp, next);
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
break;
- if (igb_xmit(txr, &next))
+ if (txr->tx_avail <= IGB_TX_OP_THRESHOLD) {
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
- ETHER_BPF_MTAP(ifp, next);
+ }
+ next = drbr_dequeue(ifp, txr->br);
+ }
+ if (enq > 0) {
/* Set the watchdog */
txr->watchdog_check = TRUE;
}
-
- if (txr->tx_avail <= IGB_TX_OP_THRESHOLD)
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-
return (err);
}
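
The multiqueue transmit path is now a single bounded dequeue loop:
frames are merely enqueued while the interface is down or the link is
off; otherwise the loop sends until igb_xmit fails, the free
descriptor count reaches IGB_TX_OP_THRESHOLD (setting OACTIVE), or the
buf_ring drains, and the watchdog is armed only when something was
actually sent. A condensed standalone sketch of that shape; the
callbacks stand in for the drbr and igb_xmit routines, and the
OACTIVE/threshold checks are omitted:

    #include <stdbool.h>
    #include <stddef.h>

    struct pkt;                             /* opaque frame */

    static int
    mq_start_locked(struct pkt *m, bool running, bool link_up,
            int (*enqueue)(struct pkt *), struct pkt *(*dequeue)(void),
            int (*xmit)(struct pkt **), int *enq)
    {
            struct pkt *next;
            int err = 0;

            *enq = 0;
            if (!running || !link_up)
                    return (m != NULL ? enqueue(m) : 0);
            next = (m != NULL) ? m : dequeue();
            while (next != NULL) {
                    if ((err = xmit(&next)) != 0) {
                            if (next != NULL)       /* handed back to us */
                                    err = enqueue(next);
                            break;
                    }
                    (*enq)++;       /* caller arms the watchdog if > 0 */
                    next = dequeue();
            }
            return (err);
    }
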
@@ -970,7 +919,8 @@ igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
igb_init_locked(adapter);
IGB_CORE_UNLOCK(adapter);
}
- arp_ifinit(ifp, ifa);
+ if (!(ifp->if_flags & IFF_NOARP))
+ arp_ifinit(ifp, ifa);
} else
#endif
error = ether_ioctl(ifp, command, data);
@@ -1061,7 +1011,7 @@ igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
reinit = 1;
}
- if ((mask & IFCAP_LRO) && (igb_header_split)) {
+ if (mask & IFCAP_LRO) {
ifp->if_capenable ^= IFCAP_LRO;
reinit = 1;
}
@@ -1071,15 +1021,6 @@ igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
break;
}
-#ifdef IGB_IEEE1588
- /*
- ** IOCTL support for Precision Time (IEEE 1588) Support
- */
- case SIOCSHWTSTAMP:
- error = igb_hwtstamp_ioctl(adapter, ifp);
- break;
-#endif
-
default:
error = ether_ioctl(ifp, command, data);
break;
@@ -1123,6 +1064,8 @@ igb_init_locked(struct adapter *adapter)
igb_reset(adapter);
igb_update_link_status(adapter);
+ E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
+
/* Set hardware offload abilities */
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TXCSUM) {
@@ -1227,44 +1170,37 @@ igb_handle_rxtx(void *context, int pending)
}
static void
-igb_handle_rx(void *context, int pending)
-{
- struct rx_ring *rxr = context;
- struct adapter *adapter = rxr->adapter;
- u32 loop = IGB_MAX_LOOP;
- bool more;
-
- do {
- more = igb_rxeof(rxr, -1);
- } while (loop-- && more);
-
- /* Reenable this interrupt */
- E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxr->eims);
-}
-
-static void
-igb_handle_tx(void *context, int pending)
+igb_handle_que(void *context, int pending)
{
- struct tx_ring *txr = context;
- struct adapter *adapter = txr->adapter;
+ struct igb_queue *que = context;
+ struct adapter *adapter = que->adapter;
+ struct tx_ring *txr = que->txr;
+ struct rx_ring *rxr = que->rxr;
struct ifnet *ifp = adapter->ifp;
u32 loop = IGB_MAX_LOOP;
bool more;
- IGB_TX_LOCK(txr);
+ /* RX first */
do {
- more = igb_txeof(txr);
+ more = igb_rxeof(rxr, -1);
} while (loop-- && more);
+
+ if (IGB_TX_TRYLOCK(txr)) {
+ loop = IGB_MAX_LOOP;
+ do {
+ more = igb_txeof(txr);
+ } while (loop-- && more);
#if __FreeBSD_version >= 800000
- if (!drbr_empty(ifp, txr->br))
igb_mq_start_locked(ifp, txr, NULL);
#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- igb_start_locked(txr, ifp);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ igb_start_locked(txr, ifp);
#endif
- IGB_TX_UNLOCK(txr);
+ IGB_TX_UNLOCK(txr);
+ }
+
/* Reenable this interrupt */
- E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txr->eims);
+ E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
}
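
Design note on the combined handler: RX is always serviced, but TX
cleanup runs only when the TX lock can be taken without blocking, so
the taskqueue never spins against a concurrent transmitter; skipped TX
work is simply retried on the next interrupt. A standalone sketch of
the pattern, with pthreads standing in for the kernel mutexes:

    #include <pthread.h>
    #include <stdbool.h>

    struct que {
            pthread_mutex_t tx_mtx;
            bool (*rxeof)(struct que *);
            bool (*txeof)(struct que *);
    };

    static void
    handle_que(struct que *q)
    {
            int loop = 128;                 /* IGB_MAX_LOOP stand-in */

            while (loop-- && q->rxeof(q))   /* RX first, bounded */
                    ;
            if (pthread_mutex_trylock(&q->tx_mtx) == 0) {
                    loop = 128;
                    while (loop-- && q->txeof(q))
                            ;
                    pthread_mutex_unlock(&q->tx_mtx);
            }
            /* the caller then re-enables the queue's EIMS bit */
    }
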
@@ -1320,56 +1256,82 @@ igb_irq_fast(void *arg)
*
**********************************************************************/
static void
-igb_msix_tx(void *arg)
+igb_msix_que(void *arg)
{
- struct tx_ring *txr = arg;
- struct adapter *adapter = txr->adapter;
- bool more;
+ struct igb_queue *que = arg;
+ struct adapter *adapter = que->adapter;
+ struct tx_ring *txr = que->txr;
+ struct rx_ring *rxr = que->rxr;
+ u32 newitr = 0;
+ bool more_tx, more_rx;
- E1000_WRITE_REG(&adapter->hw, E1000_EIMC, txr->eims);
+ E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims);
+ ++que->irqs;
IGB_TX_LOCK(txr);
- ++txr->tx_irq;
- more = igb_txeof(txr);
+ more_tx = igb_txeof(txr);
IGB_TX_UNLOCK(txr);
- /* Schedule a clean task if needed*/
- if (more)
- taskqueue_enqueue(txr->tq, &txr->tx_task);
- else
- /* Reenable this interrupt */
- E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txr->eims);
- return;
-}
-
-/*********************************************************************
- *
- * MSIX RX Interrupt Service routine
- *
- **********************************************************************/
-
-static void
-igb_msix_rx(void *arg)
-{
- struct rx_ring *rxr = arg;
- struct adapter *adapter = rxr->adapter;
- bool more;
-
- E1000_WRITE_REG(&adapter->hw, E1000_EIMC, rxr->eims);
-
- ++rxr->rx_irq;
- more = igb_rxeof(rxr, adapter->rx_process_limit);
+ more_rx = igb_rxeof(rxr, adapter->rx_process_limit);
- /* Update interrupt rate */
- if (igb_enable_aim == TRUE)
- igb_update_aim(rxr);
-
- /* Schedule another clean */
- if (more)
- taskqueue_enqueue(rxr->tq, &rxr->rx_task);
+ if (igb_enable_aim == FALSE)
+ goto no_calc;
+ /*
+ ** Do Adaptive Interrupt Moderation:
+ ** - Write out last calculated setting
+ ** - Calculate based on average size over
+ ** the last interval.
+ */
+ if (que->eitr_setting)
+ E1000_WRITE_REG(&adapter->hw,
+ E1000_EITR(que->msix), que->eitr_setting);
+
+ que->eitr_setting = 0;
+
+ /* Idle, do nothing */
+ if ((txr->bytes == 0) && (rxr->bytes == 0))
+ goto no_calc;
+
+ /* Use half the default if sub-gig */
+ if (adapter->link_speed != 1000)
+ newitr = IGB_DEFAULT_ITR / 2;
+ else {
+ if ((txr->bytes) && (txr->packets))
+ newitr = txr->bytes/txr->packets;
+ if ((rxr->bytes) && (rxr->packets))
+ newitr = max(newitr,
+ (rxr->bytes / rxr->packets));
+ newitr += 24; /* account for hardware frame, crc */
+ /* set an upper boundary */
+ newitr = min(newitr, 3000);
+ /* Be nice to the mid range */
+ if ((newitr > 300) && (newitr < 1200))
+ newitr = (newitr / 3);
+ else
+ newitr = (newitr / 2);
+ }
+ newitr &= 0x7FFC; /* Mask invalid bits */
+ if (adapter->hw.mac.type == e1000_82575)
+ newitr |= newitr << 16;
+ else
+ newitr |= 0x8000000;
+
+ /* save for next interrupt */
+ que->eitr_setting = newitr;
+
+ /* Reset state */
+ txr->bytes = 0;
+ txr->packets = 0;
+ rxr->bytes = 0;
+ rxr->packets = 0;
+
+no_calc:
+ /* Schedule a clean task if needed */
+ if (more_tx || more_rx)
+ taskqueue_enqueue(que->tq, &que->que_task);
else
/* Reenable this interrupt */
- E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxr->eims);
+ E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
return;
}
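
A worked example of the new moderation math, assuming a gigabit link
whose last interval moved 1,500,000 RX bytes in 1,000 packets: the
average frame is 1500 bytes, plus 24 for framing and CRC gives 1524;
that falls outside the 300-1200 mid range, so it is halved to 762 and
masked with 0x7FFC to 760 before the mode bit is OR'd in. A standalone
rendition of the gigabit branch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    calc_eitr(uint32_t bytes, uint32_t packets, int is_82575)
    {
            uint32_t newitr = 0;

            if (bytes && packets)
                    newitr = bytes / packets;   /* average frame size */
            newitr += 24;                       /* hardware frame, crc */
            if (newitr > 3000)                  /* upper boundary */
                    newitr = 3000;
            if (newitr > 300 && newitr < 1200)  /* be nice to mid range */
                    newitr /= 3;
            else
                    newitr /= 2;
            newitr &= 0x7FFC;                   /* mask invalid bits */
            if (is_82575)
                    newitr |= newitr << 16;
            else
                    newitr |= 0x8000000;        /* as written in the diff */
            return (newitr);
    }

    int
    main(void)
    {
            /* prints EITR = 0x080002f8 (760 | 0x8000000) */
            printf("EITR = 0x%08x\n", calc_eitr(1500000, 1000, 0));
            return (0);
    }
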
@@ -1401,56 +1363,6 @@ spurious:
}
-/*
-** Routine to adjust the RX EITR value based on traffic,
-** its a simple three state model, but seems to help.
-**
-** Note that the three EITR values are tuneable using
-** sysctl in real time. The feature can be effectively
-** nullified by setting them equal.
-*/
-#define BULK_THRESHOLD 10000
-#define AVE_THRESHOLD 1600
-
-static void
-igb_update_aim(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- u32 olditr, newitr;
-
- /* Update interrupt moderation based on traffic */
- olditr = rxr->eitr_setting;
- newitr = olditr;
-
- /* Idle, don't change setting */
- if (rxr->bytes == 0)
- return;
-
- if (olditr == igb_low_latency) {
- if (rxr->bytes > AVE_THRESHOLD)
- newitr = igb_ave_latency;
- } else if (olditr == igb_ave_latency) {
- if (rxr->bytes < AVE_THRESHOLD)
- newitr = igb_low_latency;
- else if (rxr->bytes > BULK_THRESHOLD)
- newitr = igb_bulk_latency;
- } else if (olditr == igb_bulk_latency) {
- if (rxr->bytes < BULK_THRESHOLD)
- newitr = igb_ave_latency;
- }
-
- if (olditr != newitr) {
- /* Change interrupt rate */
- rxr->eitr_setting = newitr;
- E1000_WRITE_REG(&adapter->hw, E1000_EITR(rxr->me),
- newitr | (newitr << 16));
- }
-
- rxr->bytes = 0;
- return;
-}
-
-
/*********************************************************************
*
* Media Ioctl callback
@@ -1683,11 +1595,6 @@ igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
} else if (igb_tx_ctx_setup(txr, m_head))
olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
-#ifdef IGB_IEEE1588
- /* This is changing soon to an mtag detection */
- if (we detect this mbuf has a TSTAMP mtag)
- cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
-#endif
/* Calculate payload length */
olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
<< E1000_ADVTXD_PAYLEN_SHIFT);
@@ -1708,8 +1615,7 @@ igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
seg_len = segs[j].ds_len;
txd->read.buffer_addr = htole64(seg_addr);
- txd->read.cmd_type_len = htole32(
- adapter->txd_cmd | cmd_type_len | seg_len);
+ txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
txd->read.olinfo_status = htole32(olinfo_status);
last = i;
if (++i == adapter->num_tx_desc)
@@ -1732,7 +1638,7 @@ igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
* and Report Status (RS)
*/
txd->read.cmd_type_len |=
- htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
+ htole32(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS);
/*
* Keep track in the first buffer which
* descriptor will be written back
@@ -1824,6 +1730,7 @@ igb_set_multi(struct adapter *adapter)
#else
if_maddr_runlock(ifp);
#endif
+
if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
reg_rctl |= E1000_RCTL_MPE;
@@ -1955,6 +1862,7 @@ igb_stop(void *arg)
{
struct adapter *adapter = arg;
struct ifnet *ifp = adapter->ifp;
+ struct tx_ring *txr = adapter->tx_rings;
IGB_CORE_LOCK_ASSERT(adapter);
@@ -1967,6 +1875,13 @@ igb_stop(void *arg)
/* Tell the stack that the interface is no longer active */
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ /* Unarm watchdog timer. */
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ IGB_TX_LOCK(txr);
+ txr->watchdog_check = FALSE;
+ IGB_TX_UNLOCK(txr);
+ }
+
e1000_reset_hw(&adapter->hw);
E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
}
@@ -2090,107 +2005,56 @@ igb_allocate_legacy(struct adapter *adapter)
/*********************************************************************
*
- * Setup the MSIX Interrupt handlers:
+ * Setup the MSIX Queue Interrupt handlers:
*
**********************************************************************/
static int
igb_allocate_msix(struct adapter *adapter)
{
- device_t dev = adapter->dev;
- struct tx_ring *txr = adapter->tx_rings;
- struct rx_ring *rxr = adapter->rx_rings;
- int error, rid, vector = 0;
-
- /*
- * Setup the interrupt handlers
- */
+ device_t dev = adapter->dev;
+ struct igb_queue *que = adapter->queues;
+ int error, rid, vector = 0;
- /* TX Setup */
- for (int i = 0; i < adapter->num_queues; i++, vector++, txr++) {
- rid = vector +1;
- txr->res = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
- if (txr->res == NULL) {
- device_printf(dev,
- "Unable to allocate bus resource: "
- "MSIX TX Interrupt\n");
- return (ENXIO);
- }
- error = bus_setup_intr(dev, txr->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- igb_msix_tx, txr, &txr->tag);
- if (error) {
- txr->res = NULL;
- device_printf(dev, "Failed to register TX handler\n");
- return (error);
- }
- bus_describe_intr(dev, txr->res, txr->tag, "tx %d", i);
- txr->msix = vector;
- if (adapter->hw.mac.type == e1000_82575)
- txr->eims = E1000_EICR_TX_QUEUE0 << i;
- else
- txr->eims = 1 << vector;
- /*
- ** Bind the msix vector, and thus the
- ** ring to the corresponding cpu.
- */
- if (adapter->num_queues > 1)
- bus_bind_intr(dev, txr->res, i);
- /* Make tasklet for deferred handling - one per queue */
- TASK_INIT(&txr->tx_task, 0, igb_handle_tx, txr);
- txr->tq = taskqueue_create_fast("igb_txq", M_NOWAIT,
- taskqueue_thread_enqueue, &txr->tq);
- taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
- device_get_nameunit(adapter->dev));
- }
- /* RX Setup */
- for (int i = 0; i < adapter->num_queues; i++, vector++, rxr++) {
+ for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
rid = vector +1;
- rxr->res = bus_alloc_resource_any(dev,
+ que->res = bus_alloc_resource_any(dev,
SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
- if (rxr->res == NULL) {
+ if (que->res == NULL) {
device_printf(dev,
"Unable to allocate bus resource: "
- "MSIX RX Interrupt\n");
+ "MSIX Queue Interrupt\n");
return (ENXIO);
}
- error = bus_setup_intr(dev, rxr->res,
+ error = bus_setup_intr(dev, que->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL,
- igb_msix_rx, rxr, &rxr->tag);
+ igb_msix_que, que, &que->tag);
if (error) {
- rxr->res = NULL;
- device_printf(dev, "Failed to register RX handler\n");
+ que->res = NULL;
+ device_printf(dev, "Failed to register Queue handler");
return (error);
}
- bus_describe_intr(dev, rxr->res, rxr->tag, "rx %d", i);
- rxr->msix = vector;
+ que->msix = vector;
if (adapter->hw.mac.type == e1000_82575)
- rxr->eims = E1000_EICR_RX_QUEUE0 << i;
+ que->eims = E1000_EICR_TX_QUEUE0 << i;
else
- rxr->eims = 1 << vector;
- /* Get a mask for local timer */
- adapter->rx_mask |= rxr->eims;
+ que->eims = 1 << vector;
/*
** Bind the msix vector, and thus the
- ** ring to the corresponding cpu.
- ** Notice that this makes an RX/TX pair
- ** bound to each CPU, limited by the MSIX
- ** vectors.
+ ** rings to the corresponding cpu.
*/
if (adapter->num_queues > 1)
- bus_bind_intr(dev, rxr->res, i);
-
- /* Make tasklet for deferred handling - one per queue */
- TASK_INIT(&rxr->rx_task, 0, igb_handle_rx, rxr);
- rxr->tq = taskqueue_create_fast("igb_rxq", M_NOWAIT,
- taskqueue_thread_enqueue, &rxr->tq);
- taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
+ bus_bind_intr(dev, que->res, i);
+ /* Make tasklet for deferred handling */
+ TASK_INIT(&que->que_task, 0, igb_handle_que, que);
+ que->tq = taskqueue_create_fast("igb_que", M_NOWAIT,
+ taskqueue_thread_enqueue, &que->tq);
+ taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
device_get_nameunit(adapter->dev));
}
/* And Link */
- rid = vector +1;
+ rid = vector + 1;
adapter->res = bus_alloc_resource_any(dev,
SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
if (adapter->res == NULL) {
@@ -2202,10 +2066,9 @@ igb_allocate_msix(struct adapter *adapter)
if ((error = bus_setup_intr(dev, adapter->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL,
igb_msix_link, adapter, &adapter->tag)) != 0) {
- device_printf(dev, "Failed to register Link handler\n");
+ device_printf(dev, "Failed to register Link handler");
return (error);
}
- bus_describe_intr(dev, adapter->res, adapter->tag, "link");
adapter->linkvec = vector;
return (0);
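
For a concrete picture of the new layout: with four queues the loop
assigns vectors 0-3 (bus rids 1-4) to the RX/TX pairs and the link
interrupt takes vector 4 (rid 5), five vectors total where the old
split-ring scheme consumed nine. A standalone sketch of the rid
assignment:

    #include <stdio.h>

    int
    main(void)
    {
            int num_queues = 4, vector = 0;

            for (int i = 0; i < num_queues; i++, vector++)
                    printf("queue %d -> vector %d, rid %d\n",
                        i, vector, vector + 1);
            printf("link    -> vector %d, rid %d\n", vector, vector + 1);
            return (0);
    }
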
@@ -2215,10 +2078,10 @@ igb_allocate_msix(struct adapter *adapter)
static void
igb_configure_queues(struct adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- u32 tmp, ivar = 0;
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_queue *que;
+ u32 tmp, ivar = 0;
+ u32 newitr = IGB_DEFAULT_ITR;
/* First turn on RSS capability */
if (adapter->hw.mac.type > e1000_82575)
@@ -2229,35 +2092,34 @@ igb_configure_queues(struct adapter *adapter)
/* Turn on MSIX */
switch (adapter->hw.mac.type) {
case e1000_82580:
- /* RX */
+ /* RX entries */
for (int i = 0; i < adapter->num_queues; i++) {
u32 index = i >> 1;
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
- rxr = &adapter->rx_rings[i];
+ que = &adapter->queues[i];
if (i & 1) {
ivar &= 0xFF00FFFF;
- ivar |= (rxr->msix | E1000_IVAR_VALID) << 16;
+ ivar |= (que->msix | E1000_IVAR_VALID) << 16;
} else {
ivar &= 0xFFFFFF00;
- ivar |= rxr->msix | E1000_IVAR_VALID;
+ ivar |= que->msix | E1000_IVAR_VALID;
}
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
- adapter->eims_mask |= rxr->eims;
}
- /* TX */
+ /* TX entries */
for (int i = 0; i < adapter->num_queues; i++) {
u32 index = i >> 1;
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
- txr = &adapter->tx_rings[i];
+ que = &adapter->queues[i];
if (i & 1) {
ivar &= 0x00FFFFFF;
- ivar |= (txr->msix | E1000_IVAR_VALID) << 24;
+ ivar |= (que->msix | E1000_IVAR_VALID) << 24;
} else {
ivar &= 0xFFFF00FF;
- ivar |= (txr->msix | E1000_IVAR_VALID) << 8;
+ ivar |= (que->msix | E1000_IVAR_VALID) << 8;
}
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
- adapter->eims_mask |= txr->eims;
+ adapter->eims_mask |= que->eims;
}
/* And for the link interrupt */
@@ -2267,35 +2129,35 @@ igb_configure_queues(struct adapter *adapter)
E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
break;
case e1000_82576:
- /* RX */
+ /* RX entries */
for (int i = 0; i < adapter->num_queues; i++) {
u32 index = i & 0x7; /* Each IVAR has two entries */
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
- rxr = &adapter->rx_rings[i];
+ que = &adapter->queues[i];
if (i < 8) {
ivar &= 0xFFFFFF00;
- ivar |= rxr->msix | E1000_IVAR_VALID;
+ ivar |= que->msix | E1000_IVAR_VALID;
} else {
ivar &= 0xFF00FFFF;
- ivar |= (rxr->msix | E1000_IVAR_VALID) << 16;
+ ivar |= (que->msix | E1000_IVAR_VALID) << 16;
}
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
- adapter->eims_mask |= rxr->eims;
+ adapter->eims_mask |= que->eims;
}
- /* TX */
+ /* TX entries */
for (int i = 0; i < adapter->num_queues; i++) {
u32 index = i & 0x7; /* Each IVAR has two entries */
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
- txr = &adapter->tx_rings[i];
+ que = &adapter->queues[i];
if (i < 8) {
ivar &= 0xFFFF00FF;
- ivar |= (txr->msix | E1000_IVAR_VALID) << 8;
+ ivar |= (que->msix | E1000_IVAR_VALID) << 8;
} else {
ivar &= 0x00FFFFFF;
- ivar |= (txr->msix | E1000_IVAR_VALID) << 24;
+ ivar |= (que->msix | E1000_IVAR_VALID) << 24;
}
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
- adapter->eims_mask |= txr->eims;
+ adapter->eims_mask |= que->eims;
}
/* And for the link interrupt */
@@ -2314,20 +2176,15 @@ igb_configure_queues(struct adapter *adapter)
tmp |= E1000_CTRL_EXT_IRCA;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
- /* TX */
+ /* Queues */
for (int i = 0; i < adapter->num_queues; i++) {
- txr = &adapter->tx_rings[i];
- E1000_WRITE_REG(hw, E1000_MSIXBM(txr->msix),
- txr->eims);
- adapter->eims_mask |= txr->eims;
- }
-
- /* RX */
- for (int i = 0; i < adapter->num_queues; i++) {
- rxr = &adapter->rx_rings[i];
- E1000_WRITE_REG(hw, E1000_MSIXBM(rxr->msix),
- rxr->eims);
- adapter->eims_mask |= rxr->eims;
+ que = &adapter->queues[i];
+ tmp = E1000_EICR_RX_QUEUE0 << i;
+ tmp |= E1000_EICR_TX_QUEUE0 << i;
+ que->eims = tmp;
+ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0),
+ i, que->eims);
+ adapter->eims_mask |= que->eims;
}
/* Link */
@@ -2339,6 +2196,17 @@ igb_configure_queues(struct adapter *adapter)
break;
}
+ /* Set the starting interrupt rate */
+ if (hw->mac.type == e1000_82575)
+ newitr |= newitr << 16;
+ else
+ newitr |= 0x8000000;
+
+ for (int i = 0; i < adapter->num_queues; i++) {
+ que = &adapter->queues[i];
+ E1000_WRITE_REG(hw, E1000_EITR(que->msix), newitr);
+ }
+
return;
}
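
To make the 82576 IVAR arithmetic concrete: each 32-bit IVAR register
holds four 8-bit entries (RX and TX for two queues), i & 0x7 selects
the register and i < 8 selects the low or high half, so queue 1's RX
entry lands in bits 7:0 and its TX entry in bits 15:8 of IVAR1. A
standalone sketch of the packing; E1000_IVAR_VALID is 0x80 in the
shared code:

    #include <stdint.h>
    #include <stdio.h>

    #define E1000_IVAR_VALID 0x80

    static uint32_t
    pack_ivar_82576(uint32_t ivar, int i, uint8_t msix)
    {
            uint8_t entry = msix | E1000_IVAR_VALID;

            if (i < 8) {
                    ivar &= 0xFFFFFF00;     /* RX entry, bits 7:0 */
                    ivar |= entry;
                    ivar &= 0xFFFF00FF;     /* TX entry, bits 15:8 */
                    ivar |= (uint32_t)entry << 8;
            } else {
                    ivar &= 0xFF00FFFF;     /* RX entry, bits 23:16 */
                    ivar |= (uint32_t)entry << 16;
                    ivar &= 0x00FFFFFF;     /* TX entry, bits 31:24 */
                    ivar |= (uint32_t)entry << 24;
            }
            return (ivar);
    }

    int
    main(void)
    {
            /* prints 0x00008181: vector 1, valid, in both slots */
            printf("0x%08x\n", pack_ivar_82576(0, 1, 1));
            return (0);
    }
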
@@ -2346,8 +2214,7 @@ igb_configure_queues(struct adapter *adapter)
static void
igb_free_pci_resources(struct adapter *adapter)
{
- struct tx_ring *txr = adapter->tx_rings;
- struct rx_ring *rxr = adapter->rx_rings;
+ struct igb_queue *que = adapter->queues;
device_t dev = adapter->dev;
int rid;
@@ -2363,26 +2230,17 @@ igb_free_pci_resources(struct adapter *adapter)
goto mem;
/*
- * First release all the TX/RX interrupt resources:
+ * First release all the interrupt resources:
*/
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- rid = txr->msix + 1;
- if (txr->tag != NULL) {
- bus_teardown_intr(dev, txr->res, txr->tag);
- txr->tag = NULL;
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ rid = que->msix + 1;
+ if (que->tag != NULL) {
+ bus_teardown_intr(dev, que->res, que->tag);
+ que->tag = NULL;
}
- if (txr->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, txr->res);
- }
-
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- rid = rxr->msix + 1;
- if (rxr->tag != NULL) {
- bus_teardown_intr(dev, rxr->res, rxr->tag);
- rxr->tag = NULL;
- }
- if (rxr->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, rxr->res);
+ if (que->res != NULL)
+ bus_release_resource(dev,
+ SYS_RES_IRQ, rid, que->res);
}
/* Clean the Legacy or Link interrupt last */
@@ -2445,15 +2303,24 @@ igb_setup_msix(struct adapter *adapter)
}
/* Figure out a reasonable auto config value */
- queues = (mp_ncpus > ((msgs-1)/2)) ? (msgs-1)/2 : mp_ncpus;
+ queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
+
+ /* Can have max of 4 queues on 82575 */
+ if (adapter->hw.mac.type == e1000_82575) {
+ if (queues > 4)
+ queues = 4;
+ if (igb_num_queues > 4)
+ igb_num_queues = 4;
+ }
if (igb_num_queues == 0)
igb_num_queues = queues;
+
/*
- ** Two vectors (RX/TX pair) per queue
+ ** One vector (RX/TX pair) per queue
** plus an additional for Link interrupt
*/
- want = (igb_num_queues * 2) + 1;
+ want = igb_num_queues + 1;
if (msgs >= want)
msgs = want;
else {
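
Worked example of the auto-configuration above: an adapter exposing 8
MSIX messages on a 4-core machine yields queues = min(4, 8 - 1) = 4
and want = 4 + 1 = 5 vectors, which fits, where the old
two-vectors-per-queue scheme would have asked for 9; on 82575 the
queue count is additionally clamped to 4. A standalone rendition:

    #include <stdio.h>

    int
    main(void)
    {
            int msgs = 8, mp_ncpus = 4;
            int queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus;
            int want = queues + 1;          /* one per queue, plus link */

            printf("queues=%d vectors=%d (old scheme: %d)\n",
                queues, want, queues * 2 + 1);
            return (0);
    }
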
@@ -2645,8 +2512,8 @@ igb_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_transmit = igb_mq_start;
ifp->if_qflush = igb_qflush;
#endif
- IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
- ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
+ IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
+ ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
IFQ_SET_READY(&ifp->if_snd);
ether_ifattach(ifp, adapter->hw.mac.addr);
@@ -2717,7 +2584,7 @@ igb_dma_malloc(struct adapter *adapter, bus_size_t size,
int error;
error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
- 1, 0, /* alignment, bounds */
+ IGB_DBA_ALIGN, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
@@ -2795,22 +2662,31 @@ static int
igb_allocate_queues(struct adapter *adapter)
{
device_t dev = adapter->dev;
- struct tx_ring *txr;
- struct rx_ring *rxr;
+ struct igb_queue *que = NULL;
+ struct tx_ring *txr = NULL;
+ struct rx_ring *rxr = NULL;
int rsize, tsize, error = E1000_SUCCESS;
int txconf = 0, rxconf = 0;
- /* First allocate the TX ring struct memory */
+ /* First allocate the top level queue structs */
+ if (!(adapter->queues =
+ (struct igb_queue *) malloc(sizeof(struct igb_queue) *
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate queue memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ /* Next allocate the TX ring struct memory */
if (!(adapter->tx_rings =
(struct tx_ring *) malloc(sizeof(struct tx_ring) *
adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate TX ring memory\n");
error = ENOMEM;
- goto fail;
+ goto tx_fail;
}
- txr = adapter->tx_rings;
- /* Next allocate the RX */
+ /* Now allocate the RX */
if (!(adapter->rx_rings =
(struct rx_ring *) malloc(sizeof(struct rx_ring) *
adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
@@ -2818,7 +2694,6 @@ igb_allocate_queues(struct adapter *adapter)
error = ENOMEM;
goto rx_fail;
}
- rxr = adapter->rx_rings;
tsize = roundup2(adapter->num_tx_desc *
sizeof(union e1000_adv_tx_desc), IGB_DBA_ALIGN);
@@ -2896,6 +2771,16 @@ igb_allocate_queues(struct adapter *adapter)
}
}
+ /*
+ ** Finally set up the queue holding structs
+ */
+ for (int i = 0; i < adapter->num_queues; i++) {
+ que = &adapter->queues[i];
+ que->adapter = adapter;
+ que->txr = &adapter->tx_rings[i];
+ que->rxr = &adapter->rx_rings[i];
+ }
+
return (0);
err_rx_desc:
@@ -2906,7 +2791,10 @@ err_tx_desc:
igb_dma_free(adapter, &txr->txdma);
free(adapter->rx_rings, M_DEVBUF);
rx_fail:
+ buf_ring_free(txr->br, M_DEVBUF);
free(adapter->tx_rings, M_DEVBUF);
+tx_fail:
+ free(adapter->queues, M_DEVBUF);
fail:
return (error);
}
@@ -2929,14 +2817,14 @@ igb_allocate_transmit_buffers(struct tx_ring *txr)
/*
* Setup DMA descriptor areas.
*/
- if ((error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),
+ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
IGB_TSO_SIZE, /* maxsize */
IGB_MAX_SCATTER, /* nsegments */
- IGB_TSO_SEG_SIZE, /* maxsegsize */
+ PAGE_SIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
@@ -2983,6 +2871,7 @@ igb_setup_transmit_ring(struct tx_ring *txr)
int i;
/* Clear the old descriptor contents */
+ IGB_TX_LOCK(txr);
bzero((void *)txr->tx_base,
(sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc);
/* Reset indices */
@@ -3008,7 +2897,7 @@ igb_setup_transmit_ring(struct tx_ring *txr)
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
+ IGB_TX_UNLOCK(txr);
}
/*********************************************************************
@@ -3036,54 +2925,50 @@ static void
igb_initialize_transmit_units(struct adapter *adapter)
{
struct tx_ring *txr = adapter->tx_rings;
+ struct e1000_hw *hw = &adapter->hw;
u32 tctl, txdctl;
INIT_DEBUGOUT("igb_initialize_transmit_units: begin");
- /* Setup Transmit Descriptor Base Settings */
- adapter->txd_cmd = E1000_TXD_CMD_IFCS;
-
/* Setup the Tx Descriptor Rings */
for (int i = 0; i < adapter->num_queues; i++, txr++) {
u64 bus_addr = txr->txdma.dma_paddr;
- E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(i),
+ E1000_WRITE_REG(hw, E1000_TDLEN(i),
adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
- E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(i),
+ E1000_WRITE_REG(hw, E1000_TDBAH(i),
(uint32_t)(bus_addr >> 32));
- E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(i),
+ E1000_WRITE_REG(hw, E1000_TDBAL(i),
(uint32_t)bus_addr);
/* Setup the HW Tx Head and Tail descriptor pointers */
- E1000_WRITE_REG(&adapter->hw, E1000_TDT(i), 0);
- E1000_WRITE_REG(&adapter->hw, E1000_TDH(i), 0);
+ E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+ E1000_WRITE_REG(hw, E1000_TDH(i), 0);
HW_DEBUGOUT2("Base = %x, Length = %x\n",
- E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)),
- E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
+ E1000_READ_REG(hw, E1000_TDBAL(i)),
+ E1000_READ_REG(hw, E1000_TDLEN(i)));
txr->watchdog_check = FALSE;
- txdctl = E1000_READ_REG(&adapter->hw, E1000_TXDCTL(i));
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
+ txdctl |= IGB_TX_PTHRESH;
+ txdctl |= IGB_TX_HTHRESH << 8;
+ txdctl |= IGB_TX_WTHRESH << 16;
txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
- E1000_WRITE_REG(&adapter->hw, E1000_TXDCTL(i), txdctl);
-
- /* Default interrupt rate */
- E1000_WRITE_REG(&adapter->hw, E1000_EITR(txr->msix),
- igb_ave_latency);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
}
/* Program the Transmit Control Register */
- tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
tctl &= ~E1000_TCTL_CT;
tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
- e1000_config_collision_dist(&adapter->hw);
+ e1000_config_collision_dist(hw);
/* This write will effectively turn on the transmit unit. */
- E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
-
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
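
The TXDCTL write now seeds the prefetch, host and write-back
thresholds explicitly; they occupy bits 5:0, 13:8 and 20:16 of TXDCTL
respectively, matching the shifts above. The IGB_TX_*THRESH values
live in if_igb.h; the numbers below are illustrative, not the
committed defaults:

    #include <stdint.h>
    #include <stdio.h>

    #define TXDCTL_QUEUE_ENABLE 0x02000000  /* from the shared code */

    int
    main(void)
    {
            uint32_t pthresh = 8, hthresh = 1, wthresh = 16; /* illustrative */
            uint32_t txdctl = 0;

            txdctl |= pthresh;              /* prefetch, bits 5:0 */
            txdctl |= hthresh << 8;         /* host, bits 13:8 */
            txdctl |= wthresh << 16;        /* write-back, bits 20:16 */
            txdctl |= TXDCTL_QUEUE_ENABLE;
            printf("TXDCTL = 0x%08x\n", txdctl);
            return (0);
    }
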
/*********************************************************************
@@ -3162,8 +3047,7 @@ igb_free_transmit_buffers(struct tx_ring *txr)
/**********************************************************************
*
- * Setup work for hardware segmentation offload (TSO) on
- * adapters using advanced tx descriptors
+ * Setup work for hardware segmentation offload (TSO)
*
**********************************************************************/
static boolean_t
@@ -3418,7 +3302,7 @@ igb_txeof(struct tx_ring *txr)
done = last;
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
/* We clean the range of the packet */
@@ -3429,7 +3313,8 @@ igb_txeof(struct tx_ring *txr)
++txr->tx_avail;
if (tx_buffer->m_head) {
- ifp->if_opackets++;
+ txr->bytes +=
+ tx_buffer->m_head->m_pkthdr.len;
bus_dmamap_sync(txr->txtag,
tx_buffer->map,
BUS_DMASYNC_POSTWRITE);
@@ -3448,6 +3333,8 @@ igb_txeof(struct tx_ring *txr)
tx_buffer = &txr->tx_buffers[first];
tx_desc = &txr->tx_base[first];
}
+ ++txr->packets;
+ ++ifp->if_opackets;
/* See if we can continue to the next packet */
last = tx_buffer->next_eop;
if (last != -1) {
@@ -3476,98 +3363,115 @@ igb_txeof(struct tx_ring *txr)
}
}
- return TRUE;
+ return (TRUE);
}
/*********************************************************************
*
- * Refresh mbuf buffers for a range of descriptors
+ * Setup descriptor buffer(s) from system mbuf buffer pools.
+ * i - designates the descriptor index within the ring
+ * clean - tells the function whether to update
+ * the header, the packet buffer, or both.
*
**********************************************************************/
static int
-igb_get_buf(struct rx_ring *rxr, int first, int limit)
+igb_get_buf(struct rx_ring *rxr, int i, u8 clean)
{
struct adapter *adapter = rxr->adapter;
- bus_dma_segment_t seg[2];
struct igb_rx_buf *rxbuf;
struct mbuf *mh, *mp;
+ bus_dma_segment_t hseg[1];
+ bus_dma_segment_t pseg[1];
bus_dmamap_t map;
- int i, nsegs, error;
+ int nsegs, error;
- i = first;
- while (i != limit) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->m_head == NULL) {
- mh = m_gethdr(M_DONTWAIT, MT_DATA);
- if (mh == NULL)
- goto failure;
- } else /* reuse */
- mh = rxbuf->m_head;
-
- mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
-
- if (rxbuf->m_pack == NULL) {
- mp = m_getjcl(M_DONTWAIT, MT_DATA,
- M_PKTHDR, adapter->rx_mbuf_sz);
- if (mp == NULL)
- goto failure;
- mp->m_len = adapter->rx_mbuf_sz;
- mp->m_flags &= ~M_PKTHDR;
- } else { /* reusing */
- mp = rxbuf->m_pack;
- mp->m_len = adapter->rx_mbuf_sz;
- mp->m_flags &= ~M_PKTHDR;
+ rxbuf = &rxr->rx_buffers[i];
+ mh = mp = NULL;
+ if ((clean & IGB_CLEAN_HEADER) != 0) {
+ mh = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (mh == NULL) {
+ adapter->mbuf_header_failed++;
+ return (ENOBUFS);
}
-
+ mh->m_pkthdr.len = mh->m_len = MHLEN;
/*
- ** Need to create a chain for the following
- ** dmamap call at this point.
- */
- mh->m_next = mp;
- mh->m_pkthdr.len = mh->m_len + mp->m_len;
-
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
- rxr->spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0)
- panic("igb_get_buf: dmamap load failure\n");
-
- /* Unload old mapping and update buffer struct */
- if (rxbuf->m_head != NULL)
- bus_dmamap_unload(rxr->rxtag, rxbuf->map);
- map = rxbuf->map;
- rxbuf->map = rxr->spare_map;
- rxr->spare_map = map;
- rxbuf->m_head = mh;
- rxbuf->m_pack = mp;
- bus_dmamap_sync(rxr->rxtag,
- rxbuf->map, BUS_DMASYNC_PREREAD);
+		 * Because the IGB_HDR_BUF size is less than MHLEN,
+		 * and we configure the controller to split headers,
+		 * we can align the mbuf on an ETHER_ALIGN boundary.
+ */
+ m_adj(mh, ETHER_ALIGN);
+ error = bus_dmamap_load_mbuf_sg(rxr->rx_htag,
+ rxr->rx_hspare_map, mh, hseg, &nsegs, 0);
+ if (error != 0) {
+ m_freem(mh);
+ return (error);
+ }
+ mh->m_flags &= ~M_PKTHDR;
+ }
+ if ((clean & IGB_CLEAN_PAYLOAD) != 0) {
+ mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
+ adapter->rx_mbuf_sz);
+ if (mp == NULL) {
+ if (mh != NULL) {
+ adapter->mbuf_packet_failed++;
+ bus_dmamap_unload(rxr->rx_htag,
+ rxbuf->head_map);
+ mh->m_flags |= M_PKTHDR;
+ m_freem(mh);
+ }
+ return (ENOBUFS);
+ }
+ mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+ error = bus_dmamap_load_mbuf_sg(rxr->rx_ptag,
+ rxr->rx_pspare_map, mp, pseg, &nsegs, 0);
+ if (error != 0) {
+ if (mh != NULL) {
+ bus_dmamap_unload(rxr->rx_htag,
+ rxbuf->head_map);
+ mh->m_flags |= M_PKTHDR;
+ m_freem(mh);
+ }
+ m_freem(mp);
+ return (error);
+ }
+ mp->m_flags &= ~M_PKTHDR;
+ }
- /* Update descriptor */
- rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
- rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
+	/* New DMA maps are loaded; unload the maps of the received buffers. */
+ if ((clean & IGB_CLEAN_HEADER) != 0 && rxbuf->m_head != NULL) {
+ bus_dmamap_sync(rxr->rx_htag, rxbuf->head_map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rx_htag, rxbuf->head_map);
+ }
+ if ((clean & IGB_CLEAN_PAYLOAD) != 0 && rxbuf->m_pack != NULL) {
+ bus_dmamap_sync(rxr->rx_ptag, rxbuf->pack_map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rx_ptag, rxbuf->pack_map);
+ }
- /* Calculate next index */
- if (++i == adapter->num_rx_desc)
- i = 0;
+ /* Reflect loaded dmamaps. */
+ if ((clean & IGB_CLEAN_HEADER) != 0) {
+ map = rxbuf->head_map;
+ rxbuf->head_map = rxr->rx_hspare_map;
+ rxr->rx_hspare_map = map;
+ rxbuf->m_head = mh;
+ bus_dmamap_sync(rxr->rx_htag, rxbuf->head_map,
+ BUS_DMASYNC_PREREAD);
+ rxr->rx_base[i].read.hdr_addr = htole64(hseg[0].ds_addr);
+ }
+ if ((clean & IGB_CLEAN_PAYLOAD) != 0) {
+ map = rxbuf->pack_map;
+ rxbuf->pack_map = rxr->rx_pspare_map;
+ rxr->rx_pspare_map = map;
+ rxbuf->m_pack = mp;
+ bus_dmamap_sync(rxr->rx_ptag, rxbuf->pack_map,
+ BUS_DMASYNC_PREREAD);
+ rxr->rx_base[i].read.pkt_addr = htole64(pseg[0].ds_addr);
}
return (0);
-
-failure:
- /*
-	** It's unfortunate to have to panic, but
- ** with the new design I see no other
- ** graceful failure mode, this is ONLY
- ** called in the RX clean path, and the
- ** old mbuf has been used, it MUST be
- ** refreshed. This should be avoided by
- ** proper configuration. -jfv
- */
- panic("igb_get_buf: ENOBUFS\n");
}
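The refresh path above only swaps maps after the replacement mbuf has been loaded into the ring's spare DMA map, so a failed load leaves the old, still-mapped buffer in place. A minimal self-contained sketch of that swap discipline (all names here are hypothetical stand-ins, not driver API):

#include <errno.h>
#include <stddef.h>

struct slot { void *map; void *buf; };
struct ring { struct slot slots[256]; void *spare_map; };

/* Stand-in for bus_dmamap_load_mbuf_sg(); assume it returns 0 on success. */
static int dma_load(void *map, void *buf) { (void)map; (void)buf; return (0); }

static int
refresh_slot(struct ring *r, int i, void *newbuf)
{
	void *tmp;

	/* Load into the spare map first: on failure, slot i stays valid. */
	if (dma_load(r->spare_map, newbuf) != 0)
		return (ENOBUFS);
	/* Success: exchange the freshly loaded spare map into the slot. */
	tmp = r->slots[i].map;
	r->slots[i].map = r->spare_map;
	r->spare_map = tmp;
	r->slots[i].buf = newbuf;
	return (0);
}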
/*********************************************************************
@@ -3595,43 +3499,70 @@ igb_allocate_receive_buffers(struct rx_ring *rxr)
goto fail;
}
- /*
-	** The tag is made to accommodate the largest buffer size
-	** with packet split (hence the two segments), even though
-	** it may not always use this.
- */
- if ((error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),
+ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- MJUM16BYTES, /* maxsize */
- 2, /* nsegments */
- MJUMPAGESIZE, /* maxsegsize */
+ MSIZE, /* maxsize */
+ 1, /* nsegments */
+ MSIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
- &rxr->rxtag))) {
+ &rxr->rx_htag))) {
device_printf(dev, "Unable to create RX DMA tag\n");
goto fail;
}
- /* Create the spare map (used by getbuf) */
- error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
- &rxr->spare_map);
+ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MJUMPAGESIZE, /* maxsize */
+ 1, /* nsegments */
+ MJUMPAGESIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &rxr->rx_ptag))) {
+ device_printf(dev, "Unable to create RX payload DMA tag\n");
+ goto fail;
+ }
+
+ /* Create the spare maps (used by getbuf) */
+ error = bus_dmamap_create(rxr->rx_htag, BUS_DMA_NOWAIT,
+ &rxr->rx_hspare_map);
if (error) {
device_printf(dev,
"%s: bus_dmamap_create header spare failed: %d\n",
__func__, error);
goto fail;
}
+ error = bus_dmamap_create(rxr->rx_ptag, BUS_DMA_NOWAIT,
+ &rxr->rx_pspare_map);
+ if (error) {
+ device_printf(dev,
+ "%s: bus_dmamap_create packet spare failed: %d\n",
+ __func__, error);
+ goto fail;
+ }
- for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
+ for (i = 0; i < adapter->num_rx_desc; i++) {
rxbuf = &rxr->rx_buffers[i];
- error = bus_dmamap_create(rxr->rxtag,
- BUS_DMA_NOWAIT, &rxbuf->map);
+ error = bus_dmamap_create(rxr->rx_htag,
+ BUS_DMA_NOWAIT, &rxbuf->head_map);
if (error) {
- device_printf(dev, "Unable to create RX DMA maps\n");
+ device_printf(dev,
+ "Unable to create RX head DMA maps\n");
+ goto fail;
+ }
+ error = bus_dmamap_create(rxr->rx_ptag,
+ BUS_DMA_NOWAIT, &rxbuf->pack_map);
+ if (error) {
+ device_printf(dev,
+ "Unable to create RX packet DMA maps\n");
goto fail;
}
}
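With the old two-segment tag split into two single-segment tags (MSIZE for headers, MJUMPAGESIZE for payloads), the tag-creation boilerplate now appears twice; a hypothetical helper, assuming the kernel bus_dma headers are available, could factor it out:

/* Hypothetical helper (not in the driver): the two RX tags differ only in size. */
static int
igb_create_rx_tag(device_t dev, bus_size_t size, bus_dma_tag_t *tag)
{
	return (bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    tag));
}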
@@ -3644,6 +3575,37 @@ fail:
return (error);
}
+
+static void
+igb_free_receive_ring(struct rx_ring *rxr)
+{
+ struct adapter *adapter;
+ struct igb_rx_buf *rxbuf;
+ int i;
+
+ adapter = rxr->adapter;
+ for (i = 0; i < adapter->num_rx_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
+ if (rxbuf->m_head != NULL) {
+ bus_dmamap_sync(rxr->rx_htag, rxbuf->head_map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rx_htag, rxbuf->head_map);
+ rxbuf->m_head->m_flags |= M_PKTHDR;
+ m_freem(rxbuf->m_head);
+ }
+ if (rxbuf->m_pack != NULL) {
+ bus_dmamap_sync(rxr->rx_ptag, rxbuf->pack_map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rx_ptag, rxbuf->pack_map);
+ rxbuf->m_pack->m_flags |= M_PKTHDR;
+ m_freem(rxbuf->m_pack);
+ }
+ rxbuf->m_head = NULL;
+ rxbuf->m_pack = NULL;
+ }
+}
+
+
/*********************************************************************
*
* Initialize a receive ring and its buffers.
@@ -3655,16 +3617,15 @@ igb_setup_receive_ring(struct rx_ring *rxr)
struct adapter *adapter;
struct ifnet *ifp;
device_t dev;
- struct igb_rx_buf *rxbuf;
- bus_dma_segment_t seg[2];
struct lro_ctrl *lro = &rxr->lro;
- int rsize, nsegs, error = 0;
+ int j, rsize, error = 0;
adapter = rxr->adapter;
dev = adapter->dev;
ifp = adapter->ifp;
/* Clear the ring contents */
+ IGB_RX_LOCK(rxr);
rsize = roundup2(adapter->num_rx_desc *
sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
bzero((void *)rxr->rx_base, rsize);
@@ -3672,50 +3633,12 @@ igb_setup_receive_ring(struct rx_ring *rxr)
/*
** Free current RX buffer structures and their mbufs
*/
- for (int i = 0; i < adapter->num_rx_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- bus_dmamap_sync(rxr->rxtag, rxbuf->map,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxtag, rxbuf->map);
- if (rxbuf->m_head) {
- rxbuf->m_head->m_next = rxbuf->m_pack;
- m_freem(rxbuf->m_head);
- }
- rxbuf->m_head = NULL;
- rxbuf->m_pack = NULL;
- }
+ igb_free_receive_ring(rxr);
- /* Now replenish the mbufs */
- for (int j = 0; j != adapter->num_rx_desc; ++j) {
-
- rxbuf = &rxr->rx_buffers[j];
- rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
- if (rxbuf->m_head == NULL)
- panic("RX ring hdr initialization failed!\n");
- rxbuf->m_head->m_len = MHLEN;
- rxbuf->m_head->m_flags |= M_PKTHDR;
- rxbuf->m_head->m_pkthdr.len = rxbuf->m_head->m_len;
-
- rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
- M_PKTHDR, adapter->rx_mbuf_sz);
- if (rxbuf->m_pack == NULL)
- panic("RX ring pkt initialization failed!\n");
- rxbuf->m_pack->m_len = adapter->rx_mbuf_sz;
- rxbuf->m_head->m_next = rxbuf->m_pack;
- rxbuf->m_head->m_pkthdr.len += rxbuf->m_pack->m_len;
-
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
- rxbuf->map, rxbuf->m_head, seg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0)
- panic("RX ring dma initialization failed!\n");
- bus_dmamap_sync(rxr->rxtag,
- rxbuf->map, BUS_DMASYNC_PREREAD);
-
- /* Update descriptor */
- rxr->rx_base[j].read.hdr_addr = htole64(seg[0].ds_addr);
- rxr->rx_base[j].read.pkt_addr = htole64(seg[1].ds_addr);
+ /* Now replenish the ring mbufs */
+ for (j = 0; j < adapter->num_rx_desc; j++) {
+ if ((error = igb_get_buf(rxr, j, IGB_CLEAN_BOTH)) != 0)
+ goto fail;
}
/* Setup our descriptor indices */
@@ -3728,6 +3651,10 @@ igb_setup_receive_ring(struct rx_ring *rxr)
else
ifp->if_capabilities &= ~IFCAP_LRO;
+ rxr->fmp = NULL;
+ rxr->lmp = NULL;
+ rxr->discard = FALSE;
+
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -3737,34 +3664,24 @@ igb_setup_receive_ring(struct rx_ring *rxr)
** is enabled, since so often they
	** are undesirable in similar setups.
*/
- if ((ifp->if_capenable & IFCAP_LRO) && (rxr->hdr_split)) {
+ if (ifp->if_capenable & IFCAP_LRO) {
int err = tcp_lro_init(lro);
- if (err)
- panic("LRO Initialization failed!\n");
+ if (err) {
+ device_printf(dev, "LRO Initialization failed!\n");
+ goto fail;
+ }
INIT_DEBUGOUT("RX LRO Initialized\n");
rxr->lro_enabled = TRUE;
lro->ifp = adapter->ifp;
}
+ IGB_RX_UNLOCK(rxr);
return (0);
-#if 0
+
fail:
- /*
- * We need to clean up any buffers allocated
- * so far, 'j' is the failing index.
- */
- for (int i = 0; i < j; i++) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->rxtag, rxbuf->map,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxtag, rxbuf->map);
- m_freem(rxbuf->m_head);
- rxbuf->m_head = NULL;
- }
- }
- return (ENOBUFS);
-#endif
+ igb_free_receive_ring(rxr);
+ IGB_RX_UNLOCK(rxr);
+ return (error);
}
/*********************************************************************
@@ -3792,17 +3709,8 @@ fail:
*/
rxr = adapter->rx_rings;
for (--i; i > 0; i--, rxr++) {
- for (j = 0; j < adapter->num_rx_desc; j++) {
- struct igb_rx_buf *rxbuf;
- rxbuf = &rxr->rx_buffers[j];
- if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->rxtag, rxbuf->map,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxtag, rxbuf->map);
- m_freem(rxbuf->m_head);
- rxbuf->m_head = NULL;
- }
- }
+		igb_free_receive_ring(rxr);
}
return (ENOBUFS);
@@ -3818,6 +3726,7 @@ igb_initialize_receive_units(struct adapter *adapter)
{
struct rx_ring *rxr = adapter->rx_rings;
struct ifnet *ifp = adapter->ifp;
+ struct e1000_hw *hw = &adapter->hw;
u32 rctl, rxcsum, psize, srrctl = 0;
INIT_DEBUGOUT("igb_initialize_receive_unit: begin");
@@ -3826,8 +3735,8 @@ igb_initialize_receive_units(struct adapter *adapter)
* Make sure receives are disabled while setting
* up the descriptor ring
*/
- rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
- E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
/*
** Set up for header split
@@ -3864,32 +3773,27 @@ igb_initialize_receive_units(struct adapter *adapter)
u64 bus_addr = rxr->rxdma.dma_paddr;
u32 rxdctl;
- E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(i),
+ E1000_WRITE_REG(hw, E1000_RDLEN(i),
adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
- E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(i),
+ E1000_WRITE_REG(hw, E1000_RDBAH(i),
(uint32_t)(bus_addr >> 32));
- E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(i),
+ E1000_WRITE_REG(hw, E1000_RDBAL(i),
(uint32_t)bus_addr);
- E1000_WRITE_REG(&adapter->hw, E1000_SRRCTL(i), srrctl);
+ E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
/* Enable this Queue */
- rxdctl = E1000_READ_REG(&adapter->hw, E1000_RXDCTL(i));
+ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
rxdctl &= 0xFFF00000;
rxdctl |= IGB_RX_PTHRESH;
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
- E1000_WRITE_REG(&adapter->hw, E1000_RXDCTL(i), rxdctl);
-
- /* Initial RX interrupt moderation */
- rxr->eitr_setting = igb_ave_latency;
- E1000_WRITE_REG(&adapter->hw,
- E1000_EITR(rxr->msix), igb_ave_latency);
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
}
/*
** Setup for RX MultiQueue
*/
- rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
+ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
	if (adapter->num_queues > 1) {
u32 random[10], mrqc, shift = 0;
union igb_reta {
@@ -3905,13 +3809,13 @@ igb_initialize_receive_units(struct adapter *adapter)
reta.bytes[i & 3] =
(i % adapter->num_queues) << shift;
if ((i & 3) == 3)
- E1000_WRITE_REG(&adapter->hw,
+ E1000_WRITE_REG(hw,
E1000_RETA(i >> 2), reta.dword);
}
/* Now fill in hash table */
mrqc = E1000_MRQC_ENABLE_RSS_4Q;
for (int i = 0; i < 10; i++)
- E1000_WRITE_REG_ARRAY(&adapter->hw,
+ E1000_WRITE_REG_ARRAY(hw,
E1000_RSSRK(0), i, random[i]);
mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
@@ -3923,7 +3827,7 @@ igb_initialize_receive_units(struct adapter *adapter)
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
- E1000_WRITE_REG(&adapter->hw, E1000_MRQC, mrqc);
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
/*
** NOTE: Receive Full-Packet Checksum Offload
@@ -3934,7 +3838,7 @@ igb_initialize_receive_units(struct adapter *adapter)
rxcsum |= E1000_RXCSUM_PCSD;
#if __FreeBSD_version >= 800000
/* For SCTP Offload */
- if ((adapter->hw.mac.type == e1000_82576)
+ if ((hw->mac.type == e1000_82576)
&& (ifp->if_capenable & IFCAP_RXCSUM))
rxcsum |= E1000_RXCSUM_CRCOFL;
#endif
@@ -3949,29 +3853,30 @@ igb_initialize_receive_units(struct adapter *adapter)
} else
rxcsum &= ~E1000_RXCSUM_TUOFL;
}
- E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
+ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Setup the Receive Control Register */
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-
+ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ /* Strip CRC bytes. */
+ rctl |= E1000_RCTL_SECRC;
/* Make sure VLAN Filters are off */
rctl &= ~E1000_RCTL_VFE;
/* Don't store bad packets */
rctl &= ~E1000_RCTL_SBP;
/* Enable Receives */
- E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
/*
* Setup the HW Rx Head and Tail Descriptor Pointers
* - needs to be after enable
*/
for (int i = 0; i < adapter->num_queues; i++) {
- E1000_WRITE_REG(&adapter->hw, E1000_RDH(i), 0);
- E1000_WRITE_REG(&adapter->hw, E1000_RDT(i),
+ E1000_WRITE_REG(hw, E1000_RDH(i), 0);
+ E1000_WRITE_REG(hw, E1000_RDT(i),
adapter->num_rx_desc - 1);
}
return;
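The RSS setup above packs four one-byte queue assignments into each 32-bit RETA register, writing one register for every four table entries. A standalone sketch of the same arithmetic (hypothetical harness; the shift value is a per-MAC detail and is assumed here to be zero):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int num_queues = 4, shift = 0;	/* some MAC types use a nonzero shift */
	union { uint32_t dword; uint8_t bytes[4]; } reta;

	for (int i = 0; i < 128; i++) {
		reta.bytes[i & 3] = (i % num_queues) << shift;
		if ((i & 3) == 3)	/* one register per four entries */
			printf("RETA[%d] = 0x%08x\n", i >> 2, reta.dword);
	}
	return (0);
}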
@@ -4006,47 +3911,111 @@ static void
igb_free_receive_buffers(struct rx_ring *rxr)
{
struct adapter *adapter = rxr->adapter;
- struct igb_rx_buf *rx_buffer;
+ struct igb_rx_buf *rxbuf;
+ int i;
INIT_DEBUGOUT("free_receive_structures: begin");
- if (rxr->spare_map) {
- bus_dmamap_destroy(rxr->rxtag, rxr->spare_map);
- rxr->spare_map = NULL;
+ if (rxr->rx_hspare_map != NULL) {
+ bus_dmamap_destroy(rxr->rx_htag, rxr->rx_hspare_map);
+ rxr->rx_hspare_map = NULL;
+ }
+
+	if (rxr->rx_pspare_map != NULL) {
+ bus_dmamap_destroy(rxr->rx_ptag, rxr->rx_pspare_map);
+ rxr->rx_pspare_map = NULL;
}
/* Cleanup any existing buffers */
if (rxr->rx_buffers != NULL) {
- rx_buffer = &rxr->rx_buffers[0];
- for (int i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
- if (rx_buffer->m_head != NULL) {
- bus_dmamap_sync(rxr->rxtag, rx_buffer->map,
+ for (i = 0; i < adapter->num_rx_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
+ if (rxbuf->m_head != NULL) {
+ bus_dmamap_sync(rxr->rx_htag, rxbuf->head_map,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxtag,
- rx_buffer->map);
- m_freem(rx_buffer->m_head);
- rx_buffer->m_head = NULL;
- } else if (rx_buffer->map != NULL)
- bus_dmamap_unload(rxr->rxtag,
- rx_buffer->map);
- if (rx_buffer->map != NULL) {
- bus_dmamap_destroy(rxr->rxtag,
- rx_buffer->map);
- rx_buffer->map = NULL;
+ bus_dmamap_unload(rxr->rx_htag,
+ rxbuf->head_map);
+ rxbuf->m_head->m_flags |= M_PKTHDR;
+ m_freem(rxbuf->m_head);
+ }
+ if (rxbuf->m_pack != NULL) {
+ bus_dmamap_sync(rxr->rx_ptag, rxbuf->pack_map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rx_ptag,
+ rxbuf->pack_map);
+ rxbuf->m_pack->m_flags |= M_PKTHDR;
+ m_freem(rxbuf->m_pack);
}
+ rxbuf->m_head = NULL;
+ rxbuf->m_pack = NULL;
+ if (rxbuf->head_map != NULL) {
+ bus_dmamap_destroy(rxr->rx_htag,
+ rxbuf->head_map);
+ rxbuf->head_map = NULL;
+ }
+ if (rxbuf->pack_map != NULL) {
+ bus_dmamap_destroy(rxr->rx_ptag,
+ rxbuf->pack_map);
+ rxbuf->pack_map = NULL;
+ }
+ }
+ if (rxr->rx_buffers != NULL) {
+ free(rxr->rx_buffers, M_DEVBUF);
+ rxr->rx_buffers = NULL;
}
}
- if (rxr->rx_buffers != NULL) {
- free(rxr->rx_buffers, M_DEVBUF);
- rxr->rx_buffers = NULL;
+ if (rxr->rx_htag != NULL) {
+ bus_dma_tag_destroy(rxr->rx_htag);
+ rxr->rx_htag = NULL;
+ }
+ if (rxr->rx_ptag != NULL) {
+ bus_dma_tag_destroy(rxr->rx_ptag);
+ rxr->rx_ptag = NULL;
}
+}
- if (rxr->rxtag != NULL) {
- bus_dma_tag_destroy(rxr->rxtag);
- rxr->rxtag = NULL;
+static __inline void
+igb_rx_discard(struct rx_ring *rxr, union e1000_adv_rx_desc *cur, int i)
+{
+
+ if (rxr->fmp != NULL) {
+ rxr->fmp->m_flags |= M_PKTHDR;
+ m_freem(rxr->fmp);
+ rxr->fmp = NULL;
+ rxr->lmp = NULL;
}
}
+
+static __inline void
+igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
+{
+
+ /*
+	 * At the moment LRO is only for IPv4/TCP packets, and the TCP
+	 * checksum of the packet should have been computed by hardware.
+	 * Also, the frame should not carry a VLAN tag in its ethernet
+	 * header; with hardware tagging enabled the tag has already
+	 * been stripped by the time the frame reaches this point.
+ */
+ if (rxr->lro_enabled &&
+ (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
+ (ptype & (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP)) ==
+ (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP) &&
+ (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+ (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+		/*
+		 * Fall through to if_input below if there are
+		 * no LRO resources or the LRO enqueue fails.
+		 */
+ if (rxr->lro.lro_cnt != 0)
+ if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
+ return;
+ }
+ (*ifp->if_input)(ifp, m);
+}
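Restated as a predicate, and assuming the driver's u32/bool typedefs plus the E1000_RXDADV_PKTTYPE_* definitions from the e1000 headers, the eligibility test above reduces to: only hardware-checksummed IPv4/TCP frames with no ethertype-filter match are offered to tcp_lro_rx(). A sketch, not driver code:

static int
igb_lro_eligible(u32 ptype, bool lro_on, bool hwtag_on, bool csum_ok)
{
	const u32 want = E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP;

	return (lro_on && hwtag_on && csum_ok &&
	    (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & want) == want);
}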
+
/*********************************************************************
*
* This routine executes in interrupt context. It replenishes
@@ -4058,258 +4027,196 @@ igb_free_receive_buffers(struct rx_ring *rxr)
*
* Return TRUE if more to clean, FALSE otherwise
*********************************************************************/
-
static bool
igb_rxeof(struct rx_ring *rxr, int count)
{
- struct adapter *adapter = rxr->adapter;
- struct ifnet *ifp = adapter->ifp;
+ struct adapter *adapter = rxr->adapter;
+ struct ifnet *ifp = adapter->ifp;
struct lro_ctrl *lro = &rxr->lro;
struct lro_entry *queued;
- int i, processed = 0;
- u32 staterr;
+ int i, prog = 0;
+ u32 ptype, staterr = 0;
union e1000_adv_rx_desc *cur;
-
IGB_RX_LOCK(rxr);
- i = rxr->next_to_check;
- cur = &rxr->rx_base[i];
- staterr = cur->wb.upper.status_error;
-
- if (!(staterr & E1000_RXD_STAT_DD)) {
- IGB_RX_UNLOCK(rxr);
- return FALSE;
- }
-
- /* Sync the ring */
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_POSTREAD);
-
- while ((staterr & E1000_RXD_STAT_DD) && (count != 0) &&
- (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- struct mbuf *sendmp, *mh, *mp, *nh, *np;
- struct igb_rx_buf *nxtbuf;
- u32 ptype;
- u16 hlen, plen, hdr, nextp, vtag;
- bool accept_frame, eop, sctp = FALSE;
-
- accept_frame = TRUE;
- hlen = plen = nextp = 0;
- sendmp = mh = mp = nh = np = NULL;
-
- ptype = (le32toh(cur->wb.lower.lo_dword.data) &
- IGB_PKTTYPE_MASK);
- if (((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0) &&
- ((ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0))
- sctp = TRUE;
-
- /* Sync the buffers */
- bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[i].map,
- BUS_DMASYNC_POSTREAD);
- mh = rxr->rx_buffers[i].m_head;
- mp = rxr->rx_buffers[i].m_pack;
- vtag = le16toh(cur->wb.upper.vlan);
- eop = ((staterr & E1000_RXD_STAT_EOP) != 0);
-
- /* Get the next descriptor we will process */
- if (!eop) {
- nextp = i + 1;
- if (nextp == adapter->num_rx_desc)
- nextp = 0;
- nxtbuf = &rxr->rx_buffers[nextp];
- prefetch(nxtbuf);
+ /* Main clean loop */
+ for (i = rxr->next_to_check; count > 0; prog++) {
+ struct mbuf *sendmp, *mh, *mp;
+ u16 hlen, plen, hdr, vtag;
+ bool eop = FALSE;
+ u8 dopayload;
+
+ /* Sync the ring. */
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ cur = &rxr->rx_base[i];
+ staterr = le32toh(cur->wb.upper.status_error);
+ if ((staterr & E1000_RXD_STAT_DD) == 0)
+ break;
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+ count--;
+ sendmp = mh = mp = NULL;
+ cur->wb.upper.status_error = 0;
+ plen = le16toh(cur->wb.upper.length);
+ ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
+ hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
+ eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);
+
+ /* Make sure all segments of a bad packet are discarded */
+ if (((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0) ||
+ (rxr->discard)) {
+ ifp->if_ierrors++;
+ ++rxr->rx_discarded;
+ if (!eop) /* Catch subsequent segs */
+ rxr->discard = TRUE;
+ else
+ rxr->discard = FALSE;
+ igb_rx_discard(rxr, cur, i);
+ goto next_desc;
}
/*
** The way the hardware is configured to
** split, it will ONLY use the header buffer
** when header split is enabled, otherwise we
- ** get legacy behavior, ie, both header and
- ** payload are DMA'd into JUST the payload buffer.
- **
- ** Rather than using the fmp/lmp global pointers
- ** we now keep the head of a packet chain in the
- ** m_nextpkt pointer and pass this along from one
- ** descriptor to the next, until we get EOP.
+		** get normal behavior, i.e., both header and
+ ** payload are DMA'd into the payload buffer.
**
+		** The fmp test catches the case where a packet
+		** spans multiple descriptors; in that case only
+		** the first header is valid.
*/
- if ((rxr->hdr_split) && (mh->m_nextpkt == NULL)) {
- hdr = le16toh(cur->
- wb.lower.lo_dword.hs_rss.hdr_info);
+ if (rxr->hdr_split && rxr->fmp == NULL) {
hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
E1000_RXDADV_HDRBUFLEN_SHIFT;
if (hlen > IGB_HDR_BUF)
hlen = IGB_HDR_BUF;
- plen = le16toh(cur->wb.upper.length);
+ /* Handle the header mbuf */
+ mh = rxr->rx_buffers[i].m_head;
mh->m_len = hlen;
- mh->m_flags |= M_PKTHDR;
- mh->m_next = NULL;
- mh->m_pkthdr.len = mh->m_len;
- /* Null this so getbuf replenishes */
- rxr->rx_buffers[i].m_head = NULL;
+ dopayload = IGB_CLEAN_HEADER;
/*
** Get the payload length, this
** could be zero if its a small
** packet.
*/
- if (plen) {
+ if (plen > 0) {
+ mp = rxr->rx_buffers[i].m_pack;
mp->m_len = plen;
- mp->m_next = NULL;
- mp->m_flags &= ~M_PKTHDR;
mh->m_next = mp;
- mh->m_pkthdr.len += mp->m_len;
- /* Null this so getbuf replenishes */
- rxr->rx_buffers[i].m_pack = NULL;
+ dopayload = IGB_CLEAN_BOTH;
rxr->rx_split_packets++;
}
- /* Setup the forward chain */
- if (eop == 0) {
- nh = rxr->rx_buffers[nextp].m_head;
- np = rxr->rx_buffers[nextp].m_pack;
- nh->m_nextpkt = mh;
- if (plen)
- mp->m_next = np;
- else
- mh->m_next = np;
- } else {
- sendmp = mh;
- if (staterr & E1000_RXD_STAT_VP) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- }
} else {
/*
** Either no header split, or a
** secondary piece of a fragmented
- ** packet.
+ ** split packet.
*/
- mp->m_len = le16toh(cur->wb.upper.length);
- rxr->rx_buffers[i].m_pack = NULL;
- /* stored head pointer */
- sendmp = mh->m_nextpkt;
- if (sendmp != NULL) {
- sendmp->m_pkthdr.len += mp->m_len;
- sendmp->m_nextpkt = NULL;
- } else {
- /* first desc of a non-ps chain */
- sendmp = mp;
- sendmp->m_flags |= M_PKTHDR;
- sendmp->m_pkthdr.len = mp->m_len;
- if (staterr & E1000_RXD_STAT_VP) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- }
- /* Carry head forward */
- if (eop == 0) {
- nh = rxr->rx_buffers[nextp].m_head;
- np = rxr->rx_buffers[nextp].m_pack;
- nh->m_nextpkt = sendmp;
- mp->m_next = np;
- sendmp = NULL;
- }
- mh->m_nextpkt = NULL;
+ mh = rxr->rx_buffers[i].m_pack;
+ mh->m_len = plen;
+ dopayload = IGB_CLEAN_PAYLOAD;
}
- if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)
- accept_frame = FALSE;
-
- if (accept_frame) {
- ++processed;
- if (eop) {
- --count;
- sendmp->m_pkthdr.rcvif = ifp;
- ifp->if_ipackets++;
- rxr->rx_packets++;
- /* capture data for AIM */
- rxr->bytes += sendmp->m_pkthdr.len;
- rxr->rx_bytes += rxr->bytes;
- if (ifp->if_capenable & IFCAP_RXCSUM)
- igb_rx_checksum(staterr, sendmp, sctp);
- else
- sendmp->m_pkthdr.csum_flags = 0;
-#if __FreeBSD_version >= 800000
- /* Get the RSS Hash */
- sendmp->m_pkthdr.flowid =
- le32toh(cur->wb.lower.hi_dword.rss);
- sendmp->m_flags |= M_FLOWID;
-#endif
+ /*
+ ** get_buf will overwrite the writeback
+ ** descriptor so save the VLAN tag now.
+ */
+ vtag = le16toh(cur->wb.upper.vlan);
+ if (igb_get_buf(rxr, i, dopayload) != 0) {
+ ifp->if_iqdrops++;
+ /*
+			 * We've dropped a frame due to lack of resources,
+			 * so we should drop the remaining segments of a
+			 * multi-segmented frame until we encounter EOP.
+			 */
+			if ((staterr & E1000_RXD_STAT_EOP) == 0)
+ rxr->discard = TRUE;
+ igb_rx_discard(rxr, cur, i);
+ goto next_desc;
+ }
+
+ /* Initial frame - setup */
+ if (rxr->fmp == NULL) {
+ mh->m_pkthdr.len = mh->m_len;
+ /* Store the first mbuf */
+ rxr->fmp = mh;
+ rxr->lmp = mh;
+ if (mp != NULL) {
+ /* Add payload if split */
+ mh->m_pkthdr.len += mp->m_len;
+ rxr->lmp = mh->m_next;
}
} else {
- ifp->if_ierrors++;
- /* Reuse loaded DMA map and just update mbuf chain */
- mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- mh->m_next = NULL;
- mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
- mp->m_data = mp->m_ext.ext_buf;
- if (mp->m_next) { /* Free chain */
- sendmp = mp->m_next;
- m_free(sendmp);
+ /* Chain mbuf's together */
+ rxr->lmp->m_next = mh;
+ rxr->lmp = rxr->lmp->m_next;
+ rxr->fmp->m_pkthdr.len += mh->m_len;
+ }
+
+ if (eop) {
+ rxr->fmp->m_pkthdr.rcvif = ifp;
+ ifp->if_ipackets++;
+ rxr->rx_packets++;
+ /* capture data for AIM */
+ rxr->packets++;
+ rxr->bytes += rxr->fmp->m_pkthdr.len;
+ rxr->rx_bytes += rxr->fmp->m_pkthdr.len;
+
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ igb_rx_checksum(staterr, rxr->fmp, ptype);
+ /* XXX igb(4) always strips VLAN. */
+ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (staterr & E1000_RXD_STAT_VP) != 0) {
+ rxr->fmp->m_pkthdr.ether_vtag = vtag;
+ rxr->fmp->m_flags |= M_VLANTAG;
}
- mp->m_next = NULL;
- if (adapter->max_frame_size <=
- (MCLBYTES - ETHER_ALIGN))
- m_adj(mp, ETHER_ALIGN);
- sendmp = NULL;
+#if __FreeBSD_version >= 800000
+ rxr->fmp->m_pkthdr.flowid = curcpu;
+ rxr->fmp->m_flags |= M_FLOWID;
+#endif
+ sendmp = rxr->fmp;
+ /* Make sure to set M_PKTHDR. */
+ sendmp->m_flags |= M_PKTHDR;
+ rxr->fmp = NULL;
+ rxr->lmp = NULL;
}
+
+next_desc:
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- rxr->last_cleaned = i; /* for updating tail */
+ rxr->last_cleaned = i; /* For updating tail */
+
+ /* Advance our pointers to the next descriptor. */
if (++i == adapter->num_rx_desc)
i = 0;
- /* Prefetch next descriptor */
- cur = &rxr->rx_base[i];
- prefetch(cur);
-
+
/*
- ** Now send up to the stack,
- ** note that the RX lock is
- ** held thru this call.
+	** Note that we hold the RX lock through
+	** the following call, so this ring's
+	** next_to_check will not change.
*/
- if (sendmp != NULL) {
- /*
- ** Send to the stack if:
- ** - LRO not enabled, or
- ** - no LRO resources, or
- ** - lro enqueue fails
- */
- if ((!rxr->lro_enabled) ||
- ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0))))
- (*ifp->if_input)(ifp, sendmp);
- }
-
- /* Replenish every 4 max */
- if (processed == 4) {
- igb_get_buf(rxr, rxr->next_to_check, i);
- processed = 0;
- E1000_WRITE_REG(&adapter->hw,
- E1000_RDT(rxr->me), rxr->last_cleaned);
- rxr->next_to_check = i;
- }
-
- /* Next iteration */
- staterr = cur->wb.upper.status_error;
+ if (sendmp != NULL)
+ igb_rx_input(rxr, ifp, sendmp, ptype);
}
- /* Replenish remaining */
- if (processed != 0) {
- igb_get_buf(rxr, rxr->next_to_check, i);
- processed = 0;
- E1000_WRITE_REG(&adapter->hw,
- E1000_RDT(rxr->me), rxr->last_cleaned);
+ if (prog == 0) {
+ IGB_RX_UNLOCK(rxr);
+ return (FALSE);
}
rxr->next_to_check = i;
+ /* Advance the E1000's Receive Queue "Tail Pointer". */
+ E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), rxr->last_cleaned);
+
/*
* Flush any outstanding LRO work
*/
- while (!SLIST_EMPTY(&lro->lro_active)) {
- queued = SLIST_FIRST(&lro->lro_active);
+ while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
SLIST_REMOVE_HEAD(&lro->lro_active, next);
tcp_lro_flush(lro, queued);
}
@@ -4317,15 +4224,13 @@ igb_rxeof(struct rx_ring *rxr, int count)
IGB_RX_UNLOCK(rxr);
/*
- ** Leaving with more to clean?
- ** then schedule another interrupt.
+	** Is there still more cleaning to do?
+	** If so, schedule another interrupt.
*/
- if (staterr & E1000_RXD_STAT_DD) {
- E1000_WRITE_REG(&adapter->hw, E1000_EICS, rxr->eims);
- return TRUE;
- }
+ if ((staterr & E1000_RXD_STAT_DD) != 0)
+ return (TRUE);
- return FALSE;
+ return (FALSE);
}
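The fmp/lmp pair replaces the old m_nextpkt trick: fmp anchors the frame being assembled and lmp tracks its tail, so each non-EOP segment is appended in constant time. A minimal sketch of the pattern with hypothetical types:

#include <stddef.h>

struct seg { struct seg *next; int len; };
struct reasm { struct seg *fmp, *lmp; int pktlen; };

static void
append_seg(struct reasm *r, struct seg *s, int eop)
{
	s->next = NULL;
	if (r->fmp == NULL) {
		/* First segment: it heads and ends the chain. */
		r->fmp = r->lmp = s;
		r->pktlen = s->len;
	} else {
		/* Append at the remembered tail, O(1). */
		r->lmp->next = s;
		r->lmp = s;
		r->pktlen += s->len;
	}
	if (eop) {
		/* Deliver r->fmp here, then reset for the next frame. */
		r->fmp = r->lmp = NULL;
	}
}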
/*********************************************************************
@@ -4336,10 +4241,11 @@ igb_rxeof(struct rx_ring *rxr, int count)
*
*********************************************************************/
static void
-igb_rx_checksum(u32 staterr, struct mbuf *mp, bool sctp)
+igb_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
{
u16 status = (u16)staterr;
u8 errors = (u8) (staterr >> 24);
+ int sctp;
/* Ignore Checksum bit is set */
if (status & E1000_RXD_STAT_IXSM) {
@@ -4347,6 +4253,11 @@ igb_rx_checksum(u32 staterr, struct mbuf *mp, bool sctp)
return;
}
+ if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
+ (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)
+ sctp = 1;
+ else
+ sctp = 0;
if (status & E1000_RXD_STAT_IPCS) {
/* Did it pass? */
if (!(errors & E1000_RXD_ERR_IPE)) {
@@ -4366,7 +4277,7 @@ igb_rx_checksum(u32 staterr, struct mbuf *mp, bool sctp)
/* Did it pass? */
if (!(errors & E1000_RXD_ERR_TCPE)) {
mp->m_pkthdr.csum_flags |= type;
- if (sctp == FALSE)
+ if (sctp == 0)
mp->m_pkthdr.csum_data = htons(0xffff);
}
}
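For reference, the decomposition above takes the low 16 bits of the write-back word as status flags and the top byte as error flags; a tiny standalone check using a hypothetical descriptor value:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* Hypothetical write-back word: DD|EOP|TCPCS|IPCS set, no errors. */
	uint32_t staterr = 0x00000063;
	uint16_t status = (uint16_t)staterr;	   /* low 16 bits */
	uint8_t errors = (uint8_t)(staterr >> 24); /* top 8 bits */

	assert(status == 0x0063 && errors == 0);
	return (0);
}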
@@ -4383,7 +4294,7 @@ igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
struct adapter *adapter = ifp->if_softc;
u32 index, bit;
- if (ifp->if_softc != arg) /* Not our event */
+ if (ifp->if_softc != arg) /* Not our event */
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
@@ -4407,7 +4318,7 @@ igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
struct adapter *adapter = ifp->if_softc;
u32 index, bit;
- if (ifp->if_softc != arg)
+ if (ifp->if_softc != arg)
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
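Both VLAN handlers end up toggling one bit in the 4096-entry VLAN filter table. The indexing itself falls outside this hunk, but the usual e1000 scheme splits the 12-bit tag into a word index and a bit position, roughly as follows (a sketch, assuming the conventional layout):

#include <stdint.h>

/* Sketch: locate a 12-bit VLAN tag in a 128 x 32-bit VFTA array. */
static void
vfta_locate(uint16_t vtag, uint32_t *index, uint32_t *bit)
{
	*index = (vtag >> 5) & 0x7F;	/* which 32-bit VFTA word */
	*bit = vtag & 0x1F;		/* which bit within that word */
}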
@@ -4714,6 +4625,7 @@ static void
igb_print_debug_info(struct adapter *adapter)
{
device_t dev = adapter->dev;
+ struct igb_queue *que = adapter->queues;
struct rx_ring *rxr = adapter->rx_rings;
struct tx_ring *txr = adapter->tx_rings;
uint8_t *hw_addr = adapter->hw.hw_addr;
@@ -4736,16 +4648,19 @@ igb_print_debug_info(struct adapter *adapter)
adapter->hw.fc.high_water,
adapter->hw.fc.low_water);
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- device_printf(dev, "Queue(%d) tdh = %d, tdt = %d\n", i,
+ for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
+ device_printf(dev, "Queue(%d) tdh = %d, tdt = %d ", i,
E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
E1000_READ_REG(&adapter->hw, E1000_TDT(i)));
+ device_printf(dev, "rdh = %d, rdt = %d\n",
+ E1000_READ_REG(&adapter->hw, E1000_RDH(i)),
+ E1000_READ_REG(&adapter->hw, E1000_RDT(i)));
device_printf(dev, "TX(%d) no descriptors avail event = %lld\n",
txr->me, (long long)txr->no_desc_avail);
- device_printf(dev, "TX(%d) MSIX IRQ Handled = %lld\n", txr->me,
- (long long)txr->tx_irq);
- device_printf(dev, "TX(%d) Packets sent = %lld\n", txr->me,
- (long long)txr->tx_packets);
+ device_printf(dev, "TX(%d) Packets sent = %lld\n",
+ txr->me, (long long)txr->tx_packets);
+ device_printf(dev, "RX(%d) Packets received = %lld ",
+ rxr->me, (long long)rxr->rx_packets);
}
for (int i = 0; i < adapter->num_queues; i++, rxr++) {
@@ -4755,20 +4670,20 @@ igb_print_debug_info(struct adapter *adapter)
E1000_READ_REG(&adapter->hw, E1000_RDT(i)));
device_printf(dev, "RX(%d) Packets received = %lld\n", rxr->me,
(long long)rxr->rx_packets);
- device_printf(dev, "RX(%d) Split Packets = %lld\n", rxr->me,
+ device_printf(dev, " Split Packets = %lld ",
(long long)rxr->rx_split_packets);
- device_printf(dev, "RX(%d) Byte count = %lld\n", rxr->me,
+ device_printf(dev, " Byte count = %lld\n",
(long long)rxr->rx_bytes);
- device_printf(dev, "RX(%d) MSIX IRQ Handled = %lld\n", rxr->me,
- (long long)rxr->rx_irq);
- device_printf(dev,"RX(%d) LRO Queued= %d\n",
- rxr->me, lro->lro_queued);
- device_printf(dev,"RX(%d) LRO Flushed= %d\n",
- rxr->me, lro->lro_flushed);
+		device_printf(dev, "RX(%d) LRO Queued = %d ",
+		    i, lro->lro_queued);
+		device_printf(dev, "LRO Flushed = %d\n", lro->lro_flushed);
}
- device_printf(dev, "LINK MSIX IRQ Handled = %u\n", adapter->link_irq);
+ for (int i = 0; i < adapter->num_queues; i++, que++)
+		device_printf(dev, "QUE(%d) IRQs = %lld\n",
+ i, (long long)que->irqs);
+ device_printf(dev, "LINK MSIX IRQ Handled = %u\n", adapter->link_irq);
device_printf(dev, "Mbuf defrag failed = %ld\n",
adapter->mbuf_defrag_failed);
device_printf(dev, "Std mbuf header failed = %ld\n",
@@ -4921,173 +4836,3 @@ igb_add_rx_process_limit(struct adapter *adapter, const char *name,
SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
-
-#ifdef IGB_IEEE1588
-/*
-** igb_hwtstamp_ioctl - control hardware time stamping
-**
-** Outgoing time stamping can be enabled and disabled. Play nice and
-** disable it when requested, although it shouldn't cause any overhead
-** when no packet needs it. At most one packet in the queue may be
-** marked for time stamping, otherwise it would be impossible to tell
-** for sure to which packet the hardware time stamp belongs.
-**
-** Incoming time stamping has to be configured via the hardware
-** filters. Not all combinations are supported, in particular event
-** type has to be specified. Matching the kind of event packet is
-** not supported, with the exception of "all V2 events regardless of
-** level 2 or 4".
-**
-*/
-static int
-igb_hwtstamp_ioctl(struct adapter *adapter, struct ifreq *ifr)
-{
- struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_ctrl *config;
- u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
- u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
- u32 tsync_rx_ctl_type = 0;
- u32 tsync_rx_cfg = 0;
- int is_l4 = 0;
- int is_l2 = 0;
- u16 port = 319; /* PTP */
- u32 regval;
-
- config = (struct hwtstamp_ctrl *) ifr->ifr_data;
-
- /* reserved for future extensions */
- if (config->flags)
- return (EINVAL);
-
- switch (config->tx_type) {
- case HWTSTAMP_TX_OFF:
- tsync_tx_ctl_bit = 0;
- break;
- case HWTSTAMP_TX_ON:
- tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
- break;
- default:
- return (ERANGE);
- }
-
- switch (config->rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- tsync_rx_ctl_bit = 0;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_ALL:
- /*
- * register TSYNCRXCFG must be set, therefore it is not
- * possible to time stamp both Sync and Delay_Req messages
- * => fall back to time stamping all packets
- */
- tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
- config->rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
- is_l4 = 1;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
- is_l4 = 1;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
- is_l2 = 1;
- is_l4 = 1;
- config->rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
- is_l2 = 1;
- is_l4 = 1;
- config->rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
- config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- is_l2 = 1;
- break;
- default:
- return -ERANGE;
- }
-
- /* enable/disable TX */
- regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
- regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
- E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval);
-
- /* enable/disable RX, define which PTP packets are time stamped */
- regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
- regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
- regval = (regval & ~0xE) | tsync_rx_ctl_type;
- E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval);
- E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg);
-
- /*
- * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
- * (Ethertype to filter on)
- * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
- * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
- */
- E1000_WRITE_REG(hw, E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
-
- /* L4 Queue Filter[0]: only filter by source and destination port */
- E1000_WRITE_REG(hw, E1000_SPQF0, htons(port));
- E1000_WRITE_REG(hw, E1000_IMIREXT(0), is_l4 ?
- ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
- E1000_WRITE_REG(hw, E1000_IMIR(0), is_l4 ?
- (htons(port)
- | (0<<16) /* immediate interrupt disabled */
- | 0 /* (1<<17) bit cleared: do not bypass
- destination port check */)
- : 0);
- E1000_WRITE_REG(hw, E1000_FTQF0, is_l4 ?
- (0x11 /* UDP */
- | (1<<15) /* VF not compared */
- | (1<<27) /* Enable Timestamping */
- | (7<<28) /* only source port filter enabled,
- source/target address and protocol
- masked */)
- : ((1<<15) | (15<<28) /* all mask bits set = filter not
- enabled */));
-
- wrfl();
-
- adapter->hwtstamp_ctrl = config;
-
- /* clear TX/RX time stamp registers, just to be sure */
- regval = E1000_READ_REG(hw, E1000_TXSTMPH);
- regval = E1000_READ_REG(hw, E1000_RXSTMPH);
-
- return (error);
-}
-
-/*
-** igb_read_clock - read raw cycle counter (to be used by time counter)
-*/
-static cycle_t igb_read_clock(const struct cyclecounter *tc)
-{
- struct igb_adapter *adapter =
- container_of(tc, struct igb_adapter, cycles);
- struct e1000_hw *hw = &adapter->hw;
- u64 stamp;
-
- stamp = E1000_READ_REG(hw, E1000_SYSTIML);
- stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIMH) << 32ULL;
-
- return (stamp);
-}
-
-#endif /* IGB_IEEE1588 */
diff --git a/sys/dev/e1000/if_igb.h b/sys/dev/e1000/if_igb.h
index ce5b726..f14d734 100644
--- a/sys/dev/e1000/if_igb.h
+++ b/sys/dev/e1000/if_igb.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define IGB_MIN_TXD 80
-#define IGB_DEFAULT_TXD 1024
+#define IGB_DEFAULT_TXD 256
#define IGB_MAX_TXD 4096
/*
@@ -63,7 +63,7 @@
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define IGB_MIN_RXD 80
-#define IGB_DEFAULT_RXD 1024
+#define IGB_DEFAULT_RXD 256
#define IGB_MAX_RXD 4096
/*
@@ -173,10 +173,16 @@
#define IGB_SMARTSPEED_DOWNSHIFT 3
#define IGB_SMARTSPEED_MAX 15
#define IGB_MAX_LOOP 10
-#define IGB_RX_PTHRESH 16
+
+#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8)
#define IGB_RX_HTHRESH 8
#define IGB_RX_WTHRESH 1
+#define IGB_TX_PTHRESH 8
+#define IGB_TX_HTHRESH 1
+#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
+ adapter->msix_mem) ? 1 : 16)
+
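Note that the threshold macros above expand to expressions referencing local variables ('hw', and 'adapter' for the TX case), so they can only be used inside functions where those names are in scope; a hypothetical use site, assuming the driver's u32 typedef:

/* Hypothetical caller: 'hw' must be a local struct e1000_hw pointer. */
static u32
igb_rxdctl_thresholds(struct e1000_hw *hw)
{
	return (IGB_RX_PTHRESH | (IGB_RX_HTHRESH << 8) |
	    (IGB_RX_WTHRESH << 16));
}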
#define MAX_NUM_MULTICAST_ADDRESSES 128
#define PCI_ANY_ID (~0U)
#define ETHER_ALIGN 2
@@ -236,12 +242,16 @@
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif
-/*
- * Interrupt Moderation parameters
- */
-#define IGB_LOW_LATENCY 128
-#define IGB_AVE_LATENCY 450
-#define IGB_BULK_LATENCY 1200
+/* Define the starting Interrupt rate per Queue */
+#define IGB_INTS_PER_SEC 8000
+#define IGB_DEFAULT_ITR		(1000000000/(IGB_INTS_PER_SEC * 256))
+
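The default works out to 1000000000 / (8000 * 256) = 488. Assuming the divisor of 256 reflects the EITR register's 256 ns granularity, that is about 125 us between interrupts, i.e. roughly IGB_INTS_PER_SEC interrupts per second per queue; a quick standalone check of the arithmetic:

#include <stdio.h>

int
main(void)
{
	unsigned itr = 1000000000u / (8000u * 256u);

	printf("ITR = %u (~%u ns between interrupts)\n", itr, itr * 256u);
	return (0);
}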
+
+/* Header split codes for get_buf */
+#define IGB_CLEAN_HEADER 0x01
+#define IGB_CLEAN_PAYLOAD 0x02
+#define IGB_CLEAN_BOTH (IGB_CLEAN_HEADER | IGB_CLEAN_PAYLOAD)
+
#define IGB_LINK_ITR 2000
/* Precision Time Sync (IEEE 1588) defines */
@@ -264,19 +274,33 @@ struct igb_dma_alloc {
/*
- * Transmit ring: one per tx queue
+** Driver queue struct: this is the interrupt container
+** for the associated tx and rx ring.
+*/
+struct igb_queue {
+ struct adapter *adapter;
+ u32 msix; /* This queue's MSIX vector */
+ u32 eims; /* This queue's EIMS bit */
+ u32 eitr_setting;
+ struct resource *res;
+ void *tag;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ struct task que_task;
+ struct taskqueue *tq;
+ u64 irqs;
+};
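Because each igb_queue owns a single MSIX vector covering its RX/TX pair, a handler bound to que->res would service both rings; a sketch of the expected shape (hypothetical body):

static void
igb_msix_que(void *arg)
{
	struct igb_queue *que = arg;

	++que->irqs;
	/* Clean que->txr, then que->rxr; re-enable que->eims when done. */
}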
+
+/*
+ * Transmit ring: one per queue
*/
struct tx_ring {
struct adapter *adapter;
u32 me;
- u32 msix; /* This ring's MSIX vector */
- u32 eims; /* This ring's EIMS bit */
struct mtx tx_mtx;
char mtx_name[16];
- struct igb_dma_alloc txdma; /* bus_dma glue for tx desc */
+ struct igb_dma_alloc txdma;
struct e1000_tx_desc *tx_base;
- struct task tx_task; /* cleanup tasklet */
- struct taskqueue *tq;
u32 next_avail_desc;
u32 next_to_clean;
volatile u16 tx_avail;
@@ -284,39 +308,38 @@ struct tx_ring {
#if __FreeBSD_version >= 800000
struct buf_ring *br;
#endif
- bus_dma_tag_t txtag; /* dma tag for tx */
- struct resource *res;
- void *tag;
+ bus_dma_tag_t txtag;
+
+ u32 bytes;
+ u32 packets;
bool watchdog_check;
int watchdog_time;
u64 no_desc_avail;
- u64 tx_irq;
u64 tx_packets;
};
/*
- * Receive ring: one per rx queue
+ * Receive ring: one per queue
*/
struct rx_ring {
struct adapter *adapter;
u32 me;
- u32 msix; /* This ring's MSIX vector */
- u32 eims; /* This ring's EIMS bit */
- struct igb_dma_alloc rxdma; /* bus_dma glue for tx desc */
+ struct igb_dma_alloc rxdma;
union e1000_adv_rx_desc *rx_base;
struct lro_ctrl lro;
bool lro_enabled;
bool hdr_split;
- struct task rx_task; /* cleanup tasklet */
- struct taskqueue *tq;
+ bool discard;
struct mtx rx_mtx;
char mtx_name[16];
u32 last_cleaned;
u32 next_to_check;
struct igb_rx_buf *rx_buffers;
- bus_dma_tag_t rxtag; /* dma tag for tx */
- bus_dmamap_t spare_map;
+ bus_dma_tag_t rx_htag; /* dma tag for rx head */
+ bus_dmamap_t rx_hspare_map;
+ bus_dma_tag_t rx_ptag; /* dma tag for rx packet */
+ bus_dmamap_t rx_pspare_map;
/*
* First/last mbuf pointers, for
* collecting multisegment RX packets.
@@ -325,14 +348,11 @@ struct rx_ring {
struct mbuf *lmp;
u32 bytes;
- u32 eitr_setting;
-
- struct resource *res;
- void *tag;
+ u32 packets;
/* Soft stats */
- u64 rx_irq;
u64 rx_split_packets;
+ u64 rx_discarded;
u64 rx_packets;
u64 rx_bytes;
};
@@ -341,7 +361,6 @@ struct adapter {
struct ifnet *ifp;
struct e1000_hw hw;
- /* FreeBSD operating-system-specific structures. */
struct e1000_osdep osdep;
struct device *dev;
@@ -381,12 +400,14 @@ struct adapter {
u16 link_duplex;
u32 smartspeed;
+ /* Interface queues */
+ struct igb_queue *queues;
+
/*
* Transmit rings
*/
struct tx_ring *tx_rings;
u16 num_tx_desc;
- u32 txd_cmd;
/*
* Receive rings
@@ -446,22 +467,26 @@ struct igb_tx_buffer {
struct igb_rx_buf {
struct mbuf *m_head;
struct mbuf *m_pack;
- bus_dmamap_t map; /* bus_dma map for packet */
+	bus_dmamap_t	head_map;	/* bus_dma map for header */
+ bus_dmamap_t pack_map; /* bus_dma map for packet */
};
#define IGB_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "IGB Core Lock", MTX_DEF)
#define IGB_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
-#define IGB_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
-#define IGB_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
#define IGB_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
+#define IGB_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
+#define IGB_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
+
+#define IGB_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
#define IGB_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
+#define IGB_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
#define IGB_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
+#define IGB_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
+
+#define IGB_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
#define IGB_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
-#define IGB_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
-#define IGB_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
#define IGB_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
-#define IGB_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
#define IGB_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
#endif /* _IGB_H_DEFINED_ */
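A typical use of the per-ring lock macros above (sketch only; the function is hypothetical):

static void
igb_deferred_start(struct tx_ring *txr)
{
	IGB_TX_LOCK(txr);
	IGB_TX_LOCK_ASSERT(txr);
	/* ... enqueue and transmit on txr ... */
	IGB_TX_UNLOCK(txr);
}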