author     jfv <jfv@FreeBSD.org>    2009-06-24 17:41:29 +0000
committer  jfv <jfv@FreeBSD.org>    2009-06-24 17:41:29 +0000
commit     bd3587c757355e28ac6abbaf11703661599270bb (patch)
tree       2ecc6e8fbc53f9378cbe7397d226334796447dd4 /sys/dev
parent     7d778199caba58e2bf12662021294f3869f8f1dd (diff)
Updates for both the em and igb drivers: add support for multiqueue tx,
shared code updates, new device support, and some bug fixes.
Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/e1000/e1000_82540.c      45
-rw-r--r--  sys/dev/e1000/e1000_82541.c       9
-rw-r--r--  sys/dev/e1000/e1000_82571.c     170
-rw-r--r--  sys/dev/e1000/e1000_82575.c     351
-rw-r--r--  sys/dev/e1000/e1000_82575.h      22
-rw-r--r--  sys/dev/e1000/e1000_api.c        13
-rw-r--r--  sys/dev/e1000/e1000_defines.h    98
-rw-r--r--  sys/dev/e1000/e1000_hw.h         18
-rw-r--r--  sys/dev/e1000/e1000_ich8lan.c   680
-rw-r--r--  sys/dev/e1000/e1000_ich8lan.h    21
-rw-r--r--  sys/dev/e1000/e1000_mac.c         6
-rw-r--r--  sys/dev/e1000/e1000_osdep.c      30
-rw-r--r--  sys/dev/e1000/e1000_phy.c       788
-rw-r--r--  sys/dev/e1000/e1000_phy.h        65
-rw-r--r--  sys/dev/e1000/e1000_regs.h        4
-rw-r--r--  sys/dev/e1000/if_em.c           570
-rw-r--r--  sys/dev/e1000/if_em.h            79
-rw-r--r--  sys/dev/e1000/if_igb.c         1204
-rw-r--r--  sys/dev/e1000/if_igb.h           74
19 files changed, 2742 insertions, 1505 deletions
diff --git a/sys/dev/e1000/e1000_82540.c b/sys/dev/e1000/e1000_82540.c
index 2cd1057..14dcbb3 100644
--- a/sys/dev/e1000/e1000_82540.c
+++ b/sys/dev/e1000/e1000_82540.c
@@ -57,6 +57,7 @@ static s32 e1000_set_vco_speed_82540(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_82540(struct e1000_hw *hw);
static s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82540(struct e1000_hw *hw);
+static s32 e1000_read_mac_addr_82540(struct e1000_hw *hw);
/**
* e1000_init_phy_params_82540 - Init PHY func ptrs.
@@ -229,6 +230,8 @@ static s32 e1000_init_mac_params_82540(struct e1000_hw *hw)
mac->ops.clear_vfta = e1000_clear_vfta_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82540;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* setup LED */
@@ -676,3 +679,45 @@ static void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw)
E1000_READ_REG(hw, E1000_MGTPTC);
}
+/**
+ * e1000_read_mac_addr_82540 - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ * Since devices with two ports use the same EEPROM, we increment the
+ * last bit in the MAC address for the second port.
+ *
+ * This version is used instead of the generic one because of customer
+ * issues with VMware and VirtualBox when using the generic version. It
+ * seems that in the emulated 82545, RAR[0] does NOT have a valid address
+ * after a reset; this older method works, and using it breaks nothing
+ * for these legacy adapters.
+ **/
+s32 e1000_read_mac_addr_82540(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 offset, nvm_data, i;
+
+ DEBUGFUNC("e1000_read_mac_addr");
+
+ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+ offset = i >> 1;
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+ hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
+ hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
+ }
+
+ /* Flip last bit of mac address if we're on second port */
+ if (hw->bus.func == E1000_FUNC_1)
+ hw->mac.perm_addr[5] ^= 1;
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+out:
+ return ret_val;
+}
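The per-port handling above means both LAN functions read the same EEPROM
words and differ only in the low bit of the last octet. A minimal sketch of
that derivation, using the driver's u8/u16 typedefs and ETH_ADDR_LEN; the
helper name is hypothetical and the code is illustrative, not from the patch:

static void
e1000_derive_port1_addr(const u8 base[ETH_ADDR_LEN], u8 out[ETH_ADDR_LEN])
{
        u16 i;

        for (i = 0; i < ETH_ADDR_LEN; i++)
                out[i] = base[i];
        /* the second LAN function (E1000_FUNC_1) uses base address + 1 */
        out[ETH_ADDR_LEN - 1] ^= 1;
}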
diff --git a/sys/dev/e1000/e1000_82541.c b/sys/dev/e1000/e1000_82541.c
index a7f896e..68d1b05 100644
--- a/sys/dev/e1000/e1000_82541.c
+++ b/sys/dev/e1000/e1000_82541.c
@@ -377,6 +377,7 @@ static s32 e1000_reset_hw_82541(struct e1000_hw *hw)
static s32 e1000_init_hw_82541(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
u32 i, txdctl;
s32 ret_val;
@@ -388,6 +389,13 @@ static s32 e1000_init_hw_82541(struct e1000_hw *hw)
DEBUGOUT("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
}
+
+ /* Storing the Speed Power Down value for later use */
+ ret_val = hw->phy.ops.read_reg(hw,
+ IGP01E1000_GMII_FIFO,
+ &dev_spec->spd_default);
+ if (ret_val)
+ goto out;
/* Disabling VLAN filtering */
DEBUGOUT("Initializing the IEEE VLAN\n");
@@ -425,6 +433,7 @@ static s32 e1000_init_hw_82541(struct e1000_hw *hw)
*/
e1000_clear_hw_cntrs_82541(hw);
+out:
return ret_val;
}
diff --git a/sys/dev/e1000/e1000_82571.c b/sys/dev/e1000/e1000_82571.c
index 5d16136..18fe745 100644
--- a/sys/dev/e1000/e1000_82571.c
+++ b/sys/dev/e1000/e1000_82571.c
@@ -47,6 +47,7 @@
* 82573L Gigabit Ethernet Controller
* 82574L Gigabit Network Connection
* 82574L Gigabit Network Connection
+ * 82583V Gigabit Network Connection
*/
#include "e1000_api.h"
@@ -154,6 +155,7 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
goto out;
}
break;
+ case e1000_82583:
case e1000_82574:
phy->type = e1000_phy_bm;
phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
@@ -215,6 +217,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
+ case e1000_82583:
if (((eecd >> 15) & 0x3) == 0x3) {
nvm->type = e1000_nvm_flash_hw;
nvm->word_size = 2048;
@@ -264,6 +267,9 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = E1000_SUCCESS;
+ u32 swsm = 0;
+ u32 swsm2 = 0;
+ bool force_clear_smbi = FALSE;
DEBUGFUNC("e1000_init_mac_params_82571");
@@ -304,6 +310,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
+ case e1000_82583:
mac->ops.set_lan_id = e1000_set_lan_id_single_port;
break;
default:
@@ -339,6 +346,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
/* check management mode */
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
mac->ops.check_mng_mode = e1000_check_mng_mode_82574;
break;
default:
@@ -366,6 +374,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
/* turn on/off LED */
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
mac->ops.led_on = e1000_led_on_82574;
break;
default:
@@ -381,6 +390,50 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
? e1000_get_speed_and_duplex_copper_generic
: e1000_get_speed_and_duplex_fiber_serdes_generic;
+ /*
+ * Ensure that the inter-port SWSM.SMBI lock bit is clear before
+ * first NVM or PHY acess. This should be done for single-port
+ * devices, and for one port only on dual-port devices so that
+ * for those devices we can still use the SMBI lock to synchronize
+ * inter-port accesses to the PHY & NVM.
+ */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ swsm2 = E1000_READ_REG(hw, E1000_SWSM2);
+
+ if (!(swsm2 & E1000_SWSM2_LOCK)) {
+ /* Only do this for the first interface on this card */
+ E1000_WRITE_REG(hw, E1000_SWSM2,
+ swsm2 | E1000_SWSM2_LOCK);
+ force_clear_smbi = TRUE;
+ } else
+ force_clear_smbi = FALSE;
+ break;
+ default:
+ force_clear_smbi = TRUE;
+ break;
+ }
+
+ if (force_clear_smbi) {
+ /* Make sure SWSM.SMBI is clear */
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (swsm & E1000_SWSM_SMBI) {
+ /* This bit should not be set on a first interface, and
+ * indicates that the bootagent or EFI code has
+ * improperly left this bit enabled
+ */
+ DEBUGOUT("Please update your 82571 Bootagent\n");
+ }
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_SMBI);
+ }
+
+ /*
+ * Initialize the device-specific counter of SMBI acquisition
+ * timeouts.
+ */
+ hw->dev_spec._82571.smb_counter = 0;
+
out:
return ret_val;
}
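The SWSM2.LOCK handling above is a one-time gate: the first 82571/82572
function to initialize finds LOCK clear, sets it, and clears the inter-port
SMBI bit; the peer function later finds LOCK set and leaves SMBI alone so it
keeps working as the inter-port lock. A condensed sketch of that decision,
using the driver's register macros (the helper name is hypothetical):

static bool
e1000_should_clear_smbi(struct e1000_hw *hw)
{
        u32 swsm2 = E1000_READ_REG(hw, E1000_SWSM2);

        if (swsm2 & E1000_SWSM2_LOCK)
                return FALSE;   /* the other port already ran this init */
        /* mark the card so the peer function skips the SMBI clear */
        E1000_WRITE_REG(hw, E1000_SWSM2, swsm2 | E1000_SWSM2_LOCK);
        return TRUE;
}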
@@ -430,6 +483,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
ret_val = e1000_get_phy_id(hw);
break;
case e1000_82574:
+ case e1000_82583:
ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
if (ret_val)
goto out;
@@ -458,17 +512,43 @@ out:
*
* Acquire the HW semaphore to access the PHY or NVM
**/
-static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
s32 ret_val = E1000_SUCCESS;
- s32 timeout = hw->nvm.word_size + 1;
+ s32 sw_timeout = hw->nvm.word_size + 1;
+ s32 fw_timeout = hw->nvm.word_size + 1;
s32 i = 0;
DEBUGFUNC("e1000_get_hw_semaphore_82571");
+ /*
+ * If we have timed out 3 times trying to acquire
+ * the inter-port SMBI semaphore, there is old code
+ * operating on the other port, and it is not
+ * releasing SMBI. Modify the number of times that
+ * we try for the semaphore to interwork with this
+ * older code.
+ */
+ if (hw->dev_spec._82571.smb_counter > 2)
+ sw_timeout = 1;
+
+ /* Get the SW semaphore */
+ while (i < sw_timeout) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == sw_timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ hw->dev_spec._82571.smb_counter++;
+ }
/* Get the FW semaphore. */
- for (i = 0; i < timeout; i++) {
+ for (i = 0; i < fw_timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
@@ -479,9 +559,9 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
usec_delay(50);
}
- if (i == timeout) {
+ if (i == fw_timeout) {
/* Release semaphores */
- e1000_put_hw_semaphore_generic(hw);
+ e1000_put_hw_semaphore_82571(hw);
DEBUGOUT("Driver can't access the NVM\n");
ret_val = -E1000_ERR_NVM;
goto out;
@@ -497,15 +577,15 @@ out:
*
* Release hardware semaphore used to access the PHY or NVM
**/
-static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
- DEBUGFUNC("e1000_put_hw_semaphore_82571");
+ DEBUGFUNC("e1000_put_hw_semaphore_generic");
swsm = E1000_READ_REG(hw, E1000_SWSM);
- swsm &= ~E1000_SWSM_SWESMBI;
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
E1000_WRITE_REG(hw, E1000_SWSM, swsm);
}
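Taken together, the two routines above give the usual bracketed access
pattern for NVM/PHY operations on 82571-class parts; a sketch of a typical
caller, with error handling trimmed:

        ret_val = e1000_get_hw_semaphore_82571(hw);
        if (ret_val)
                return ret_val;
        /* ... NVM or PHY register access ... */
        e1000_put_hw_semaphore_82571(hw);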
@@ -531,6 +611,7 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
case e1000_82573:
break;
default:
@@ -581,6 +662,7 @@ static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
+ case e1000_82583:
ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
break;
case e1000_82571:
@@ -885,6 +967,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
case e1000_82573:
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
@@ -932,6 +1015,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
case e1000_82573:
msec_delay(25);
break;
@@ -1014,6 +1098,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* ...for both queues. */
switch (mac->type) {
case e1000_82574:
+ case e1000_82583:
case e1000_82573:
e1000_enable_tx_pkt_filtering_generic(hw);
reg_data = E1000_READ_REG(hw, E1000_GCR);
@@ -1096,6 +1181,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
case e1000_82573:
reg = E1000_READ_REG(hw, E1000_CTRL);
reg &= ~(1 << 29);
@@ -1108,6 +1194,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
/* Extended Device Control */
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
case e1000_82573:
reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
reg &= ~(1 << 23);
@@ -1141,6 +1228,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
reg = E1000_READ_REG(hw, E1000_GCR);
reg |= (1 << 22);
E1000_WRITE_REG(hw, E1000_GCR, reg);
@@ -1180,6 +1268,7 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
case e1000_82573:
if (hw->mng_cookie.vlan_id != 0) {
/*
@@ -1281,6 +1370,7 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
case e1000_82573:
if (hw->fc.requested_mode == e1000_fc_default)
hw->fc.requested_mode = e1000_fc_full;
@@ -1301,7 +1391,7 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
**/
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
{
- u32 ctrl, led_ctrl;
+ u32 ctrl;
s32 ret_val;
DEBUGFUNC("e1000_setup_copper_link_82571");
@@ -1318,11 +1408,6 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
break;
case e1000_phy_igp_2:
ret_val = e1000_copper_link_setup_igp(hw);
- /* Setup activity LED */
- led_ctrl = E1000_READ_REG(hw, E1000_LEDCTL);
- led_ctrl &= IGP_ACTIVITY_LED_MASK;
- led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
- E1000_WRITE_REG(hw, E1000_LEDCTL, led_ctrl);
break;
default:
ret_val = -E1000_ERR_PHY;
@@ -1372,8 +1457,20 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
* e1000_check_for_serdes_link_82571 - Check for link (Serdes)
* @hw: pointer to the HW structure
*
- * Checks for link up on the hardware. If link is not up and we have
- * a signal, then we need to force link up.
+ * Reports the link state as up or down.
+ *
+ * If autonegotiation is supported by the link partner, the link state is
+ * determined by the result of autonegotiation. This is the most likely case.
+ * If autonegotiation is not supported by the link partner, and the link
+ * has a valid signal, force the link up.
+ *
+ * The link state is represented internally here by 4 states:
+ *
+ * 1) down
+ * 2) autoneg_progress
+ * 3) autoneg_complete (the link successfully autonegotiated)
+ * 4) forced_up (the link has been forced up, it did not autonegotiate)
+ *
**/
s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
{
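The hunks below rework the autoneg_progress arm of the state machine
described above; in sketch form (transitions whose triggers fall outside
these hunks keep their existing conditions):

/*
 *   AN_PROG --(/C/ seen, STATUS.LU set)---> AN_UP      serdes_has_link = TRUE
 *   AN_PROG --(/C/ seen, STATUS.LU clear)-> DOWN       (autoneg failed)
 *   AN_PROG --(no /C/ ordered sets)-------> FORCED_UP  serdes_has_link = TRUE
 *   AN_UP / FORCED_UP --------------------> AN_PROG    serdes_has_link = FALSE
 */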
@@ -1401,6 +1498,7 @@ s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
*/
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = FALSE;
DEBUGOUT("AN_UP -> AN_PROG\n");
}
break;
@@ -1419,28 +1517,35 @@ s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
(ctrl & ~E1000_CTRL_SLU));
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = FALSE;
DEBUGOUT("FORCED_UP -> AN_PROG\n");
}
break;
case e1000_serdes_link_autoneg_progress:
- /*
- * If the LU bit is set in the STATUS register,
- * autoneg has completed sucessfully. If not,
- * try foring the link because the far end may be
- * available but not capable of autonegotiation.
- */
- if (status & E1000_STATUS_LU) {
- mac->serdes_link_state =
- e1000_serdes_link_autoneg_complete;
- DEBUGOUT("AN_PROG -> AN_UP\n");
+ if (rxcw & E1000_RXCW_C) {
+ /* We received /C/ ordered sets, meaning the
+ * link partner has autonegotiated, and we can
+ * trust the Link Up (LU) status bit
+ */
+ if (status & E1000_STATUS_LU) {
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_complete;
+ DEBUGOUT("AN_PROG -> AN_UP\n");
+ mac->serdes_has_link = TRUE;
+ } else {
+ /* Autoneg completed, but failed */
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ DEBUGOUT("AN_PROG -> DOWN\n");
+ }
} else {
- /*
- * Disable autoneg, force link up and
- * full duplex, and change state to forced
+ /* The link partner did not autoneg.
+ * Force link up and full duplex, and change
+ * state to forced.
*/
E1000_WRITE_REG(hw, E1000_TXCW,
- (mac->txcw & ~E1000_TXCW_ANE));
+ (mac->txcw & ~E1000_TXCW_ANE));
ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
@@ -1452,10 +1557,10 @@ s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
break;
}
mac->serdes_link_state =
- e1000_serdes_link_forced_up;
+ e1000_serdes_link_forced_up;
+ mac->serdes_has_link = TRUE;
DEBUGOUT("AN_PROG -> FORCED_UP\n");
}
- mac->serdes_has_link = TRUE;
break;
case e1000_serdes_link_down:
@@ -1517,6 +1622,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
case e1000_82573:
if(*data == ID_LED_RESERVED_F746)
*data = ID_LED_DEFAULT_82573;
diff --git a/sys/dev/e1000/e1000_82575.c b/sys/dev/e1000/e1000_82575.c
index d7ed6c8..2f8e8ed 100644
--- a/sys/dev/e1000/e1000_82575.c
+++ b/sys/dev/e1000/e1000_82575.c
@@ -38,6 +38,7 @@
* 82575GB Gigabit Network Connection
* 82575GB Gigabit Network Connection
* 82576 Gigabit Network Connection
+ * 82576 Quad Port Gigabit Mezzanine Adapter
*/
#include "e1000_api.h"
@@ -77,6 +78,7 @@ static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
/**
* e1000_init_phy_params_82575 - Init PHY func ptrs.
@@ -326,11 +328,12 @@ void e1000_init_function_pointers_82575(struct e1000_hw *hw)
**/
static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
{
- u16 mask;
+ u16 mask = E1000_SWFW_PHY0_SM;
DEBUGFUNC("e1000_acquire_phy_82575");
- mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_SWFW_PHY1_SM;
return e1000_acquire_swfw_sync_82575(hw, mask);
}
@@ -343,11 +346,13 @@ static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
**/
static void e1000_release_phy_82575(struct e1000_hw *hw)
{
- u16 mask;
+ u16 mask = E1000_SWFW_PHY0_SM;
DEBUGFUNC("e1000_release_phy_82575");
- mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_SWFW_PHY1_SM;
+
e1000_release_swfw_sync_82575(hw, mask);
}
@@ -785,9 +790,8 @@ static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
DEBUGFUNC("e1000_get_cfg_done_82575");
- if (hw->bus.func == 1)
+ if (hw->bus.func == E1000_FUNC_1)
mask = E1000_NVM_CFG_DONE_PORT_1;
-
while (timeout) {
if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
break;
@@ -937,13 +941,13 @@ void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
u32 reg;
u16 eeprom_data = 0;
- if (hw->mac.type != e1000_82576 ||
- (hw->phy.media_type != e1000_media_type_fiber &&
- hw->phy.media_type != e1000_media_type_internal_serdes))
+ if (hw->phy.media_type != e1000_media_type_internal_serdes)
return;
- if (hw->bus.func == 0)
+ if (hw->bus.func == E1000_FUNC_0)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ else if (hw->bus.func == E1000_FUNC_1)
+ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
/*
* If APM is not enabled in the EEPROM and management interface is
@@ -970,250 +974,42 @@ void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
}
/**
- * e1000_vmdq_loopback_enable_pf- Enables VM to VM queue loopback replication
- * @hw: pointer to the HW structure
- **/
-void e1000_vmdq_loopback_enable_pf(struct e1000_hw *hw)
-{
- u32 reg;
-
- reg = E1000_READ_REG(hw, E1000_DTXSWC);
- reg |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
- E1000_WRITE_REG(hw, E1000_DTXSWC, reg);
-}
-
-/**
- * e1000_vmdq_loopback_disable_pf - Disable VM to VM queue loopbk replication
+ * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
* @hw: pointer to the HW structure
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables L2 switch loopback functionality
**/
-void e1000_vmdq_loopback_disable_pf(struct e1000_hw *hw)
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
{
u32 reg;
reg = E1000_READ_REG(hw, E1000_DTXSWC);
- reg &= ~(E1000_DTXSWC_VMDQ_LOOPBACK_EN);
+ if (enable)
+ reg |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ reg &= ~(E1000_DTXSWC_VMDQ_LOOPBACK_EN);
E1000_WRITE_REG(hw, E1000_DTXSWC, reg);
}
/**
- * e1000_vmdq_replication_enable_pf - Enable replication of brdcst & multicst
+ * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
* @hw: pointer to the HW structure
+ * @enable: state to enter, either enabled or disabled
*
- * Enables replication of broadcast and multicast packets from the network
- * to VM's which have their respective broadcast and multicast accept
- * bits set in the VM Offload Register. This gives the PF driver per
- * VM granularity control over which VM's get replicated broadcast traffic.
+ * enables/disables replication of packets across multiple pools
**/
-void e1000_vmdq_replication_enable_pf(struct e1000_hw *hw, u32 enables)
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
{
u32 reg;
- u32 i;
-
- for (i = 0; i < MAX_NUM_VFS; i++) {
- if (enables & (1 << i)) {
- reg = E1000_READ_REG(hw, E1000_VMOLR(i));
- reg |= (E1000_VMOLR_AUPE |
- E1000_VMOLR_BAM |
- E1000_VMOLR_MPME);
- E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
- }
- }
reg = E1000_READ_REG(hw, E1000_VT_CTL);
- reg |= E1000_VT_CTL_VM_REPL_EN;
- E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
-}
-
-/**
- * e1000_vmdq_replication_disable_pf - Disable replication of brdcst & multicst
- * @hw: pointer to the HW structure
- *
- * Disables replication of broadcast and multicast packets to the VM's.
- **/
-void e1000_vmdq_replication_disable_pf(struct e1000_hw *hw)
-{
- u32 reg;
-
- reg = E1000_READ_REG(hw, E1000_VT_CTL);
- reg &= ~(E1000_VT_CTL_VM_REPL_EN);
- E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
-}
-
-/**
- * e1000_vmdq_enable_replication_mode_pf - Enables replication mode in the device
- * @hw: pointer to the HW structure
- **/
-void e1000_vmdq_enable_replication_mode_pf(struct e1000_hw *hw)
-{
- u32 reg;
-
- reg = E1000_READ_REG(hw, E1000_VT_CTL);
- reg |= E1000_VT_CTL_VM_REPL_EN;
- E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
-}
-
-/**
- * e1000_vmdq_broadcast_replication_enable_pf - Enable replication of brdcst
- * @hw: pointer to the HW structure
- * @enables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
- *
- * Enables replication of broadcast packets from the network
- * to VM's which have their respective broadcast accept
- * bits set in the VM Offload Register. This gives the PF driver per
- * VM granularity control over which VM's get replicated broadcast traffic.
- **/
-void e1000_vmdq_broadcast_replication_enable_pf(struct e1000_hw *hw,
- u32 enables)
-{
- u32 reg;
- u32 i;
-
- for (i = 0; i < MAX_NUM_VFS; i++) {
- if ((enables == ALL_QUEUES) || (enables & (1 << i))) {
- reg = E1000_READ_REG(hw, E1000_VMOLR(i));
- reg |= E1000_VMOLR_BAM;
- E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
- }
- }
-}
-
-/**
- * e1000_vmdq_broadcast_replication_disable_pf - Disable replication
- * of broadcast packets
- * @hw: pointer to the HW structure
- * @disables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
- *
- * Disables replication of broadcast packets for specific pools.
- * If bam/mpe is disabled on all pools then replication mode is
- * turned off.
- **/
-void e1000_vmdq_broadcast_replication_disable_pf(struct e1000_hw *hw,
- u32 disables)
-{
- u32 reg;
- u32 i;
- u32 oneenabled = 0;
-
- for (i = 0; i < MAX_NUM_VFS; i++) {
- reg = E1000_READ_REG(hw, E1000_VMOLR(i));
- if ((disables == ALL_QUEUES) || (disables & (1 << i))) {
- reg &= ~(E1000_VMOLR_BAM);
- E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
- }
- if (!oneenabled && (reg & (E1000_VMOLR_AUPE |
- E1000_VMOLR_BAM |
- E1000_VMOLR_MPME)))
- oneenabled = 1;
- }
- if (!oneenabled) {
- reg = E1000_READ_REG(hw, E1000_VT_CTL);
- reg &= ~(E1000_VT_CTL_VM_REPL_EN);
- E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
- }
-}
-
-/**
- * e1000_vmdq_multicast_promiscuous_enable_pf - Enable promiscuous reception
- * @hw: pointer to the HW structure
- * @enables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
- *
- * Enables promiscuous reception of multicast packets from the network
- * to VM's which have their respective multicast promiscuous mode enable
- * bits set in the VM Offload Register. This gives the PF driver per
- * VM granularity control over which VM's get all multicast traffic.
- **/
-void e1000_vmdq_multicast_promiscuous_enable_pf(struct e1000_hw *hw,
- u32 enables)
-{
- u32 reg;
- u32 i;
-
- for (i = 0; i < MAX_NUM_VFS; i++) {
- if ((enables == ALL_QUEUES) || (enables & (1 << i))) {
- reg = E1000_READ_REG(hw, E1000_VMOLR(i));
- reg |= E1000_VMOLR_MPME;
- E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
- }
- }
-}
-
-/**
- * e1000_vmdq_multicast_promiscuous_disable_pf - Disable promiscuous
- * reception of multicast packets
- * @hw: pointer to the HW structure
- * @disables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
- *
- * Disables promiscuous reception of multicast packets for specific pools.
- * If bam/mpe is disabled on all pools then replication mode is
- * turned off.
- **/
-void e1000_vmdq_multicast_promiscuous_disable_pf(struct e1000_hw *hw,
- u32 disables)
-{
- u32 reg;
- u32 i;
- u32 oneenabled = 0;
-
- for (i = 0; i < MAX_NUM_VFS; i++) {
- reg = E1000_READ_REG(hw, E1000_VMOLR(i));
- if ((disables == ALL_QUEUES) || (disables & (1 << i))) {
- reg &= ~(E1000_VMOLR_MPME);
- E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
- }
- if (!oneenabled && (reg & (E1000_VMOLR_AUPE |
- E1000_VMOLR_BAM |
- E1000_VMOLR_MPME)))
- oneenabled = 1;
- }
- if (!oneenabled) {
- reg = E1000_READ_REG(hw, E1000_VT_CTL);
+ if (enable)
+ reg |= E1000_VT_CTL_VM_REPL_EN;
+ else
reg &= ~(E1000_VT_CTL_VM_REPL_EN);
- E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
- }
-}
-/**
- * e1000_vmdq_aupe_enable_pf - Enable acceptance of untagged packets
- * @hw: pointer to the HW structure
- * @enables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
- *
- * Enables acceptance of packets from the network which do not have
- * a VLAN tag but match the exact MAC filter of a given VM.
- **/
-void e1000_vmdq_aupe_enable_pf(struct e1000_hw *hw, u32 enables)
-{
- u32 reg;
- u32 i;
-
- for (i = 0; i < MAX_NUM_VFS; i++) {
- if ((enables == ALL_QUEUES) || (enables & (1 << i))) {
- reg = E1000_READ_REG(hw, E1000_VMOLR(i));
- reg |= E1000_VMOLR_AUPE;
- E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
- }
- }
-}
-
-/**
- * e1000_vmdq_aupe_disable_pf - Disable acceptance of untagged packets
- * @hw: pointer to the HW structure
- * @disables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
- *
- * Disables acceptance of packets from the network which do not have
- * a VLAN tag but match the exact MAC filter of a given VM.
- **/
-void e1000_vmdq_aupe_disable_pf(struct e1000_hw *hw, u32 disables)
-{
- u32 reg;
- u32 i;
-
- for (i = 0; i < MAX_NUM_VFS; i++) {
- if ((disables == ALL_QUEUES) || (disables & (1 << i))) {
- reg = E1000_READ_REG(hw, E1000_VMOLR(i));
- reg &= ~E1000_VMOLR_AUPE;
- E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
- }
- }
+ E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
}
/**
@@ -1238,6 +1034,12 @@ static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
DEBUGOUT("PCI-E Master disable polling has failed.\n");
}
+ /* set the completion timeout for interface */
+ ret_val = e1000_set_pcie_completion_timeout(hw);
+ if (ret_val) {
+ DEBUGOUT("PCI-E Set completion timeout has failed.\n");
+ }
+
DEBUGOUT("Masking off all interrupts\n");
E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
@@ -1333,7 +1135,7 @@ static s32 e1000_init_hw_82575(struct e1000_hw *hw)
**/
static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
{
- u32 ctrl, led_ctrl;
+ u32 ctrl;
s32 ret_val;
bool link;
@@ -1350,11 +1152,6 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
break;
case e1000_phy_igp_3:
ret_val = e1000_copper_link_setup_igp(hw);
- /* Setup activity LED */
- led_ctrl = E1000_READ_REG(hw, E1000_LEDCTL);
- led_ctrl &= IGP_ACTIVITY_LED_MASK;
- led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
- E1000_WRITE_REG(hw, E1000_LEDCTL, led_ctrl);
break;
default:
ret_val = -E1000_ERR_PHY;
@@ -1433,15 +1230,14 @@ static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
*/
E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
- /* Force link up, set 1gb, set both sw defined pins */
+ /* Force link up, set 1gb */
reg = E1000_READ_REG(hw, E1000_CTRL);
- reg |= E1000_CTRL_SLU |
- E1000_CTRL_SPD_1000 |
- E1000_CTRL_FRCSPD |
- E1000_CTRL_SWDPIN0 |
- E1000_CTRL_SWDPIN1;
+ reg |= E1000_CTRL_SLU | E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD;
+ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
+ /* set both sw defined pins */
+ reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+ }
E1000_WRITE_REG(hw, E1000_CTRL, reg);
-
/* Power on phy for 82576 fiber adapters */
if (hw->mac.type == e1000_82576) {
reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
@@ -1514,7 +1310,6 @@ static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
switch(hw->phy.media_type) {
- case e1000_media_type_fiber:
case e1000_media_type_internal_serdes:
*data = ID_LED_DEFAULT_82575_SERDES;
break;
@@ -1605,12 +1400,6 @@ out:
static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
{
struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
-
- DEBUGFUNC("e1000_sgmii_active_82575");
-
- if (hw->mac.type != e1000_82575 && hw->mac.type != e1000_82576)
- return FALSE;
-
return dev_spec->sgmii_active;
}
@@ -1762,6 +1551,7 @@ static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
if (hw->phy.media_type == e1000_media_type_internal_serdes)
E1000_READ_REG(hw, E1000_SCVPC);
}
+
/**
* e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable
* @hw: pointer to the HW structure
@@ -1836,3 +1626,54 @@ void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
E1000_READ_REG(hw, E1000_MPC);
}
+/**
+ * e1000_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for 82575 and 82576 should be in the range of 50us to 50ms;
+ * however, the hardware default for these parts is 500us to 1ms, which is
+ * less than the 10ms recommended by the PCI-E spec. To address this we need
+ * to increase the value to the 10ms-to-200ms range for a capability version
+ * 1 config, or 16ms to 55ms for version 2.
+ **/
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
+{
+ u32 gcr = E1000_READ_REG(hw, E1000_GCR);
+ s32 ret_val = E1000_SUCCESS;
+ u16 pcie_devctl2;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /*
+ * if the capabilities version is type 1 we can write the
+ * timeout of 10ms to 200ms through the GCR register
+ */
+ if (!(gcr & E1000_GCR_CAP_VER2)) {
+ gcr |= E1000_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /*
+ * for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+ if (ret_val)
+ goto out;
+
+ pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+ ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
+
+ E1000_WRITE_REG(hw, E1000_GCR, gcr);
+ return ret_val;
+}
+
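The path selection in e1000_set_pcie_completion_timeout() above can be
summarized as follows (a sketch of the logic, not additional patch code):

/*
 *   GCR completion-timeout field already nonzero -> leave the value alone
 *   capability version 1 (GCR.CAP_VER2 clear)    -> GCR |= CMPL_TMOUT_10ms
 *   capability version 2 (GCR.CAP_VER2 set)      -> PCIE_DEVICE_CONTROL2 |=
 *                                                   PCIE_DEVICE_CONTROL2_16ms
 *                                                   via e1000_write_pcie_cap_reg()
 *   in all cases, CMPL_TMOUT_RESEND is cleared before GCR is written back
 */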
diff --git a/sys/dev/e1000/e1000_82575.h b/sys/dev/e1000/e1000_82575.h
index 1b7ce89..34e0d29 100644
--- a/sys/dev/e1000/e1000_82575.h
+++ b/sys/dev/e1000/e1000_82575.h
@@ -214,7 +214,7 @@ union e1000_adv_rx_desc {
} wb; /* writeback */
};
-#define E1000_RXDADV_RSSTYPE_MASK 0x0000F000
+#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F
#define E1000_RXDADV_RSSTYPE_SHIFT 12
#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
@@ -421,21 +421,11 @@ struct e1000_adv_tx_context_desc {
#define E1000_IOVCTL 0x05BBC
#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+#define E1000_RPLOLR_STRVLAN 0x40000000
+#define E1000_RPLOLR_STRCRC 0x80000000
+
#define ALL_QUEUES 0xFFFF
-void e1000_vmdq_loopback_enable_pf(struct e1000_hw *hw);
-void e1000_vmdq_loopback_disable_pf(struct e1000_hw *hw);
-void e1000_vmdq_replication_enable_pf(struct e1000_hw *hw, u32 enables);
-void e1000_vmdq_replication_disable_pf(struct e1000_hw *hw);
-void e1000_vmdq_enable_replication_mode_pf(struct e1000_hw *hw);
-void e1000_vmdq_broadcast_replication_enable_pf(struct e1000_hw *hw,
- u32 enables);
-void e1000_vmdq_multicast_promiscuous_enable_pf(struct e1000_hw *hw,
- u32 enables);
-void e1000_vmdq_broadcast_replication_disable_pf(struct e1000_hw *hw,
- u32 disables);
-void e1000_vmdq_multicast_promiscuous_disable_pf(struct e1000_hw *hw,
- u32 disables);
-void e1000_vmdq_aupe_enable_pf(struct e1000_hw *hw, u32 enables);
-void e1000_vmdq_aupe_disable_pf(struct e1000_hw *hw, u32 disables);
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
#endif /* _E1000_82575_H_ */
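The two boolean setters that replace the old enable/disable pairs are
typically toggled together by the PF driver when entering or leaving VMDq
operation; a usage sketch (the call sites here are hypothetical):

        /* entering VMDq mode */
        e1000_vmdq_set_loopback_pf(hw, TRUE);     /* VM-to-VM L2 switch loopback */
        e1000_vmdq_set_replication_pf(hw, TRUE);  /* replicate packets across pools */

        /* leaving VMDq mode */
        e1000_vmdq_set_loopback_pf(hw, FALSE);
        e1000_vmdq_set_replication_pf(hw, FALSE);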
diff --git a/sys/dev/e1000/e1000_api.c b/sys/dev/e1000/e1000_api.c
index 1f47d34..8188658 100644
--- a/sys/dev/e1000/e1000_api.c
+++ b/sys/dev/e1000/e1000_api.c
@@ -213,8 +213,12 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
mac->type = e1000_82573;
break;
case E1000_DEV_ID_82574L:
+ case E1000_DEV_ID_82574LA:
mac->type = e1000_82574;
break;
+ case E1000_DEV_ID_82583V:
+ mac->type = e1000_82583;
+ break;
case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
@@ -248,6 +252,12 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_ICH10_D_BM_LF:
mac->type = e1000_ich10lan;
break;
+ case E1000_DEV_ID_PCH_D_HV_DM:
+ case E1000_DEV_ID_PCH_D_HV_DC:
+ case E1000_DEV_ID_PCH_M_HV_LM:
+ case E1000_DEV_ID_PCH_M_HV_LC:
+ mac->type = e1000_pchlan;
+ break;
case E1000_DEV_ID_82575EB_COPPER:
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82575GB_QUAD_COPPER:
@@ -259,6 +269,7 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82576_SERDES:
case E1000_DEV_ID_82576_QUAD_COPPER:
case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_SERDES_QUAD:
mac->type = e1000_82576;
break;
default:
@@ -337,6 +348,7 @@ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
case e1000_82572:
case e1000_82573:
case e1000_82574:
+ case e1000_82583:
e1000_init_function_pointers_82571(hw);
break;
case e1000_80003es2lan:
@@ -345,6 +357,7 @@ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
case e1000_ich8lan:
case e1000_ich9lan:
case e1000_ich10lan:
+ case e1000_pchlan:
e1000_init_function_pointers_ich8lan(hw);
break;
case e1000_82575:
diff --git a/sys/dev/e1000/e1000_defines.h b/sys/dev/e1000/e1000_defines.h
index 1f6c198..d845fb2 100644
--- a/sys/dev/e1000/e1000_defines.h
+++ b/sys/dev/e1000/e1000_defines.h
@@ -64,6 +64,8 @@
#define E1000_WUFC_FLX1_PHY 0x00002000 /* Flexible Filter 1 Enable */
#define E1000_WUFC_FLX2_PHY 0x00004000 /* Flexible Filter 2 Enable */
#define E1000_WUFC_FLX3_PHY 0x00008000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_FLX4_PHY 0x00000200 /* Flexible Filter 4 Enable */
+#define E1000_WUFC_FLX5_PHY 0x00000400 /* Flexible Filter 5 Enable */
#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
@@ -74,9 +76,13 @@
#define E1000_WUFC_ALL_FILTERS_PHY_4 0x0000F0FF /*Mask for all wakeup filters*/
#define E1000_WUFC_FLX_OFFSET_PHY 12 /* Offset to the Flexible Filters bits */
#define E1000_WUFC_FLX_FILTERS_PHY_4 0x0000F000 /*Mask for 4 flexible filters*/
+#define E1000_WUFC_ALL_FILTERS_PHY_6 0x0000F6FF /*Mask for 6 wakeup filters */
+#define E1000_WUFC_FLX_FILTERS_PHY_6 0x0000F600 /*Mask for 6 flexible filters*/
#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask for all 6 wakeup filters*/
#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
#define E1000_WUFC_FLX_FILTERS 0x000F0000 /*Mask for the 4 flexible filters */
+#define E1000_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flexible filters */
/*
* For 82576 to utilize Extended filter masks in addition to
* existing (filter) masks
@@ -101,13 +107,21 @@
#define E1000_WUS_FLX1 E1000_WUFC_FLX1
#define E1000_WUS_FLX2 E1000_WUFC_FLX2
#define E1000_WUS_FLX3 E1000_WUFC_FLX3
+#define E1000_WUS_FLX4 E1000_WUFC_FLX4
+#define E1000_WUS_FLX5 E1000_WUFC_FLX5
+#define E1000_WUS_FLX4_PHY E1000_WUFC_FLX4_PHY
+#define E1000_WUS_FLX5_PHY E1000_WUFC_FLX5_PHY
#define E1000_WUS_FLX_FILTERS E1000_WUFC_FLX_FILTERS
+#define E1000_WUS_FLX_FILTERS_6 E1000_WUFC_FLX_FILTERS_6
+#define E1000_WUS_FLX_FILTERS_PHY_6 E1000_WUFC_FLX_FILTERS_PHY_6
/* Wake Up Packet Length */
#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
/* Four Flexible Filters are supported */
#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+/* Six Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX_6 6
/* Two Extended Flexible Filters are supported (82576) */
#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
#define E1000_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
@@ -117,6 +131,7 @@
#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128
#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFLT_SIZE_6 E1000_FLEXIBLE_FILTER_COUNT_MAX_6
#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
@@ -162,7 +177,7 @@
#define E1000_CTRL_EXT_CANC 0x04000000 /* Int delay cancellation */
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
/* IAME enable bit (27) was removed in >= 82575 */
-#define E1000_CTRL_EXT_IAME 0x08000000 /* Int acknowledge Auto-mask */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int acknowledge Auto-mask */
#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error
* detection enabled */
#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity
@@ -170,6 +185,7 @@
#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_LSECCK 0x00001000
+#define E1000_CTRL_EXT_PHYPDEN 0x00100000
#define E1000_I2CCMD_REG_ADDR_SHIFT 16
#define E1000_I2CCMD_REG_ADDR 0x00FF0000
#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
@@ -300,8 +316,8 @@
#define E1000_RCTL_RST 0x00000001 /* Software reset */
#define E1000_RCTL_EN 0x00000002 /* enable */
#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
-#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
-#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */
#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
@@ -309,9 +325,9 @@
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
-#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
-#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
-#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min thresh size */
+#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min thresh size */
+#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min thresh size */
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
@@ -366,10 +382,10 @@
#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
/* SWFW_SYNC Definitions */
-#define E1000_SWFW_EEP_SM 0x1
-#define E1000_SWFW_PHY0_SM 0x2
-#define E1000_SWFW_PHY1_SM 0x4
-#define E1000_SWFW_CSR_SM 0x8
+#define E1000_SWFW_EEP_SM 0x01
+#define E1000_SWFW_PHY0_SM 0x02
+#define E1000_SWFW_PHY1_SM 0x04
+#define E1000_SWFW_CSR_SM 0x08
/* FACTPS Definitions */
#define E1000_FACTPS_LFS 0x40000000 /* LAN Function Select */
@@ -377,7 +393,7 @@
#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
-#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
@@ -475,8 +491,9 @@
#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
-#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state.
* Clear on write '0'. */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
@@ -498,9 +515,9 @@
#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */
/* Constants used to interpret the masked PCI-X bus speed. */
-#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
-#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
-#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
+#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /*PCI-X bus speed 100-133 MHz*/
#define SPEED_10 10
#define SPEED_100 100
@@ -532,6 +549,11 @@
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
/* LED Control */
+#define E1000_PHY_LED0_MODE_MASK 0x00000007
+#define E1000_PHY_LED0_IVRT 0x00000008
+#define E1000_PHY_LED0_BLINK 0x00000010
+#define E1000_PHY_LED0_MASK 0x0000001F
+
#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020
@@ -690,7 +712,7 @@
#define E1000_KABGTXD_BGSQLBIAS 0x00050000
/* PBA constants */
-#define E1000_PBA_6K 0x0006 /* 6KB */
+#define E1000_PBA_6K 0x0006 /* 6KB */
#define E1000_PBA_8K 0x0008 /* 8KB */
#define E1000_PBA_10K 0x000A /* 10KB */
#define E1000_PBA_12K 0x000C /* 12KB */
@@ -725,6 +747,8 @@
#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
+
/* Interrupt Cause Read */
#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
@@ -749,7 +773,7 @@
* should claim the interrupt */
#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */
#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* Q0 Tx desc FIFO parity error */
-#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */
+#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */
#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* Q1 Rx desc FIFO parity error */
#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* Q1 Tx desc FIFO parity error */
@@ -769,9 +793,9 @@
/* PBA ECC Register */
#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
-#define E1000_PBA_ECC_CORR_EN 0x00000001 /* Enable ECC error correction */
+#define E1000_PBA_ECC_CORR_EN 0x00000001 /* Enable ECC error correction */
#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
-#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 on ECC error */
+#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 on ECC error */
/* Extended Interrupt Cause Read */
#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
@@ -817,7 +841,7 @@
E1000_IMS_LSC)
/* Interrupt Mask Set */
-#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */
#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
@@ -871,7 +895,7 @@
#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
/* Interrupt Cause Set */
-#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_ICS_TXDW E1000_ICR_TXDW /* Tx desc written back */
#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
@@ -1012,6 +1036,7 @@
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
+
/* PCI Express Control */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
@@ -1019,6 +1044,10 @@
#define E1000_GCR_TXD_NO_SNOOP 0x00000008
#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define E1000_GCR_CAP_VER2 0x00040000
#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
E1000_GCR_RXDSCW_NO_SNOOP | \
@@ -1099,7 +1128,7 @@
/* 0=DTE device */
#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
/* 0=Configure PHY as Slave */
-#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
+#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
/* 0=Automatic Master/Slave config */
#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
@@ -1109,7 +1138,7 @@
/* 1000BASE-T Status Register */
#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
-#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
@@ -1134,6 +1163,8 @@
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
+
/* NVM Control */
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
@@ -1168,7 +1199,7 @@
#define E1000_NVM_SWDPIN0 0x0001 /* SWDPIN 0 NVM Value */
#define E1000_NVM_LED_LOGIC 0x0020 /* Led Logic Word */
-#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
#define E1000_NVM_RW_REG_START 1 /* Start operation */
#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
@@ -1194,8 +1225,8 @@
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
-#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
-#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
/* Mask bits for fields in Word 0x0f of the NVM */
#define NVM_WORD0F_PAUSE_MASK 0x3000
@@ -1273,6 +1304,7 @@
#define PCIX_STATUS_REGISTER_HI 0xEA
#define PCI_HEADER_TYPE_REGISTER 0x0E
#define PCIE_LINK_STATUS 0x12
+#define PCIE_DEVICE_CONTROL2 0x28
#define PCIX_COMMAND_MMRBC_MASK 0x000C
#define PCIX_COMMAND_MMRBC_SHIFT 0x2
@@ -1284,6 +1316,7 @@
#define PCI_HEADER_TYPE_MULTIFUNC 0x80
#define PCIE_LINK_WIDTH_MASK 0x3F0
#define PCIE_LINK_WIDTH_SHIFT 4
+#define PCIE_DEVICE_CONTROL2_16ms 0x0005
#ifndef ETH_ADDR_LEN
#define ETH_ADDR_LEN 6
@@ -1311,6 +1344,8 @@
#define IFE_C_E_PHY_ID 0x02A80310
#define BME1000_E_PHY_ID 0x01410CB0
#define BME1000_E_PHY_ID_R2 0x01410CB1
+#define I82577_E_PHY_ID 0x01540050
+#define I82578_E_PHY_ID 0x004DD040
#define IGP04E1000_E_PHY_ID 0x02A80391
#define M88_VENDOR 0x0141
@@ -1330,11 +1365,11 @@
/* M88E1000 PHY Specific Control Register */
#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
-#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
/* 1=CLK125 low, 0=CLK125 toggling */
#define M88E1000_PSCR_CLK125_DISABLE 0x0010
-#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
/* Manual MDI configuration */
#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
@@ -1350,7 +1385,7 @@
#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100
#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
-#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */
/* M88E1000 PHY Specific Status Register */
#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
@@ -1418,6 +1453,9 @@
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00
+#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020
+#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C
+
/* BME1000 PHY Specific Control Register */
#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
@@ -1536,4 +1574,6 @@
#define E1000_LSECRXCTRL_RP 0x00000080
#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+
+
#endif /* _E1000_DEFINES_H_ */
diff --git a/sys/dev/e1000/e1000_hw.h b/sys/dev/e1000/e1000_hw.h
index d92ea40..6afa4fbb 100644
--- a/sys/dev/e1000/e1000_hw.h
+++ b/sys/dev/e1000/e1000_hw.h
@@ -95,6 +95,7 @@ struct e1000_hw;
#define E1000_DEV_ID_82573L 0x109A
#define E1000_DEV_ID_82574L 0x10D3
#define E1000_DEV_ID_82574LA 0x10F6
+#define E1000_DEV_ID_82583V 0x150C
#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
@@ -120,11 +121,16 @@ struct e1000_hw;
#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
+#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
+#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
+#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
#define E1000_DEV_ID_82576_NS 0x150A
+#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
#define E1000_DEV_ID_82575EB_COPPER 0x10A7
#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
@@ -159,10 +165,12 @@ enum e1000_mac_type {
e1000_82572,
e1000_82573,
e1000_82574,
+ e1000_82583,
e1000_80003es2lan,
e1000_ich8lan,
e1000_ich9lan,
e1000_ich10lan,
+ e1000_pchlan,
e1000_82575,
e1000_82576,
e1000_num_macs /* List is 1-based, so subtract 1 for TRUE count. */
@@ -203,6 +211,8 @@ enum e1000_phy_type {
e1000_phy_igp_3,
e1000_phy_ife,
e1000_phy_bm,
+ e1000_phy_82578,
+ e1000_phy_82577,
e1000_phy_vf,
};
@@ -647,9 +657,9 @@ struct e1000_mac_info {
u16 ifs_ratio;
u16 ifs_step_size;
u16 mta_reg_count;
-#define MAX_MTA_REG 128 /* this must be the maximum size of the MTA register
- * table in all supported adapters
- */
+
+ /* Maximum size of the MTA register table in all supported adapters */
+ #define MAX_MTA_REG 128
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
@@ -755,6 +765,7 @@ struct e1000_dev_spec_82543 {
struct e1000_dev_spec_82571 {
bool laa_is_present;
+ u32 smb_counter;
};
struct e1000_shadow_ram {
@@ -823,6 +834,7 @@ struct e1000_hw {
void e1000_pci_clear_mwi(struct e1000_hw *hw);
void e1000_pci_set_mwi(struct e1000_hw *hw);
s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
diff --git a/sys/dev/e1000/e1000_ich8lan.c b/sys/dev/e1000/e1000_ich8lan.c
index 1225e55..540f577 100644
--- a/sys/dev/e1000/e1000_ich8lan.c
+++ b/sys/dev/e1000/e1000_ich8lan.c
@@ -54,19 +54,22 @@
* 82567LF-3 Gigabit Network Connection
* 82567LM-3 Gigabit Network Connection
* 82567LM-4 Gigabit Network Connection
+ * 82577LM Gigabit Network Connection
+ * 82577LC Gigabit Network Connection
+ * 82578DM Gigabit Network Connection
+ * 82578DC Gigabit Network Connection
*/
#include "e1000_api.h"
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
+static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
-static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
-static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw);
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
@@ -81,6 +84,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
u16 *data);
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
@@ -91,6 +95,10 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
@@ -155,6 +163,55 @@ union ich8_hws_flash_regacc {
};
/**
+ * e1000_init_phy_params_pchlan - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_init_phy_params_pchlan");
+
+ phy->addr = 1;
+ phy->reset_delay_us = 100;
+
+ phy->ops.acquire = e1000_acquire_swflag_ich8lan;
+ phy->ops.check_polarity = e1000_check_polarity_ife;
+ phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
+ phy->ops.get_info = e1000_get_phy_info_ich8lan;
+ phy->ops.read_reg = e1000_read_phy_reg_hv;
+ phy->ops.release = e1000_release_swflag_ich8lan;
+ phy->ops.reset = e1000_phy_hw_reset_ich8lan;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
+ phy->ops.write_reg = e1000_write_phy_reg_hv;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ phy->id = e1000_phy_unknown;
+ e1000_get_phy_id(hw);
+ phy->type = e1000_get_phy_type_from_id(phy->id);
+
+ if (phy->type == e1000_phy_82577) {
+ phy->ops.check_polarity = e1000_check_polarity_82577;
+ phy->ops.force_speed_duplex =
+ e1000_phy_force_speed_duplex_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_info = e1000_get_phy_info_82577;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ }
+
+ return ret_val;
+}
+
+/**
* e1000_init_phy_params_ich8lan - Initialize PHY function pointers
* @hw: pointer to the HW structure
*
@@ -172,9 +229,9 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
phy->reset_delay_us = 100;
phy->ops.acquire = e1000_acquire_swflag_ich8lan;
- phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
+ phy->ops.check_polarity = e1000_check_polarity_ife;
phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
- phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
phy->ops.get_info = e1000_get_phy_info_ich8lan;
@@ -250,6 +307,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ union ich8_hws_flash_status hsfsts;
u32 gfpreg, sector_base_addr, sector_end_addr;
s32 ret_val = E1000_SUCCESS;
u16 i;
@@ -288,6 +346,20 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);
+ /*
+ * Make sure the flash bank size does not overwrite the 4k
+ * sector ranges. We may have 64k allotted to us but we only care
+ * about the first two 4k sectors. Therefore, if we have anything less
+ * than 64k set in the HSFSTS register, we will reduce the bank size
+ * down to 4k and let the rest remain unused. If berasesz == 3, we are
+ * working in 64k mode; otherwise we are not.
+ */
+ if (nvm->flash_bank_size > E1000_SHADOW_RAM_WORDS) {
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.berasesz != 3)
+ nvm->flash_bank_size = E1000_SHADOW_RAM_WORDS;
+ }
+
nvm->word_size = E1000_SHADOW_RAM_WORDS;
/* Clear shadow ram */
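For context on the berasesz check above: the erase routine later in this patch maps the HSFSTS erase-block-size field to the actual flash segment size. A minimal sketch of that mapping, using the ICH_FLASH_SEG_SIZE_* constants from e1000_ich8lan.h (the 8 KB case is inferred from the ICH9-only branch shown further down and is an assumption here):

    /* Sketch only -- not part of the patch. */
    static u32 berasesz_to_segment_size(struct e1000_hw *hw, u8 berasesz)
    {
    	switch (berasesz) {
    	case 1:
    		return ICH_FLASH_SEG_SIZE_4K;	/* 4 KB sectors */
    	case 2:
    		/* assumed: 8 KB segments, valid on ICH9 only */
    		return (hw->mac.type == e1000_ich9lan) ?
    		    ICH_FLASH_SEG_SIZE_8K : 0;
    	case 3:
    		return ICH_FLASH_SEG_SIZE_64K;	/* 64 KB sectors */
    	default:
    		return 0;			/* unsupported */
    	}
    }

This is why the bank size is only left above E1000_SHADOW_RAM_WORDS when berasesz == 3, i.e. true 64k mode.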
@@ -319,9 +391,7 @@ out:
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
-#if defined(NAHUM4) && !defined(NO_PCH_A_SUPPORT)
u16 pci_cfg;
-#endif
DEBUGFUNC("e1000_init_mac_params_ich8lan");
@@ -383,6 +453,20 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.led_on = e1000_led_on_ich8lan;
mac->ops.led_off = e1000_led_off_ich8lan;
break;
+ case e1000_pchlan:
+ /* save PCH revision_id */
+ e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
+ hw->revision_id = (u8)(pci_cfg &= 0x000F);
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_pchlan;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_pchlan;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_pchlan;
+ mac->ops.led_off = e1000_led_off_pchlan;
+ break;
default:
break;
}
@@ -407,7 +491,18 @@ void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
- hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
+ break;
+ case e1000_pchlan:
+ hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
+ break;
+ default:
+ break;
+ }
}
/**
@@ -427,18 +522,21 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
while (timeout) {
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
- extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
- E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
- extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
- if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
- break;
+ if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) {
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
+ }
msec_delay_irq(1);
timeout--;
}
if (!timeout) {
- DEBUGOUT("FW or HW has locked the resource for too long.\n");
+ DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
ret_val = -E1000_ERR_CONFIG;
@@ -511,77 +609,152 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
}
/**
- * e1000_phy_force_speed_duplex_ich8lan - Force PHY speed & duplex
+ * e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
* @hw: pointer to the HW structure
- *
- * Forces the speed and duplex settings of the PHY.
- * This is a function pointer entry point only called by
- * PHY setup routines.
**/
-static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
+s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data;
- bool link;
+ if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
+ return E1000_SUCCESS;
- DEBUGFUNC("e1000_phy_force_speed_duplex_ich8lan");
+ return hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444);
+}
- if (phy->type != e1000_phy_ife) {
- ret_val = e1000_phy_force_speed_duplex_igp(hw);
- goto out;
+/**
+ * e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
+ * done after every PHY reset.
+ **/
+static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->mac.type != e1000_pchlan)
+ return ret_val;
+
+ /* Hanksville M Phy init for IEEE. */
+ if ((hw->revision_id == 2) &&
+ (hw->phy.type == e1000_phy_82577) &&
+ ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
+ hw->phy.ops.write_reg(hw, 0x10, 0x8823);
+ hw->phy.ops.write_reg(hw, 0x11, 0x0018);
+ hw->phy.ops.write_reg(hw, 0x10, 0x8824);
+ hw->phy.ops.write_reg(hw, 0x11, 0x0016);
+ hw->phy.ops.write_reg(hw, 0x10, 0x8825);
+ hw->phy.ops.write_reg(hw, 0x11, 0x001A);
+ hw->phy.ops.write_reg(hw, 0x10, 0x888C);
+ hw->phy.ops.write_reg(hw, 0x11, 0x0007);
+ hw->phy.ops.write_reg(hw, 0x10, 0x888D);
+ hw->phy.ops.write_reg(hw, 0x11, 0x0007);
+ hw->phy.ops.write_reg(hw, 0x10, 0x888E);
+ hw->phy.ops.write_reg(hw, 0x11, 0x0007);
+ hw->phy.ops.write_reg(hw, 0x10, 0x8827);
+ hw->phy.ops.write_reg(hw, 0x11, 0x0001);
+ hw->phy.ops.write_reg(hw, 0x10, 0x8835);
+ hw->phy.ops.write_reg(hw, 0x11, 0x0001);
+ hw->phy.ops.write_reg(hw, 0x10, 0x8834);
+ hw->phy.ops.write_reg(hw, 0x11, 0x0001);
+ hw->phy.ops.write_reg(hw, 0x10, 0x8833);
+ hw->phy.ops.write_reg(hw, 0x11, 0x0002);
}
- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
- if (ret_val)
- goto out;
+ if (((hw->phy.type == e1000_phy_82577) &&
+ ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
+ ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
+ /* Disable generation of early preamble */
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
+ if (ret_val)
+ return ret_val;
- e1000_phy_force_speed_duplex_setup(hw, &data);
+ /* Preamble tuning for SSC */
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
+ if (ret_val)
+ return ret_val;
+ }
- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
- if (ret_val)
- goto out;
+ if (hw->phy.type == e1000_phy_82578) {
+ if (hw->revision_id < 3) {
+ /* PHY config */
+ ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29,
+ 0x66C0);
+ if (ret_val)
+ return ret_val;
- /* Disable MDI-X support for 10/100 */
- ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
- if (ret_val)
- goto out;
+ /* PHY config */
+ ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E,
+ 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ }
- data &= ~IFE_PMC_AUTO_MDIX;
- data &= ~IFE_PMC_FORCE_MDIX;
+ /*
+ * Return registers to default by doing a soft reset then
+ * writing 0x3140 to the control register.
+ */
+ if (hw->phy.revision < 2) {
+ e1000_phy_sw_reset_generic(hw);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
+ 0x3140);
+ }
+ }
- ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
- if (ret_val)
- goto out;
+ if ((hw->revision_id == 2) &&
+ (hw->phy.type == e1000_phy_82577) &&
+ ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
+ /*
+ * Workaround for OEM (GbE) not operating after reset -
+ * restart AN (twice)
+ */
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
+ if (ret_val)
+ return ret_val;
+ }
- DEBUGOUT1("IFE PMC: %X\n", data);
+ /* Select page 0 */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ hw->phy.addr = 1;
+ e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release(hw);
- usec_delay(1);
+ return ret_val;
+}
- if (phy->autoneg_wait_to_complete) {
- DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
+/**
+ * e1000_lan_init_done_ich8lan - Check for PHY config completion
+ * @hw: pointer to the HW structure
+ *
+ * Check the appropriate indication the MAC has finished configuring the
+ * PHY after a software reset.
+ **/
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
+{
+ u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
- ret_val = e1000_phy_has_link_generic(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
- if (ret_val)
- goto out;
+ DEBUGFUNC("e1000_lan_init_done_ich8lan");
- if (!link)
- DEBUGOUT("Link taking longer than expected.\n");
+ /* Wait for basic configuration to complete before proceeding */
+ do {
+ data = E1000_READ_REG(hw, E1000_STATUS);
+ data &= E1000_STATUS_LAN_INIT_DONE;
+ usec_delay(100);
+ } while ((!data) && --loop);
- /* Try once more */
- ret_val = e1000_phy_has_link_generic(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
- if (ret_val)
- goto out;
- }
+ /*
+ * If basic configuration is incomplete before the above loop
+ * count reaches 0, loading the configuration from NVM will
+ * leave the PHY in a bad state, possibly resulting in no link.
+ */
+ if (loop == 0)
+ DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
-out:
- return ret_val;
+ /* Clear the Init Done bit for the next init event */
+ data = E1000_READ_REG(hw, E1000_STATUS);
+ data &= ~E1000_STATUS_LAN_INIT_DONE;
+ E1000_WRITE_REG(hw, E1000_STATUS, data);
}
/**
@@ -597,7 +770,6 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
s32 ret_val;
- u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
@@ -606,6 +778,15 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto out;
+ /* Allow time for h/w to get to a quiescent state after reset */
+ msec_delay(10);
+
+ if (hw->mac.type == e1000_pchlan) {
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ goto out;
+ }
+
/*
* Initialize the PHY from the NVM on ICH platforms. This
* is needed due to an issue where the NVM configuration is
@@ -625,25 +806,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
if (!(data & sw_cfg_mask))
goto out;
- /* Wait for basic configuration completes before proceeding*/
- do {
- data = E1000_READ_REG(hw, E1000_STATUS);
- data &= E1000_STATUS_LAN_INIT_DONE;
- usec_delay(100);
- } while ((!data) && --loop);
-
- /*
- * If basic configuration is incomplete before the above loop
- * count reaches 0, loading the configuration from NVM will
- * leave the PHY in a bad state possibly resulting in no link.
- */
- if (loop == 0)
- DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
-
- /* Clear the Init Done bit for the next init event */
- data = E1000_READ_REG(hw, E1000_STATUS);
- data &= ~E1000_STATUS_LAN_INIT_DONE;
- E1000_WRITE_REG(hw, E1000_STATUS, data);
+ /* Wait for basic configuration to complete before proceeding */
+ e1000_lan_init_done_ich8lan(hw);
/*
* Make sure HW does not configure LCD from PHY
@@ -714,6 +878,8 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
break;
case e1000_phy_igp_3:
case e1000_phy_bm:
+ case e1000_phy_82578:
+ case e1000_phy_82577:
ret_val = e1000_get_phy_info_igp(hw);
break;
default:
@@ -757,7 +923,7 @@ static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
? FALSE : TRUE;
if (phy->polarity_correction) {
- ret_val = e1000_check_polarity_ife_ich8lan(hw);
+ ret_val = e1000_check_polarity_ife(hw);
if (ret_val)
goto out;
} else {
@@ -783,43 +949,6 @@ out:
}
/**
- * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY
- * @hw: pointer to the HW structure
- *
- * Polarity is determined on the polarity reversal feature being enabled.
- * This function is only called by other family-specific
- * routines.
- **/
-static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_data, offset, mask;
-
- DEBUGFUNC("e1000_check_polarity_ife_ich8lan");
-
- /*
- * Polarity is determined based on the reversal feature being enabled.
- */
- if (phy->polarity_correction) {
- offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
- mask = IFE_PESC_POLARITY_REVERSED;
- } else {
- offset = IFE_PHY_SPECIAL_CONTROL;
- mask = IFE_PSC_FORCE_POLARITY;
- }
-
- ret_val = phy->ops.read_reg(hw, offset, &phy_data);
-
- if (!ret_val)
- phy->cable_polarity = (phy_data & mask)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
-
- return ret_val;
-}
-
-/**
* e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
* @hw: pointer to the HW structure
* @active: TRUE to enable LPLU, FALSE to disable
@@ -850,12 +979,14 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+ if (phy->type != e1000_phy_igp_3)
+ goto out;
+
/*
* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
- if ((hw->mac.type == e1000_ich8lan) &&
- (hw->phy.type == e1000_phy_igp_3))
+ if (hw->mac.type == e1000_ich8lan)
e1000_gig_downshift_workaround_ich8lan(hw);
/* When LPLU is enabled, we should disable SmartSpeed */
@@ -872,6 +1003,9 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+ if (phy->type != e1000_phy_igp_3)
+ goto out;
+
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
@@ -938,6 +1072,10 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (!active) {
phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ goto out;
+
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
@@ -977,12 +1115,14 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+ if (phy->type != e1000_phy_igp_3)
+ goto out;
+
/*
* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
- if ((hw->mac.type == e1000_ich8lan) &&
- (hw->phy.type == e1000_phy_igp_3))
+ if (hw->mac.type == e1000_ich8lan)
e1000_gig_downshift_workaround_ich8lan(hw);
/* When LPLU is enabled, we should disable SmartSpeed */
@@ -1818,7 +1958,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
break;
case 1:
sector_size = ICH_FLASH_SEG_SIZE_4K;
- iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K;
+ iteration = 1;
break;
case 2:
if (hw->mac.type == e1000_ich9lan) {
@@ -1831,7 +1971,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
break;
case 3:
sector_size = ICH_FLASH_SEG_SIZE_64K;
- iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K;
+ iteration = 1;
break;
default:
ret_val = -E1000_ERR_NVM;
@@ -1922,6 +2062,81 @@ out:
}
/**
+ * e1000_id_led_init_pchlan - store LED configurations
+ * @hw: pointer to the HW structure
+ *
+ * PCH does not control LEDs via the LEDCTL register; rather, it uses
+ * the PHY LED configuration register.
+ *
+ * PCH also does not have an "always on" or "always off" mode, which
+ * complicates the ID feature. Instead of using the "on" mode to indicate
+ * in ledctl_mode2 which LEDs to use for ID (see e1000_id_led_init_generic()),
+ * use "link_up" mode. The LEDs will still ID on request if there is no
+ * link, based on logic in e1000_led_[on|off]_pchlan().
+ **/
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+ u16 data, i, temp, shift;
+
+ DEBUGFUNC("e1000_id_led_init_pchlan");
+
+ /* Get default ID LED modes */
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ goto out;
+
+ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
+ shift = (i * 5);
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_on << shift);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_on << shift);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_get_bus_info_ich8lan - Get/Set the bus type and width
* @hw: pointer to the HW structure
*
@@ -1996,6 +2211,13 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = E1000_READ_REG(hw, E1000_CTRL);
if (!hw->phy.ops.check_reset_block(hw) && !hw->phy.reset_disable) {
+ /* Clear PHY Reset Asserted bit */
+ if (hw->mac.type >= e1000_pchlan) {
+ u32 status = E1000_READ_REG(hw, E1000_STATUS);
+ E1000_WRITE_REG(hw, E1000_STATUS, status &
+ ~E1000_STATUS_PHYRA);
+ }
+
/*
* PHY HW reset requires MAC CORE reset at the same
* time to make sure the interface between MAC and the
@@ -2008,14 +2230,24 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
msec_delay(20);
- ret_val = e1000_get_auto_rd_done_generic(hw);
- if (ret_val) {
- /*
- * When auto config read does not complete, do not
- * return with an error. This can happen in situations
- * where there is no eeprom and prevents getting link.
- */
- DEBUGOUT("Auto Read Done did not complete\n");
+ if (!ret_val)
+ e1000_release_swflag_ich8lan(hw);
+
+ if (ctrl & E1000_CTRL_PHY_RST)
+ ret_val = hw->phy.ops.get_cfg_done(hw);
+
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ }
}
E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
@@ -2025,6 +2257,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
kab |= E1000_KABGTXD_BGSQLBIAS;
E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
+ if (hw->mac.type == e1000_pchlan)
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+
return ret_val;
}
@@ -2065,6 +2300,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
for (i = 0; i < mac->mta_reg_count; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+ /*
+ * The 82578 Rx buffer will stall if wakeup is enabled in the host and
+ * the ME. Reading the BM_WUC register will clear the host wakeup bit.
+ * Reset the PHY after disabling host wakeup to reset the Rx buffer.
+ */
+ if (hw->phy.type == e1000_phy_82578) {
+ hw->phy.ops.read_reg(hw, BM_WUC, &i);
+ ret_val = e1000_phy_hw_reset_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Setup link and flow control */
ret_val = mac->ops.setup_link(hw);
@@ -2122,6 +2369,9 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Extended Device Control */
reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
reg |= (1 << 22);
+ /* Enable PHY low-power state when MAC is at D3 w/o WoL */
+ if (hw->mac.type >= e1000_pchlan)
+ reg |= E1000_CTRL_EXT_PHYPDEN;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* Transmit Descriptor Control 0 */
@@ -2202,6 +2452,14 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
goto out;
E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ ret_val = hw->phy.ops.write_reg(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 27),
+ hw->fc.pause_time);
+ if (ret_val)
+ goto out;
+ }
ret_val = e1000_set_fc_watermarks_generic(hw);
@@ -2235,16 +2493,19 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
* and increase the max iterations when polling the phy;
* this fixes erroneous timeouts at 10Mbps.
*/
- ret_val = e1000_write_kmrn_reg_generic(hw, GG82563_REG(0x34, 4),
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_TIMEOUTS,
0xFFFF);
if (ret_val)
goto out;
- ret_val = e1000_read_kmrn_reg_generic(hw, GG82563_REG(0x34, 9),
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
&reg_data);
if (ret_val)
goto out;
reg_data |= 0x3F;
- ret_val = e1000_write_kmrn_reg_generic(hw, GG82563_REG(0x34, 9),
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
reg_data);
if (ret_val)
goto out;
@@ -2256,10 +2517,16 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
goto out;
break;
case e1000_phy_bm:
+ case e1000_phy_82578:
ret_val = e1000_copper_link_setup_m88(hw);
if (ret_val)
goto out;
break;
+ case e1000_phy_82577:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ goto out;
+ break;
case e1000_phy_ife:
ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
&reg_data);
@@ -2543,10 +2810,15 @@ void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_ich9lan:
case e1000_ich10lan:
+ case e1000_pchlan:
phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
E1000_PHY_CTRL_GBE_DISABLE;
E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ /* Workaround SWFLAG unexpectedly set during S0->Sx */
+ if (hw->mac.type == e1000_pchlan)
+ usec_delay(500);
default:
break;
}
@@ -2619,6 +2891,100 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_setup_led_pchlan - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use.
+ **/
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_setup_led_pchlan");
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
+ (u16)hw->mac.ledctl_mode1);
+}
+
+/**
+ * e1000_cleanup_led_pchlan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_cleanup_led_pchlan");
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
+ (u16)hw->mac.ledctl_default);
+}
+
+/**
+ * e1000_led_on_pchlan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode2;
+ u32 i, led;
+
+ DEBUGFUNC("e1000_led_on_pchlan");
+
+ /*
+ * If no link, then turn LED on by setting the invert bit
+ * for each LED whose mode is "link_up" in ledctl_mode2.
+ */
+ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_led_off_pchlan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode1;
+ u32 i, led;
+
+ DEBUGFUNC("e1000_led_off_pchlan");
+
+ /*
+ * If no link, then turn LED off by clearing the invert bit
+ * for each LED whose mode is "link_up" in ledctl_mode1.
+ */
+ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+}
+
+/**
* e1000_get_cfg_done_ich8lan - Read config done bit
* @hw: pointer to the HW structure
*
@@ -2633,10 +2999,21 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
u32 bank = 0;
+ if (hw->mac.type >= e1000_pchlan) {
+ u32 status = E1000_READ_REG(hw, E1000_STATUS);
+
+ if (status & E1000_STATUS_PHYRA) {
+ E1000_WRITE_REG(hw, E1000_STATUS, status &
+ ~E1000_STATUS_PHYRA);
+ } else
+ DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
+ }
+
e1000_get_cfg_done_generic(hw);
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
- if (hw->mac.type != e1000_ich10lan) {
+ if ((hw->mac.type != e1000_ich10lan) &&
+ (hw->mac.type != e1000_pchlan)) {
if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
(hw->phy.type == e1000_phy_igp_3)) {
e1000_phy_init_script_igp3(hw);
@@ -2678,6 +3055,8 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
**/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
+ u16 phy_data;
+
DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
e1000_clear_hw_cntrs_base_generic(hw);
@@ -2695,5 +3074,24 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
E1000_READ_REG(hw, E1000_IAC);
E1000_READ_REG(hw, E1000_ICRXOC);
+
+ /* Clear PHY statistics registers */
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
+ hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
+ }
}
diff --git a/sys/dev/e1000/e1000_ich8lan.h b/sys/dev/e1000/e1000_ich8lan.h
index 6316021..5416eeb 100644
--- a/sys/dev/e1000/e1000_ich8lan.h
+++ b/sys/dev/e1000/e1000_ich8lan.h
@@ -121,6 +121,25 @@
#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
+#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
+#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
+#define HV_SCC_UPPER PHY_REG(778, 16) /* Single Collision Count */
+#define HV_SCC_LOWER PHY_REG(778, 17)
+#define HV_ECOL_UPPER PHY_REG(778, 18) /* Excessive Collision Count */
+#define HV_ECOL_LOWER PHY_REG(778, 19)
+#define HV_MCC_UPPER PHY_REG(778, 20) /* Multiple Collision Count */
+#define HV_MCC_LOWER PHY_REG(778, 21)
+#define HV_LATECOL_UPPER PHY_REG(778, 23) /* Late Collision Count */
+#define HV_LATECOL_LOWER PHY_REG(778, 24)
+#define HV_COLC_UPPER PHY_REG(778, 25) /* Collision Count */
+#define HV_COLC_LOWER PHY_REG(778, 26)
+#define HV_DC_UPPER PHY_REG(778, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(778, 28)
+#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(778, 30)
+
/*
* Additional interrupts need to be handled for ICH family:
* DSW = The FW changed the status of the DISSW bit in FWSM
@@ -150,8 +169,6 @@ void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw);
-#if defined(HANKSVILLE_HW) && !defined(NO_PCH_A_SUPPORT)
s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
-#endif
#endif
diff --git a/sys/dev/e1000/e1000_mac.c b/sys/dev/e1000/e1000_mac.c
index 435d14e..db6e5f52 100644
--- a/sys/dev/e1000/e1000_mac.c
+++ b/sys/dev/e1000/e1000_mac.c
@@ -750,6 +750,12 @@ s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
mac->get_link_status = FALSE;
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1000_link_stall_workaround_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
/*
* Check if there was DownShift, must be checked
* immediately after link-up
diff --git a/sys/dev/e1000/e1000_osdep.c b/sys/dev/e1000/e1000_osdep.c
index b902685..feaf47a 100644
--- a/sys/dev/e1000/e1000_osdep.c
+++ b/sys/dev/e1000/e1000_osdep.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2008, Intel Corporation
+ Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -41,13 +41,13 @@
*/
void
-e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
+e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
pci_write_config(((struct e1000_osdep *)hw->back)->dev, reg, *value, 2);
}
void
-e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
+e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
*value = pci_read_config(((struct e1000_osdep *)hw->back)->dev, reg, 2);
}
@@ -70,12 +70,26 @@ e1000_pci_clear_mwi(struct e1000_hw *hw)
* Read the PCI Express capabilities
*/
int32_t
-e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
+e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
- u32 result;
+ device_t dev = ((struct e1000_osdep *)hw->back)->dev;
+ u32 offset;
- pci_find_extcap(((struct e1000_osdep *)hw->back)->dev,
- reg, &result);
- *value = (u16)result;
+ pci_find_extcap(dev, PCIY_EXPRESS, &offset);
+ *value = pci_read_config(dev, offset + reg, 2);
+ return (E1000_SUCCESS);
+}
+
+/*
+ * Write the PCI Express capabilities
+ */
+int32_t
+e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ device_t dev = ((struct e1000_osdep *)hw->back)->dev;
+ u32 offset;
+
+ pci_find_extcap(dev, PCIY_EXPRESS, &offset);
+ pci_write_config(dev, offset + reg, *value, 2);
return (E1000_SUCCESS);
}
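As a usage illustration for the two osdep helpers above (a sketch only, not part of the patch; the function and variable names below are hypothetical): the reg argument is an offset inside the PCI Express capability located by pci_find_extcap(), e.g. 0x08 for the Device Control register defined by the PCIe specification.

    /* Hypothetical example: read the PCIe Device Control register. */
    static void example_read_pcie_devctl(struct e1000_hw *hw)
    {
    	u16 devctl = 0;

    	/* 0x08 = Device Control offset within the PCIe capability */
    	if (e1000_read_pcie_cap_reg(hw, 0x08, &devctl) == E1000_SUCCESS)
    		DEBUGOUT1("PCIe Device Control: 0x%04x\n", devctl);
    }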
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index 751edbb..513f2e6 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -37,6 +37,10 @@
static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
u16 *data, bool read);
+static u32 e1000_get_phy_addr_for_hv_page(u32 page);
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read);
+
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] =
{ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
@@ -161,26 +165,46 @@ s32 e1000_get_phy_id(struct e1000_hw *hw)
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;
u16 phy_id;
+ u16 retry_count = 0;
DEBUGFUNC("e1000_get_phy_id");
if (!(phy->ops.read_reg))
goto out;
- ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
- if (ret_val)
- goto out;
+ while (retry_count < 2) {
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ goto out;
- phy->id = (u32)(phy_id << 16);
- usec_delay(20);
- ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
- if (ret_val)
- goto out;
+ phy->id = (u32)(phy_id << 16);
+ usec_delay(20);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ goto out;
+
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+ if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
+ goto out;
- phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
- phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+ /*
+ * If the PHY ID is still unknown, we may have an 82577 without link.
+ * We will try again after setting slow MDIO mode. No harm in trying
+ * again in this case since the PHY ID is unknown at this point anyway.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, TRUE);
+ if (ret_val)
+ goto out;
+ retry_count++;
+ }
out:
+ /* Revert to MDIO fast mode, if applicable */
+ if (retry_count)
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, FALSE);
+
return ret_val;
}
@@ -237,12 +261,10 @@ s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
E1000_WRITE_REG(hw, E1000_MDIC, mdic);
-#if defined(HANKSVILLE_HW) && !defined(NO_PCH_A_SUPPORT)
/* Workaround for Si errata */
- if ((hw->phy.type == e1000_phy_lsi) && (hw->revision_id <= 2 ))
+ if ((hw->phy.type == e1000_phy_82577) && (hw->revision_id <= 2))
msec_delay(10);
-#endif /* HANKSVILLE_HW && !NO_PCH_A_SUPPORT */
/*
* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
@@ -298,12 +320,10 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
E1000_WRITE_REG(hw, E1000_MDIC, mdic);
-#if defined(HANKSVILLE_HW) && !defined(NO_PCH_A_SUPPORT)
/* Workaround for Si errata */
- if ((hw->phy.type == e1000_phy_lsi) && (hw->revision_id <= 2))
+ if ((hw->phy.type == e1000_phy_82577) && (hw->revision_id <= 2))
msec_delay(10);
-#endif /* HANKSVILLE_HW && !NO_PCH_A_SUPPORT */
/*
* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
@@ -551,6 +571,50 @@ out:
}
/**
+ * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_setup_82577");
+
+ if (phy->reset_disable) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ /* Enable CRS on TX. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+ /* Enable downshift */
+ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+ ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Set number of link attempts before downshift */
+ ret_val = phy->ops.read_reg(hw, I82577_CTRL_REG, &phy_data);
+ if (ret_val)
+ goto out;
+ phy_data &= ~I82577_CTRL_DOWNSHIFT_MASK;
+ ret_val = phy->ops.write_reg(hw, I82577_CTRL_REG, phy_data);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link
* @hw: pointer to the HW structure
*
@@ -675,6 +739,21 @@ s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
goto out;
}
+ if (phy->type == e1000_phy_82578) {
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ /* 82578 PHY - set the downshift count to 1x. */
+ phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
+ phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ goto out;
+ }
+
out:
return ret_val;
}
@@ -1298,6 +1377,80 @@ out:
}
/**
+ * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ * @hw: pointer to the HW structure
+ *
+ * Forces the speed and duplex settings of the PHY.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
+
+ if (phy->type != e1000_phy_ife) {
+ ret_val = e1000_phy_force_speed_duplex_igp(hw);
+ goto out;
+ }
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
+ if (ret_val)
+ goto out;
+
+ e1000_phy_force_speed_duplex_setup(hw, &data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
+ if (ret_val)
+ goto out;
+
+ /* Disable MDI-X support for 10/100 */
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IFE_PMC_AUTO_MDIX;
+ data &= ~IFE_PMC_FORCE_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
+ if (ret_val)
+ goto out;
+
+ DEBUGOUT1("IFE PMC: %X\n", data);
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
* @hw: pointer to the HW structure
* @phy_ctrl: pointer to current value of PHY_CONTROL
@@ -1471,6 +1624,8 @@ s32 e1000_check_downshift_generic(struct e1000_hw *hw)
case e1000_phy_m88:
case e1000_phy_gg82563:
case e1000_phy_bm:
+ case e1000_phy_82578:
+ case e1000_phy_82577:
offset = M88E1000_PHY_SPEC_STATUS;
mask = M88E1000_PSSR_DOWNSHIFT;
break;
@@ -1572,6 +1727,41 @@ out:
}
/**
+ * e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Polarity is determined on the polarity reversal feature being enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ DEBUGFUNC("e1000_check_polarity_ife");
+
+ /*
+ * Polarity is determined based on the reversal feature being enabled.
+ */
+ if (phy->polarity_correction) {
+ offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+ mask = IFE_PESC_POLARITY_REVERSED;
+ } else {
+ offset = IFE_PHY_SPECIAL_CONTROL;
+ mask = IFE_PSC_FORCE_POLARITY;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->cable_polarity = (phy_data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+
+ return ret_val;
+}
+
+/**
* e1000_wait_autoneg_generic - Wait for auto-neg completion
* @hw: pointer to the HW structure
*
@@ -1635,9 +1825,16 @@ s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
* it across the board.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
- if (ret_val)
- break;
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val) {
+ /*
+ * If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ usec_delay(usec_interval);
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &phy_status);
+ }
if (ret_val)
break;
if (phy_status & MII_SR_LINK_STATUS)
@@ -2130,6 +2327,12 @@ enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
case BME1000_E_PHY_ID_R2:
phy_type = e1000_phy_bm;
break;
+ case I82578_E_PHY_ID:
+ phy_type = e1000_phy_82578;
+ break;
+ case I82577_E_PHY_ID:
+ phy_type = e1000_phy_82577;
+ break;
default:
phy_type = e1000_phy_unknown;
break;
@@ -2447,6 +2650,11 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
DEBUGFUNC("e1000_access_phy_wakeup_reg_bm");
+ /* Gig must be disabled for MDIO accesses to page 800 */
+ if ((hw->mac.type == e1000_pchlan) &&
+ (!(E1000_READ_REG(hw, E1000_PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+ DEBUGOUT("Attempting to access page 800 while gig enabled.\n");
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
DEBUGOUT("Could not acquire PHY\n");
@@ -2566,3 +2774,545 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
msec_delay(1);
}
+
+s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 data = 0;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
+ hw->phy.addr = 1;
+ ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ hw->phy.ops.release(hw);
+ return ret_val;
+ }
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_CS_CTRL1,
+ (0x2180 | (slow << 10)));
+
+ /* dummy read when reverting to fast mode - throw away result */
+ if (!slow)
+ e1000_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
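For reference, the value written above works out as follows (a worked example matching the comment's page 769, register 16 encoding):

    slow == TRUE:  0x2180 | (1 << 10) = 0x2580   /* slow MDIO mode */
    slow == FALSE: 0x2180 | (0 << 10) = 0x2180   /* fast MDIO mode */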
+
+/**
+ * e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Releases any acquired
+ * semaphore before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ bool in_slow_mode = FALSE;
+
+ DEBUGFUNC("e1000_read_phy_reg_hv");
+
+ /* Workaround failure in MDIO access while cable is disconnected */
+ if ((hw->phy.type == e1000_phy_82577) &&
+ !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, TRUE);
+ if (ret_val)
+ goto out;
+
+ in_slow_mode = TRUE;
+ }
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
+ data, TRUE);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ data, TRUE);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ if ((hw->phy.type != e1000_phy_82578) ||
+ ((reg != I82578_ADDR_REG) &&
+ (reg != I82578_ADDR_REG + 1))) {
+ u32 phy_addr = hw->phy.addr;
+
+ hw->phy.addr = 1;
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (page << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ hw->phy.ops.release(hw);
+ goto out;
+ }
+ hw->phy.addr = phy_addr;
+ }
+ }
+
+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+ data);
+ hw->phy.ops.release(hw);
+
+out:
+ /* Revert to MDIO fast mode, if applicable */
+ if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, FALSE);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Releases any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ bool in_slow_mode = FALSE;
+
+ DEBUGFUNC("e1000_write_phy_reg_hv");
+
+ /* Workaround failure in MDIO access while cable is disconnected */
+ if ((hw->phy.type == e1000_phy_82577) &&
+ !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, TRUE);
+ if (ret_val)
+ goto out;
+
+ in_slow_mode = TRUE;
+ }
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
+ &data, FALSE);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ &data, FALSE);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ /*
+ * Workaround MDIO accesses being disabled after entering IEEE Power
+ * Down (whenever bit 11 of the PHY Control register is set)
+ */
+ if ((hw->phy.type == e1000_phy_82578) &&
+ (hw->phy.revision >= 1) &&
+ (hw->phy.addr == 2) &&
+ ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
+ (data & (1 << 11))) {
+ u16 data2 = 0x7EFF;
+ hw->phy.ops.release(hw);
+ ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
+ &data2, FALSE);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ if ((hw->phy.type != e1000_phy_82578) ||
+ ((reg != I82578_ADDR_REG) &&
+ (reg != I82578_ADDR_REG + 1))) {
+ u32 phy_addr = hw->phy.addr;
+
+ hw->phy.addr = 1;
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (page << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ hw->phy.ops.release(hw);
+ goto out;
+ }
+ hw->phy.addr = phy_addr;
+ }
+ }
+
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+ data);
+ hw->phy.ops.release(hw);
+
+out:
+ /* Revert to MDIO fast mode, if applicable */
+ if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
+ ret_val = e1000_set_mdio_slow_mode_hv(hw, FALSE);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
+ * @page: page to be accessed
+ **/
+static u32 e1000_get_phy_addr_for_hv_page(u32 page)
+{
+ u32 phy_addr = 2;
+
+ if (page >= HV_INTC_FC_PAGE_START)
+ phy_addr = 1;
+
+ return phy_addr;
+}
+
+/**
+ * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to be read or written
+ * @read: determines if operation is read or write
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Releases any acquired
+ * semaphores before exiting. Note that the procedure to read these regs
+ * uses the address port and data port to read/write.
+ **/
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+ u32 addr_reg = 0;
+ u32 data_reg = 0;
+ u8 phy_acquired = 1;
+
+ DEBUGFUNC("e1000_access_phy_debug_regs_hv");
+
+ /* This accounts for the difference between the desktop and mobile PHYs */
+ addr_reg = (hw->phy.type == e1000_phy_82578) ?
+ I82578_ADDR_REG : I82577_ADDR_REG;
+ data_reg = addr_reg + 1;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ DEBUGOUT("Could not acquire PHY\n");
+ phy_acquired = 0;
+ goto out;
+ }
+
+ /* All operations in this function are phy address 2 */
+ hw->phy.addr = 2;
+
+ /* masking with 0x3F to remove the page from offset */
+ ret_val = e1000_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
+ if (ret_val) {
+ DEBUGOUT("Could not write PHY the HV address register\n");
+ goto out;
+ }
+
+ /* Read or write the data value next */
+ if (read)
+ ret_val = e1000_read_phy_reg_mdic(hw, data_reg, data);
+ else
+ ret_val = e1000_write_phy_reg_mdic(hw, data_reg, *data);
+
+ if (ret_val) {
+ DEBUGOUT("Could not read data value from HV data register\n");
+ goto out;
+ }
+
+out:
+ if (phy_acquired == 1)
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
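An illustrative caller sketch (not part of the patch; the page/register pair is chosen only for illustration): a read of a low-page register issued through phy.ops.read_reg on a PCH part is routed into the address/data port access above by e1000_read_phy_reg_hv(), because any page between 1 and HV_INTC_FC_PAGE_START - 1 takes that path.

    /* Sketch only: page 1, register 3 goes through the address/data port pair. */
    u16 val;
    s32 ret = hw->phy.ops.read_reg(hw, PHY_REG(1, 3), &val);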
+
+/**
+ * e1000_link_stall_workaround_hv - Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * This function works around a Si bug where the link partner can get
+ * a link up indication before the PHY does. If small packets are sent
+ * by the link partner they can be placed in the packet buffer without
+ * being properly accounted for by the PHY and will stall preventing
+ * further packets from being received. The workaround is to clear the
+ * packet buffer after the PHY detects link up.
+ **/
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_link_stall_workaround_hv");
+
+ if (hw->phy.type != e1000_phy_82578)
+ goto out;
+
+ /* Do not apply workaround if PHY loopback (PHY_CONTROL bit 14) is set */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+ if (data & PHY_CONTROL_LB)
+ goto out;
+
+ /* check if link is up and at 1Gbps */
+ ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
+ if (ret_val)
+ goto out;
+
+ data &= BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK;
+
+ if (data != (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ goto out;
+
+ msec_delay(200);
+
+ /* flush the packets in the fifo buffer */
+ ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
+ HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
+ HV_MUX_DATA_CTRL_GEN_TO_MAC);
+
+out:
+ return ret_val;
+}
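Using the BM_CS_STATUS bit definitions added later in this patch (LINK_UP 0x0400, RESOLVED 0x0800, SPEED_MASK 0xC000, SPEED_1000 0x8000), the status check above works out to:

    mask applied:   0x0400 | 0x0800 | 0xC000 = 0xCC00
    value required: 0x0400 | 0x0800 | 0x8000 = 0x8C00

so the packet-buffer flush is only performed when link is up, speed/duplex have resolved, and the resolved speed is 1000 Mb/s.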
+
+/**
+ * e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_check_polarity_82577");
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link and returns
+ * success if link-up is successful, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_82577");
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Clear Auto-Crossover to force MDI manually. 82577 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
+ phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ goto out;
+
+ DEBUGOUT1("I82577_PHY_CTRL_2: %X\n", phy_data);
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");
+
+ ret_val = e1000_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_82577");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ phy->polarity_correction = TRUE;
+
+ ret_val = e1000_check_polarity_82577(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+ if (ret_val)
+ goto out;
+
+ phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? TRUE : FALSE;
+
+ if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+ I82577_PHY_STATUS2_SPEED_1000MBPS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ goto out;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies the result is valid
+ * before placing it in the cable_length field.
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, length;
+
+ DEBUGFUNC("e1000_get_cable_length_82577");
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
+ length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+
+ if (length == E1000_CABLE_LENGTH_UNDEFINED)
+ ret_val = E1000_ERR_PHY;
+
+ phy->cable_length = length;
+
+out:
+ return ret_val;
+}
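A worked example of the extraction above, using a made-up register value:

    phy_data = 0x0124
    (0x0124 & I82577_DSTATUS_CABLE_LENGTH) >> I82577_DSTATUS_CABLE_LENGTH_SHIFT
        = (0x0124 & 0x03FC) >> 2 = 0x49 = 73  ->  phy->cable_length = 73

If the extracted field instead equals E1000_CABLE_LENGTH_UNDEFINED, the routine flags E1000_ERR_PHY.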
diff --git a/sys/dev/e1000/e1000_phy.h b/sys/dev/e1000/e1000_phy.h
index 3e52368..28ed0c1 100644
--- a/sys/dev/e1000/e1000_phy.h
+++ b/sys/dev/e1000/e1000_phy.h
@@ -43,12 +43,14 @@ s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_check_downshift_generic(struct e1000_hw *hw);
s32 e1000_check_polarity_m88(struct e1000_hw *hw);
s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+s32 e1000_check_polarity_ife(struct e1000_hw *hw);
s32 e1000_check_reset_block_generic(struct e1000_hw *hw);
s32 e1000_copper_link_autoneg(struct e1000_hw *hw);
s32 e1000_copper_link_setup_igp(struct e1000_hw *hw);
s32 e1000_copper_link_setup_m88(struct e1000_hw *hw);
s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
s32 e1000_get_cable_length_m88(struct e1000_hw *hw);
s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw);
s32 e1000_get_cfg_done_generic(struct e1000_hw *hw);
@@ -74,15 +76,24 @@ s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
s32 e1000_phy_init_script_igp3(struct e1000_hw *hw);
enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
-s32 e1000_determine_phy_address(struct e1000_hw *hw);
-s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
-s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_determine_phy_address(struct e1000_hw *hw);
+s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
void e1000_power_up_phy_copper(struct e1000_hw *hw);
void e1000_power_down_phy_copper(struct e1000_hw *hw);
-s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define E1000_MAX_PHY_ADDR 4
@@ -122,13 +133,47 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
(((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
~MAX_PHY_REG_ADDRESS)))
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
+#define I82577_CTRL_REG 23
+#define I82577_CTRL_DOWNSHIFT_MASK (7 << 10)
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_LBK_CTRL 19
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+#define I82577_PHY_STATUS2_SPEED_100MBPS 0x0100
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400
+#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
/* BM PHY Copper Specific Control 1 */
#define BM_CS_CTRL1 16
#define BM_CS_CTRL1_ENERGY_DETECT 0x0300 /* Enable Energy Detect */
-/* BM PHY Copper Specific States */
+/* BM PHY Copper Specific Status */
#define BM_CS_STATUS 17
#define BM_CS_STATUS_ENERGY_DETECT 0x0010 /* Energy Detect Status */
+#define BM_CS_STATUS_LINK_UP 0x0400
+#define BM_CS_STATUS_RESOLVED 0x0800
+#define BM_CS_STATUS_SPEED_MASK 0xC000
+#define BM_CS_STATUS_SPEED_1000 0x8000
#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
#define IGP01E1000_PHY_POLARITY_MASK 0x0078
@@ -149,7 +194,7 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
-#define IGP01E1000_PSSR_MDIX 0x0008
+#define IGP01E1000_PSSR_MDIX 0x0800
#define IGP01E1000_PSSR_SPEED_MASK 0xC000
#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
@@ -172,6 +217,8 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
#define E1000_KMRNCTRLSTA_REN 0x00200000
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
diff --git a/sys/dev/e1000/e1000_regs.h b/sys/dev/e1000/e1000_regs.h
index ac673a7..3a62d0a 100644
--- a/sys/dev/e1000/e1000_regs.h
+++ b/sys/dev/e1000/e1000_regs.h
@@ -58,6 +58,8 @@
#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
+#define E1000_SVCR 0x000F0
+#define E1000_SVT 0x000F4
#define E1000_RCTL 0x00100 /* Rx Control - RW */
#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
@@ -387,6 +389,7 @@
#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
#define E1000_SWSM 0x05B50 /* SW Semaphore */
#define E1000_FWSM 0x05B54 /* FW Semaphore */
+#define E1000_SWSM2 0x05B58 /* Driver-only SW semaphore (not used by BOOT agents) */
#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
@@ -455,6 +458,7 @@
#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 3d1d362..43990b6 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -39,7 +39,9 @@
#include <sys/param.h>
#include <sys/systm.h>
+#if __FreeBSD_version >= 800000
#include <sys/buf_ring.h>
+#endif
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
@@ -52,10 +54,8 @@
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
+#if __FreeBSD_version >= 700029
#include <sys/eventhandler.h>
-#ifdef EM_TIMESYNC
-#include <sys/ioccom.h>
-#include <sys/time.h>
#endif
#include <machine/bus.h>
#include <machine/resource.h>
@@ -94,7 +94,7 @@ int em_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char em_driver_version[] = "6.9.9";
+char em_driver_version[] = "6.9.14";
/*********************************************************************
@@ -178,6 +178,7 @@ static em_vendor_info_t em_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82583V, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
@@ -204,6 +205,7 @@ static em_vendor_info_t em_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82574LA, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
@@ -232,6 +234,11 @@ static int em_suspend(device_t);
static int em_resume(device_t);
static void em_start(struct ifnet *);
static void em_start_locked(struct ifnet *ifp);
+#if __FreeBSD_version >= 800000
+static int em_mq_start(struct ifnet *, struct mbuf *);
+static int em_mq_start_locked(struct ifnet *, struct mbuf *);
+static void em_qflush(struct ifnet *);
+#endif
static int em_ioctl(struct ifnet *, u_long, caddr_t);
static void em_watchdog(struct adapter *);
static void em_init(void *);
@@ -261,7 +268,7 @@ static void em_txeof(struct adapter *);
static void em_tx_purge(struct adapter *);
static int em_allocate_receive_structures(struct adapter *);
static int em_allocate_transmit_structures(struct adapter *);
-static int em_rxeof(struct adapter *, int, int *);
+static int em_rxeof(struct adapter *, int);
#ifndef __NO_STRICT_ALIGNMENT
static int em_fixup_rx(struct adapter *);
#endif
@@ -279,8 +286,11 @@ static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_update_link_status(struct adapter *);
static int em_get_buf(struct adapter *, int);
+#if __FreeBSD_version >= 700029
static void em_register_vlan(void *, struct ifnet *, u16);
static void em_unregister_vlan(void *, struct ifnet *, u16);
+static void em_setup_vlan_hw_support(struct adapter *);
+#endif
static int em_xmit(struct adapter *, struct mbuf **);
static void em_smartspeed(struct adapter *);
static int em_82547_fifo_workaround(struct adapter *, int);
@@ -307,12 +317,6 @@ static void em_get_hw_control(struct adapter *);
static void em_release_hw_control(struct adapter *);
static void em_enable_wakeup(device_t);
-#ifdef EM_TIMESYNC
-/* Precision Time sync support */
-static int em_tsync_init(struct adapter *);
-static void em_tsync_disable(struct adapter *);
-#endif
-
#ifdef EM_LEGACY_IRQ
static void em_intr(void *);
#else /* FAST IRQ */
@@ -404,6 +408,18 @@ static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
#endif
+/* Flow control setting - default to FULL */
+static int em_fc_setting = e1000_fc_full;
+TUNABLE_INT("hw.em.fc_setting", &em_fc_setting);
+
+/*
+** Shadow VFTA table; this is needed because
+** the real VLAN filter table gets cleared during
+** a soft reset and the driver needs to be able
+** to repopulate it.
+*/
+static u32 em_shadow_vfta[EM_VFTA_SIZE];
+
/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
@@ -796,11 +812,13 @@ em_attach(device_t dev)
else
adapter->pcix_82544 = FALSE;
+#if __FreeBSD_version >= 700029
/* Register for VLAN events */
adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
em_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
em_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
+#endif
/* Tell the stack that the interface is not active */
adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
@@ -869,6 +887,7 @@ em_detach(device_t dev)
em_release_manageability(adapter);
if (((adapter->hw.mac.type == e1000_82573) ||
+ (adapter->hw.mac.type == e1000_82583) ||
(adapter->hw.mac.type == e1000_ich8lan) ||
(adapter->hw.mac.type == e1000_ich10lan) ||
(adapter->hw.mac.type == e1000_ich9lan)) &&
@@ -884,11 +903,13 @@ em_detach(device_t dev)
EM_TX_UNLOCK(adapter);
EM_CORE_UNLOCK(adapter);
+#if __FreeBSD_version >= 700029
/* Unregister VLAN events */
if (adapter->vlan_attach != NULL)
EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
if (adapter->vlan_detach != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
+#endif
ether_ifdetach(adapter->ifp);
callout_drain(&adapter->timer);
@@ -898,7 +919,9 @@ em_detach(device_t dev)
bus_generic_detach(dev);
if_free(ifp);
+#if __FreeBSD_version >= 800000
drbr_free(adapter->br, M_DEVBUF);
+#endif
em_free_transmit_structures(adapter);
em_free_receive_structures(adapter);
@@ -950,6 +973,7 @@ em_suspend(device_t dev)
em_release_manageability(adapter);
if (((adapter->hw.mac.type == e1000_82573) ||
+ (adapter->hw.mac.type == e1000_82583) ||
(adapter->hw.mac.type == e1000_ich8lan) ||
(adapter->hw.mac.type == e1000_ich10lan) ||
(adapter->hw.mac.type == e1000_ich9lan)) &&
@@ -993,20 +1017,25 @@ em_resume(device_t dev)
* the packet is requeued.
**********************************************************************/
-#ifdef IFNET_BUF_RING
+#if __FreeBSD_version >= 800000
static int
-em_transmit_locked(struct ifnet *ifp, struct mbuf *m)
+em_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
{
struct adapter *adapter = ifp->if_softc;
- int error;
+ struct mbuf *next;
+ int error = E1000_SUCCESS;
EM_TX_LOCK_ASSERT(adapter);
+ /* To allow being called from a tasklet */
+ if (m == NULL)
+ goto process;
+
if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING)
|| (!adapter->link_active)) {
error = drbr_enqueue(ifp, adapter->br, m);
return (error);
- } else if (ADAPTER_RING_EMPTY(adapter) &&
+ } else if (drbr_empty(ifp, adapter->br) &&
(adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
if (em_xmit(adapter, &m)) {
if (m && (error = drbr_enqueue(ifp, adapter->br, m)) != 0)
@@ -1027,22 +1056,43 @@ em_transmit_locked(struct ifnet *ifp, struct mbuf *m)
} else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
return (error);
- if (!ADAPTER_RING_EMPTY(adapter))
- em_start_locked(ifp);
+process:
+ if (drbr_empty(ifp, adapter->br))
+ return (error);
+ /* Process the queue */
+ while (TRUE) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+ next = drbr_dequeue(ifp, adapter->br);
+ if (next == NULL)
+ break;
+ if (em_xmit(adapter, &next))
+ break;
+ ETHER_BPF_MTAP(ifp, next);
+ /* Set the watchdog */
+ adapter->watchdog_timer = EM_TX_TIMEOUT;
+ }
+
+ if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
return (0);
}
-
+
+/*
+** Multiqueue-capable stack interface; this is not
+** yet truly multiqueue, but that is coming...
+*/
static int
-em_transmit(struct ifnet *ifp, struct mbuf *m)
+em_mq_start(struct ifnet *ifp, struct mbuf *m)
{
struct adapter *adapter = ifp->if_softc;
int error = 0;
- if(EM_TX_TRYLOCK(adapter)) {
+ if (EM_TX_TRYLOCK(adapter)) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- error = em_transmit_locked(ifp, m);
+ error = em_mq_start_locked(ifp, m);
EM_TX_UNLOCK(adapter);
} else
error = drbr_enqueue(ifp, adapter->br, m);
@@ -1062,7 +1112,7 @@ em_qflush(struct ifnet *ifp)
if_qflush(ifp);
EM_TX_UNLOCK(adapter);
}
-#endif
+#endif /* __FreeBSD_version >= 800000 */
static void
em_start_locked(struct ifnet *ifp)
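The transmit path above follows a trylock-or-enqueue pattern: if the TX lock is free the frame is sent inline, otherwise it is parked on the buf_ring and drained later when em_mq_start_locked() is called with a NULL mbuf from the taskqueue. A minimal sketch of that shape (struct xx_softc, sc_tx_mtx, sc_br and xx_transmit_locked() are placeholder names, not the driver's own):

	static int
	xx_transmit(struct ifnet *ifp, struct mbuf *m)
	{
		/* xx_softc is assumed to carry the TX mutex and a buf_ring */
		struct xx_softc *sc = ifp->if_softc;

		if (mtx_trylock(&sc->sc_tx_mtx)) {
			/* Lock was free: transmit and drain inline */
			xx_transmit_locked(ifp, m);
			mtx_unlock(&sc->sc_tx_mtx);
			return (0);
		}
		/* Lock busy: park the mbuf; the taskqueue handler later
		 * calls xx_transmit_locked(ifp, NULL) and drains sc_br */
		return (drbr_enqueue(ifp, sc->sc_br, m));
	}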
@@ -1078,10 +1128,9 @@ em_start_locked(struct ifnet *ifp)
if (!adapter->link_active)
return;
- while ((adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)
- && (!ADAPTER_RING_EMPTY(adapter))) {
+ while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
- m_head = em_dequeue(ifp, adapter->br);
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
if (m_head == NULL)
break;
/*
@@ -1091,10 +1140,8 @@ em_start_locked(struct ifnet *ifp)
if (em_xmit(adapter, &m_head)) {
if (m_head == NULL)
break;
-#ifndef IFNET_BUF_RING
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
-#endif
break;
}
@@ -1104,9 +1151,10 @@ em_start_locked(struct ifnet *ifp)
/* Set timeout in case hardware has problems transmitting. */
adapter->watchdog_timer = EM_TX_TIMEOUT;
}
- if ((adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD))
+ if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ return;
}
static void
@@ -1195,6 +1243,7 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
break;
/* Adapters that do not support jumbo frames */
case e1000_82542:
+ case e1000_82583:
case e1000_ich8lan:
max_frame_size = ETHER_MAX_LEN;
break;
@@ -1320,70 +1369,6 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
break;
}
-#ifdef EM_TIMESYNC
- /*
- ** IOCTL support for Precision Time (IEEE 1588) Support
- */
- case EM_TIMESYNC_READTS:
- {
- u32 rx_ctl, tx_ctl;
- struct em_tsync_read *tdata;
-
- tdata = (struct em_tsync_read *) ifr->ifr_data;
-
- IOCTL_DEBUGOUT("Reading Timestamp\n");
-
- if (tdata->read_current_time) {
- getnanotime(&tdata->system_time);
- tdata->network_time = E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
- tdata->network_time |=
- (u64)E1000_READ_REG(&adapter->hw, E1000_SYSTIMH ) << 32;
- }
-
- rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
- tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
-
- IOCTL_DEBUGOUT1("RX_CTL value = %u\n", rx_ctl);
- IOCTL_DEBUGOUT1("TX_CTL value = %u\n", tx_ctl);
-
- if (rx_ctl & 0x1) {
- IOCTL_DEBUGOUT("RX timestamp is valid\n");
- u32 tmp;
- unsigned char *tmp_cp;
-
- tdata->rx_valid = 1;
- tdata->rx_stamp = E1000_READ_REG(&adapter->hw, E1000_RXSTMPL);
- tdata->rx_stamp |= (u64)E1000_READ_REG(&adapter->hw,
- E1000_RXSTMPH) << 32;
-
- tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRL);
- tmp_cp = (unsigned char *) &tmp;
- tdata->srcid[0] = tmp_cp[0];
- tdata->srcid[1] = tmp_cp[1];
- tdata->srcid[2] = tmp_cp[2];
- tdata->srcid[3] = tmp_cp[3];
- tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRH);
- tmp_cp = (unsigned char *) &tmp;
- tdata->srcid[4] = tmp_cp[0];
- tdata->srcid[5] = tmp_cp[1];
- tdata->seqid = tmp >> 16;
- tdata->seqid = htons(tdata->seqid);
- } else
- tdata->rx_valid = 0;
-
- if (tx_ctl & 0x1) {
- IOCTL_DEBUGOUT("TX timestamp is valid\n");
- tdata->tx_valid = 1;
- tdata->tx_stamp = E1000_READ_REG(&adapter->hw, E1000_TXSTMPL);
- tdata->tx_stamp |= (u64) E1000_READ_REG(&adapter->hw,
- E1000_TXSTMPH) << 32;
- } else
- tdata->tx_valid = 0;
-
- return (0);
- }
-#endif /* EM_TIMESYNC */
-
default:
error = ether_ioctl(ifp, command, data);
break;
@@ -1499,13 +1484,11 @@ em_init_locked(struct adapter *adapter)
pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
break;
case e1000_82574:
+ case e1000_82583:
pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
break;
case e1000_ich9lan:
case e1000_ich10lan:
-#define E1000_PBA_10K 0x000A
- pba = E1000_PBA_10K;
- break;
case e1000_ich8lan:
pba = E1000_PBA_8K;
break;
@@ -1549,14 +1532,17 @@ em_init_locked(struct adapter *adapter)
/* Setup VLAN support, basic and offload if available */
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
- if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
- ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) {
+#if __FreeBSD_version < 700029
+ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
u32 ctrl;
ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
ctrl |= E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
}
-
+#else
+ /* Use real VLAN Filter support */
+ em_setup_vlan_hw_support(adapter);
+#endif
/* Set hardware offload abilities */
ifp->if_hwassist = 0;
@@ -1625,13 +1611,6 @@ em_init_locked(struct adapter *adapter)
#endif /* DEVICE_POLLING */
em_enable_intr(adapter);
-#ifdef EM_TIMESYNC
- /* Initializae IEEE 1588 Precision Time hardware */
- if ((adapter->hw.mac.type == e1000_82574) ||
- (adapter->hw.mac.type == e1000_ich10lan))
- em_tsync_init(adapter);
-#endif
-
/* Don't reset the phy next time init gets called */
adapter->hw.phy.reset_disable = TRUE;
}
@@ -1656,17 +1635,13 @@ em_init(void *arg)
static int
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
- struct adapter *adapter;
- u32 reg_icr;
- int rx_npkts;
-
- adapter = ifp->if_softc;
- rx_npkts = 0;
+ struct adapter *adapter = ifp->if_softc;
+ u32 reg_icr, rx_done = 0;
EM_CORE_LOCK(adapter);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
EM_CORE_UNLOCK(adapter);
- return (rx_npkts);
+ return (rx_done);
}
if (cmd == POLL_AND_CHECK_STATUS) {
@@ -1681,15 +1656,19 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
}
EM_CORE_UNLOCK(adapter);
- em_rxeof(adapter, count, &rx_npkts);
+ rx_done = em_rxeof(adapter, count);
EM_TX_LOCK(adapter);
em_txeof(adapter);
-
- if (!ADAPTER_RING_EMPTY(adapter))
+#if __FreeBSD_version >= 800000
+ if (!drbr_empty(ifp, adapter->br))
+ em_mq_start_locked(ifp, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp);
+#endif
EM_TX_UNLOCK(adapter);
- return (rx_npkts);
+ return (rx_done);
}
#endif /* DEVICE_POLLING */
@@ -1713,6 +1692,8 @@ em_intr(void *arg)
EM_CORE_LOCK(adapter);
reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
+ if (reg_icr & E1000_ICR_RXO)
+ adapter->rx_overruns++;
if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
(adapter->hw.mac.type >= e1000_82571 &&
(reg_icr & E1000_ICR_INT_ASSERTED) == 0))
@@ -1721,12 +1702,6 @@ em_intr(void *arg)
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
goto out;
- EM_TX_LOCK(adapter);
- em_txeof(adapter);
- em_rxeof(adapter, -1, NULL);
- em_txeof(adapter);
- EM_TX_UNLOCK(adapter);
-
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
callout_stop(&adapter->timer);
adapter->hw.mac.get_link_status = 1;
@@ -1735,15 +1710,21 @@ em_intr(void *arg)
em_tx_purge(adapter);
callout_reset(&adapter->timer, hz,
em_local_timer, adapter);
+ goto out;
}
- if (reg_icr & E1000_ICR_RXO)
- adapter->rx_overruns++;
+ EM_TX_LOCK(adapter);
+ em_txeof(adapter);
+ em_rxeof(adapter, -1);
+ em_txeof(adapter);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
+ !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ em_start_locked(ifp);
+ EM_TX_UNLOCK(adapter);
+
out:
EM_CORE_UNLOCK(adapter);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
- !ADAPTER_RING_EMPTY(adapter))
- em_start(ifp);
+ return;
}
#else /* EM_FAST_IRQ, then fast interrupt routines only */
@@ -1776,13 +1757,18 @@ em_handle_rxtx(void *context, int pending)
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- if (em_rxeof(adapter, adapter->rx_process_limit, NULL) != 0)
+ if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
EM_TX_LOCK(adapter);
em_txeof(adapter);
- if (!ADAPTER_RING_EMPTY(adapter))
+#if __FreeBSD_version >= 800000
+ if (!drbr_empty(ifp, adapter->br))
+ em_mq_start_locked(ifp, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp);
+#endif
EM_TX_UNLOCK(adapter);
}
@@ -1887,7 +1873,7 @@ em_msix_rx(void *arg)
++adapter->rx_irq;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
- (em_rxeof(adapter, adapter->rx_process_limit, NULL) != 0))
+ (em_rxeof(adapter, adapter->rx_process_limit) != 0))
taskqueue_enqueue(adapter->tq, &adapter->rx_task);
/* Reenable this interrupt */
E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
@@ -1925,7 +1911,7 @@ em_handle_rx(void *context, int pending)
struct ifnet *ifp = adapter->ifp;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
- (em_rxeof(adapter, adapter->rx_process_limit, NULL) != 0))
+ (em_rxeof(adapter, adapter->rx_process_limit) != 0))
taskqueue_enqueue(adapter->tq, &adapter->rx_task);
}
@@ -1939,10 +1925,14 @@ em_handle_tx(void *context, int pending)
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if (!EM_TX_TRYLOCK(adapter))
return;
-
em_txeof(adapter);
- if (!ADAPTER_RING_EMPTY(adapter))
+#if __FreeBSD_version >= 800000
+ if (!drbr_empty(ifp, adapter->br))
+ em_mq_start_locked(ifp, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp);
+#endif
EM_TX_UNLOCK(adapter);
}
}
@@ -2201,14 +2191,7 @@ em_xmit(struct adapter *adapter, struct mbuf **m_headp)
tso_desc = TRUE;
} else
#endif
-#ifndef EM_TIMESYNC
- /*
- ** Timesync needs to check the packet header
- ** so call checksum code to do so, but don't
- ** penalize the code if not defined.
- */
if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
-#endif
em_transmit_checksum_setup(adapter, m_head,
&txd_upper, &txd_lower);
@@ -2365,11 +2348,6 @@ em_xmit(struct adapter *adapter, struct mbuf **m_headp)
m_head->m_pkthdr.len);
}
-#ifdef EM_TIMESYNC
- if (ctxd->upper.data & E1000_TXD_EXTCMD_TSTAMP) {
- HW_DEBUGOUT( "@@@ Timestamp bit is set in transmit descriptor\n" );
- }
-#endif
return (0);
}
@@ -2728,13 +2706,6 @@ em_stop(void *arg)
/* Tell the stack that the interface is no longer active */
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-#ifdef EM_TIMESYNC
- /* Disable IEEE 1588 Time hardware */
- if ((adapter->hw.mac.type == e1000_82574) ||
- (adapter->hw.mac.type == e1000_ich10lan))
- em_tsync_disable(adapter);
-#endif
-
e1000_reset_hw(&adapter->hw);
if (adapter->hw.mac.type >= e1000_82544)
E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
@@ -3121,6 +3092,7 @@ em_hardware_init(struct adapter *adapter)
/* Get control from any management/hw control */
if (((adapter->hw.mac.type == e1000_82573) ||
+ (adapter->hw.mac.type == e1000_82583) ||
(adapter->hw.mac.type == e1000_ich8lan) ||
(adapter->hw.mac.type == e1000_ich10lan) ||
(adapter->hw.mac.type == e1000_ich9lan)) &&
@@ -3169,7 +3141,13 @@ em_hardware_init(struct adapter *adapter)
else
adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
adapter->hw.fc.send_xon = TRUE;
- adapter->hw.fc.requested_mode = e1000_fc_full;
+
+ /* Set flow control, using the tunable value if it is sane */
+ if ((em_fc_setting >= 0) && (em_fc_setting < 4))
+ adapter->hw.fc.requested_mode = em_fc_setting;
+ else
+ adapter->hw.fc.requested_mode = e1000_fc_none;
+
if (e1000_init_hw(&adapter->hw) < 0) {
device_printf(dev, "Hardware Initialization Failed\n");
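The hw.em.fc_setting tunable above is handed straight to hw.fc.requested_mode, so only values 0 through 3 are meaningful. A sketch of that mapping, assuming the usual e1000_fc_mode ordering (none, rx_pause, tx_pause, full); check e1000_hw.h for the authoritative values:

	enum { FC_NONE = 0, FC_RX_PAUSE = 1, FC_TX_PAUSE = 2, FC_FULL = 3 };

	static int
	fc_mode_from_tunable(int fc_setting)
	{
		/* Values outside 0..3 fall back to no flow control */
		if (fc_setting >= FC_NONE && fc_setting <= FC_FULL)
			return (fc_setting);
		return (FC_NONE);
	}

Under that assumed ordering, setting hw.em.fc_setting=2 in loader.conf, for example, would request transmit-only pause frames.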
@@ -3211,10 +3189,11 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities = ifp->if_capenable = 0;
-#ifdef IFNET_BUF_RING
- ifp->if_transmit = em_transmit;
+#if __FreeBSD_version >= 800000
+ /* Multiqueue tx functions */
+ ifp->if_transmit = em_mq_start;
ifp->if_qflush = em_qflush;
- adapter->br = buf_ring_alloc(2048, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
+ adapter->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
#endif
if (adapter->hw.mac.type >= e1000_82543) {
int version_cap;
@@ -3664,6 +3643,9 @@ em_free_transmit_structures(struct adapter *adapter)
bus_dma_tag_destroy(adapter->txtag);
adapter->txtag = NULL;
}
+#if __FreeBSD_version >= 800000
+ buf_ring_free(adapter->br, M_DEVBUF);
+#endif
}
/*********************************************************************
@@ -3672,27 +3654,27 @@ em_free_transmit_structures(struct adapter *adapter)
* packet of a particular protocol (TCP/UDP). This routine has been
* enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
*
+ * Added back the old method of caching the current context type
+ * and skipping the setup when it is unchanged, as this is reported
+ * to be a big performance win. -jfv
**********************************************************************/
static void
em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
u32 *txd_upper, u32 *txd_lower)
{
- struct e1000_context_desc *TXD;
+ struct e1000_context_desc *TXD = NULL;
struct em_buffer *tx_buffer;
struct ether_vlan_header *eh;
struct ip *ip = NULL;
struct ip6_hdr *ip6;
- struct tcp_hdr *th;
int curr_txd, ehdrlen;
u32 cmd, hdr_len, ip_hlen;
u16 etype;
u8 ipproto;
+
cmd = hdr_len = ipproto = 0;
- /* Setup checksum offload context. */
curr_txd = adapter->next_avail_tx_desc;
- tx_buffer = &adapter->tx_buffer_area[curr_txd];
- TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
/*
* Determine where frame payload starts.
@@ -3724,6 +3706,8 @@ em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
* End offset for header checksum calculation.
* Offset of place to put the checksum.
*/
+ TXD = (struct e1000_context_desc *)
+ &adapter->tx_desc_base[curr_txd];
TXD->lower_setup.ip_fields.ipcss = ehdrlen;
TXD->lower_setup.ip_fields.ipcse =
htole16(ehdrlen + ip_hlen);
@@ -3753,11 +3737,6 @@ em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
ipproto = ip6->ip6_nxt;
break;
-#ifdef EM_TIMESYNC
- case ETHERTYPE_IEEE1588:
- *txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
- break;
-#endif
default:
*txd_upper = 0;
*txd_lower = 0;
@@ -3767,42 +3746,46 @@ em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
switch (ipproto) {
case IPPROTO_TCP:
if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
+ *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ /* no need for context if already set */
+ if (adapter->last_hw_offload == CSUM_TCP)
+ return;
+ adapter->last_hw_offload = CSUM_TCP;
/*
* Start offset for payload checksum calculation.
* End offset for payload checksum calculation.
* Offset of place to put the checksum.
*/
- th = (struct tcp_hdr *)(mp->m_data + hdr_len);
+ TXD = (struct e1000_context_desc *)
+ &adapter->tx_desc_base[curr_txd];
TXD->upper_setup.tcp_fields.tucss = hdr_len;
TXD->upper_setup.tcp_fields.tucse = htole16(0);
TXD->upper_setup.tcp_fields.tucso =
hdr_len + offsetof(struct tcphdr, th_sum);
cmd |= E1000_TXD_CMD_TCP;
- *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
}
break;
case IPPROTO_UDP:
{
-#ifdef EM_TIMESYNC
- void *hdr = (caddr_t) ip + ip_hlen;
- struct udphdr *uh = (struct udphdr *)hdr;
-
- if (uh->uh_dport == htons(TSYNC_PORT)) {
- *txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
- IOCTL_DEBUGOUT("@@@ Sending Event Packet\n");
- }
-#endif
if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
+ *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ /* no need for context if already set */
+ if (adapter->last_hw_offload == CSUM_UDP)
+ return;
+ adapter->last_hw_offload = CSUM_UDP;
/*
* Start offset for header checksum calculation.
* End offset for header checksum calculation.
* Offset of place to put the checksum.
*/
+ TXD = (struct e1000_context_desc *)
+ &adapter->tx_desc_base[curr_txd];
TXD->upper_setup.tcp_fields.tucss = hdr_len;
TXD->upper_setup.tcp_fields.tucse = htole16(0);
TXD->upper_setup.tcp_fields.tucso =
hdr_len + offsetof(struct udphdr, uh_sum);
- *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
}
/* Fall Thru */
}
@@ -3810,20 +3793,10 @@ em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
break;
}
-#ifdef EM_TIMESYNC
- /*
- ** We might be here just for TIMESYNC
- ** which means we don't need the context
- ** descriptor.
- */
- if (!mp->m_pkthdr.csum_flags & CSUM_OFFLOAD)
- return;
-#endif
- *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
- E1000_TXD_DTYP_D; /* Data descr */
TXD->tcp_seg_setup.data = htole32(0);
TXD->cmd_and_length =
htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
+ tx_buffer = &adapter->tx_buffer_area[curr_txd];
tx_buffer->m_head = NULL;
tx_buffer->next_eop = -1;
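The last_hw_offload field used above caches which checksum context (CSUM_TCP or CSUM_UDP) the hardware already has loaded, so a run of same-type packets pays for only one context descriptor. A minimal sketch of the idea (the struct and write_csum_context() are placeholders, not the driver's real types):

	static void write_csum_context(int csum_flag);	/* hypothetical helper */

	struct tx_state {
		int last_hw_offload;	/* CSUM_TCP, CSUM_UDP, or 0 */
	};

	static void
	setup_offload(struct tx_state *t, int csum_flag)
	{
		if (t->last_hw_offload == csum_flag)
			return;				/* context already loaded */
		write_csum_context(csum_flag);		/* costs one TX descriptor */
		t->last_hw_offload = csum_flag;
	}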
@@ -4463,30 +4436,28 @@ em_free_receive_structures(struct adapter *adapter)
*
* We loop at most count times if count is > 0, or until done if
* count < 0.
- *
+ *
+ * For polling, we now also return the number of cleaned packets
*********************************************************************/
static int
-em_rxeof(struct adapter *adapter, int count, int *rx_npktsp)
+em_rxeof(struct adapter *adapter, int count)
{
struct ifnet *ifp = adapter->ifp;
struct mbuf *mp;
u8 status, accept_frame = 0, eop = 0;
u16 len, desc_len, prev_len_adj;
- int i, rx_npkts;
+ u32 i, rx_sent = 0;
struct e1000_rx_desc *current_desc;
EM_RX_LOCK(adapter);
i = adapter->next_rx_desc_to_check;
- rx_npkts = 0;
current_desc = &adapter->rx_desc_base[i];
bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
BUS_DMASYNC_POSTREAD);
if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
EM_RX_UNLOCK(adapter);
- if (rx_npktsp != NULL)
- *rx_npktsp = rx_npkts;
- return (0);
+ return (rx_sent);
}
while ((current_desc->status & E1000_RXD_STAT_DD) &&
@@ -4628,13 +4599,13 @@ discard:
/* Advance our pointers to the next descriptor. */
if (++i == adapter->num_rx_desc)
i = 0;
+ /* Call into the stack */
if (m != NULL) {
adapter->next_rx_desc_to_check = i;
- /* Unlock for call into stack */
EM_RX_UNLOCK(adapter);
(*ifp->if_input)(ifp, m);
EM_RX_LOCK(adapter);
- rx_npkts++;
+ rx_sent++;
i = adapter->next_rx_desc_to_check;
}
current_desc = &adapter->rx_desc_base[i];
@@ -4646,12 +4617,7 @@ discard:
i = adapter->num_rx_desc - 1;
E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
EM_RX_UNLOCK(adapter);
- if (rx_npktsp != NULL)
- *rx_npktsp = rx_npkts;
- if (!((current_desc->status) & E1000_RXD_STAT_DD))
- return (0);
-
- return (1);
+ return (rx_sent);
}
#ifndef __NO_STRICT_ALIGNMENT
@@ -4743,6 +4709,7 @@ em_receive_checksum(struct adapter *adapter,
}
}
+#if __FreeBSD_version >= 700029
/*
* This routine is run via an vlan
* config EVENT
@@ -4751,28 +4718,17 @@ static void
em_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
struct adapter *adapter = ifp->if_softc;
- u32 ctrl, rctl, index, vfta;
-
- ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
- ctrl |= E1000_CTRL_VME;
- E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
-
- /* Setup for Hardware Filter */
- rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
- rctl |= E1000_RCTL_VFE;
- rctl &= ~E1000_RCTL_CFIEN;
- E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
-
- /* Make entry in the hardware filter table */
- index = ((vtag >> 5) & 0x7F);
- vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
- vfta |= (1 << (vtag & 0x1F));
- E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);
+ u32 index, bit;
- /* Update the frame size */
- E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
- adapter->max_frame_size + VLAN_TAG_SIZE);
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
+ em_shadow_vfta[index] |= (1 << bit);
+ ++adapter->num_vlans;
+ /* Re-init to load the changes */
+ em_init(adapter);
}
/*
@@ -4783,26 +4739,58 @@ static void
em_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
struct adapter *adapter = ifp->if_softc;
- u32 index, vfta;
-
- /* Remove entry in the hardware filter table */
- index = ((vtag >> 5) & 0x7F);
- vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
- vfta &= ~(1 << (vtag & 0x1F));
- E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);
- /* Have all vlans unregistered? */
- if (adapter->ifp->if_vlantrunk == NULL) {
- u32 rctl;
- /* Turn off the filter table */
- rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
- rctl &= ~E1000_RCTL_VFE;
- rctl |= E1000_RCTL_CFIEN;
- E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
- /* Reset the frame size */
- E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
- adapter->max_frame_size);
- }
+ u32 index, bit;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
+ em_shadow_vfta[index] &= ~(1 << bit);
+ --adapter->num_vlans;
+ /* Re-init to load the changes */
+ em_init(adapter);
+}
+
+static void
+em_setup_vlan_hw_support(struct adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
+
+ /*
+ ** We get here through init_locked, meaning
+ ** a soft reset; that has already cleared
+ ** the VFTA and other state, so if no VLANs
+ ** have been registered, do nothing.
+ */
+ if (adapter->num_vlans == 0)
+ return;
+
+ /*
+ ** A soft reset zeroes out the VFTA, so
+ ** we need to repopulate it now.
+ */
+ for (int i = 0; i < EM_VFTA_SIZE; i++)
+ if (em_shadow_vfta[i] != 0)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
+ i, em_shadow_vfta[i]);
+
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg |= E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+ /* Enable the Filter Table */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+
+ /* Update the frame size */
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ adapter->max_frame_size + VLAN_TAG_SIZE);
}
+#endif
static void
em_enable_intr(struct adapter *adapter)
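For reference, the index/bit math shared by em_register_vlan(), em_unregister_vlan() and the repopulation loop above maps each 12-bit VLAN ID to one bit of the 128-word VFTA (EM_VFTA_SIZE * 32 = 4096 IDs). A worked example, using an arbitrary VLAN ID of 1234:

	u16 vtag  = 1234;
	u32 index = (vtag >> 5) & 0x7F;		/* 1234 >> 5 == 38 */
	u32 bit   = vtag & 0x1F;		/* 1234 & 31 == 18 */
	em_shadow_vfta[index] |= (1 << bit);	/* word 38, bit 18 */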
@@ -5392,100 +5380,4 @@ em_add_rx_process_limit(struct adapter *adapter, const char *name,
}
#endif
-#ifdef EM_TIMESYNC
-/*
- * Initialize the Time Sync Feature
- */
-static int
-em_tsync_init(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- u32 tx_ctl, rx_ctl;
-
-
- E1000_WRITE_REG(&adapter->hw, E1000_TIMINCA, (1<<24) |
- 20833/PICOSECS_PER_TICK);
-
- adapter->last_stamp = E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
- adapter->last_stamp |= (u64)E1000_READ_REG(&adapter->hw,
- E1000_SYSTIMH) << 32ULL;
-
- /* Enable the TX side */
- tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
- tx_ctl |= 0x10;
- E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
- E1000_WRITE_FLUSH(&adapter->hw);
-
- tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
- if ((tx_ctl & 0x10) == 0) {
- device_printf(dev, "Failed to enable TX timestamping\n");
- return (ENXIO);
- }
-
- /* Enable RX */
- rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
- rx_ctl |= 0x10; /* Enable the feature */
- rx_ctl |= 0x0a; /* This value turns on Ver 1 and 2 */
- E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);
-
- /*
- * Ethertype Stamping (Ethertype = 0x88F7)
- */
- E1000_WRITE_REG(&adapter->hw, E1000_RXMTRL, htonl(0x440088f7));
-
- /*
- * Source Port Queue Filter Setup:
- * this is for UDP port filtering
- */
- E1000_WRITE_REG(&adapter->hw, E1000_RXUDP, htons(TSYNC_PORT));
- /* Protocol = UDP, enable Timestamp, and filter on source/protocol */
-
- E1000_WRITE_FLUSH(&adapter->hw);
-
- rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
- if ((rx_ctl & 0x10) == 0) {
- device_printf(dev, "Failed to enable RX timestamping\n");
- return (ENXIO);
- }
- device_printf(dev, "IEEE 1588 Precision Time Protocol enabled\n");
-
- return (0);
-}
-
-/*
- * Disable the Time Sync Feature
- */
-static void
-em_tsync_disable(struct adapter *adapter)
-{
- u32 tx_ctl, rx_ctl;
-
- tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
- tx_ctl &= ~0x10;
- E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
- E1000_WRITE_FLUSH(&adapter->hw);
-
- /* Invalidate TX Timestamp */
- E1000_READ_REG(&adapter->hw, E1000_TXSTMPH);
-
- tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
- if (tx_ctl & 0x10)
- HW_DEBUGOUT("Failed to disable TX timestamping\n");
-
- rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
- rx_ctl &= ~0x10;
-
- E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);
- E1000_WRITE_FLUSH(&adapter->hw);
-
- /* Invalidate RX Timestamp */
- E1000_READ_REG(&adapter->hw, E1000_RXSATRH);
-
- rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
- if (rx_ctl & 0x10)
- HW_DEBUGOUT("Failed to disable RX timestamping\n");
-
- return;
-}
-#endif /* EM_TIMESYNC */
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index 215a2b1..7487a89 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2008, Intel Corporation
+ Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,7 +36,7 @@
#ifndef _EM_H_DEFINED_
#define _EM_H_DEFINED_
-#define IFNET_BUF_RING
+
/* Tunables */
/*
@@ -232,6 +232,7 @@
#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
#define EM_MAX_SCATTER 64
+#define EM_VFTA_SIZE 128
#define EM_TSO_SIZE (65535 + sizeof(struct ether_vlan_header))
#define EM_TSO_SEG_SIZE 4096 /* Max dma segment size */
#define EM_MSIX_MASK 0x01F00000 /* For 82574 use */
@@ -254,38 +255,11 @@
#define EM_FIFO_HDR 0x10
#define EM_82547_PKT_THRESH 0x3e0
-#ifdef EM_TIMESYNC
/* Precision Time Sync (IEEE 1588) defines */
#define ETHERTYPE_IEEE1588 0x88F7
#define PICOSECS_PER_TICK 20833
#define TSYNC_PORT 319 /* UDP port for the protocol */
-/* TIMESYNC IOCTL defines */
-#define EM_TIMESYNC_READTS _IOWR('i', 127, struct em_tsync_read)
-
-/* Used in the READTS IOCTL */
-struct em_tsync_read {
- int read_current_time;
- struct timespec system_time;
- u64 network_time;
- u64 rx_stamp;
- u64 tx_stamp;
- u16 seqid;
- unsigned char srcid[6];
- int rx_valid;
- int tx_valid;
-};
-
-#endif /* EM_TIMESYNC */
-
-struct adapter;
-
-struct em_int_delay_info {
- struct adapter *adapter; /* Back-pointer to the adapter struct */
- int offset; /* Register offset to read/write */
- int value; /* Current value in usecs */
-};
-
/*
* Bus dma allocation structure used by
* e1000_dma_malloc and e1000_dma_free.
@@ -299,13 +273,19 @@ struct em_dma_alloc {
int dma_nseg;
};
+struct adapter;
+
+struct em_int_delay_info {
+ struct adapter *adapter; /* Back-pointer to the adapter struct */
+ int offset; /* Register offset to read/write */
+ int value; /* Current value in usecs */
+};
+
/* Our adapter structure */
struct adapter {
struct ifnet *ifp;
-#ifdef IFNET_BUF_RING
+#if __FreeBSD_version >= 800000
struct buf_ring *br;
-#else
- void *br;
#endif
struct e1000_hw hw;
@@ -320,7 +300,7 @@ struct adapter {
struct resource *ioport;
int io_rid;
- /* 82574 uses 3 int vectors */
+ /* 82574 may use 3 int vectors */
struct resource *res[3];
void *tag[3];
int rid[3];
@@ -345,8 +325,11 @@ struct adapter {
struct task tx_task;
struct taskqueue *tq; /* private task queue */
+#if __FreeBSD_version >= 700029
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
+ u32 num_vlans;
+#endif
/* Management and WOL features */
int wol;
@@ -377,6 +360,7 @@ struct adapter {
uint32_t next_tx_to_clean;
volatile uint16_t num_tx_desc_avail;
uint16_t num_tx_desc;
+ uint16_t last_hw_offload;
uint32_t txd_cmd;
struct em_buffer *tx_buffer_area;
bus_dma_tag_t txtag; /* dma tag for tx */
@@ -433,11 +417,6 @@ struct adapter {
boolean_t pcix_82544;
boolean_t in_detach;
-#ifdef EM_TIMESYNC
- u64 last_stamp;
- u64 last_sec;
- u32 last_ns;
-#endif
struct e1000_hw_stats stats;
};
@@ -457,7 +436,6 @@ typedef struct _em_vendor_info_t {
unsigned int index;
} em_vendor_info_t;
-
struct em_buffer {
int next_eop; /* Index of the desc to watch */
struct mbuf *m_head;
@@ -496,27 +474,4 @@ typedef struct _DESCRIPTOR_PAIR
#define EM_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
#define EM_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
-#ifdef IFNET_BUF_RING
-#define ADAPTER_RING_EMPTY(adapter) drbr_empty((adapter)->ifp, (adapter)->br)
-#define em_dequeue drbr_dequeue
-
-#else
-#define ADAPTER_RING_EMPTY(adapter) IFQ_DRV_IS_EMPTY(&((adapter)->ifp->if_snd))
-#define drbr_free(br, type)
-static __inline struct mbuf *
-em_dequeue(struct ifnet *ifp, struct buf_ring *br)
-{
- struct mbuf *m;
-
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- return (m);
-}
-#ifdef BUF_RING_UNDEFINED
-
-struct buf_ring {
-};
-
-#endif
-#endif
-
#endif /* _EM_H_DEFINED_ */
diff --git a/sys/dev/e1000/if_igb.c b/sys/dev/e1000/if_igb.c
index 3275c4d..8784cef 100644
--- a/sys/dev/e1000/if_igb.c
+++ b/sys/dev/e1000/if_igb.c
@@ -32,13 +32,16 @@
******************************************************************************/
/*$FreeBSD$*/
+
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
-#include "opt_inet.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
+#if __FreeBSD_version >= 800000
+#include <sys/buf_ring.h>
+#endif
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
@@ -53,13 +56,15 @@
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>
#include <sys/pcpu.h>
-#ifdef IGB_TIMESYNC
-#include <sys/ioccom.h>
-#include <sys/time.h>
-#endif
+#include <sys/smp.h>
+#include <machine/smp.h>
#include <machine/bus.h>
#include <machine/resource.h>
+#ifdef IGB_IEEE1588
+#include <sys/ieee1588.h>
+#endif
+
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
@@ -95,7 +100,7 @@ int igb_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char igb_driver_version[] = "version - 1.5.3";
+char igb_driver_version[] = "version - 1.7.3";
/*********************************************************************
@@ -119,6 +124,8 @@ static igb_vendor_info_t igb_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_82576_NS, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82576_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82576_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82576_SERDES_QUAD,
+ PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82576_QUAD_COPPER,
PCI_ANY_ID, PCI_ANY_ID, 0},
/* required last entry */
@@ -144,6 +151,12 @@ static int igb_suspend(device_t);
static int igb_resume(device_t);
static void igb_start(struct ifnet *);
static void igb_start_locked(struct tx_ring *, struct ifnet *ifp);
+#if __FreeBSD_version >= 800000
+static int igb_mq_start(struct ifnet *, struct mbuf *);
+static int igb_mq_start_locked(struct ifnet *,
+ struct tx_ring *, struct mbuf *);
+static void igb_qflush(struct ifnet *);
+#endif
static int igb_ioctl(struct ifnet *, u_long, caddr_t);
static void igb_watchdog(struct adapter *);
static void igb_init(void *);
@@ -155,9 +168,7 @@ static void igb_identify_hardware(struct adapter *);
static int igb_allocate_pci_resources(struct adapter *);
static int igb_allocate_msix(struct adapter *);
static int igb_allocate_legacy(struct adapter *);
-#if __FreeBSD_version >= 602105
static int igb_setup_msix(struct adapter *);
-#endif
static void igb_free_pci_resources(struct adapter *);
static void igb_local_timer(void *);
static int igb_hardware_init(struct adapter *);
@@ -193,8 +204,11 @@ static void igb_set_multi(struct adapter *);
static void igb_print_hw_stats(struct adapter *);
static void igb_update_link_status(struct adapter *);
static int igb_get_buf(struct rx_ring *, int, u8);
+
static void igb_register_vlan(void *, struct ifnet *, u16);
static void igb_unregister_vlan(void *, struct ifnet *, u16);
+static void igb_setup_vlan_hw_support(struct adapter *);
+
static int igb_xmit(struct tx_ring *, struct mbuf **);
static int igb_dma_malloc(struct adapter *, bus_size_t,
struct igb_dma_alloc *, int);
@@ -211,30 +225,17 @@ static void igb_get_hw_control(struct adapter *);
static void igb_release_hw_control(struct adapter *);
static void igb_enable_wakeup(device_t);
-#ifdef IGB_TIMESYNC
-/* Precision Time sync support */
-static int igb_tsync_init(struct adapter *);
-static void igb_tsync_disable(struct adapter *);
-#endif
-
-#if __FreeBSD_version > 700000
static int igb_irq_fast(void *);
-#else
-static void igb_irq_fast(void *);
-#endif
-
static void igb_add_rx_process_limit(struct adapter *, const char *,
const char *, int *, int);
static void igb_handle_rxtx(void *context, int pending);
static void igb_handle_tx(void *context, int pending);
static void igb_handle_rx(void *context, int pending);
-#if __FreeBSD_version >= 602105
/* These are MSIX only irq handlers */
static void igb_msix_rx(void *);
static void igb_msix_tx(void *);
static void igb_msix_link(void *);
-#endif
/* Adaptive Interrupt Moderation */
static void igb_update_aim(struct rx_ring *);
@@ -294,14 +295,12 @@ static int igb_bulk_latency = IGB_BULK_LATENCY;
TUNABLE_INT("hw.igb.bulk_latency", &igb_bulk_latency);
/*
-** IF YOU CHANGE THESE: be sure and change IGB_MSIX_VEC in
-** if_igb.h to match. These can be autoconfigured if set to
-** 0, it will then be based on number of cpus.
+** This will autoconfigure based on the number
+** of CPUs if set to 0. Only matched pairs of
+** TX and RX rings are supported.
*/
-static int igb_tx_queues = 1;
-static int igb_rx_queues = 1;
-TUNABLE_INT("hw.igb.tx_queues", &igb_tx_queues);
-TUNABLE_INT("hw.igb.rx_queues", &igb_rx_queues);
+static int igb_num_queues = 1;
+TUNABLE_INT("hw.igb.num_queues", &igb_num_queues);
/* How many packets rxeof tries to clean at a time */
static int igb_rx_process_limit = 100;
@@ -312,21 +311,14 @@ static int igb_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
/*
- * Should the driver do LRO on the RX end
- * this can be toggled on the fly, but the
- * interface must be reset (down/up) for it
- * to take effect.
- */
-static int igb_enable_lro = 1;
-TUNABLE_INT("hw.igb.enable_lro", &igb_enable_lro);
+** Shadow VFTA table; this is needed because
+** the real filter table gets cleared during
+** a soft reset and the driver needs to be able
+** to repopulate it.
+*/
+static u32 igb_shadow_vfta[IGB_VFTA_SIZE];
-/*
- * Enable RX Header Split
- */
-static int igb_rx_hdr_split = 1;
-TUNABLE_INT("hw.igb.rx_hdr_split", &igb_rx_hdr_split);
-extern int mp_ncpus;
/*********************************************************************
* Device identification routine
*
@@ -417,11 +409,6 @@ igb_attach(device_t dev)
OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
&igb_fc_setting, 0, "Flow Control");
- SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
- OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
- &igb_enable_lro, 0, "Large Receive Offload");
-
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
@@ -442,11 +429,6 @@ igb_attach(device_t dev)
OID_AUTO, "bulk_latency", CTLTYPE_INT|CTLFLAG_RW,
&igb_bulk_latency, 1, "Bulk Latency");
- SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
- OID_AUTO, "hdr_split", CTLTYPE_INT|CTLFLAG_RW,
- &igb_rx_hdr_split, 0, "RX Header Split");
-
callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
/* Determine hardware and mac info */
@@ -577,6 +559,22 @@ igb_attach(device_t dev)
/* Setup OS specific network interface */
igb_setup_interface(dev, adapter);
+#ifdef IGB_IEEE1588
+ /*
+ ** Setup the timer: IEEE 1588 support
+ */
+ adapter->cycles.read = igb_read_clock;
+ adapter->cycles.mask = (u64)-1;
+ adapter->cycles.mult = 1;
+ adapter->cycles.shift = IGB_TSYNC_SHIFT;
+ E1000_WRITE_REG(&adapter->hw, E1000_TIMINCA, (1<<24) |
+ IGB_TSYNC_CYCLE_TIME * IGB_TSYNC_SHIFT);
+ E1000_WRITE_REG(&adapter->hw, E1000_SYSTIML, 0x00000000);
+ E1000_WRITE_REG(&adapter->hw, E1000_SYSTIMH, 0xFF800000);
+
+ /* JFV - this is not complete yet */
+#endif
+
/* Initialize statistics */
igb_update_stats_counters(adapter);
@@ -642,11 +640,7 @@ igb_detach(device_t dev)
INIT_DEBUGOUT("igb_detach: begin");
/* Make sure VLANS are not using driver */
-#if __FreeBSD_version >= 700000
if (adapter->ifp->if_vlantrunk != NULL) {
-#else
- if (adapter->ifp->if_nvlans != 0) {
-#endif
device_printf(dev,"Vlan in use, detach first\n");
return (EBUSY);
}
@@ -797,31 +791,127 @@ igb_start_locked(struct tx_ring *txr, struct ifnet *ifp)
}
}
+/*
+ * Legacy TX routine, called from the stack; it
+ * always uses tx ring 0 and blocks on its lock.
+ * It should not be used with multiqueue tx.
+ */
static void
igb_start(struct ifnet *ifp)
{
struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr;
- u32 queue = 0;
-
- /*
- ** This is really just here for testing
- ** TX multiqueue, ultimately what is
- ** needed is the flow support in the stack
- ** and appropriate logic here to deal with
- ** it. -jfv
- */
- if (adapter->num_tx_queues > 1)
- queue = (curcpu % adapter->num_tx_queues);
+ struct tx_ring *txr = adapter->tx_rings;
- txr = &adapter->tx_rings[queue];
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IGB_TX_LOCK(txr);
igb_start_locked(txr, ifp);
IGB_TX_UNLOCK(txr);
}
+ return;
}
+#if __FreeBSD_version >= 800000
+/*
+** Multiqueue Transmit driver
+**
+*/
+static int
+igb_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr;
+ int i = 0, err = 0;
+
+ /* Which queue to use */
+ if ((m->m_flags & M_FLOWID) != 0)
+ i = m->m_pkthdr.flowid % adapter->num_queues;
+ txr = &adapter->tx_rings[i];
+
+ if (IGB_TX_TRYLOCK(txr)) {
+ err = igb_mq_start_locked(ifp, txr, m);
+ IGB_TX_UNLOCK(txr);
+ } else
+ err = drbr_enqueue(ifp, txr->br, m);
+
+ return (err);
+}
+
+static int
+igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
+{
+ struct adapter *adapter = txr->adapter;
+ struct mbuf *next;
+ int err = 0;
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ err = drbr_enqueue(ifp, txr->br, m);
+ return (err);
+ }
+
+ if (m == NULL) /* Called by tasklet */
+ goto process;
+
+ /* If nothing queued go right to xmit */
+ if (drbr_empty(ifp, txr->br)) {
+ if (igb_xmit(txr, &m)) {
+ if (m && (err = drbr_enqueue(ifp, txr->br, m)) != 0)
+ return (err);
+ } else {
+ /* Success, update stats */
+ drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
+ /* Send a copy of the frame to the BPF listener */
+ ETHER_BPF_MTAP(ifp, m);
+ /* Set the watchdog */
+ txr->watchdog_timer = IGB_TX_TIMEOUT;
+ }
+
+ } else if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
+ return (err);
+
+process:
+ if (drbr_empty(ifp, txr->br))
+ return (err);
+
+ /* Process the queue */
+ while (TRUE) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+ next = drbr_dequeue(ifp, txr->br);
+ if (next == NULL)
+ break;
+ if (igb_xmit(txr, &next))
+ break;
+ ETHER_BPF_MTAP(ifp, next);
+ /* Set the watchdog */
+ txr->watchdog_timer = IGB_TX_TIMEOUT;
+ }
+
+ if (txr->tx_avail <= IGB_TX_OP_THRESHOLD)
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+
+ return (err);
+}
+
+/*
+** Flush all ring buffers
+*/
+static void
+igb_qflush(struct ifnet *ifp)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct mbuf *m;
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ IGB_TX_LOCK(txr);
+ while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+ m_freem(m);
+ IGB_TX_UNLOCK(txr);
+ }
+ if_qflush(ifp);
+}
+#endif /* __FreeBSD_version >= 800000 */
+
/*********************************************************************
* Ioctl entry point
*
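igb_mq_start() above picks the TX ring from the mbuf's flow ID, so packets of one flow always land on the same ring (and the same lock), which avoids intra-flow reordering and spreads lock contention across rings. A minimal sketch of that selection (tx_rings and num_queues stand in for the adapter fields of the same names):

	static struct tx_ring *
	pick_tx_ring(struct adapter *adapter, struct mbuf *m)
	{
		int i = 0;

		if (m->m_flags & M_FLOWID)	/* stack supplied a flow ID */
			i = m->m_pkthdr.flowid % adapter->num_queues;
		return (&adapter->tx_rings[i]);	/* otherwise default to ring 0 */
	}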
@@ -952,72 +1042,24 @@ igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
reinit = 1;
}
+ if (mask & IFCAP_LRO) {
+ ifp->if_capenable ^= IFCAP_LRO;
+ reinit = 1;
+ }
if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
igb_init(adapter);
-#if __FreeBSD_version >= 700000
VLAN_CAPABILITIES(ifp);
-#endif
break;
}
-#ifdef IGB_TIMESYNC
+#ifdef IGB_IEEE1588
/*
** IOCTL support for Precision Time (IEEE 1588) Support
*/
- case IGB_TIMESYNC_READTS:
- {
- u32 rx_ctl, tx_ctl;
- struct igb_tsync_read *tdata;
-
- tdata = (struct igb_tsync_read *) ifr->ifr_data;
-
- if (tdata->read_current_time) {
- getnanotime(&tdata->system_time);
- tdata->network_time = E1000_READ_REG(&adapter->hw,
- E1000_SYSTIML);
- tdata->network_time |=
- (u64)E1000_READ_REG(&adapter->hw,
- E1000_SYSTIMH ) << 32;
- }
-
- rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
- tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
-
- if (rx_ctl & 0x1) {
- u32 tmp;
- unsigned char *tmp_cp;
-
- tdata->rx_valid = 1;
- tdata->rx_stamp = E1000_READ_REG(&adapter->hw, E1000_RXSTMPL);
- tdata->rx_stamp |= (u64)E1000_READ_REG(&adapter->hw,
- E1000_RXSTMPH) << 32;
-
- tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRL);
- tmp_cp = (unsigned char *) &tmp;
- tdata->srcid[0] = tmp_cp[0];
- tdata->srcid[1] = tmp_cp[1];
- tdata->srcid[2] = tmp_cp[2];
- tdata->srcid[3] = tmp_cp[3];
- tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRH);
- tmp_cp = (unsigned char *) &tmp;
- tdata->srcid[4] = tmp_cp[0];
- tdata->srcid[5] = tmp_cp[1];
- tdata->seqid = tmp >> 16;
- tdata->seqid = htons(tdata->seqid);
- } else
- tdata->rx_valid = 0;
-
- if (tx_ctl & 0x1) {
- tdata->tx_valid = 1;
- tdata->tx_stamp = E1000_READ_REG(&adapter->hw, E1000_TXSTMPL);
- tdata->tx_stamp |= (u64) E1000_READ_REG(&adapter->hw,
- E1000_TXSTMPH) << 32;
- } else
- tdata->tx_valid = 0;
-
- return (0);
- }
-#endif /* IGB_TIMESYNC */
+ case SIOCSHWTSTAMP:
+ error = igb_hwtstamp_ioctl(adapter, ifp);
+ break;
+#endif
default:
error = ether_ioctl(ifp, command, data);
@@ -1055,7 +1097,7 @@ igb_watchdog(struct adapter *adapter)
** With TX Multiqueue we need to check every queue's timer,
** if any time out we do the reset.
*/
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
IGB_TX_LOCK(txr);
if (txr->watchdog_timer == 0 ||
(--txr->watchdog_timer)) {
@@ -1076,7 +1118,7 @@ igb_watchdog(struct adapter *adapter)
if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
E1000_STATUS_TXOFF) {
txr = adapter->tx_rings; /* reset pointer */
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
IGB_TX_LOCK(txr);
txr->watchdog_timer = IGB_TX_TIMEOUT;
IGB_TX_UNLOCK(txr);
@@ -1087,7 +1129,7 @@ igb_watchdog(struct adapter *adapter)
if (e1000_check_for_link(&adapter->hw) == 0)
device_printf(adapter->dev, "watchdog timeout -- resetting\n");
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
device_printf(adapter->dev, "Queue(%d) tdh = %d, tdt = %d\n",
i, E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
E1000_READ_REG(&adapter->hw, E1000_TDT(i)));
@@ -1155,14 +1197,6 @@ igb_init_locked(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
- if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
- ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) {
- u32 ctrl;
- ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
- ctrl |= E1000_CTRL_VME;
- E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
- }
-
/* Set hardware offload abilities */
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TXCSUM) {
@@ -1215,30 +1249,29 @@ igb_init_locked(struct adapter *adapter)
if (adapter->msix > 1) /* Set up queue routing */
igb_configure_queues(adapter);
+ /* Set up VLAN tag offload and filter */
+ igb_setup_vlan_hw_support(adapter);
+
/* Set default RX interrupt moderation */
- for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
E1000_WRITE_REG(&adapter->hw,
E1000_EITR(rxr->msix), igb_ave_latency);
rxr->eitr_setting = igb_ave_latency;
}
/* Set TX interrupt rate & reset TX watchdog */
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
E1000_WRITE_REG(&adapter->hw,
E1000_EITR(txr->msix), igb_ave_latency);
txr->watchdog_timer = FALSE;
}
- /* this clears any pending interrupts */
- E1000_READ_REG(&adapter->hw, E1000_ICR);
- igb_enable_intr(adapter);
- E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
-
-#ifdef IGB_TIMESYNC
- /* Initialize IEEE 1588 Time sync if available */
- if (adapter->hw.mac.type == e1000_82576)
- igb_tsync_init(adapter);
-#endif
+ {
+ /* this clears any pending interrupts */
+ E1000_READ_REG(&adapter->hw, E1000_ICR);
+ igb_enable_intr(adapter);
+ E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+ }
/* Don't reset the phy next time init gets called */
adapter->hw.phy.reset_disable = TRUE;
@@ -1271,8 +1304,13 @@ igb_handle_rxtx(void *context, int pending)
IGB_TX_LOCK(txr);
igb_txeof(txr);
+#if __FreeBSD_version >= 800000
+ if (!drbr_empty(ifp, txr->br))
+ igb_mq_start_locked(ifp, txr, NULL);
+#else
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
igb_start_locked(txr, ifp);
+#endif
IGB_TX_UNLOCK(txr);
}
@@ -1303,8 +1341,13 @@ igb_handle_tx(void *context, int pending)
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IGB_TX_LOCK(txr);
igb_txeof(txr);
+#if __FreeBSD_version >= 800000
+ if (!drbr_empty(ifp, txr->br))
+ igb_mq_start_locked(ifp, txr, NULL);
+#else
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
igb_start_locked(txr, ifp);
+#endif
IGB_TX_UNLOCK(txr);
}
}
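
The FreeBSD 8 branch above drains a per-ring buf_ring instead of the single if_snd queue. For orientation, a minimal sketch of the drbr-based if_transmit pattern this enables follows; the driver's actual igb_mq_start/igb_mq_start_locked are outside this hunk, so the function name and the curcpu-based ring choice are illustrative assumptions, not the committed code.

/*
 * Sketch only: enqueue on the ring's buf_ring, then drain it if we can
 * take the TX lock; otherwise leave the frames for the current lock owner.
 */
static int
igb_mq_start_sketch(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr;
	struct mbuf *next;
	int err;

	/* Ring choice is an assumption; flowid or curcpu are common keys */
	txr = &adapter->tx_rings[curcpu % adapter->num_queues];

	if (IGB_TX_TRYLOCK(txr)) {
		err = drbr_enqueue(ifp, txr->br, m);
		while ((next = drbr_dequeue(ifp, txr->br)) != NULL) {
			if (igb_xmit(txr, &next) != 0) {
				if (next != NULL)	/* requeue the leftover */
					err = drbr_enqueue(ifp, txr->br, next);
				break;
			}
			ETHER_BPF_MTAP(ifp, next);
		}
		IGB_TX_UNLOCK(txr);
	} else
		err = drbr_enqueue(ifp, txr->br, m);

	return (err);
}
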
@@ -1316,11 +1359,7 @@ igb_handle_tx(void *context, int pending)
* Interrupt Service routine
*
*********************************************************************/
-#if __FreeBSD_version < 700000
-static void
-#else
static int
-#endif
igb_irq_fast(void *arg)
{
struct adapter *adapter = arg;
@@ -1360,7 +1399,6 @@ igb_irq_fast(void *arg)
}
-#if __FreeBSD_version >= 602105
/*********************************************************************
*
* MSIX TX Interrupt Service routine
@@ -1448,7 +1486,6 @@ spurious:
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask);
return;
}
-#endif
/*
@@ -1634,7 +1671,7 @@ igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
struct mbuf *m_head;
u32 olinfo_status = 0, cmd_type_len = 0;
int nsegs, i, j, error, first, last = 0;
- u32 hdrlen = 0, offload = 0;
+ u32 hdrlen = 0;
m_head = *m_headp;
@@ -1730,13 +1767,12 @@ igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
} else
return (ENXIO);
- } else
- /* Do all other context descriptor setup */
- offload = igb_tx_ctx_setup(txr, m_head);
- if (offload == TRUE)
+ } else if (igb_tx_ctx_setup(txr, m_head))
olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
-#ifdef IGB_TIMESYNC
- if (offload == IGB_TIMESTAMP)
+
+#ifdef IGB_IEEE1588
+ /* This is changing soon to an mtag detection */
+ if (FALSE) /* placeholder: we detect this mbuf has a TSTAMP mtag */
cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
#endif
/* Calculate payload length */
@@ -1960,7 +1996,7 @@ igb_update_link_status(struct adapter *adapter)
adapter->link_active = 0;
if_link_state_change(ifp, LINK_STATE_DOWN);
/* Turn off watchdogs */
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
+ for (int i = 0; i < adapter->num_queues; i++, txr++)
txr->watchdog_timer = FALSE;
}
}
@@ -1989,12 +2025,6 @@ igb_stop(void *arg)
/* Tell the stack that the interface is no longer active */
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-#ifdef IGB_TIMESYNC
- /* Disable IEEE 1588 Time sync */
- if (adapter->hw.mac.type == e1000_82576)
- igb_tsync_disable(adapter);
-#endif
-
e1000_reset_hw(&adapter->hw);
E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
}
@@ -2042,7 +2072,7 @@ static int
igb_allocate_pci_resources(struct adapter *adapter)
{
device_t dev = adapter->dev;
- int rid, error = 0;
+ int rid;
rid = PCIR_BAR(0);
adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
@@ -2055,27 +2085,15 @@ igb_allocate_pci_resources(struct adapter *adapter)
rman_get_bustag(adapter->pci_mem);
adapter->osdep.mem_bus_space_handle =
rman_get_bushandle(adapter->pci_mem);
- adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
-
- /*
- ** Init the resource arrays
- */
- for (int i = 0; i < IGB_MSIX_VEC; i++) {
- adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
- adapter->tag[i] = NULL;
- adapter->res[i] = NULL;
- }
+ adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
- adapter->num_tx_queues = 1; /* Defaults for Legacy or MSI */
- adapter->num_rx_queues = 1;
+ adapter->num_queues = 1; /* Defaults for Legacy or MSI */
-#if __FreeBSD_version >= 602105
/* This will setup either MSI/X or MSI */
adapter->msix = igb_setup_msix(adapter);
-#endif
adapter->hw.back = &adapter->osdep;
- return (error);
+ return (0);
}
/*********************************************************************
@@ -2087,19 +2105,19 @@ static int
igb_allocate_legacy(struct adapter *adapter)
{
device_t dev = adapter->dev;
- int error;
+ int error, rid = 0;
/* Turn off all interrupts */
E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
- /* Legacy RID at 0 */
- if (adapter->msix == 0)
- adapter->rid[0] = 0;
+ /* MSI RID is 1 */
+ if (adapter->msix == 1)
+ rid = 1;
/* We allocate a single interrupt resource */
- adapter->res[0] = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
- if (adapter->res[0] == NULL) {
+ adapter->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (adapter->res == NULL) {
device_printf(dev, "Unable to allocate bus resource: "
"interrupt\n");
return (ENXIO);
@@ -2114,12 +2132,9 @@ igb_allocate_legacy(struct adapter *adapter)
taskqueue_thread_enqueue, &adapter->tq);
taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
device_get_nameunit(adapter->dev));
- if ((error = bus_setup_intr(dev, adapter->res[0],
- INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast,
-#if __FreeBSD_version >= 700000
- NULL,
-#endif
- adapter, &adapter->tag[0])) != 0) {
+ if ((error = bus_setup_intr(dev, adapter->res,
+ INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast, NULL,
+ adapter, &adapter->tag)) != 0) {
device_printf(dev, "Failed to register fast interrupt "
"handler: %d\n", error);
taskqueue_free(adapter->tq);
@@ -2131,7 +2146,6 @@ igb_allocate_legacy(struct adapter *adapter)
}
-#if __FreeBSD_version >= 602105
/*********************************************************************
*
* Setup the MSIX Interrupt handlers:
@@ -2143,105 +2157,104 @@ igb_allocate_msix(struct adapter *adapter)
device_t dev = adapter->dev;
struct tx_ring *txr = adapter->tx_rings;
struct rx_ring *rxr = adapter->rx_rings;
- int error, vector = 0;
+ int error, rid, vector = 0;
/*
* Setup the interrupt handlers
*/
/* TX Setup */
- for (int i = 0; i < adapter->num_tx_queues; i++, vector++, txr++) {
- adapter->res[vector] = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &adapter->rid[vector],
- RF_SHAREABLE | RF_ACTIVE);
- if (adapter->res[vector] == NULL) {
+ for (int i = 0; i < adapter->num_queues; i++, vector++, txr++) {
+ rid = vector +1;
+ txr->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (txr->res == NULL) {
device_printf(dev,
"Unable to allocate bus resource: "
"MSIX TX Interrupt\n");
return (ENXIO);
}
- error = bus_setup_intr(dev, adapter->res[vector],
- INTR_TYPE_NET | INTR_MPSAFE,
-#if __FreeBSD_version >= 700000
- NULL,
-#endif
- igb_msix_tx, txr, &adapter->tag[vector]);
+ error = bus_setup_intr(dev, txr->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ igb_msix_tx, txr, &txr->tag);
if (error) {
- adapter->res[vector] = NULL;
+ txr->res = NULL;
device_printf(dev, "Failed to register TX handler");
return (error);
}
/* Make tasklet for deferred handling - one per queue */
TASK_INIT(&txr->tx_task, 0, igb_handle_tx, txr);
- if (adapter->hw.mac.type == e1000_82575) {
+ txr->msix = vector;
+ if (adapter->hw.mac.type == e1000_82575)
txr->eims = E1000_EICR_TX_QUEUE0 << i;
- /* MSIXBM registers start at 0 */
- txr->msix = adapter->rid[vector] - 1;
- } else {
+ else
txr->eims = 1 << vector;
- txr->msix = vector;
- }
+#if __FreeBSD_version >= 800000
+ /*
+ ** Bind the msix vector, and thus the
+ ** ring to the corresponding cpu.
+ */
+ intr_bind(rman_get_start(txr->res), i);
+#endif
}
/* RX Setup */
- for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rxr++) {
- adapter->res[vector] = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &adapter->rid[vector],
- RF_SHAREABLE | RF_ACTIVE);
- if (adapter->res[vector] == NULL) {
+ for (int i = 0; i < adapter->num_queues; i++, vector++, rxr++) {
+ rid = vector +1;
+ rxr->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (rxr->res == NULL) {
device_printf(dev,
"Unable to allocate bus resource: "
"MSIX RX Interrupt\n");
return (ENXIO);
}
- error = bus_setup_intr(dev, adapter->res[vector],
- INTR_TYPE_NET | INTR_MPSAFE,
-#if __FreeBSD_version >= 700000
- NULL,
-#endif
- igb_msix_rx, rxr, &adapter->tag[vector]);
+ error = bus_setup_intr(dev, rxr->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ igb_msix_rx, rxr, &rxr->tag);
if (error) {
- adapter->res[vector] = NULL;
+ rxr->res = NULL;
device_printf(dev, "Failed to register RX handler");
return (error);
}
/* Make tasklet for deferred handling - one per queue */
TASK_INIT(&rxr->rx_task, 0, igb_handle_rx, rxr);
- if (adapter->hw.mac.type == e1000_82575) {
+ rxr->msix = vector;
+ if (adapter->hw.mac.type == e1000_82575)
rxr->eims = E1000_EICR_RX_QUEUE0 << i;
- rxr->msix = adapter->rid[vector] - 1;
- } else {
+ else
rxr->eims = 1 << vector;
- rxr->msix = vector;
- }
/* Get a mask for local timer */
adapter->rx_mask |= rxr->eims;
+#if __FreeBSD_version >= 800000
+ /*
+ ** Bind the msix vector, and thus the
+ ** ring to the corresponding cpu.
+ ** Notice that this makes an RX/TX pair
+ ** bound to each CPU, limited by the MSIX
+ ** vectors.
+ */
+ intr_bind(rman_get_start(rxr->res), i);
+#endif
}
/* And Link */
- adapter->res[vector] = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &adapter->rid[vector],
- RF_SHAREABLE | RF_ACTIVE);
- if (adapter->res[vector] == NULL) {
+ rid = vector +1;
+ adapter->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (adapter->res == NULL) {
device_printf(dev,
"Unable to allocate bus resource: "
"MSIX Link Interrupt\n");
return (ENXIO);
}
- if ((error = bus_setup_intr(dev, adapter->res[vector],
- INTR_TYPE_NET | INTR_MPSAFE,
-#if __FreeBSD_version >= 700000
- NULL,
-#endif
- igb_msix_link, adapter, &adapter->tag[vector])) != 0) {
+ if ((error = bus_setup_intr(dev, adapter->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ igb_msix_link, adapter, &adapter->tag)) != 0) {
device_printf(dev, "Failed to register Link handler");
return (error);
}
- if (adapter->hw.mac.type == e1000_82575)
- adapter->linkvec = adapter->rid[vector] - 1;
- else
- adapter->linkvec = vector;
-
+ adapter->linkvec = vector;
adapter->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT,
taskqueue_thread_enqueue, &adapter->tq);
taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
@@ -2249,13 +2262,6 @@ igb_allocate_msix(struct adapter *adapter)
return (0);
}
-#else /* FreeBSD 6.1/2 */
-static int
-igb_allocate_msix(struct adapter *adapter)
-{
- return (1);
-}
-#endif
static void
@@ -2279,7 +2285,7 @@ igb_configure_queues(struct adapter *adapter)
E1000_GPIE_EIAME |
E1000_GPIE_PBA | E1000_GPIE_NSICR);
/* RX */
- for (int i = 0; i < adapter->num_rx_queues; i++) {
+ for (int i = 0; i < adapter->num_queues; i++) {
u32 index = i & 0x7; /* Each IVAR has two entries */
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
rxr = &adapter->rx_rings[i];
@@ -2294,7 +2300,7 @@ igb_configure_queues(struct adapter *adapter)
adapter->eims_mask |= rxr->eims;
}
/* TX */
- for (int i = 0; i < adapter->num_tx_queues; i++) {
+ for (int i = 0; i < adapter->num_queues; i++) {
u32 index = i & 0x7; /* Each IVAR has two entries */
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
txr = &adapter->tx_rings[i];
@@ -2327,7 +2333,7 @@ igb_configure_queues(struct adapter *adapter)
E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
/* TX */
- for (int i = 0; i < adapter->num_tx_queues; i++) {
+ for (int i = 0; i < adapter->num_queues; i++) {
txr = &adapter->tx_rings[i];
E1000_WRITE_REG(hw, E1000_MSIXBM(txr->msix),
txr->eims);
@@ -2335,7 +2341,7 @@ igb_configure_queues(struct adapter *adapter)
}
/* RX */
- for (int i = 0; i < adapter->num_rx_queues; i++) {
+ for (int i = 0; i < adapter->num_queues; i++) {
rxr = &adapter->rx_rings[i];
E1000_WRITE_REG(hw, E1000_MSIXBM(rxr->msix),
rxr->eims);
@@ -2355,38 +2361,65 @@ igb_configure_queues(struct adapter *adapter)
static void
igb_free_pci_resources(struct adapter *adapter)
{
- device_t dev = adapter->dev;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct rx_ring *rxr = adapter->rx_rings;
+ device_t dev = adapter->dev;
+ int rid;
- /* Make sure the for loop below runs once */
- if (adapter->msix == 0)
- adapter->msix = 1;
+ /*
+ ** There is a slight possibility of a failure mode
+ ** in attach that will result in entering this function
+ ** before interrupt resources have been initialized, and
+ ** in that case we do not want to execute the loops below
+ ** We can detect this reliably by the state of the adapter
+ ** res pointer.
+ */
+ if (adapter->res == NULL)
+ goto mem;
/*
- * First release all the interrupt resources:
- * notice that since these are just kept
- * in an array we can do the same logic
- * whether its MSIX or just legacy.
+ * First release all the TX/RX interrupt resources:
*/
- for (int i = 0; i < adapter->msix; i++) {
- if (adapter->tag[i] != NULL) {
- bus_teardown_intr(dev, adapter->res[i],
- adapter->tag[i]);
- adapter->tag[i] = NULL;
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ rid = txr->msix + 1;
+ if (txr->tag != NULL) {
+ bus_teardown_intr(dev, txr->res, txr->tag);
+ txr->tag = NULL;
}
- if (adapter->res[i] != NULL) {
- bus_release_resource(dev, SYS_RES_IRQ,
- adapter->rid[i], adapter->res[i]);
+ if (txr->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, txr->res);
+ }
+
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+ rid = rxr->msix + 1;
+ if (rxr->tag != NULL) {
+ bus_teardown_intr(dev, rxr->res, rxr->tag);
+ rxr->tag = NULL;
}
+ if (rxr->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, rxr->res);
}
-#if __FreeBSD_version >= 602105
+ /* Clean the Legacy or Link interrupt last */
+ if (adapter->linkvec) /* we are doing MSIX */
+ rid = adapter->linkvec + 1;
+ else
+ rid = (adapter->msix != 0) ? 1 : 0;
+
+ if (adapter->tag != NULL) {
+ bus_teardown_intr(dev, adapter->res, adapter->tag);
+ adapter->tag = NULL;
+ }
+ if (adapter->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
+
+mem:
if (adapter->msix)
pci_release_msi(dev);
if (adapter->msix_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem);
-#endif
if (adapter->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -2394,7 +2427,6 @@ igb_free_pci_resources(struct adapter *adapter)
}
-#if __FreeBSD_version >= 602105
/*
* Setup Either MSI/X or MSI
*/
@@ -2423,18 +2455,16 @@ igb_setup_msix(struct adapter *adapter)
goto msi;
}
- /* Limit by the number set in header */
- if (msgs > IGB_MSIX_VEC)
- msgs = IGB_MSIX_VEC;
-
/* Figure out a reasonable auto config value */
queues = (mp_ncpus > ((msgs-1)/2)) ? (msgs-1)/2 : mp_ncpus;
- if (igb_tx_queues == 0)
- igb_tx_queues = queues;
- if (igb_rx_queues == 0)
- igb_rx_queues = queues;
- want = igb_tx_queues + igb_rx_queues + 1;
+ if (igb_num_queues == 0)
+ igb_num_queues = queues;
+ /*
+ ** Two vectors (RX/TX pair) per queue
+ ** plus an additional for Link interrupt
+ */
+ want = (igb_num_queues * 2) + 1;
if (msgs >= want)
msgs = want;
else {
@@ -2447,8 +2477,7 @@ igb_setup_msix(struct adapter *adapter)
if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
device_printf(adapter->dev,
"Using MSIX interrupts with %d vectors\n", msgs);
- adapter->num_tx_queues = igb_tx_queues;
- adapter->num_rx_queues = igb_rx_queues;
+ adapter->num_queues = igb_num_queues;
return (msgs);
}
msi:
@@ -2457,7 +2486,6 @@ msi:
device_printf(adapter->dev,"Using MSI interrupt\n");
return (msgs);
}
-#endif /* __FreeBSD_version >= 602105 */
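
The sizing logic above requests one RX/TX vector pair per queue plus one vector for link, clamped by the message count the device advertises and by mp_ncpus. A standalone sketch of that arithmetic is below (hypothetical helper in plain C); with 4 CPUs and 10 MSI-X messages, for example, it settles on 4 queues and 9 vectors. The fallback values used when the device cannot supply enough vectors are an assumption, since that branch falls outside this hunk.

#include <stdio.h>

/* Hypothetical mirror of the vector budget: 2 vectors per queue + 1 link. */
static int
igb_size_vectors(int msgs, int ncpus, int *queues)
{
	int q, want;

	q = (ncpus > ((msgs - 1) / 2)) ? (msgs - 1) / 2 : ncpus;
	want = (q * 2) + 1;
	if (want > msgs) {	/* not enough vectors: assumed fallback */
		q = 1;
		want = 3;
	}
	*queues = q;
	return (want);
}

int
main(void)
{
	int queues, want;

	want = igb_size_vectors(10, 4, &queues);
	printf("queues=%d vectors=%d\n", queues, want);	/* queues=4 vectors=9 */
	return (0);
}
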
/*********************************************************************
*
@@ -2545,6 +2573,10 @@ igb_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = igb_ioctl;
ifp->if_start = igb_start;
+#if __FreeBSD_version >= 800000
+ ifp->if_transmit = igb_mq_start;
+ ifp->if_qflush = igb_qflush;
+#endif
IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
IFQ_SET_READY(&ifp->if_snd);
@@ -2553,8 +2585,8 @@ igb_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities = ifp->if_capenable = 0;
- ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
- ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_MTU;
+ ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
+ ifp->if_capabilities |= IFCAP_TSO4;
ifp->if_capabilities |= IFCAP_JUMBO_MTU;
ifp->if_capenable = ifp->if_capabilities;
@@ -2562,10 +2594,8 @@ igb_setup_interface(device_t dev, struct adapter *adapter)
* Tell the upper layer(s) we support long frames.
*/
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER;
- ifp->if_capabilities |= IFCAP_VLAN_MTU;
- ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER;
- ifp->if_capenable |= IFCAP_VLAN_MTU;
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+ ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
/*
* Specify the media types supported by this adapter and register
@@ -2702,7 +2732,7 @@ igb_allocate_queues(struct adapter *adapter)
/* First allocate the TX ring struct memory */
if (!(adapter->tx_rings =
(struct tx_ring *) malloc(sizeof(struct tx_ring) *
- adapter->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate TX ring memory\n");
error = ENOMEM;
goto fail;
@@ -2712,7 +2742,7 @@ igb_allocate_queues(struct adapter *adapter)
/* Next allocate the RX */
if (!(adapter->rx_rings =
(struct rx_ring *) malloc(sizeof(struct rx_ring) *
- adapter->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate RX ring memory\n");
error = ENOMEM;
goto rx_fail;
@@ -2726,7 +2756,7 @@ igb_allocate_queues(struct adapter *adapter)
* possibility that things fail midcourse and we need to
* undo memory gracefully
*/
- for (int i = 0; i < adapter->num_tx_queues; i++, txconf++) {
+ for (int i = 0; i < adapter->num_queues; i++, txconf++) {
/* Set up some basics */
txr = &adapter->tx_rings[i];
txr->adapter = adapter;
@@ -2754,7 +2784,11 @@ igb_allocate_queues(struct adapter *adapter)
error = ENOMEM;
goto err_tx_desc;
}
-
+#if __FreeBSD_version >= 800000
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(IGB_BR_SIZE, M_DEVBUF,
+ M_WAITOK, &txr->tx_mtx);
+#endif
}
/*
@@ -2762,7 +2796,7 @@ igb_allocate_queues(struct adapter *adapter)
*/
rsize = roundup2(adapter->num_rx_desc *
sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
- for (int i = 0; i < adapter->num_rx_queues; i++, rxconf++) {
+ for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
rxr = &adapter->rx_rings[i];
rxr->adapter = adapter;
rxr->me = i;
@@ -2877,7 +2911,7 @@ igb_setup_transmit_ring(struct tx_ring *txr)
struct igb_tx_buffer *txbuf;
int i;
- /* Clear the old ring contents */
+ /* Clear the old descriptor contents */
bzero((void *)txr->tx_base,
(sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc);
/* Reset indices */
@@ -2916,7 +2950,7 @@ igb_setup_transmit_structures(struct adapter *adapter)
{
struct tx_ring *txr = adapter->tx_rings;
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
+ for (int i = 0; i < adapter->num_queues; i++, txr++)
igb_setup_transmit_ring(txr);
return;
@@ -2936,7 +2970,7 @@ igb_initialize_transmit_units(struct adapter *adapter)
INIT_DEBUGOUT("igb_initialize_transmit_units: begin");
/* Setup the Base and Length of the Tx Descriptor Rings */
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
u64 bus_addr = txr->txdma.dma_paddr;
E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(i),
@@ -2985,7 +3019,7 @@ igb_free_transmit_structures(struct adapter *adapter)
{
struct tx_ring *txr = adapter->tx_rings;
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
IGB_TX_LOCK(txr);
igb_free_transmit_buffers(txr);
igb_dma_free(adapter, &txr->txdma);
@@ -3034,7 +3068,9 @@ igb_free_transmit_buffers(struct tx_ring *txr)
tx_buffer->map = NULL;
}
}
-
+#if __FreeBSD_version >= 800000
+ buf_ring_free(txr->br, M_DEVBUF);
+#endif
if (txr->tx_buffers != NULL) {
free(txr->tx_buffers, M_DEVBUF);
txr->tx_buffers = NULL;
@@ -3046,7 +3082,6 @@ igb_free_transmit_buffers(struct tx_ring *txr)
return;
}
-#if __FreeBSD_version >= 700000
/**********************************************************************
*
* Setup work for hardware segmentation offload (TSO) on
@@ -3134,13 +3169,7 @@ igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *hdrlen)
txr->next_avail_desc = ctxd;
return TRUE;
}
-#else /* fake out for 6.2 */
-static boolean_t
-igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *hdrlen)
-{
- return (FALSE);
-}
-#endif
+
/*********************************************************************
*
@@ -3148,7 +3177,7 @@ igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *hdrlen)
*
**********************************************************************/
-static int
+static bool
igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
struct adapter *adapter = txr->adapter;
@@ -3158,41 +3187,29 @@ igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
struct ether_vlan_header *eh;
struct ip *ip = NULL;
struct ip6_hdr *ip6;
- int ehdrlen, ip_hlen = 0;
- u16 etype;
+ int ehdrlen, ctxd, ip_hlen = 0;
+ u16 etype, vtag = 0;
u8 ipproto = 0;
- bool offload = FALSE;
-#if __FreeBSD_version >= 700000
- u16 vtag = 0;
-#else
- struct m_tag *mtag;
-#endif
+ bool offload = TRUE;
+
+ if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
+ offload = FALSE;
- int ctxd = txr->next_avail_desc;
+ ctxd = txr->next_avail_desc;
tx_buffer = &txr->tx_buffers[ctxd];
TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];
- if (mp->m_pkthdr.csum_flags & CSUM_OFFLOAD)
- offload = TRUE;
-
/*
** In advanced descriptors the vlan tag must
- ** be placed into the descriptor itself.
+ ** be placed into the context descriptor, thus
+ ** we need to be here just for that setup.
*/
-#if __FreeBSD_version < 700000
- mtag = VLAN_OUTPUT_TAG(ifp, mp);
- if (mtag != NULL) {
- vlan_macip_lens |=
- htole16(VLAN_TAG_VALUE(mtag)) << E1000_ADVTXD_VLAN_SHIFT;
- offload = TRUE;
- }
-#else
if (mp->m_flags & M_VLANTAG) {
vtag = htole16(mp->m_pkthdr.ether_vtag);
vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
- offload = TRUE;
- }
-#endif
+ } else if (offload == FALSE)
+ return FALSE;
+
/*
* Determine where frame payload starts.
* Jump over vlan headers if already present,
@@ -3214,8 +3231,10 @@ igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
case ETHERTYPE_IP:
ip = (struct ip *)(mp->m_data + ehdrlen);
ip_hlen = ip->ip_hl << 2;
- if (mp->m_len < ehdrlen + ip_hlen)
- return FALSE;
+ if (mp->m_len < ehdrlen + ip_hlen) {
+ offload = FALSE;
+ break;
+ }
ipproto = ip->ip_p;
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
break;
@@ -3223,16 +3242,13 @@ igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
ip_hlen = sizeof(struct ip6_hdr);
if (mp->m_len < ehdrlen + ip_hlen)
- return FALSE; /* failure */
+ return (FALSE);
ipproto = ip6->ip6_nxt;
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
break;
-#ifdef IGB_TIMESYNC
- case ETHERTYPE_IEEE1588:
- return (IGB_TIMESTAMP);
-#endif
default:
- return (FALSE);
+ offload = FALSE;
+ break;
}
vlan_macip_lens |= ip_hlen;
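
The etype and ehdrlen values driving the switch above come from the usual VLAN-aware Ethernet header parse earlier in igb_tx_ctx_setup, which lies outside this hunk. A sketch of that standard pattern, written as a hypothetical helper rather than the committed code:

/*
 * Hypothetical helper: report where the network header starts, skipping an
 * 802.1Q tag when the frame carries one, and return the ethertype found.
 */
static int
igb_ether_hdrlen(struct mbuf *mp, u16 *etype)
{
	struct ether_vlan_header *eh;

	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		*etype = ntohs(eh->evl_proto);
		return (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	}
	*etype = ntohs(eh->evl_encap_proto);
	return (ETHER_HDR_LEN);
}
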
@@ -3240,43 +3256,24 @@ igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
switch (ipproto) {
case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
+ if (mp->m_pkthdr.csum_flags & CSUM_TCP)
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
- offload = TRUE;
- }
break;
case IPPROTO_UDP:
- {
-#ifdef IGB_TIMESYNC
- void *hdr = (caddr_t) ip + ip_hlen;
- struct udphdr *uh = (struct udphdr *)hdr;
-
- if (uh->uh_dport == htons(TSYNC_PORT))
- return (IGB_TIMESTAMP);
-#endif
- if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
+ if (mp->m_pkthdr.csum_flags & CSUM_UDP)
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
- offload = TRUE;
- }
break;
- }
#if __FreeBSD_version >= 800000
case IPPROTO_SCTP:
- {
- if (mp->m_pkthdr.csum_flags & CSUM_SCTP) {
+ if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
- offload = TRUE;
- }
break;
- }
#endif
default:
- return (FALSE);
+ offload = FALSE;
+ break;
}
- if (offload != TRUE)
- return (FALSE);
-
/* Now copy bits into descriptor */
TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
@@ -3292,7 +3289,7 @@ igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
txr->next_avail_desc = ctxd;
--txr->tx_avail;
- return (TRUE);
+ return (offload);
}
@@ -3309,7 +3306,7 @@ igb_txeof(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
int first, last, done, num_avail;
- u32 cleaned = 0;
+ u32 cleaned = 0;
struct igb_tx_buffer *tx_buffer;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct ifnet *ifp = adapter->ifp;
@@ -3382,19 +3379,22 @@ igb_txeof(struct tx_ring *txr)
txr->next_to_clean = first;
/*
- * If we have enough room, clear IFF_DRV_OACTIVE to
- * tell the stack that it is OK to send packets.
- * If there are no pending descriptors, clear the timeout.
+ * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
+ * that it is OK to send packets.
+ * If there are no pending descriptors, clear the timeout. Otherwise,
+ * if some descriptors have been freed, restart the timeout.
*/
if (num_avail > IGB_TX_CLEANUP_THRESHOLD) {
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ /* All clean, turn off the timer */
if (num_avail == adapter->num_tx_desc) {
txr->watchdog_timer = 0;
txr->tx_avail = num_avail;
return FALSE;
}
}
- /* Some descriptors cleaned, reset the watchdog */
+
+ /* Some cleaned, reset the timer */
if (cleaned)
txr->watchdog_timer = IGB_TX_TIMEOUT;
txr->tx_avail = num_avail;
@@ -3597,6 +3597,7 @@ static int
igb_setup_receive_ring(struct rx_ring *rxr)
{
struct adapter *adapter;
+ struct ifnet *ifp;
device_t dev;
struct igb_rx_buffer *rxbuf;
struct lro_ctrl *lro = &rxr->lro;
@@ -3604,6 +3605,9 @@ igb_setup_receive_ring(struct rx_ring *rxr)
adapter = rxr->adapter;
dev = adapter->dev;
+ ifp = adapter->ifp;
+ rxr->lro_enabled = FALSE;
+ rxr->hdr_split = FALSE;
/* Clear the ring contents */
rsize = roundup2(adapter->num_rx_desc *
@@ -3644,14 +3648,21 @@ igb_setup_receive_ring(struct rx_ring *rxr)
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- /* Now set up the LRO interface */
- if (igb_enable_lro) {
+ /*
+ ** Now set up the LRO interface, we
+ ** also only do head split when LRO
+ ** is enabled, since so often they
+ ** are undesirable in similar setups.
+ */
+ if (ifp->if_capenable & IFCAP_LRO) {
int err = tcp_lro_init(lro);
if (err) {
device_printf(dev,"LRO Initialization failed!\n");
goto fail;
}
INIT_DEBUGOUT("RX LRO Initialized\n");
+ rxr->lro_enabled = TRUE;
+ rxr->hdr_split = TRUE;
lro->ifp = adapter->ifp;
}
@@ -3685,7 +3696,7 @@ igb_setup_receive_structures(struct adapter *adapter)
struct rx_ring *rxr = adapter->rx_rings;
int i, j;
- for (i = 0; i < adapter->num_rx_queues; i++, rxr++)
+ for (i = 0; i < adapter->num_queues; i++, rxr++)
if (igb_setup_receive_ring(rxr))
goto fail;
@@ -3739,7 +3750,7 @@ igb_initialize_receive_units(struct adapter *adapter)
/*
** Set up for header split
*/
- if (igb_rx_hdr_split) {
+ if (rxr->hdr_split) {
/* Use a standard mbuf for the header */
srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
@@ -3757,11 +3768,7 @@ igb_initialize_receive_units(struct adapter *adapter)
/* Set maximum packet len */
psize = adapter->max_frame_size;
/* are we on a vlan? */
-#if __FreeBSD_version >= 700000
if (adapter->ifp->if_vlantrunk != NULL)
-#else
- if (adapter->ifp->if_nvlans != 0)
-#endif
psize += VLAN_TAG_SIZE;
E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
} else {
@@ -3771,7 +3778,7 @@ igb_initialize_receive_units(struct adapter *adapter)
}
/* Setup the Base and Length of the Rx Descriptor Rings */
- for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
u64 bus_addr = rxr->rxdma.dma_paddr;
u32 rxdctl;
@@ -3796,7 +3803,7 @@ igb_initialize_receive_units(struct adapter *adapter)
** Setup for RX MultiQueue
*/
rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
- if (adapter->num_rx_queues >1) {
+ if (adapter->num_queues >1) {
u32 random[10], mrqc, shift = 0;
union igb_reta {
u32 dword;
@@ -3809,7 +3816,7 @@ igb_initialize_receive_units(struct adapter *adapter)
/* Warning FM follows */
for (int i = 0; i < 128; i++) {
reta.bytes[i & 3] =
- (i % adapter->num_rx_queues) << shift;
+ (i % adapter->num_queues) << shift;
if ((i & 3) == 3)
E1000_WRITE_REG(&adapter->hw,
E1000_RETA(i >> 2), reta.dword);
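
Each 32-bit RETA register packs four one-byte redirection entries, which is why the loop above only issues a write on every fourth index. A standalone illustration of the packing (plain C, printing in place of the register write; the nonzero shift the driver applies on the 82575 is an assumption about code outside this hunk):

#include <stdint.h>
#include <stdio.h>

/* Fill a 128-entry redirection table for 'nq' queues, four entries per
 * 32-bit register, mirroring the loop above. */
static void
fill_reta(int nq, int shift)
{
	union {
		uint32_t dword;
		uint8_t  bytes[4];
	} reta;

	for (int i = 0; i < 128; i++) {
		reta.bytes[i & 3] = (i % nq) << shift;
		if ((i & 3) == 3)
			printf("RETA[%d] = 0x%08x\n", i >> 2,
			    (unsigned)reta.dword);
	}
}

int
main(void)
{
	fill_reta(2, 0);	/* 82576-style: entries alternate 0,1,0,1,... */
	return (0);
}
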
@@ -3875,7 +3882,7 @@ igb_initialize_receive_units(struct adapter *adapter)
* Setup the HW Rx Head and Tail Descriptor Pointers
* - needs to be after enable
*/
- for (int i = 0; i < adapter->num_rx_queues; i++) {
+ for (int i = 0; i < adapter->num_queues; i++) {
E1000_WRITE_REG(&adapter->hw, E1000_RDH(i), 0);
E1000_WRITE_REG(&adapter->hw, E1000_RDT(i),
adapter->num_rx_desc - 1);
@@ -3893,7 +3900,7 @@ igb_free_receive_structures(struct adapter *adapter)
{
struct rx_ring *rxr = adapter->rx_rings;
- for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
struct lro_ctrl *lro = &rxr->lro;
igb_free_receive_buffers(rxr);
tcp_lro_free(lro);
@@ -3996,11 +4003,11 @@ igb_rxeof(struct rx_ring *rxr, int count)
(count != 0) &&
(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
struct mbuf *sendmp, *mh, *mp;
- u16 hlen, plen, hdr, ptype, len_adj;
+ u16 hlen, plen, hdr, ptype, len_adj, vtag;
u8 dopayload, accept_frame, eop;
accept_frame = 1;
- hlen = plen = len_adj = 0;
+ hlen = plen = len_adj = vtag = 0;
sendmp = mh = mp = NULL;
ptype = (u16)(cur->wb.lower.lo_dword.data >> 4);
@@ -4019,7 +4026,7 @@ igb_rxeof(struct rx_ring *rxr, int count)
** packet spans multiple descriptors, in that
** case only the first header is valid.
*/
- if ((igb_rx_hdr_split) && (rxr->fmp == NULL)){
+ if ((rxr->hdr_split) && (rxr->fmp == NULL)){
hdr = le16toh(cur->
wb.lower.lo_dword.hs_rss.hdr_info);
hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
@@ -4086,8 +4093,35 @@ igb_rxeof(struct rx_ring *rxr, int count)
if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)
accept_frame = 0;
-
+#ifdef IGB_IEEE1588
+ /*
+ ** This linux code needs to be converted to work here
+ ** -----------------------------------------------------
+ ** if (unlikely(staterr & E1000_RXD_STAT_TS)) {
+ **	u64 regval;
+ **	u64 ns;
+ **	// Create an mtag and set it up
+ **	struct skb_shared_hwtstamps *shhwtstamps =
+ **	    skb_hwtstamps(skb);
+ **
+ **	rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
+ **	    "igb: no RX time stamp available for time stamped packet");
+ **	regval = rd32(E1000_RXSTMPL);
+ **	regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ **	// Do time conversion from the register
+ **	ns = timecounter_cyc2time(&adapter->clock, regval);
+ **	clocksync_update(&adapter->sync, ns);
+ **	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ **	shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ **	shhwtstamps->syststamp =
+ **	    clocksync_hw2sys(&adapter->sync, ns);
+ ** }
+ */
+#endif
if (accept_frame) {
+ /*
+ ** get_buf will overwrite the writeback
+ ** descriptor so save the VLAN tag now.
+ */
+ vtag = le16toh(cur->wb.upper.vlan);
if (igb_get_buf(rxr, i, dopayload) != 0) {
ifp->if_iqdrops++;
goto discard;
@@ -4103,16 +4137,16 @@ igb_rxeof(struct rx_ring *rxr, int count)
rxr->lmp = mh->m_next;
}
} else {
- /* Adjust for CRC frag */
- if (len_adj) {
- rxr->lmp->m_len -= len_adj;
- rxr->fmp->m_pkthdr.len -= len_adj;
- }
/* Chain mbuf's together */
mh->m_flags &= ~M_PKTHDR;
rxr->lmp->m_next = mh;
rxr->lmp = rxr->lmp->m_next;
rxr->fmp->m_pkthdr.len += mh->m_len;
+ /* Adjust for CRC frag */
+ if (len_adj) {
+ rxr->lmp->m_len -= len_adj;
+ rxr->fmp->m_pkthdr.len -= len_adj;
+ }
}
if (eop) {
@@ -4126,16 +4160,13 @@ igb_rxeof(struct rx_ring *rxr, int count)
igb_rx_checksum(staterr, rxr->fmp, sctp);
if (staterr & E1000_RXD_STAT_VP) {
-#if __FreeBSD_version >= 700000
- rxr->fmp->m_pkthdr.ether_vtag =
- le16toh(cur->wb.upper.vlan);
+ rxr->fmp->m_pkthdr.ether_vtag = vtag;
rxr->fmp->m_flags |= M_VLANTAG;
-#else
- VLAN_INPUT_TAG_NEW(ifp, rxr->fmp,
- (le16toh(cur->wb.upper.vlan) &
- E1000_RXD_SPC_VLAN_MASK));
-#endif
}
+#if __FreeBSD_version >= 800000
+ rxr->fmp->m_pkthdr.flowid = curcpu;
+ rxr->fmp->m_flags |= M_FLOWID;
+#endif
sendmp = rxr->fmp;
rxr->fmp = NULL;
rxr->lmp = NULL;
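
Tagging the completed mbuf with a flowid and M_FLOWID (above) gives an 8.0 multiqueue transmit path a stable key for keeping each flow on one ring. A hypothetical selector showing how such a tag is typically consumed; this is not part of the commit:

/* Hypothetical ring selector, illustrative only. */
static inline struct tx_ring *
igb_pick_tx_ring(struct adapter *adapter, struct mbuf *m)
{
	u32 idx;

	if (m->m_flags & M_FLOWID)
		idx = m->m_pkthdr.flowid % adapter->num_queues;
	else
		idx = curcpu % adapter->num_queues;
	return (&adapter->tx_rings[idx]);
}
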
@@ -4180,11 +4211,16 @@ discard:
** next_to_check is not gonna change.
*/
if (sendmp != NULL) {
- /* Use LRO if possible */
- if ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0)))
- /* Pass up to the stack */
- (*ifp->if_input)(ifp, sendmp);
- }
+ /*
+ ** Send to the stack if:
+ ** - LRO not enabled, or
+ ** - no LRO resources, or
+ ** - lro enqueue fails
+ */
+ if ((!rxr->lro_enabled) ||
+ ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0))))
+ (*ifp->if_input)(ifp, sendmp);
+ }
/* Get the next descriptor */
cur = &rxr->rx_base[i];
@@ -4272,28 +4308,17 @@ static void
igb_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
struct adapter *adapter = ifp->if_softc;
- u32 ctrl, rctl, index, vfta;
-
- ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
- ctrl |= E1000_CTRL_VME;
- E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
-
- /* Setup for Hardware Filter */
- rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
- rctl |= E1000_RCTL_VFE;
- rctl &= ~E1000_RCTL_CFIEN;
- E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+ u32 index, bit;
- /* Make entry in the hardware filter table */
- index = ((vtag >> 5) & 0x7F);
- vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
- vfta |= (1 << (vtag & 0x1F));
- E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);
-
- /* Update the frame size */
- E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
- adapter->max_frame_size + VLAN_TAG_SIZE);
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
+ igb_shadow_vfta[index] |= (1 << bit);
+ ++adapter->num_vlans;
+ /* Re-init to load the changes */
+ igb_init(adapter);
}
/*
@@ -4304,25 +4329,56 @@ static void
igb_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
struct adapter *adapter = ifp->if_softc;
- u32 index, vfta;
-
- /* Remove entry in the hardware filter table */
- index = ((vtag >> 5) & 0x7F);
- vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
- vfta &= ~(1 << (vtag & 0x1F));
- E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);
- /* Have all vlans unregistered? */
- if (adapter->ifp->if_vlantrunk == NULL) {
- u32 rctl;
- /* Turn off the filter table */
- rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
- rctl &= ~E1000_RCTL_VFE;
- rctl |= E1000_RCTL_CFIEN;
- E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
- /* Reset the frame size */
- E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
- adapter->max_frame_size);
- }
+ u32 index, bit;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
+ igb_shadow_vfta[index] &= ~(1 << bit);
+ --adapter->num_vlans;
+ /* Re-init to load the changes */
+ igb_init(adapter);
+}
+
+static void
+igb_setup_vlan_hw_support(struct adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
+
+ /*
+ ** We get here thru init_locked, meaning
+ ** a soft reset, this has already cleared
+ ** the VFTA and other state, so if there
+ ** have been no vlan's registered do nothing.
+ */
+ if (adapter->num_vlans == 0)
+ return;
+
+ /*
+ ** A soft reset zero's out the VFTA, so
+ ** we need to repopulate it now.
+ */
+ for (int i = 0; i < IGB_VFTA_SIZE; i++)
+ if (igb_shadow_vfta[i] != 0)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
+ i, igb_shadow_vfta[i]);
+
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg |= E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+ /* Enable the Filter Table */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+
+ /* Update the frame size */
+ E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
+ adapter->max_frame_size + VLAN_TAG_SIZE);
}
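
The shadow VFTA above maps each 12-bit VLAN ID to a single bit in a 128-word table, and the reset-time replay pushes those words back into hardware. A standalone illustration of the word/bit arithmetic (plain C); VLAN 100, for example, lands in word 3, bit 4:

#include <stdint.h>
#include <stdio.h>

#define VFTA_SIZE	128	/* 128 x 32 bits covers VLAN IDs 0-4095 */

static uint32_t shadow_vfta[VFTA_SIZE];

/* Same arithmetic as igb_register_vlan(): word = vtag >> 5, bit = vtag & 0x1F */
static void
vfta_set(uint16_t vtag)
{
	shadow_vfta[(vtag >> 5) & 0x7F] |= (1u << (vtag & 0x1F));
}

int
main(void)
{
	vfta_set(100);
	printf("word %u = 0x%08x\n", 100u >> 5,
	    (unsigned)shadow_vfta[100 >> 5]);	/* word 3 = 0x00000010 */
	return (0);
}
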
static void
@@ -4601,19 +4657,19 @@ igb_print_debug_info(struct adapter *adapter)
adapter->hw.fc.high_water,
adapter->hw.fc.low_water);
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
device_printf(dev, "Queue(%d) tdh = %d, tdt = %d\n", i,
E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
E1000_READ_REG(&adapter->hw, E1000_TDT(i)));
- device_printf(dev, "no descriptors avail event = %lld\n",
- (long long)txr->no_desc_avail);
+ device_printf(dev, "no descriptors avail event = %lu\n",
+ txr->no_desc_avail);
device_printf(dev, "TX(%d) MSIX IRQ Handled = %lld\n", txr->me,
(long long)txr->tx_irq);
device_printf(dev, "TX(%d) Packets sent = %lld\n", txr->me,
(long long)txr->tx_packets);
}
- for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
struct lro_ctrl *lro = &rxr->lro;
device_printf(dev, "Queue(%d) rdh = %d, rdt = %d\n", i,
E1000_READ_REG(&adapter->hw, E1000_RDH(i)),
@@ -4787,106 +4843,172 @@ igb_add_rx_process_limit(struct adapter *adapter, const char *name,
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
-#ifdef IGB_TIMESYNC
+#ifdef IGB_IEEE1588
/*
- * Initialize the Time Sync Feature
- */
+** igb_hwtstamp_ioctl - control hardware time stamping
+**
+** Outgoing time stamping can be enabled and disabled. Play nice and
+** disable it when requested, although it shouldn't cause any overhead
+** when no packet needs it. At most one packet in the queue may be
+** marked for time stamping, otherwise it would be impossible to tell
+** for sure to which packet the hardware time stamp belongs.
+**
+** Incoming time stamping has to be configured via the hardware
+** filters. Not all combinations are supported, in particular event
+** type has to be specified. Matching the kind of event packet is
+** not supported, with the exception of "all V2 events regardless of
+** level 2 or 4".
+**
+*/
static int
-igb_tsync_init(struct adapter *adapter)
+igb_hwtstamp_ioctl(struct adapter *adapter, struct ifreq *ifr)
{
- device_t dev = adapter->dev;
- u32 tx_ctl, rx_ctl, val;
-
-
- E1000_WRITE_REG(&adapter->hw, E1000_TIMINCA, (1<<24) |
- 20833/PICOSECS_PER_TICK);
+ struct e1000_hw *hw = &adapter->hw;
+ struct hwtstamp_ctrl *config;
+ u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
+ u32 tsync_rx_ctl_type = 0;
+ u32 tsync_rx_cfg = 0;
+ int is_l4 = 0;
+ int is_l2 = 0;
+ u16 port = 319; /* PTP */
+ u32 regval;
+
+ config = (struct hwtstamp_ctrl *) ifr->ifr_data;
+
+ /* reserved for future extensions */
+ if (config->flags)
+ return (EINVAL);
- adapter->last_stamp = E1000_READ_REG(&adapter->hw, E1000_SYSTIML);
- adapter->last_stamp |= (u64)E1000_READ_REG(&adapter->hw,
- E1000_SYSTIMH) << 32ULL;
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl_bit = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
+ break;
+ default:
+ return (ERANGE);
+ }
- /* Enable the TX side */
- tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
- tx_ctl |= 0x10;
- E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
- E1000_WRITE_FLUSH(&adapter->hw);
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl_bit = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ /*
+ * register TSYNCRXCFG must be set, therefore it is not
+ * possible to time stamp both Sync and Delay_Req messages
+ * => fall back to time stamping all packets
+ */
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+ is_l4 = 1;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = 1;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
+ is_l2 = 1;
+ is_l4 = 1;
+ config->rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = 1;
+ is_l4 = 1;
+ config->rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = 1;
+ break;
+ default:
+ return (ERANGE);
+ }
- tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
- if ((tx_ctl & 0x10) == 0) {
- device_printf(dev, "Failed to enable TX timestamping\n");
- return (ENXIO);
- }
+ /* enable/disable TX */
+ regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
+ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval);
- /* Enable RX */
- rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
- rx_ctl |= 0x10; /* Enable the feature */
- rx_ctl |= 0x04; /* This value turns on Ver 1 and 2 */
- E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);
+ /* enable/disable RX, define which PTP packets are time stamped */
+ regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
+ regval = (regval & ~0xE) | tsync_rx_ctl_type;
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval);
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg);
/*
- * Ethertype Filter Queue Filter[0][15:0] = 0x88F7 (Ethertype)
+ * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
+ * (Ethertype to filter on)
* Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
- * Ethertype Filter Queue Filter[0][31] = 0x1 (Enable Timestamping)
- */
- E1000_WRITE_REG(&adapter->hw, E1000_ETQF(0), 0x440088f7);
- E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCFG, 0x0);
-
- /*
- * Source Port Queue Filter Setup:
- * this is for UDP port filtering
+ * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
*/
- E1000_WRITE_REG(&adapter->hw, E1000_SPQF(0), TSYNC_PORT);
- /* Protocol = UDP, enable Timestamp, and filter on source/protocol */
- val = (0x11 | (1 << 27) | (6 << 28));
- E1000_WRITE_REG(&adapter->hw, E1000_FTQF(0), val);
+ E1000_WRITE_REG(hw, E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
+
+ /* L4 Queue Filter[0]: only filter by source and destination port */
+ E1000_WRITE_REG(hw, E1000_SPQF0, htons(port));
+ E1000_WRITE_REG(hw, E1000_IMIREXT(0), is_l4 ?
+ ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
+ E1000_WRITE_REG(hw, E1000_IMIR(0), is_l4 ?
+ (htons(port)
+ | (0<<16) /* immediate interrupt disabled */
+ | 0 /* (1<<17) bit cleared: do not bypass
+ destination port check */)
+ : 0);
+ E1000_WRITE_REG(hw, E1000_FTQF0, is_l4 ?
+ (0x11 /* UDP */
+ | (1<<15) /* VF not compared */
+ | (1<<27) /* Enable Timestamping */
+ | (7<<28) /* only source port filter enabled,
+ source/target address and protocol
+ masked */)
+ : ((1<<15) | (15<<28) /* all mask bits set = filter not
+ enabled */));
+
+ E1000_WRITE_FLUSH(hw);
+
+ adapter->hwtstamp = *config;
+
+ /* clear TX/RX time stamp registers, just to be sure */
+ regval = E1000_READ_REG(hw, E1000_TXSTMPH);
+ regval = E1000_READ_REG(hw, E1000_RXSTMPH);
- E1000_WRITE_FLUSH(&adapter->hw);
-
- rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
- if ((rx_ctl & 0x10) == 0) {
- device_printf(dev, "Failed to enable RX timestamping\n");
- return (ENXIO);
- }
-
- device_printf(dev, "IEEE 1588 Precision Time Protocol enabled\n");
-
- return (0);
+ return (0);
}
/*
- * Disable the Time Sync Feature
- */
-static void
-igb_tsync_disable(struct adapter *adapter)
+** igb_read_clock - read raw cycle counter (to be used by time counter)
+*/
+static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
- u32 tx_ctl, rx_ctl;
-
- tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
- tx_ctl &= ~0x10;
- E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl);
- E1000_WRITE_FLUSH(&adapter->hw);
-
- /* Invalidate TX Timestamp */
- E1000_READ_REG(&adapter->hw, E1000_TXSTMPH);
-
- tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL);
- if (tx_ctl & 0x10)
- HW_DEBUGOUT("Failed to disable TX timestamping\n");
-
- rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
- rx_ctl &= ~0x10;
-
- E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl);
- E1000_WRITE_FLUSH(&adapter->hw);
-
- /* Invalidate RX Timestamp */
- E1000_READ_REG(&adapter->hw, E1000_RXSATRH);
-
- rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL);
- if (rx_ctl & 0x10)
- HW_DEBUGOUT("Failed to disable RX timestamping\n");
-
- return;
+ struct igb_adapter *adapter =
+ container_of(tc, struct igb_adapter, cycles);
+ struct e1000_hw *hw = &adapter->hw;
+ u64 stamp;
+
+ stamp = E1000_READ_REG(hw, E1000_SYSTIML);
+ stamp |= (u64)E1000_READ_REG(hw, E1000_SYSTIMH) << 32ULL;
+
+ return (stamp);
}
-#endif /* IGB_TIMESYNC */
+#endif /* IGB_IEEE1588 */
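
For reference, the latched time stamps cleared just above are 64-bit values split across low/high registers. A sketch of how an RX stamp would be retrieved follows; it assumes, based on the "clear ... just to be sure" reads in this patch, that reading the high word is what releases the latch, so treat that detail and the helper itself as illustrative rather than committed code.

/*
 * Sketch only: fetch the latched RX time stamp when the hardware marks one
 * valid. Assumes reading RXSTMPH re-arms the latch for the next packet.
 */
static int
igb_get_rx_tstamp(struct e1000_hw *hw, u64 *stamp)
{
	u32 ctl;

	ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	if ((ctl & E1000_TSYNCRXCTL_VALID) == 0)
		return (EBUSY);		/* nothing latched yet */

	*stamp = E1000_READ_REG(hw, E1000_RXSTMPL);
	*stamp |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32;
	return (0);
}
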
diff --git a/sys/dev/e1000/if_igb.h b/sys/dev/e1000/if_igb.h
index 025a03c..ddc4d8a 100644
--- a/sys/dev/e1000/if_igb.h
+++ b/sys/dev/e1000/if_igb.h
@@ -184,18 +184,6 @@
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_EEPROM_APME 0x400;
-/* Code compatilbility between 6 and 7 */
-#ifndef ETHER_BPF_MTAP
-#define ETHER_BPF_MTAP BPF_MTAP
-#endif
-
-#if __FreeBSD_version < 700000
-#define CSUM_TSO 0
-#define IFCAP_TSO4 0
-#define FILTER_STRAY
-#define FILTER_HANDLED
-#endif
-
/*
* TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
* multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
@@ -232,6 +220,8 @@
#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
#define IGB_MAX_SCATTER 64
+#define IGB_VFTA_SIZE 128
+#define IGB_BR_SIZE 4096 /* ring buf size */
#define IGB_TSO_SIZE (65535 + sizeof(struct ether_vlan_header))
#define IGB_TSO_SEG_SIZE 4096 /* Max dma segment size */
#define IGB_HDR_BUF 128
@@ -258,39 +248,11 @@
#define IGB_BULK_LATENCY 1200
#define IGB_LINK_ITR 2000
-#ifdef IGB_TIMESYNC
/* Precision Time Sync (IEEE 1588) defines */
#define ETHERTYPE_IEEE1588 0x88F7
#define PICOSECS_PER_TICK 20833
#define TSYNC_PORT 319 /* UDP port for the protocol */
-/* TIMESYNC IOCTL defines */
-#define IGB_TIMESYNC_READTS _IOWR('i', 127, struct igb_tsync_read)
-#define IGB_TIMESTAMP 5 /* A unique return value */
-
-/* Used in the READTS IOCTL */
-struct igb_tsync_read {
- int read_current_time;
- struct timespec system_time;
- u64 network_time;
- u64 rx_stamp;
- u64 tx_stamp;
- u16 seqid;
- unsigned char srcid[6];
- int rx_valid;
- int tx_valid;
-};
-
-#endif /* IGB_TIMESYNC */
-
-struct adapter; /* forward reference */
-
-struct igb_int_delay_info {
- struct adapter *adapter; /* Back-pointer to the adapter struct */
- int offset; /* Register offset to read/write */
- int value; /* Current value in usecs */
-};
-
/*
* Bus dma allocation structure used by
* e1000_dma_malloc and e1000_dma_free.
@@ -322,7 +284,13 @@ struct tx_ring {
u32 next_to_clean;
volatile u16 tx_avail;
struct igb_tx_buffer *tx_buffers;
+#if __FreeBSD_version >= 800000
+ struct buf_ring *br;
+#endif
bus_dma_tag_t txtag; /* dma tag for tx */
+ struct resource *res;
+ void *tag;
+
u32 watchdog_timer;
u64 no_desc_avail;
u64 tx_irq;
@@ -340,6 +308,8 @@ struct rx_ring {
struct igb_dma_alloc rxdma; /* bus_dma glue for tx desc */
union e1000_adv_rx_desc *rx_base;
struct lro_ctrl lro;
+ bool lro_enabled;
+ bool hdr_split;
struct task rx_task; /* cleanup tasklet */
struct mtx rx_mtx;
char mtx_name[16];
@@ -358,6 +328,9 @@ struct rx_ring {
u32 bytes;
u32 eitr_setting;
+ struct resource *res;
+ void *tag;
+
/* Soft stats */
u64 rx_irq;
u64 rx_split_packets;
@@ -375,9 +348,8 @@ struct adapter {
struct resource *pci_mem;
struct resource *msix_mem;
- struct resource *res[IGB_MSIX_VEC];
- void *tag[IGB_MSIX_VEC];
- int rid[IGB_MSIX_VEC];
+ struct resource *res;
+ void *tag;
u32 eims_mask;
int linkvec;
@@ -395,8 +367,11 @@ struct adapter {
struct task link_task;
struct task rxtx_task;
struct taskqueue *tq; /* private task queue */
+ u16 num_queues;
+
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
+ u32 num_vlans;
/* Management and WOL features */
int wol;
@@ -413,7 +388,6 @@ struct adapter {
*/
struct tx_ring *tx_rings;
u16 num_tx_desc;
- u16 num_tx_queues;
u32 txd_cmd;
/*
@@ -422,7 +396,6 @@ struct adapter {
struct rx_ring *rx_rings;
bool rx_hdr_split;
u16 num_rx_desc;
- u16 num_rx_queues;
int rx_process_limit;
u32 rx_mbuf_sz;
u32 rx_mask;
@@ -439,10 +412,12 @@ struct adapter {
boolean_t in_detach;
-#ifdef IGB_TIMESYNC
- u64 last_stamp;
- u64 last_sec;
- u32 last_ns;
+#ifdef IGB_IEEE1588
+ /* IEEE 1588 precision time support */
+ struct cyclecounter cycles;
+ struct nettimer clock;
+ struct nettime_compare compare;
+ struct hwtstamp_ctrl hwtstamp;
#endif
struct e1000_hw_stats stats;
@@ -483,6 +458,7 @@ struct igb_rx_buffer {
#define IGB_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
#define IGB_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
#define IGB_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
+#define IGB_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
#define IGB_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
#define IGB_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
#define IGB_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)