summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjfv <jfv@FreeBSD.org>2009-12-08 01:07:44 +0000
committerjfv <jfv@FreeBSD.org>2009-12-08 01:07:44 +0000
commitfe4debf5259516d09b353ac063fdd9b779ecb35c (patch)
tree1cdfb634e42282edf41b9a28c0d8cbb9d37a57be
parentc78c495a2fdd7fefc465b3b32ac2df27d6b87ed1 (diff)
downloadFreeBSD-src-fe4debf5259516d09b353ac063fdd9b779ecb35c.zip
FreeBSD-src-fe4debf5259516d09b353ac063fdd9b779ecb35c.tar.gz
Resync with the Intel versions of both the em and igb
drivers. These add new hardware support, most importantly the PCH (Intel 5 Series chipset) in the em driver. Also, both drivers now have the simplified (and, I hope, improved) watchdog code. The igb driver uses the new RX cleanup that I first implemented in ixgbe. em - version 6.9.24; igb - version 1.8.4
-rw-r--r--sys/dev/e1000/LICENSE2
-rw-r--r--sys/dev/e1000/e1000_80003es2lan.c149
-rw-r--r--sys/dev/e1000/e1000_80003es2lan.h12
-rw-r--r--sys/dev/e1000/e1000_82541.c35
-rw-r--r--sys/dev/e1000/e1000_82571.c198
-rw-r--r--sys/dev/e1000/e1000_82575.c744
-rw-r--r--sys/dev/e1000/e1000_82575.h75
-rw-r--r--sys/dev/e1000/e1000_api.c15
-rw-r--r--sys/dev/e1000/e1000_defines.h99
-rw-r--r--sys/dev/e1000/e1000_hw.h29
-rw-r--r--sys/dev/e1000/e1000_ich8lan.c834
-rw-r--r--sys/dev/e1000/e1000_ich8lan.h28
-rw-r--r--sys/dev/e1000/e1000_mac.c23
-rw-r--r--sys/dev/e1000/e1000_manage.c10
-rw-r--r--sys/dev/e1000/e1000_osdep.h44
-rw-r--r--sys/dev/e1000/e1000_phy.c757
-rw-r--r--sys/dev/e1000/e1000_phy.h21
-rw-r--r--sys/dev/e1000/e1000_regs.h42
-rw-r--r--sys/dev/e1000/if_em.c586
-rw-r--r--sys/dev/e1000/if_em.h13
-rw-r--r--sys/dev/e1000/if_igb.c1196
-rw-r--r--sys/dev/e1000/if_igb.h34
22 files changed, 3148 insertions, 1798 deletions
diff --git a/sys/dev/e1000/LICENSE b/sys/dev/e1000/LICENSE
index d3f8bf5..f271dae 100644
--- a/sys/dev/e1000/LICENSE
+++ b/sys/dev/e1000/LICENSE
@@ -1,6 +1,6 @@
$FreeBSD$
- Copyright (c) 2001-2008, Intel Corporation
+ Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_80003es2lan.c b/sys/dev/e1000/e1000_80003es2lan.c
index 5c06086..cee3164 100644
--- a/sys/dev/e1000/e1000_80003es2lan.c
+++ b/sys/dev/e1000/e1000_80003es2lan.c
@@ -171,7 +171,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
break;
}
- nvm->type = e1000_nvm_eeprom_spi;
+ nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
@@ -206,17 +206,22 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_init_mac_params_80003es2lan");
- /* Set media type */
+ /* Set media type and media-dependent function pointers */
switch (hw->device_id) {
case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
hw->phy.media_type = e1000_media_type_internal_serdes;
+ mac->ops.check_for_link = e1000_check_for_serdes_link_generic;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_generic;
break;
default:
hw->phy.media_type = e1000_media_type_copper;
+ mac->ops.check_for_link = e1000_check_for_copper_link_generic;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_80003es2lan;
break;
}
@@ -230,6 +235,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
mac->arc_subsystem_valid =
(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
? TRUE : FALSE;
+ /* Adaptive IFS not supported */
+ mac->adaptive_ifs = FALSE;
/* Function pointers */
@@ -241,27 +248,6 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
mac->ops.init_hw = e1000_init_hw_80003es2lan;
/* link setup */
mac->ops.setup_link = e1000_setup_link_generic;
- /* physical interface link setup */
- mac->ops.setup_physical_interface =
- (hw->phy.media_type == e1000_media_type_copper)
- ? e1000_setup_copper_link_80003es2lan
- : e1000_setup_fiber_serdes_link_generic;
- /* check for link */
- switch (hw->phy.media_type) {
- case e1000_media_type_copper:
- mac->ops.check_for_link = e1000_check_for_copper_link_generic;
- break;
- case e1000_media_type_fiber:
- mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
- break;
- case e1000_media_type_internal_serdes:
- mac->ops.check_for_link = e1000_check_for_serdes_link_generic;
- break;
- default:
- ret_val = -E1000_ERR_CONFIG;
- goto out;
- break;
- }
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
/* multicast address update */
@@ -290,8 +276,10 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
/* link info */
mac->ops.get_link_up_info = e1000_get_link_up_info_80003es2lan;
-out:
- return ret_val;
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
+ return E1000_SUCCESS;
}
/**
@@ -307,7 +295,6 @@ void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw)
hw->mac.ops.init_params = e1000_init_mac_params_80003es2lan;
hw->nvm.ops.init_params = e1000_init_nvm_params_80003es2lan;
hw->phy.ops.init_params = e1000_init_phy_params_80003es2lan;
- e1000_get_bus_info_pcie_generic(hw);
}
/**
@@ -342,7 +329,6 @@ static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
e1000_release_swfw_sync_80003es2lan(hw, mask);
}
-
/**
* e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register
* @hw: pointer to the HW structure
@@ -532,28 +518,36 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
goto out;
}
- /*
- * The "ready" bit in the MDIC register may be incorrectly set
- * before the device has completed the "Page Select" MDI
- * transaction. So we wait 200us after each MDI command...
- */
- usec_delay(200);
+ if (hw->dev_spec._80003es2lan.mdic_wa_enable == TRUE) {
+ /*
+ * The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ usec_delay(200);
- /* ...and verify the command was successful. */
- ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
+ /* ...and verify the command was successful. */
+ ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
- if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
- ret_val = -E1000_ERR_PHY;
- e1000_release_phy_80003es2lan(hw);
- goto out;
- }
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ ret_val = -E1000_ERR_PHY;
+ e1000_release_phy_80003es2lan(hw);
+ goto out;
+ }
+
+ usec_delay(200);
- usec_delay(200);
+ ret_val = e1000_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
- ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ usec_delay(200);
+ } else {
+ ret_val = e1000_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
- usec_delay(200);
e1000_release_phy_80003es2lan(hw);
out:
@@ -599,29 +593,36 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
goto out;
}
+ if (hw->dev_spec._80003es2lan.mdic_wa_enable == TRUE) {
+ /*
+ * The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ usec_delay(200);
- /*
- * The "ready" bit in the MDIC register may be incorrectly set
- * before the device has completed the "Page Select" MDI
- * transaction. So we wait 200us after each MDI command...
- */
- usec_delay(200);
+ /* ...and verify the command was successful. */
+ ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
- /* ...and verify the command was successful. */
- ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ ret_val = -E1000_ERR_PHY;
+ e1000_release_phy_80003es2lan(hw);
+ goto out;
+ }
- if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
- ret_val = -E1000_ERR_PHY;
- e1000_release_phy_80003es2lan(hw);
- goto out;
- }
+ usec_delay(200);
- usec_delay(200);
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
- ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ usec_delay(200);
+ } else {
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
- usec_delay(200);
e1000_release_phy_80003es2lan(hw);
out:
@@ -802,13 +803,13 @@ static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
index = phy_data & GG82563_DSPD_CABLE_LENGTH;
- if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE + 5) {
- ret_val = E1000_ERR_PHY;
+ if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
+ ret_val = -E1000_ERR_PHY;
goto out;
}
phy->min_cable_length = e1000_gg82563_cable_length_table[index];
- phy->max_cable_length = e1000_gg82563_cable_length_table[index+5];
+ phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
@@ -916,10 +917,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
- if (ret_val) {
+ if (ret_val)
DEBUGOUT("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
- }
/* Disabling VLAN filtering */
DEBUGOUT("Initializing the IEEE VLAN\n");
@@ -969,6 +969,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
reg_data &= ~0x00100000;
E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
+ /* default to TRUE to enable the MDIC W/A */
+ hw->dev_spec._80003es2lan.mdic_wa_enable = TRUE;
+
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET >>
+ E1000_KMRNCTRLSTA_OFFSET_SHIFT,
+ &i);
+ if (!ret_val) {
+ if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
+ E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+ hw->dev_spec._80003es2lan.mdic_wa_enable = FALSE;
+ }
+
/*
* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
@@ -1303,7 +1316,6 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
E1000_WRITE_REG(hw, E1000_TIPG, tipg);
-
do {
ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
&reg_data);
@@ -1357,7 +1369,6 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
E1000_WRITE_REG(hw, E1000_TIPG, tipg);
-
do {
ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
&reg_data);
diff --git a/sys/dev/e1000/e1000_80003es2lan.h b/sys/dev/e1000/e1000_80003es2lan.h
index 7bf8d9d..3ab1ec9 100644
--- a/sys/dev/e1000/e1000_80003es2lan.h
+++ b/sys/dev/e1000/e1000_80003es2lan.h
@@ -1,6 +1,6 @@
-/*******************************************************************************
+/******************************************************************************
- Copyright (c) 2001-2008, Intel Corporation
+ Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -29,9 +29,8 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-*******************************************************************************/
-/* $FreeBSD$ */
-
+******************************************************************************/
+/*$FreeBSD$*/
#ifndef _E1000_80003ES2LAN_H_
#define _E1000_80003ES2LAN_H_
@@ -49,6 +48,9 @@
#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
diff --git a/sys/dev/e1000/e1000_82541.c b/sys/dev/e1000/e1000_82541.c
index 68d1b05..c58a602 100644
--- a/sys/dev/e1000/e1000_82541.c
+++ b/sys/dev/e1000/e1000_82541.c
@@ -59,6 +59,7 @@ static s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw,
static s32 e1000_setup_led_82541(struct e1000_hw *hw);
static s32 e1000_cleanup_led_82541(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw);
+static s32 e1000_read_mac_addr_82541(struct e1000_hw *hw);
static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
bool link_up);
static s32 e1000_phy_init_script_82541(struct e1000_hw *hw);
@@ -261,6 +262,8 @@ static s32 e1000_init_mac_params_82541(struct e1000_hw *hw)
mac->ops.clear_vfta = e1000_clear_vfta_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82541;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* setup LED */
@@ -1292,3 +1295,35 @@ static void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw)
E1000_READ_REG(hw, E1000_MGTPDC);
E1000_READ_REG(hw, E1000_MGTPTC);
}
+
+/**
+ * e1000_read_mac_addr_82541 - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ **/
+static s32 e1000_read_mac_addr_82541(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 offset, nvm_data, i;
+
+ DEBUGFUNC("e1000_read_mac_addr");
+
+ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+ offset = i >> 1;
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+ hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
+ hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
+ }
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+out:
+ return ret_val;
+}
+
diff --git a/sys/dev/e1000/e1000_82571.c b/sys/dev/e1000/e1000_82571.c
index 18fe745..be59b3b 100644
--- a/sys/dev/e1000/e1000_82571.c
+++ b/sys/dev/e1000/e1000_82571.c
@@ -46,7 +46,6 @@
* 82573E Gigabit Ethernet Controller (Copper)
* 82573L Gigabit Ethernet Controller
* 82574L Gigabit Network Connection
- * 82574L Gigabit Network Connection
* 82583V Gigabit Network Connection
*/
@@ -106,7 +105,6 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->reset_delay_us = 100;
phy->ops.acquire = e1000_get_hw_semaphore_82571;
- phy->ops.check_polarity = e1000_check_polarity_igp;
phy->ops.check_reset_block = e1000_check_reset_block_generic;
phy->ops.release = e1000_put_hw_semaphore_82571;
phy->ops.reset = e1000_phy_hw_reset_generic;
@@ -121,6 +119,7 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->type = e1000_phy_igp_2;
phy->ops.get_cfg_done = e1000_get_cfg_done_82571;
phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
phy->ops.read_reg = e1000_read_phy_reg_igp;
@@ -132,6 +131,7 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
/* Verify PHY ID */
if (phy->id != IGP01E1000_I_PHY_ID) {
ret_val = -E1000_ERR_PHY;
+ DEBUGOUT1("PHY ID unknown: type = 0x%08x\n", phy->id);
goto out;
}
break;
@@ -139,6 +139,7 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->type = e1000_phy_m88;
phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
phy->ops.commit = e1000_phy_sw_reset_generic;
phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
phy->ops.get_cable_length = e1000_get_cable_length_m88;
@@ -155,11 +156,12 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
goto out;
}
break;
- case e1000_82583:
case e1000_82574:
+ case e1000_82583:
phy->type = e1000_phy_bm;
phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
phy->ops.commit = e1000_phy_sw_reset_generic;
phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
phy->ops.get_cable_length = e1000_get_cable_length_m88;
@@ -266,28 +268,42 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- s32 ret_val = E1000_SUCCESS;
u32 swsm = 0;
u32 swsm2 = 0;
bool force_clear_smbi = FALSE;
DEBUGFUNC("e1000_init_mac_params_82571");
- /* Set media type */
+ /* Set media type and media-dependent function pointers */
switch (hw->device_id) {
case E1000_DEV_ID_82571EB_FIBER:
case E1000_DEV_ID_82572EI_FIBER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
hw->phy.media_type = e1000_media_type_fiber;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_fiber_serdes_generic;
break;
case E1000_DEV_ID_82571EB_SERDES:
case E1000_DEV_ID_82571EB_SERDES_DUAL:
case E1000_DEV_ID_82571EB_SERDES_QUAD:
case E1000_DEV_ID_82572EI_SERDES:
hw->phy.media_type = e1000_media_type_internal_serdes;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ mac->ops.check_for_link = e1000_check_for_serdes_link_82571;
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_fiber_serdes_generic;
break;
default:
hw->phy.media_type = e1000_media_type_copper;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_82571;
+ mac->ops.check_for_link = e1000_check_for_copper_link_generic;
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_copper_generic;
break;
}
@@ -301,58 +317,19 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
mac->arc_subsystem_valid =
(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
? TRUE : FALSE;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = TRUE;
/* Function pointers */
/* bus type/speed/width */
mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
- /* function id */
- switch (hw->mac.type) {
- case e1000_82573:
- case e1000_82574:
- case e1000_82583:
- mac->ops.set_lan_id = e1000_set_lan_id_single_port;
- break;
- default:
- break;
- }
/* reset */
mac->ops.reset_hw = e1000_reset_hw_82571;
/* hw initialization */
mac->ops.init_hw = e1000_init_hw_82571;
/* link setup */
mac->ops.setup_link = e1000_setup_link_82571;
- /* physical interface link setup */
- mac->ops.setup_physical_interface =
- (hw->phy.media_type == e1000_media_type_copper)
- ? e1000_setup_copper_link_82571
- : e1000_setup_fiber_serdes_link_82571;
- /* check for link */
- switch (hw->phy.media_type) {
- case e1000_media_type_copper:
- mac->ops.check_for_link = e1000_check_for_copper_link_generic;
- break;
- case e1000_media_type_fiber:
- mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
- break;
- case e1000_media_type_internal_serdes:
- mac->ops.check_for_link = e1000_check_for_serdes_link_82571;
- break;
- default:
- ret_val = -E1000_ERR_CONFIG;
- goto out;
- break;
- }
- /* check management mode */
- switch (hw->mac.type) {
- case e1000_82574:
- case e1000_82583:
- mac->ops.check_mng_mode = e1000_check_mng_mode_82574;
- break;
- default:
- mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
- break;
- }
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
/* writing VFTA */
@@ -371,24 +348,29 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
mac->ops.setup_led = e1000_setup_led_generic;
/* cleanup LED */
mac->ops.cleanup_led = e1000_cleanup_led_generic;
- /* turn on/off LED */
+ /* turn off LED */
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82571;
+
+ /* MAC-specific function pointers */
switch (hw->mac.type) {
+ case e1000_82573:
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
+ mac->ops.led_on = e1000_led_on_generic;
+ break;
case e1000_82574:
case e1000_82583:
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ mac->ops.check_mng_mode = e1000_check_mng_mode_82574;
mac->ops.led_on = e1000_led_on_82574;
break;
default:
+ mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
mac->ops.led_on = e1000_led_on_generic;
break;
}
- mac->ops.led_off = e1000_led_off_generic;
- /* clear hardware counters */
- mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82571;
- /* link info */
- mac->ops.get_link_up_info =
- (hw->phy.media_type == e1000_media_type_copper)
- ? e1000_get_speed_and_duplex_copper_generic
- : e1000_get_speed_and_duplex_fiber_serdes_generic;
/*
* Ensure that the inter-port SWSM.SMBI lock bit is clear before
@@ -434,8 +416,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
*/
hw->dev_spec._82571.smb_counter = 0;
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
@@ -501,7 +482,6 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
ret_val = -E1000_ERR_PHY;
break;
}
-
out:
return ret_val;
}
@@ -512,7 +492,7 @@ out:
*
* Acquire the HW semaphore to access the PHY or NVM
**/
-s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
s32 ret_val = E1000_SUCCESS;
@@ -577,7 +557,7 @@ out:
*
* Release hardware semaphore used to access the PHY or NVM
**/
-void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
@@ -610,9 +590,9 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
goto out;
switch (hw->mac.type) {
+ case e1000_82573:
case e1000_82574:
case e1000_82583:
- case e1000_82573:
break;
default:
ret_val = e1000_acquire_nvm_generic(hw);
@@ -831,7 +811,8 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
DEBUGFUNC("e1000_get_cfg_done_82571");
while (timeout) {
- if (E1000_READ_REG(hw, E1000_EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL) &
+ E1000_NVM_CFG_DONE_PORT_0)
break;
msec_delay(1);
timeout--;
@@ -966,9 +947,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
* Ownership defaults to firmware after a reset.
*/
switch (hw->mac.type) {
+ case e1000_82573:
case e1000_82574:
case e1000_82583:
- case e1000_82573:
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
@@ -1014,9 +995,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
+ case e1000_82573:
case e1000_82574:
case e1000_82583:
- case e1000_82573:
msec_delay(25);
break;
default:
@@ -1061,10 +1042,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
- if (ret_val) {
+ if (ret_val)
DEBUGOUT("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
- }
/* Disabling VLAN filtering */
DEBUGOUT("Initializing the IEEE VLAN\n");
@@ -1097,9 +1077,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* ...for both queues. */
switch (mac->type) {
+ case e1000_82573:
case e1000_82574:
case e1000_82583:
- case e1000_82573:
e1000_enable_tx_pkt_filtering_generic(hw);
reg_data = E1000_READ_REG(hw, E1000_GCR);
reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
@@ -1108,8 +1088,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
default:
reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB |
- E1000_TXDCTL_COUNT_DESC;
+ E1000_TXDCTL_FULL_TX_DESC_WB |
+ E1000_TXDCTL_COUNT_DESC;
E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);
break;
}
@@ -1178,11 +1158,10 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
}
/* Device Control */
-
switch (hw->mac.type) {
+ case e1000_82573:
case e1000_82574:
case e1000_82583:
- case e1000_82573:
reg = E1000_READ_REG(hw, E1000_CTRL);
reg &= ~(1 << 29);
E1000_WRITE_REG(hw, E1000_CTRL, reg);
@@ -1193,9 +1172,9 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
/* Extended Device Control */
switch (hw->mac.type) {
+ case e1000_82573:
case e1000_82574:
case e1000_82583:
- case e1000_82573:
reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
reg &= ~(1 << 23);
reg |= (1 << 22);
@@ -1205,7 +1184,6 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
break;
}
-
if (hw->mac.type == e1000_82571) {
reg = E1000_READ_REG(hw, E1000_PBA_ECC);
reg |= E1000_PBA_ECC_CORR_EN;
@@ -1216,7 +1194,6 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
* Workaround for hardware errata.
* Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
*/
-
if ((hw->mac.type == e1000_82571) ||
(hw->mac.type == e1000_82572)) {
reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
@@ -1225,13 +1202,13 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
}
/* PCI-Ex Control Registers */
-
switch (hw->mac.type) {
case e1000_82574:
case e1000_82583:
reg = E1000_READ_REG(hw, E1000_GCR);
reg |= (1 << 22);
E1000_WRITE_REG(hw, E1000_GCR, reg);
+
/*
* Workaround for hardware errata.
* apply workaround for hardware errata documented in errata
@@ -1267,39 +1244,36 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
DEBUGFUNC("e1000_clear_vfta_82571");
switch (hw->mac.type) {
+ case e1000_82573:
case e1000_82574:
case e1000_82583:
- case e1000_82573:
if (hw->mng_cookie.vlan_id != 0) {
/*
- *The VFTA is a 4096b bit-field, each identifying
- *a single VLAN ID. The following operations
- *determine which 32b entry (i.e. offset) into the
- *array we want to set the VLAN ID (i.e. bit) of
- *the manageability unit.
- */
+ * The VFTA is a 4096b bit-field, each identifying
+ * a single VLAN ID. The following operations
+ * determine which 32b entry (i.e. offset) into the
+ * array we want to set the VLAN ID (i.e. bit) of
+ * the manageability unit.
+ */
vfta_offset = (hw->mng_cookie.vlan_id >>
E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
-
- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- /*
- *If the offset we want to clear is the same offset of
- *the manageability VLAN ID, then clear all bits except
- *that of the manageability unit
- */
- vfta_value = (offset == vfta_offset) ?
- vfta_bit_in_reg : 0;
- E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset,
- vfta_value);
- E1000_WRITE_FLUSH(hw);
- }
break;
default:
break;
}
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ /*
+ * If the offset we want to clear is the same offset of the
+ * manageability VLAN ID, then clear all bits except that of
+ * the manageability unit.
+ */
+ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
+ E1000_WRITE_FLUSH(hw);
+ }
}
/**
@@ -1369,9 +1343,9 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
* set it to full.
*/
switch (hw->mac.type) {
+ case e1000_82573:
case e1000_82574:
case e1000_82583:
- case e1000_82573:
if (hw->fc.requested_mode == e1000_fc_default)
hw->fc.requested_mode = e1000_fc_full;
break;
@@ -1460,7 +1434,7 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
* Reports the link state as up or down.
*
* If autonegotiation is supported by the link partner, the link state is
- * determined by the result of autongotiation. This is the most likely case.
+ * determined by the result of autonegotiation. This is the most likely case.
* If autonegotiation is not supported by the link partner, and the link
* has a valid signal, force the link up.
*
@@ -1472,7 +1446,7 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
* 4) forced_up (the link has been forced up, it did not autonegotiate)
*
**/
-s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 rxcw;
@@ -1524,9 +1498,10 @@ s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
case e1000_serdes_link_autoneg_progress:
if (rxcw & E1000_RXCW_C) {
- /* We received /C/ ordered sets, meaning the
+ /*
+ * We received /C/ ordered sets, meaning the
* link partner has autonegotiated, and we can
- * trust the Link Up (LU) status bit
+ * trust the Link Up (LU) status bit.
*/
if (status & E1000_STATUS_LU) {
mac->serdes_link_state =
@@ -1534,13 +1509,14 @@ s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
DEBUGOUT("AN_PROG -> AN_UP\n");
mac->serdes_has_link = TRUE;
} else {
- /* Autoneg completed, but failed */
+ /* Autoneg completed, but failed. */
mac->serdes_link_state =
e1000_serdes_link_down;
DEBUGOUT("AN_PROG -> DOWN\n");
}
} else {
- /* The link partner did not autoneg.
+ /*
+ * The link partner did not autoneg.
* Force link up and full duplex, and change
* state to forced.
*/
@@ -1565,9 +1541,11 @@ s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
case e1000_serdes_link_down:
default:
- /* The link was down but the receiver has now gained
+ /*
+ * The link was down but the receiver has now gained
* valid sync, so lets see if we can bring the link
- * up. */
+ * up.
+ */
E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
E1000_WRITE_REG(hw, E1000_CTRL,
(ctrl & ~E1000_CTRL_SLU));
@@ -1583,9 +1561,9 @@ s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
DEBUGOUT("ANYSTATE -> DOWN\n");
} else {
/*
- * We have sync, and can tolerate one
- * invalid (IV) codeword before declaring
- * link down, so reread to look again
+ * We have sync, and can tolerate one invalid (IV)
+ * codeword before declaring link down, so reread
+ * to look again.
*/
usec_delay(10);
rxcw = E1000_READ_REG(hw, E1000_RXCW);
@@ -1621,15 +1599,15 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
}
switch (hw->mac.type) {
+ case e1000_82573:
case e1000_82574:
case e1000_82583:
- case e1000_82573:
- if(*data == ID_LED_RESERVED_F746)
+ if (*data == ID_LED_RESERVED_F746)
*data = ID_LED_DEFAULT_82573;
break;
default:
if (*data == ID_LED_RESERVED_0000 ||
- *data == ID_LED_RESERVED_FFFF)
+ *data == ID_LED_RESERVED_FFFF)
*data = ID_LED_DEFAULT;
break;
}
diff --git a/sys/dev/e1000/e1000_82575.c b/sys/dev/e1000/e1000_82575.c
index 2f8e8ed..5c877c2 100644
--- a/sys/dev/e1000/e1000_82575.c
+++ b/sys/dev/e1000/e1000_82575.c
@@ -59,16 +59,20 @@ static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
u16 *data);
static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
+static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
+static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw,
+ u32 offset, u16 *data);
+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw,
+ u32 offset, u16 data);
static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
bool active);
static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
-static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw);
+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
u32 offset, u16 data);
static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
-static s32 e1000_configure_pcs_link_82575(struct e1000_hw *hw);
static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
u16 *speed, u16 *duplex);
static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
@@ -77,9 +81,15 @@ static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
-void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
+static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
+static const u16 e1000_82580_rxpbs_table[] =
+ { 36, 72, 144, 1, 2, 4, 8, 16,
+ 35, 70, 140 };
+#define E1000_82580_RXPBS_TABLE_SIZE \
+ (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
+
/**
* e1000_init_phy_params_82575 - Init PHY func ptrs.
* @hw: pointer to the HW structure
@@ -94,11 +104,11 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
if (hw->phy.media_type != e1000_media_type_copper) {
phy->type = e1000_phy_none;
goto out;
- } else {
- phy->ops.power_up = e1000_power_up_phy_copper;
- phy->ops.power_down = e1000_power_down_phy_copper_82575;
}
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82575;
+
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->reset_delay_us = 100;
@@ -112,6 +122,11 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
+ } else if ((hw->mac.type == e1000_82580) ||
+ (hw->mac.type == e1000_82580er)) {
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ phy->ops.read_reg = e1000_read_phy_reg_82580;
+ phy->ops.write_reg = e1000_write_phy_reg_82580;
} else {
phy->ops.reset = e1000_phy_hw_reset_generic;
phy->ops.read_reg = e1000_read_phy_reg_igp;
@@ -140,6 +155,13 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
break;
+ case I82580_I_PHY_ID:
+ phy->type = e1000_phy_82580;
+ phy->ops.check_polarity = e1000_check_polarity_82577;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_info = e1000_get_phy_info_82577;
+ break;
default:
ret_val = -E1000_ERR_PHY;
goto out;
@@ -192,7 +214,7 @@ static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
- nvm->word_size = 1 << size;
+ nvm->word_size = 1 << size;
/* Function Pointers */
nvm->ops.acquire = e1000_acquire_nvm_82575;
@@ -230,24 +252,41 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
dev_spec->sgmii_active = FALSE;
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- if ((ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) ==
- E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES) {
- hw->phy.media_type = e1000_media_type_internal_serdes;
- ctrl_ext |= E1000_CTRL_I2C_ENA;
- } else if (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII) {
+ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
dev_spec->sgmii_active = TRUE;
ctrl_ext |= E1000_CTRL_I2C_ENA;
- } else {
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ break;
+ default:
ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ break;
}
+
E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ /*
+ * if using i2c make certain the MDICNFG register is cleared to prevent
+ * communications from being misrouted to the mdic registers
+ */
+ if ((ctrl_ext & E1000_CTRL_I2C_ENA) &&
+ ((hw->mac.type == e1000_82580) || (hw->mac.type == e1000_82580er)))
+ E1000_WRITE_REG(hw, E1000_MDICNFG, 0);
+
/* Set mta register count */
mac->mta_reg_count = 128;
+ /* Set uta register count */
+ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
if (mac->type == e1000_82576)
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ if ((mac->type == e1000_82580) || (mac->type == e1000_82580er))
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
/* Set if part includes ASF firmware */
mac->asf_firmware_present = TRUE;
/* Set if manageability features are enabled. */
@@ -260,6 +299,9 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
/* bus type/speed/width */
mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
/* reset */
+ if ((mac->type == e1000_82580) || (mac->type == e1000_82580er))
+ mac->ops.reset_hw = e1000_reset_hw_82580;
+ else
mac->ops.reset_hw = e1000_reset_hw_82575;
/* hw initialization */
mac->ops.init_hw = e1000_init_hw_82575;
@@ -269,9 +311,9 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
mac->ops.setup_physical_interface =
(hw->phy.media_type == e1000_media_type_copper)
? e1000_setup_copper_link_82575
- : e1000_setup_fiber_serdes_link_82575;
+ : e1000_setup_serdes_link_82575;
/* physical interface shutdown */
- mac->ops.shutdown_serdes = e1000_shutdown_fiber_serdes_link_82575;
+ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
/* check for link */
mac->ops.check_for_link = e1000_check_for_link_82575;
/* receive address register setting */
@@ -302,6 +344,9 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
/* link info */
mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
return E1000_SUCCESS;
}
@@ -334,6 +379,10 @@ static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
if (hw->bus.func == E1000_FUNC_1)
mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
return e1000_acquire_swfw_sync_82575(hw, mask);
}
@@ -352,6 +401,10 @@ static void e1000_release_phy_82575(struct e1000_hw *hw)
if (hw->bus.func == E1000_FUNC_1)
mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
e1000_release_swfw_sync_82575(hw, mask);
}
@@ -368,47 +421,25 @@ static void e1000_release_phy_82575(struct e1000_hw *hw)
static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
u16 *data)
{
- struct e1000_phy_info *phy = &hw->phy;
- u32 i, i2ccmd = 0;
+ s32 ret_val = -E1000_ERR_PARAM;
DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
DEBUGOUT1("PHY Address %u is out of range\n", offset);
- return -E1000_ERR_PARAM;
+ goto out;
}
- /*
- * Set up Op-code, Phy Address, and register address in the I2CCMD
- * register. The MAC will take care of interfacing with the
- * PHY to retrieve the desired data.
- */
- i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
- (E1000_I2CCMD_OPCODE_READ));
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
- E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+ ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
- /* Poll the ready bit to see if the I2C read completed */
- for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- usec_delay(50);
- i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
- if (i2ccmd & E1000_I2CCMD_READY)
- break;
- }
- if (!(i2ccmd & E1000_I2CCMD_READY)) {
- DEBUGOUT("I2CCMD Read did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (i2ccmd & E1000_I2CCMD_ERROR) {
- DEBUGOUT("I2CCMD Error bit set\n");
- return -E1000_ERR_PHY;
- }
+ hw->phy.ops.release(hw);
- /* Need to byte-swap the 16-bit value. */
- *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
-
- return E1000_SUCCESS;
+out:
+ return ret_val;
}
/**
@@ -423,49 +454,25 @@ static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
u16 data)
{
- struct e1000_phy_info *phy = &hw->phy;
- u32 i, i2ccmd = 0;
- u16 phy_data_swapped;
+ s32 ret_val = -E1000_ERR_PARAM;
DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
DEBUGOUT1("PHY Address %d is out of range\n", offset);
- return -E1000_ERR_PARAM;
+ goto out;
}
- /* Swap the data bytes for the I2C interface */
- phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
- /*
- * Set up Op-code, Phy Address, and register address in the I2CCMD
- * register. The MAC will take care of interfacing with the
- * PHY to retrieve the desired data.
- */
- i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_WRITE |
- phy_data_swapped);
-
- E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
-
- /* Poll the ready bit to see if the I2C read completed */
- for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- usec_delay(50);
- i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
- if (i2ccmd & E1000_I2CCMD_READY)
- break;
- }
- if (!(i2ccmd & E1000_I2CCMD_READY)) {
- DEBUGOUT("I2CCMD Write did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (i2ccmd & E1000_I2CCMD_ERROR) {
- DEBUGOUT("I2CCMD Error bit set\n");
- return -E1000_ERR_PHY;
- }
+ ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
- return E1000_SUCCESS;
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
}
/**
@@ -480,6 +487,7 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;
u16 phy_id;
+ u32 ctrl_ext;
DEBUGFUNC("e1000_get_phy_id_82575");
@@ -490,12 +498,19 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
* work. The result of this function should mean phy->phy_addr
* and phy->id are set correctly.
*/
- if (!(e1000_sgmii_active_82575(hw))) {
+ if (!e1000_sgmii_active_82575(hw)) {
phy->addr = 1;
ret_val = e1000_get_phy_id(hw);
goto out;
}
+ /* Power on sgmii phy if it is disabled */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(300);
+
/*
* The address field in the I2CCMD register is 3 bits and 0 is invalid.
* Therefore, we need to test 1-7
@@ -522,10 +537,12 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
if (phy->addr == 8) {
phy->addr = 0;
ret_val = -E1000_ERR_PHY;
- goto out;
+ } else {
+ ret_val = e1000_get_phy_id(hw);
}
- ret_val = e1000_get_phy_id(hw);
+ /* restore previous sfp cage power state */
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
out:
return ret_val;
@@ -792,21 +809,23 @@ static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
if (hw->bus.func == E1000_FUNC_1)
mask = E1000_NVM_CFG_DONE_PORT_1;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_NVM_CFG_DONE_PORT_2;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_NVM_CFG_DONE_PORT_3;
while (timeout) {
if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
break;
msec_delay(1);
timeout--;
}
- if (!timeout) {
+ if (!timeout)
DEBUGOUT("MNG configuration cycle has not completed.\n");
- }
/* If EEPROM is not marked present, init the PHY manually */
if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
- (hw->phy.type == e1000_phy_igp_3)) {
+ (hw->phy.type == e1000_phy_igp_3))
e1000_phy_init_script_igp3(hw);
- }
return ret_val;
}
@@ -828,14 +847,12 @@ static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
DEBUGFUNC("e1000_get_link_up_info_82575");
- if (hw->phy.media_type != e1000_media_type_copper ||
- e1000_sgmii_active_82575(hw)) {
+ if (hw->phy.media_type != e1000_media_type_copper)
ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
duplex);
- } else {
+ else
ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
duplex);
- }
return ret_val;
}
@@ -854,9 +871,7 @@ static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
DEBUGFUNC("e1000_check_for_link_82575");
- /* SGMII link check is done through the PCS register. */
- if ((hw->phy.media_type != e1000_media_type_copper) ||
- (e1000_sgmii_active_82575(hw))) {
+ if (hw->phy.media_type != e1000_media_type_copper) {
ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
&duplex);
/*
@@ -930,22 +945,28 @@ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
}
/**
- * e1000_shutdown_fiber_serdes_link_82575 - Remove link during power down
+ * e1000_shutdown_serdes_link_82575 - Remove link during power down
* @hw: pointer to the HW structure
*
- * In the case of fiber serdes shut down optics and PCS on driver unload
+ * In the case of serdes shut down sfp and PCS on driver unload
* when management pass thru is not enabled.
**/
-void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
+void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
u32 reg;
u16 eeprom_data = 0;
- if (hw->phy.media_type != e1000_media_type_internal_serdes)
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
return;
if (hw->bus.func == E1000_FUNC_0)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ else if ((hw->mac.type == e1000_82580) ||
+ (hw->mac.type == e1000_82580er))
+ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &eeprom_data);
else if (hw->bus.func == E1000_FUNC_1)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
@@ -962,10 +983,10 @@ void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
/* shutdown the laser */
reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
- reg |= E1000_CTRL_EXT_SDP7_DATA;
+ reg |= E1000_CTRL_EXT_SDP3_DATA;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
- /* flush the write to verfiy completion */
+ /* flush the write to verify completion */
E1000_WRITE_FLUSH(hw);
msec_delay(1);
}
@@ -974,45 +995,6 @@ void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
}
/**
- * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
- * @hw: pointer to the HW structure
- * @enable: state to enter, either enabled or disabled
- *
- * enables/disables L2 switch loopback functionality
- **/
-void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
-{
- u32 reg;
-
- reg = E1000_READ_REG(hw, E1000_DTXSWC);
- if (enable)
- reg |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
- else
- reg &= ~(E1000_DTXSWC_VMDQ_LOOPBACK_EN);
- E1000_WRITE_REG(hw, E1000_DTXSWC, reg);
-}
-
-/**
- * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
- * @hw: pointer to the HW structure
- * @enable: state to enter, either enabled or disabled
- *
- * enables/disables replication of packets across multiple pools
- **/
-void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
-{
- u32 reg;
-
- reg = E1000_READ_REG(hw, E1000_VT_CTL);
- if (enable)
- reg |= E1000_VT_CTL_VM_REPL_EN;
- else
- reg &= ~(E1000_VT_CTL_VM_REPL_EN);
-
- E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
-}
-
-/**
* e1000_reset_hw_82575 - Reset hardware
* @hw: pointer to the HW structure
*
@@ -1111,6 +1093,11 @@ static s32 e1000_init_hw_82575(struct e1000_hw *hw)
for (i = 0; i < mac->mta_reg_count; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+ /* Zero out the Unicast HASH table */
+ DEBUGOUT("Zeroing the UTA\n");
+ for (i = 0; i < mac->uta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
+
/* Setup link and flow control */
ret_val = mac->ops.setup_link(hw);
@@ -1137,7 +1124,6 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
- bool link;
DEBUGFUNC("e1000_setup_copper_link_82575");
@@ -1146,6 +1132,20 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ ret_val = e1000_setup_serdes_link_82575(hw);
+ if (ret_val)
+ goto out;
+
+ if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+ /* allow time for SFP cage time to power up phy */
+ msec_delay(300);
+
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ goto out;
+ }
+ }
switch (hw->phy.type) {
case e1000_phy_m88:
ret_val = e1000_copper_link_setup_m88(hw);
@@ -1153,6 +1153,9 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
case e1000_phy_igp_3:
ret_val = e1000_copper_link_setup_igp(hw);
break;
+ case e1000_phy_82580:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ break;
default:
ret_val = -E1000_ERR_PHY;
break;
@@ -1161,66 +1164,30 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
if (ret_val)
goto out;
- if (hw->mac.autoneg) {
- /*
- * Setup autoneg and flow control advertisement
- * and perform autonegotiation.
- */
- ret_val = e1000_copper_link_autoneg(hw);
- if (ret_val)
- goto out;
- } else {
- /*
- * PHY will be set to 10H, 10F, 100H or 100F
- * depending on user settings.
- */
- DEBUGOUT("Forcing Speed and Duplex\n");
- ret_val = hw->phy.ops.force_speed_duplex(hw);
- if (ret_val) {
- DEBUGOUT("Error Forcing Speed and Duplex\n");
- goto out;
- }
- }
-
- ret_val = e1000_configure_pcs_link_82575(hw);
- if (ret_val)
- goto out;
-
- /*
- * Check link status. Wait up to 100 microseconds for link to become
- * valid.
- */
- ret_val = e1000_phy_has_link_generic(hw,
- COPPER_LINK_UP_LIMIT,
- 10,
- &link);
- if (ret_val)
- goto out;
-
- if (link) {
- DEBUGOUT("Valid link established!!!\n");
- /* Config the MAC and PHY after link is up */
- e1000_config_collision_dist_generic(hw);
- ret_val = e1000_config_fc_after_link_up_generic(hw);
- } else {
- DEBUGOUT("Unable to establish link!!!\n");
- }
-
+ ret_val = e1000_setup_copper_link_generic(hw);
out:
return ret_val;
}
/**
- * e1000_setup_fiber_serdes_link_82575 - Setup link for fiber/serdes
+ * e1000_setup_serdes_link_82575 - Setup link for serdes
* @hw: pointer to the HW structure
*
- * Configures speed and duplex for fiber and serdes links.
+ * Configure the physical coding sub-layer (PCS) link. The PCS link is
+ * used on copper connections where the serialized gigabit media independent
+ * interface (sgmii), or serdes fiber is being used. Configures the link
+ * for auto-negotiation or forces speed/duplex.
**/
-static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
{
- u32 reg;
+ u32 ctrl_ext, ctrl_reg, reg;
+ bool pcs_autoneg;
+
+ DEBUGFUNC("e1000_setup_serdes_link_82575");
- DEBUGFUNC("e1000_setup_fiber_serdes_link_82575");
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return E1000_SUCCESS;
/*
* On the 82575, SerDes loopback mode persists until it is
@@ -1230,25 +1197,54 @@ static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
*/
E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
- /* Force link up, set 1gb */
- reg = E1000_READ_REG(hw, E1000_CTRL);
- reg |= E1000_CTRL_SLU | E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD;
+ /* power on the sfp cage if present */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl_reg |= E1000_CTRL_SLU;
+
if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
/* set both sw defined pins */
- reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+ ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+
+ /* Set switch control to serdes energy detect */
+ reg = E1000_READ_REG(hw, E1000_CONNSW);
+ reg |= E1000_CONNSW_ENRGSRC;
+ E1000_WRITE_REG(hw, E1000_CONNSW, reg);
}
- E1000_WRITE_REG(hw, E1000_CTRL, reg);
- /* Power on phy for 82576 fiber adapters */
- if (hw->mac.type == e1000_82576) {
- reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
- reg &= ~E1000_CTRL_EXT_SDP7_DATA;
- E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+
+ /* default pcs_autoneg to the same setting as mac autoneg */
+ pcs_autoneg = hw->mac.autoneg;
+
+ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ /* sgmii mode lets the phy handle forcing speed/duplex */
+ pcs_autoneg = TRUE;
+ /* autoneg time out should be disabled for SGMII mode */
+ reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ /* disable PCS autoneg and support parallel detect only */
+ pcs_autoneg = FALSE;
+ default:
+ /*
+ * non-SGMII modes only supports a speed of 1000/Full for the
+ * link so it is best to just force the MAC and let the pcs
+ * link either autoneg or be forced to 1000/Full
+ */
+ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+ /* set speed of 1000/Full if speed/duplex is forced */
+ reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+ break;
}
- /* Set switch control to serdes energy detect */
- reg = E1000_READ_REG(hw, E1000_CONNSW);
- reg |= E1000_CONNSW_ENRGSRC;
- E1000_WRITE_REG(hw, E1000_CONNSW, reg);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
/*
* New SerDes mode allows for forcing speed or autonegotiating speed
@@ -1256,35 +1252,34 @@ static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
* mode that will be compatible with older link partners and switches.
* However, both are supported by the hardware and some drivers/tools.
*/
- reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
-
reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
- E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+ E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
- if (hw->mac.autoneg) {
+ /*
+ * We force flow control to prevent the CTRL register values from being
+ * overwritten by the autonegotiated flow control values
+ */
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
+ if (pcs_autoneg) {
/* Set PCS register for autoneg */
- reg |= E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
- E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
- E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
- E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
- DEBUGOUT1("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
+ reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
+ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+ DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
- /* Set PCS register for forced speed */
- reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
- E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
- E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
- E1000_PCS_LCTL_FSD | /* Force Speed */
- E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
- DEBUGOUT1("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
- }
+ /* Set PCS register for forced link */
+ reg |= E1000_PCS_LCTL_FSD | /* Force Speed */
+ E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
+ E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
- if (hw->mac.type == e1000_82576) {
- reg |= E1000_PCS_LCTL_FORCE_FCTRL;
- e1000_force_mac_fc_generic(hw);
+ DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+ if (!e1000_sgmii_active_82575(hw))
+ e1000_force_mac_fc_generic(hw);
+
return E1000_SUCCESS;
}
@@ -1324,72 +1319,6 @@ out:
}
/**
- * e1000_configure_pcs_link_82575 - Configure PCS link
- * @hw: pointer to the HW structure
- *
- * Configure the physical coding sub-layer (PCS) link. The PCS link is
- * only used on copper connections where the serialized gigabit media
- * independent interface (sgmii) is being used. Configures the link
- * for auto-negotiation or forces speed/duplex.
- **/
-static s32 e1000_configure_pcs_link_82575(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- u32 reg = 0;
-
- DEBUGFUNC("e1000_configure_pcs_link_82575");
-
- if (hw->phy.media_type != e1000_media_type_copper ||
- !(e1000_sgmii_active_82575(hw)))
- goto out;
-
- /* For SGMII, we need to issue a PCS autoneg restart */
- reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
-
- /* AN time out should be disabled for SGMII mode */
- reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
-
- if (mac->autoneg) {
- /* Make sure forced speed and force link are not set */
- reg &= ~(E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
-
- /*
- * The PHY should be setup prior to calling this function.
- * All we need to do is restart autoneg and enable autoneg.
- */
- reg |= E1000_PCS_LCTL_AN_RESTART | E1000_PCS_LCTL_AN_ENABLE;
- } else {
- /* Set PCS register for forced speed */
-
- /* Turn off bits for full duplex, speed, and autoneg */
- reg &= ~(E1000_PCS_LCTL_FSV_1000 |
- E1000_PCS_LCTL_FSV_100 |
- E1000_PCS_LCTL_FDV_FULL |
- E1000_PCS_LCTL_AN_ENABLE);
-
- /* Check for duplex first */
- if (mac->forced_speed_duplex & E1000_ALL_FULL_DUPLEX)
- reg |= E1000_PCS_LCTL_FDV_FULL;
-
- /* Now set speed */
- if (mac->forced_speed_duplex & E1000_ALL_100_SPEED)
- reg |= E1000_PCS_LCTL_FSV_100;
-
- /* Force speed and force link */
- reg |= E1000_PCS_LCTL_FSD |
- E1000_PCS_LCTL_FORCE_LINK |
- E1000_PCS_LCTL_FLV_LINK_UP;
-
- DEBUGOUT1("Wrote 0x%08X to PCS_LCTL to configure forced link\n",
- reg);
- }
- E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
-
-out:
- return E1000_SUCCESS;
-}
-
-/**
* e1000_sgmii_active_82575 - Return sgmii state
* @hw: pointer to the HW structure
*
@@ -1548,7 +1477,8 @@ static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
E1000_READ_REG(hw, E1000_LENERRS);
/* This register should not be read in copper configurations */
- if (hw->phy.media_type == e1000_media_type_internal_serdes)
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
+ e1000_sgmii_active_82575(hw))
E1000_READ_REG(hw, E1000_SCVPC);
}
@@ -1677,3 +1607,235 @@ out:
return ret_val;
}
+/**
+ * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables L2 switch loopback functionality.
+ **/
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
+
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+
+ E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
+}
+
+/**
+ * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables replication of packets across multiple pools.
+ **/
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
+
+ if (enable)
+ vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
+ else
+ vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
+
+ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
+}
+
+/**
+ * e1000_read_phy_reg_82580 - Read 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ u32 mdicnfg = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_phy_reg_82580");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /*
+ * We config the phy address in MDICNFG register now. Same bits
+ * as before. The values in MDIC can be written but will be
+ * ignored. This allows us to call the old function after
+ * configuring the PHY address in the new register
+ */
+ mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+
+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_82580 - Write 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ u32 mdicnfg = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_write_phy_reg_82580");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /*
+ * We config the phy address in MDICNFG register now. Same bits
+ * as before. The values in MDIC can be written but will be
+ * ignored. This allows us to call the old function after
+ * configuring the PHY address in the new register
+ */
+ mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+
+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+/**
+ * e1000_reset_hw_82580 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets function or entire device (all ports, etc.)
+ * to a known state.
+ **/
+static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ /* BH SW mailbox bit in SW_FW_SYNC */
+ u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+ u32 ctrl, icr;
+ bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+ DEBUGFUNC("e1000_reset_hw_82580");
+
+ hw->dev_spec._82575.global_device_reset = FALSE;
+
+ /* Get current control state. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ /* Determine whether or not a global dev reset is requested */
+ if (global_device_reset &&
+ e1000_acquire_swfw_sync_82575(hw, swmbsw_mask))
+ global_device_reset = FALSE;
+
+ if (global_device_reset &&
+ !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET))
+ ctrl |= E1000_CTRL_DEV_RST;
+ else
+ ctrl |= E1000_CTRL_RST;
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Add delay to insure DEV_RST has time to complete */
+ if (global_device_reset)
+ msec_delay(5);
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ }
+
+ /* If EEPROM is not present, run manual init scripts */
+ if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
+ e1000_reset_init_script_82575(hw);
+
+ /* clear global device reset status bit */
+ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+ /* Clear any pending interrupt events. */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ icr = E1000_READ_REG(hw, E1000_ICR);
+
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ /* Release semaphore */
+ if (global_device_reset)
+ e1000_release_swfw_sync_82575(hw, swmbsw_mask);
+
+ return ret_val;
+}
+
+/**
+ * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
+ * @data: data received by reading RXPBS register
+ *
+ * The 82580 uses a table based approach for packet buffer allocation sizes.
+ * This function converts the retrieved value into the correct table value
+ * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
+ * 0x0 36 72 144 1 2 4 8 16
+ * 0x8 35 70 140 rsv rsv rsv rsv rsv
+ */
+u16 e1000_rxpbs_adjust_82580(u32 data)
+{
+ u16 ret_val = 0;
+
+ if (data < E1000_82580_RXPBS_TABLE_SIZE)
+ ret_val = e1000_82580_rxpbs_table[data];
+
+ return ret_val;
+}
+/**
+ * e1000_erfuse_check_82580 - ER Fuse check
+ * @hw: pointer to the HW structure
+ *
+ * This function returns the status of the ER Fuse
+ **/
+s32 e1000_erfuse_check_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ s32 ufuse_reg;
+
+ ufuse_reg = E1000_READ_REG(hw, E1000_UFUSE);
+ if ((ufuse_reg & E1000_ERFUSE) == E1000_ERFUSE)
+ ret_val = E1000_ERFUSE_FAILURE;
+
+ return ret_val;
+}
diff --git a/sys/dev/e1000/e1000_82575.h b/sys/dev/e1000/e1000_82575.h
index 34e0d29..f22a963f 100644
--- a/sys/dev/e1000/e1000_82575.h
+++ b/sys/dev/e1000/e1000_82575.h
@@ -51,10 +51,14 @@
*/
#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24
+#define E1000_RAR_ENTRIES_82580 24
+#define E1000_SW_SYNCH_MB 0x00000100
+#define E1000_STAT_DEV_RST_SET 0x00100000
+#define E1000_CTRL_DEV_RST 0x20000000
#ifdef E1000_BIT_FIELDS
struct e1000_adv_data_desc {
- u64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
union {
u32 data;
struct {
@@ -128,6 +132,7 @@ struct e1000_adv_context_desc {
#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define E1000_SRRCTL_TIMESTAMP 0x40000000
#define E1000_SRRCTL_DROP_EN 0x80000000
#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
@@ -142,6 +147,7 @@ struct e1000_adv_context_desc {
#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
+#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002
#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8
#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << E1000_VMRCTL_MIRROR_PORT_SHIFT)
@@ -185,31 +191,31 @@ struct e1000_adv_context_desc {
/* Receive Descriptor - Advanced */
union e1000_adv_rx_desc {
struct {
- u64 pkt_addr; /* Packet buffer address */
- u64 hdr_addr; /* Header buffer address */
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
union {
- u32 data;
+ __le32 data;
struct {
- u16 pkt_info; /* RSS type, Packet type */
- u16 hdr_info; /* Split Header,
- * header buffer length */
+ __le16 pkt_info; /*RSS type, Pkt type*/
+ __le16 hdr_info; /* Split Header,
+ * header buffer len*/
} hs_rss;
} lo_dword;
union {
- u32 rss; /* RSS Hash */
+ __le32 rss; /* RSS Hash */
struct {
- u16 ip_id; /* IP id */
- u16 csum; /* Packet Checksum */
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
- u32 status_error; /* ext status/error */
- u16 length; /* Packet length */
- u16 vlan; /* VLAN tag */
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
@@ -220,6 +226,8 @@ union e1000_adv_rx_desc {
#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
#define E1000_RXDADV_SPLITHEADER_EN 0x00001000
#define E1000_RXDADV_SPH 0x8000
+#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
#define E1000_RXDADV_ERR_HBO 0x00800000
/* RSS Hash results */
@@ -269,14 +277,14 @@ union e1000_adv_rx_desc {
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
struct {
- u64 buffer_addr; /* Address of descriptor's data buf */
- u32 cmd_type_len;
- u32 olinfo_status;
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
} read;
struct {
- u64 rsvd; /* Reserved */
- u32 nxtseq_seed;
- u32 status;
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
} wb;
};
@@ -303,10 +311,10 @@ union e1000_adv_tx_desc {
/* Context descriptors */
struct e1000_adv_tx_context_desc {
- u32 vlan_macip_lens;
- u32 seqnum_seed;
- u32 type_tucmd_mlhl;
- u32 mss_l4len_idx;
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
};
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
@@ -378,6 +386,14 @@ struct e1000_adv_tx_context_desc {
*/
#define E1000_ETQF_FILTER_EAPOL 0
+#define E1000_FTQF_VF_BP 0x00008000
+#define E1000_FTQF_1588_TIME_STAMP 0x08000000
+#define E1000_FTQF_MASK 0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP 0x10000000
+#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
+#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
#define E1000_NVM_APME_82575 0x0400
#define MAX_NUM_VFS 8
@@ -416,6 +432,9 @@ struct e1000_adv_tx_context_desc {
#define E1000_VLVF_LVLAN 0x00100000
#define E1000_VLVF_VLANID_ENABLE 0x80000000
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
#define E1000_IOVCTL 0x05BBC
@@ -424,8 +443,18 @@ struct e1000_adv_tx_context_desc {
#define E1000_RPLOLR_STRVLAN 0x40000000
#define E1000_RPLOLR_STRCRC 0x80000000
+#define E1000_DTXCTL_8023LL 0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN 0x0020
+#define E1000_DTXCTL_SPOOF_INT 0x0040
+
#define ALL_QUEUES 0xFFFF
+/* RX packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
+u16 e1000_rxpbs_adjust_82580(u32 data);
+s32 e1000_erfuse_check_82580(struct e1000_hw *);
#endif /* _E1000_82575_H_ */
diff --git a/sys/dev/e1000/e1000_api.c b/sys/dev/e1000/e1000_api.c
index 8188658..154eff9 100644
--- a/sys/dev/e1000/e1000_api.c
+++ b/sys/dev/e1000/e1000_api.c
@@ -232,6 +232,7 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_ICH8_IGP_M_AMT:
case E1000_DEV_ID_ICH8_IGP_AMT:
case E1000_DEV_ID_ICH8_IGP_C:
+ case E1000_DEV_ID_ICH8_82567V_3:
mac->type = e1000_ich8lan;
break;
case E1000_DEV_ID_ICH9_IFE:
@@ -269,9 +270,21 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82576_SERDES:
case E1000_DEV_ID_82576_QUAD_COPPER:
case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_NS_SERDES:
case E1000_DEV_ID_82576_SERDES_QUAD:
mac->type = e1000_82576;
break;
+ case E1000_DEV_ID_82580_COPPER:
+ case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_SERDES:
+ case E1000_DEV_ID_82580_SGMII:
+ case E1000_DEV_ID_82580_COPPER_DUAL:
+ mac->type = e1000_82580;
+ break;
+ case E1000_DEV_ID_82580_ER:
+ case E1000_DEV_ID_82580_ER_DUAL:
+ mac->type = e1000_82580er;
+ break;
default:
/* Should never have loaded on this device */
ret_val = -E1000_ERR_MAC_INIT;
@@ -362,6 +375,8 @@ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
break;
case e1000_82575:
case e1000_82576:
+ case e1000_82580:
+ case e1000_82580er:
e1000_init_function_pointers_82575(hw);
break;
default:
diff --git a/sys/dev/e1000/e1000_defines.h b/sys/dev/e1000/e1000_defines.h
index d845fb2..4636506 100644
--- a/sys/dev/e1000/e1000_defines.h
+++ b/sys/dev/e1000/e1000_defines.h
@@ -146,12 +146,12 @@
#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA
#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
-#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
-#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */
#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
@@ -161,6 +161,8 @@
#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_82580_MASK 0x01C00000 /*82580 bit 24:22*/
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000
@@ -386,6 +388,8 @@
#define E1000_SWFW_PHY0_SM 0x02
#define E1000_SWFW_PHY1_SM 0x04
#define E1000_SWFW_CSR_SM 0x08
+#define E1000_SWFW_PHY2_SM 0x20
+#define E1000_SWFW_PHY3_SM 0x40
/* FACTPS Definitions */
#define E1000_FACTPS_LFS 0x40000000 /* LAN Function Select */
@@ -697,6 +701,7 @@
/* Extended Configuration Control and Size */
#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
@@ -769,6 +774,7 @@
#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
#define E1000_ICR_MNG 0x00040000 /* Manageability event */
#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
+#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver
* should claim the interrupt */
#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */
@@ -789,6 +795,7 @@
#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
+#define E1000_ICR_FER 0x00400000 /* Fatal Error */
/* PBA ECC Register */
#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
@@ -860,6 +867,7 @@
#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
* parity error */
#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
@@ -881,6 +889,7 @@
#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */
#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */
+#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */
/* Extended Interrupt Mask Set */
#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
@@ -913,6 +922,7 @@
#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
* parity error */
#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
@@ -994,6 +1004,7 @@
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
#define E1000_ERR_MBX 15
+#define E1000_ERFUSE_FAILURE 16
/* Loop limit on how long we wait for auto-negotiation to complete */
#define FIBER_LINK_UP_LIMIT 50
@@ -1036,6 +1047,56 @@
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestamping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+/* TUPLE Filtering Configuration */
+#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */
+#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */
+#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */
+/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL SHIFT */
+#define E1000_TTQF_PROTOCOL_TCP 0x0
+/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_UDP 0x1
+/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_SCTP 0x2
+#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */
+#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shift */
+#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */
+#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */
+#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
+#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
+#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
+#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
/* PCI Express Control */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
@@ -1227,6 +1288,10 @@
#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
+
+#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
/* Mask bits for fields in Word 0x0f of the NVM */
#define NVM_WORD0F_PAUSE_MASK 0x3000
@@ -1346,6 +1411,7 @@
#define BME1000_E_PHY_ID_R2 0x01410CB1
#define I82577_E_PHY_ID 0x01540050
#define I82578_E_PHY_ID 0x004DD040
+#define I82580_I_PHY_ID 0x015403A0
#define IGP04E1000_E_PHY_ID 0x02A80391
#define M88_VENDOR 0x0141
@@ -1575,5 +1641,34 @@
#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+/* DMA Coalescing register fields */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
+ * Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
+ * Threshold */
+#define E1000_DMACR_DMACTHR_SHIFT 16
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
+ * transactions */
+#define E1000_DMACR_DMAC_LX_SHIFT 28
+#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
+
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
+ * Threshold */
+
+#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
+
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
+ * Threshold */
+#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
+ * current window */
+
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
+ * Current Cnt */
+
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
+ * High val */
+#define E1000_FCRTC_RTH_COAL_SHIFT 4
+#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
+ * on DMA coal */
#endif /* _E1000_DEFINES_H_ */
diff --git a/sys/dev/e1000/e1000_hw.h b/sys/dev/e1000/e1000_hw.h
index 6afa4fbb..b01c5d1 100644
--- a/sys/dev/e1000/e1000_hw.h
+++ b/sys/dev/e1000/e1000_hw.h
@@ -100,6 +100,7 @@ struct e1000_hw;
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+#define E1000_DEV_ID_ICH8_82567V_3 0x1501
#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
#define E1000_DEV_ID_ICH8_IGP_C 0x104B
@@ -130,11 +131,19 @@ struct e1000_hw;
#define E1000_DEV_ID_82576_SERDES 0x10E7
#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
#define E1000_DEV_ID_82576_NS 0x150A
-#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
+#define E1000_DEV_ID_82576_NS_SERDES 0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
#define E1000_DEV_ID_82575EB_COPPER 0x10A7
#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
#define E1000_DEV_ID_82575GB_QUAD_COPPER_PM 0x10E2
+#define E1000_DEV_ID_82580_COPPER 0x150E
+#define E1000_DEV_ID_82580_FIBER 0x150F
+#define E1000_DEV_ID_82580_SERDES 0x1510
+#define E1000_DEV_ID_82580_SGMII 0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_82580_ER 0x151D
+#define E1000_DEV_ID_82580_ER_DUAL 0x151E
#define E1000_REVISION_0 0
#define E1000_REVISION_1 1
#define E1000_REVISION_2 2
@@ -143,9 +152,13 @@ struct e1000_hw;
#define E1000_FUNC_0 0
#define E1000_FUNC_1 1
+#define E1000_FUNC_2 2
+#define E1000_FUNC_3 3
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
enum e1000_mac_type {
e1000_undefined = 0,
@@ -173,6 +186,8 @@ enum e1000_mac_type {
e1000_pchlan,
e1000_82575,
e1000_82576,
+ e1000_82580,
+ e1000_82580er,
e1000_num_macs /* List is 1-based, so subtract 1 for TRUE count. */
};
@@ -213,6 +228,7 @@ enum e1000_phy_type {
e1000_phy_bm,
e1000_phy_82578,
e1000_phy_82577,
+ e1000_phy_82580,
e1000_phy_vf,
};
@@ -615,11 +631,13 @@ struct e1000_phy_operations {
s32 (*get_cable_length)(struct e1000_hw *);
s32 (*get_info)(struct e1000_hw *);
s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
s32 (*reset)(struct e1000_hw *);
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
void (*power_up)(struct e1000_hw *);
void (*power_down)(struct e1000_hw *);
};
@@ -657,6 +675,7 @@ struct e1000_mac_info {
u16 ifs_ratio;
u16 ifs_step_size;
u16 mta_reg_count;
+ u16 uta_reg_count;
/* Maximum size of the MTA register table in all supported adapters */
#define MAX_MTA_REG 128
@@ -768,6 +787,10 @@ struct e1000_dev_spec_82571 {
u32 smb_counter;
};
+struct e1000_dev_spec_80003es2lan {
+ bool mdic_wa_enable;
+};
+
struct e1000_shadow_ram {
u16 value;
bool modified;
@@ -778,6 +801,9 @@ struct e1000_shadow_ram {
struct e1000_dev_spec_ich8lan {
bool kmrn_lock_loss_workaround_enabled;
struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS];
+ E1000_MUTEX nvm_mutex;
+ E1000_MUTEX swflag_mutex;
+ bool nvm_k1_enabled;
};
struct e1000_dev_spec_82575 {
@@ -810,6 +836,7 @@ struct e1000_hw {
struct e1000_dev_spec_82542 _82542;
struct e1000_dev_spec_82543 _82543;
struct e1000_dev_spec_82571 _82571;
+ struct e1000_dev_spec_80003es2lan _80003es2lan;
struct e1000_dev_spec_ich8lan ich8lan;
struct e1000_dev_spec_82575 _82575;
struct e1000_dev_spec_vf vf;
diff --git a/sys/dev/e1000/e1000_ich8lan.c b/sys/dev/e1000/e1000_ich8lan.c
index a80955a..09c08eb 100644
--- a/sys/dev/e1000/e1000_ich8lan.c
+++ b/sys/dev/e1000/e1000_ich8lan.c
@@ -68,10 +68,12 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
+static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
-static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
bool active);
static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
@@ -95,6 +97,7 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
@@ -103,7 +106,6 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
-static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
@@ -120,6 +122,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
u8 size, u16 data);
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
+static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
+static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
@@ -179,18 +184,16 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->reset_delay_us = 100;
phy->ops.acquire = e1000_acquire_swflag_ich8lan;
- phy->ops.check_polarity = e1000_check_polarity_ife;
phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
- phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
- phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
- phy->ops.get_info = e1000_get_phy_info_ich8lan;
phy->ops.read_reg = e1000_read_phy_reg_hv;
+ phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
phy->ops.release = e1000_release_swflag_ich8lan;
phy->ops.reset = e1000_phy_hw_reset_ich8lan;
- phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
- phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
+ phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
+ phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
phy->ops.write_reg = e1000_write_phy_reg_hv;
+ phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
phy->ops.power_up = e1000_power_up_phy_copper;
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
@@ -199,13 +202,23 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
e1000_get_phy_id(hw);
phy->type = e1000_get_phy_type_from_id(phy->id);
- if (phy->type == e1000_phy_82577) {
+ switch (phy->type) {
+ case e1000_phy_82577:
phy->ops.check_polarity = e1000_check_polarity_82577;
phy->ops.force_speed_duplex =
e1000_phy_force_speed_duplex_82577;
- phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
phy->ops.get_info = e1000_get_phy_info_82577;
phy->ops.commit = e1000_phy_sw_reset_generic;
+ case e1000_phy_82578:
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
}
return ret_val;
@@ -229,12 +242,9 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
phy->reset_delay_us = 100;
phy->ops.acquire = e1000_acquire_swflag_ich8lan;
- phy->ops.check_polarity = e1000_check_polarity_ife;
phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
- phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
- phy->ops.get_info = e1000_get_phy_info_ich8lan;
phy->ops.read_reg = e1000_read_phy_reg_igp;
phy->ops.release = e1000_release_swflag_ich8lan;
phy->ops.reset = e1000_phy_hw_reset_ich8lan;
@@ -273,12 +283,20 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
case IGP03E1000_E_PHY_ID:
phy->type = e1000_phy_igp_3;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
+ phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
break;
case IFE_E_PHY_ID:
case IFE_PLUS_E_PHY_ID:
case IFE_C_E_PHY_ID:
phy->type = e1000_phy_ife;
phy->autoneg_mask = E1000_ALL_NOT_GIG;
+ phy->ops.get_info = e1000_get_phy_info_ife;
+ phy->ops.check_polarity = e1000_check_polarity_ife;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
break;
case BME1000_E_PHY_ID:
phy->type = e1000_phy_bm;
@@ -286,6 +304,9 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
phy->ops.read_reg = e1000_read_phy_reg_bm;
phy->ops.write_reg = e1000_write_phy_reg_bm;
phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
break;
default:
ret_val = -E1000_ERR_PHY;
@@ -353,10 +374,13 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
dev_spec->shadow_ram[i].value = 0xFFFF;
}
+ E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
+ E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
+
/* Function Pointers */
- nvm->ops.acquire = e1000_acquire_swflag_ich8lan;
+ nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
+ nvm->ops.release = e1000_release_nvm_ich8lan;
nvm->ops.read = e1000_read_nvm_ich8lan;
- nvm->ops.release = e1000_release_swflag_ich8lan;
nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
@@ -393,6 +417,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->asf_firmware_present = TRUE;
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = TRUE;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = TRUE;
/* Function pointers */
@@ -409,7 +435,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
/* physical interface setup */
mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
/* check for link */
- mac->ops.check_for_link = e1000_check_for_copper_link_generic;
+ mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
/* link info */
@@ -460,11 +486,99 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_ich8lan)
e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
-
return E1000_SUCCESS;
}
/**
+ * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
+
+ /*
+ * We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ /*
+ * First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (hw->mac.type == e1000_pchlan) {
+ ret_val = e1000_k1_gig_workaround_hv(hw, link);
+ if (ret_val)
+ goto out;
+ }
+
+ if (!link)
+ goto out; /* No link detected */
+
+ mac->get_link_status = FALSE;
+
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1000_link_stall_workaround_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000_check_downshift_generic(hw);
+
+ /*
+ * If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ e1000_config_collision_dist_generic(hw);
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
* @hw: pointer to the HW structure
*
@@ -491,12 +605,41 @@ void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the mutex for performing NVM operations.
+ **/
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_acquire_nvm_ich8lan");
+
+ E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_release_nvm_ich8lan - Release NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Releases the mutex used while performing NVM operations.
+ **/
+static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_ich8lan");
+
+ E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
+
+ return;
+}
+
+/**
* e1000_acquire_swflag_ich8lan - Acquire software control flag
* @hw: pointer to the HW structure
*
- * Acquires the software control flag for performing NVM and PHY
- * operations. This is a function pointer entry point only called by
- * read/write routines for the PHY and NVM parts.
+ * Acquires the software control flag for performing PHY and select
+ * MAC CSR accesses.
**/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
@@ -505,23 +648,39 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_swflag_ich8lan");
+ E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
+
while (timeout) {
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
+ break;
- if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) {
- extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
- E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
-
- extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
- if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
- break;
- }
msec_delay_irq(1);
timeout--;
}
if (!timeout) {
DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ timeout = SW_FLAG_TIMEOUT;
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+
+ while (timeout) {
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
+
+ msec_delay_irq(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Failed to acquire the semaphore.\n");
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
ret_val = -E1000_ERR_CONFIG;
@@ -529,6 +688,9 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
}
out:
+ if (ret_val)
+ E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
+
return ret_val;
}
@@ -536,9 +698,8 @@ out:
* e1000_release_swflag_ich8lan - Release software control flag
* @hw: pointer to the HW structure
*
- * Releases the software control flag for performing NVM and PHY operations.
- * This is a function pointer entry point only called by read/write
- * routines for the PHY and NVM parts.
+ * Releases the software control flag for performing PHY and select
+ * MAC CSR accesses.
**/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
@@ -550,6 +711,8 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+ E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
+
return;
}
@@ -594,6 +757,325 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ *
+ * SW should configure the LCD from the NVM extended configuration region
+ * as a workaround for certain parts.
+ **/
+static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+ s32 ret_val;
+ u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Initialize the PHY from the NVM on ICH platforms. This
+ * is needed due to an issue where the NVM configuration is
+ * not properly autoloaded after power transitions.
+ * Therefore, after each PHY reset, we will load the
+ * configuration data out of the NVM manually.
+ */
+ if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
+ (hw->mac.type == e1000_pchlan)) {
+ /* Check if SW needs to configure the PHY */
+ if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+ (hw->device_id == E1000_DEV_ID_ICH8_IGP_M) ||
+ (hw->mac.type == e1000_pchlan))
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+ else
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+
+ data = E1000_READ_REG(hw, E1000_FEXTNVM);
+ if (!(data & sw_cfg_mask))
+ goto out;
+
+ /* Wait for basic configuration completes before proceeding */
+ e1000_lan_init_done_ich8lan(hw);
+
+ /*
+ * Make sure HW does not configure LCD from PHY
+ * extended configuration before SW configuration
+ */
+ data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ goto out;
+
+ cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
+ cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+ cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+ if (!cnf_size)
+ goto out;
+
+ cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+ cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+ if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+ (hw->mac.type == e1000_pchlan)) {
+ /*
+ * HW configures the SMBus address and LEDs when the
+ * OEM and LCD Write Enable bits are set in the NVM.
+ * When both NVM bits are cleared, SW will configure
+ * them instead.
+ */
+ data = E1000_READ_REG(hw, E1000_STRAP);
+ data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+ reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
+ reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
+ reg_data);
+ if (ret_val)
+ goto out;
+
+ data = E1000_READ_REG(hw, E1000_LEDCTL);
+ ret_val = e1000_write_phy_reg_hv_locked(hw,
+ HV_LED_CONFIG,
+ (u16)data);
+ if (ret_val)
+ goto out;
+ }
+
+ /* Configure LCD from extended configuration region. */
+
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
+
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
+ &reg_data);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
+ 1, &reg_addr);
+ if (ret_val)
+ goto out;
+
+ /* Save off the PHY page for future writes. */
+ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+ phy_page = reg_data;
+ continue;
+ }
+
+ reg_addr &= PHY_REG_MASK;
+ reg_addr |= phy_page;
+
+ ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
+ reg_data);
+ if (ret_val)
+ goto out;
+ }
+ }
+
+out:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_k1_gig_workaround_hv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
+ * from a lower speed. This workaround disables K1 whenever link is at 1Gig
+ * If link is down, the function will restore the default K1 setting located
+ * in the NVM.
+ **/
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 status_reg = 0;
+ bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
+
+ DEBUGFUNC("e1000_k1_gig_workaround_hv");
+
+ if (hw->mac.type != e1000_pchlan)
+ goto out;
+
+ /* Wrap the whole flow with the sw flag */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
+ if (link) {
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
+ &status_reg);
+ if (ret_val)
+ goto release;
+
+ status_reg &= BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK;
+
+ if (status_reg == (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ k1_enable = FALSE;
+ }
+
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
+ &status_reg);
+ if (ret_val)
+ goto release;
+
+ status_reg &= HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_MASK;
+
+ if (status_reg == (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_1000))
+ k1_enable = FALSE;
+ }
+
+ /* Link stall fix for link up */
+ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+ 0x0100);
+ if (ret_val)
+ goto release;
+
+ } else {
+ /* Link stall fix for link down */
+ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+ 0x4100);
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
+
+release:
+ hw->phy.ops.release(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_configure_k1_ich8lan - Configure K1 power state
+ * @hw: pointer to the HW structure
+ * @enable: K1 state to configure
+ *
+ * Configure the K1 power state based on the provided parameter.
+ * Assumes semaphore already acquired.
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ **/
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_reg = 0;
+ u32 ctrl_ext = 0;
+ u32 reg = 0;
+ u16 kmrn_reg = 0;
+
+ ret_val = e1000_read_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ &kmrn_reg);
+ if (ret_val)
+ goto out;
+
+ if (k1_enable)
+ kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
+ else
+ kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
+
+ ret_val = e1000_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ kmrn_reg);
+ if (ret_val)
+ goto out;
+
+ usec_delay(20);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+
+ reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ reg |= E1000_CTRL_FRCSPD;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+ usec_delay(20);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ usec_delay(20);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ * @d0_state: boolean if entering d0 or d3 device state
+ *
+ * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
+ * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
+ * in NVM determines whether HW should configure LPLU and Gbe Disable.
+ **/
+s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
+{
+ s32 ret_val = 0;
+ u32 mac_reg;
+ u16 oem_reg;
+
+ if (hw->mac.type != e1000_pchlan)
+ return ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+ goto out;
+
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
+ if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
+ goto out;
+
+ mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
+
+ ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+ if (ret_val)
+ goto out;
+
+ oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
+
+ if (d0_state) {
+ if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
+ oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+ if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
+ oem_reg |= HV_OEM_BITS_LPLU;
+ } else {
+ if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
+ oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+ if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
+ oem_reg |= HV_OEM_BITS_LPLU;
+ }
+ /* Restart auto-neg to activate the bits */
+ if (!hw->phy.ops.check_reset_block(hw))
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+ ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
+
+out:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+
+/**
* e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
* @hw: pointer to the HW structure
**/
@@ -614,7 +1096,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
if (hw->mac.type != e1000_pchlan)
- return ret_val;
+ goto out;
/* Hanksville M Phy init for IEEE. */
if ((hw->revision_id == 2) &&
@@ -648,12 +1130,12 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
/* Disable generation of early preamble */
ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
if (ret_val)
- return ret_val;
+ goto out;
/* Preamble tuning for SSC */
ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
if (ret_val)
- return ret_val;
+ goto out;
}
if (hw->phy.type == e1000_phy_82578) {
@@ -662,13 +1144,13 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29,
0x66C0);
if (ret_val)
- return ret_val;
+ goto out;
/* PHY config */
ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E,
0xFFFF);
if (ret_val)
- return ret_val;
+ goto out;
}
/*
@@ -691,20 +1173,30 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
*/
ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
if (ret_val)
- return ret_val;
+ goto out;
ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
if (ret_val)
- return ret_val;
+ goto out;
}
/* Select page 0 */
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- return ret_val;
+ goto out;
+
hw->phy.addr = 1;
- e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ if (ret_val)
+ goto out;
hw->phy.ops.release(hw);
+ /*
+ * Configure the K1 Si workaround during phy reset assuming there is
+ * link so that it disables K1 if link is in 1Gbps.
+ */
+ ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
+
+out:
return ret_val;
}
@@ -752,10 +1244,8 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
{
- struct e1000_phy_info *phy = &hw->phy;
- u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
- s32 ret_val;
- u16 word_addr, reg_data, reg_addr, phy_page = 0;
+ s32 ret_val = E1000_SUCCESS;
+ u16 reg;
DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
@@ -772,162 +1262,52 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
goto out;
}
- /*
- * Initialize the PHY from the NVM on ICH platforms. This
- * is needed due to an issue where the NVM configuration is
- * not properly autoloaded after power transitions.
- * Therefore, after each PHY reset, we will load the
- * configuration data out of the NVM manually.
- */
- if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) {
- /* Check if SW needs configure the PHY */
- if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
- (hw->device_id == E1000_DEV_ID_ICH8_IGP_M))
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
- else
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
-
- data = E1000_READ_REG(hw, E1000_FEXTNVM);
- if (!(data & sw_cfg_mask))
- goto out;
-
- /* Wait for basic configuration completes before proceeding */
- e1000_lan_init_done_ich8lan(hw);
-
- /*
- * Make sure HW does not configure LCD from PHY
- * extended configuration before SW configuration
- */
- data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
- if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
- goto out;
-
- cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
- cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
- cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
- if (!cnf_size)
- goto out;
-
- cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
- cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
-
- /* Configure LCD from extended configuration region. */
-
- /* cnf_base_addr is in DWORD */
- word_addr = (u16)(cnf_base_addr << 1);
-
- for (i = 0; i < cnf_size; i++) {
- ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
- &reg_data);
- if (ret_val)
- goto out;
-
- ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
- 1, &reg_addr);
- if (ret_val)
- goto out;
-
- /* Save off the PHY page for future writes. */
- if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
- phy_page = reg_data;
- continue;
- }
+ /* Dummy read to clear the phy wakeup bit after lcd reset */
+ if (hw->mac.type == e1000_pchlan)
+ hw->phy.ops.read_reg(hw, BM_WUC, &reg);
- reg_addr |= phy_page;
+ /* Configure the LCD with the extended configuration region in NVM */
+ ret_val = e1000_sw_lcd_config_ich8lan(hw);
+ if (ret_val)
+ goto out;
- ret_val = phy->ops.write_reg(hw, (u32)reg_addr, reg_data);
- if (ret_val)
- goto out;
- }
- }
+ /* Configure the LCD with the OEM bits in NVM */
+ if (hw->mac.type == e1000_pchlan)
+ ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
out:
return ret_val;
}
/**
- * e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info
- * @hw: pointer to the HW structure
- *
- * Wrapper for calling the get_phy_info routines for the appropriate phy type.
- **/
-static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
-{
- s32 ret_val = -E1000_ERR_PHY_TYPE;
-
- DEBUGFUNC("e1000_get_phy_info_ich8lan");
-
- switch (hw->phy.type) {
- case e1000_phy_ife:
- ret_val = e1000_get_phy_info_ife_ich8lan(hw);
- break;
- case e1000_phy_igp_3:
- case e1000_phy_bm:
- case e1000_phy_82578:
- case e1000_phy_82577:
- ret_val = e1000_get_phy_info_igp(hw);
- break;
- default:
- break;
- }
-
- return ret_val;
-}
-
-/**
- * e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states
+ * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
* @hw: pointer to the HW structure
+ * @active: TRUE to enable LPLU, FALSE to disable
*
- * Populates "phy" structure with various feature states.
- * This function is only called by other family-specific
- * routines.
+ * Sets the LPLU state according to the active flag. For PCH, if OEM write
+ * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
+ * the phy speed. This function will manually set the LPLU bit and restart
+ * auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ * since it configures the same bit.
**/
-static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 data;
- bool link;
-
- DEBUGFUNC("e1000_get_phy_info_ife_ich8lan");
-
- ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
- if (ret_val)
- goto out;
-
- if (!link) {
- DEBUGOUT("Phy info is only valid if link is up\n");
- ret_val = -E1000_ERR_CONFIG;
- goto out;
- }
-
- ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
- if (ret_val)
- goto out;
- phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
- ? FALSE : TRUE;
+ s32 ret_val = E1000_SUCCESS;
+ u16 oem_reg;
- if (phy->polarity_correction) {
- ret_val = e1000_check_polarity_ife(hw);
- if (ret_val)
- goto out;
- } else {
- /* Polarity is forced */
- phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
- }
+ DEBUGFUNC("e1000_set_lplu_state_pchlan");
- ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+ ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
if (ret_val)
goto out;
- phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? TRUE : FALSE;
+ if (active)
+ oem_reg |= HV_OEM_BITS_LPLU;
+ else
+ oem_reg &= ~HV_OEM_BITS_LPLU;
- /* The following parameters are undefined for 10/100 operation. */
- phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
- phy->local_rx = e1000_1000t_rx_status_undefined;
- phy->remote_rx = e1000_1000t_rx_status_undefined;
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+ ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
out:
return ret_val;
@@ -1170,7 +1550,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
if (ret_val)
goto out;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
- E1000_ICH_NVM_SIG_VALUE) {
+ E1000_ICH_NVM_SIG_VALUE) {
*bank = 0;
goto out;
}
@@ -1178,11 +1558,11 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
/* Check bank 1 */
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
bank1_offset,
- &sig_byte);
+ &sig_byte);
if (ret_val)
goto out;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
- E1000_ICH_NVM_SIG_VALUE) {
+ E1000_ICH_NVM_SIG_VALUE) {
*bank = 1;
goto out;
}
@@ -1223,17 +1603,18 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
goto out;
}
- ret_val = nvm->ops.acquire(hw);
- if (ret_val)
- goto out;
+ nvm->ops.acquire(hw);
ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
- if (ret_val != E1000_SUCCESS)
- goto release;
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
act_offset = (bank) ? nvm->flash_bank_size : 0;
act_offset += offset;
+ ret_val = E1000_SUCCESS;
for (i = 0; i < words; i++) {
if ((dev_spec->shadow_ram) &&
(dev_spec->shadow_ram[offset+i].modified)) {
@@ -1248,7 +1629,6 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
}
}
-release:
nvm->ops.release(hw);
out:
@@ -1534,9 +1914,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
goto out;
}
- ret_val = nvm->ops.acquire(hw);
- if (ret_val)
- goto out;
+ nvm->ops.acquire(hw);
for (i = 0; i < words; i++) {
dev_spec->shadow_ram[offset+i].modified = TRUE;
@@ -1577,9 +1955,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (nvm->type != e1000_nvm_flash_sw)
goto out;
- ret_val = nvm->ops.acquire(hw);
- if (ret_val)
- goto out;
+ nvm->ops.acquire(hw);
/*
* We're writing to the opposite bank so if we're on bank 1,
@@ -1588,8 +1964,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
if (ret_val != E1000_SUCCESS) {
- nvm->ops.release(hw);
- goto out;
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
}
if (bank == 0) {
@@ -1678,6 +2054,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
nvm->ops.release(hw);
goto out;
}
+
data &= 0xBFFF;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset * 2 + 1,
@@ -1829,10 +2206,10 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
* try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
*/
hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
- if (hsfsts.hsf_status.flcerr == 1) {
+ if (hsfsts.hsf_status.flcerr == 1)
/* Repeat for some time before giving up. */
continue;
- } else if (hsfsts.hsf_status.flcdone == 0) {
+ if (hsfsts.hsf_status.flcdone == 0) {
DEBUGOUT("Timeout error - flash cycle "
"did not complete.");
break;
@@ -1960,7 +2337,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
/* Start with the base address, then add the sector offset. */
flash_linear_addr = hw->nvm.flash_base_addr;
- flash_linear_addr += (bank) ? (sector_size * iteration) : 0;
+ flash_linear_addr += (bank) ? flash_bank_size : 0;
for (j = 0; j < iteration ; j++) {
do {
@@ -2153,6 +2530,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u16 reg;
u32 ctrl, icr, kab;
s32 ret_val;
@@ -2188,6 +2567,18 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
}
+ if (hw->mac.type == e1000_pchlan) {
+ /* Save the NVM K1 bit setting*/
+ ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
+ if (ret_val)
+ return ret_val;
+
+ if (reg & E1000_NVM_K1_ENABLE)
+ dev_spec->nvm_k1_enabled = TRUE;
+ else
+ dev_spec->nvm_k1_enabled = FALSE;
+ }
+
ctrl = E1000_READ_REG(hw, E1000_CTRL);
if (!hw->phy.ops.check_reset_block(hw) && !hw->phy.reset_disable) {
@@ -2229,6 +2620,26 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
DEBUGOUT("Auto Read Done did not complete\n");
}
}
+ /* Dummy read to clear the phy wakeup bit after lcd reset */
+ if (hw->mac.type == e1000_pchlan)
+ hw->phy.ops.read_reg(hw, BM_WUC, &reg);
+
+ ret_val = e1000_sw_lcd_config_ich8lan(hw);
+ if (ret_val)
+ goto out;
+
+ if (hw->mac.type == e1000_pchlan) {
+ ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
+ if (ret_val)
+ goto out;
+ }
+ /*
+ * For PCH, this write will make sure that any noise
+ * will be detected as a CRC error and be dropped rather than show up
+ * as a bad packet to the DMA engine.
+ */
+ if (hw->mac.type == e1000_pchlan)
+ E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
icr = E1000_READ_REG(hw, E1000_ICR);
@@ -2240,6 +2651,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (hw->mac.type == e1000_pchlan)
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+out:
return ret_val;
}
@@ -2269,8 +2681,8 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
if (ret_val)
- /* This is not fatal and we should not stop init due to this */
DEBUGOUT("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
/* Setup the receive address. */
e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
@@ -2316,7 +2728,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_ich8lan)
snoop = PCIE_ICH8_SNOOP_ALL;
else
- snoop = (u32)~(PCIE_NO_SNOOP_ALL);
+ snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
e1000_set_pcie_no_snoop_generic(hw, snoop);
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
@@ -2473,8 +2885,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
* and increase the max iterations when polling the phy;
* this fixes erroneous timeouts at 10Mbps.
*/
- ret_val = e1000_write_kmrn_reg_generic(hw,
- E1000_KMRNCTRLSTA_TIMEOUTS,
+ ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
0xFFFF);
if (ret_val)
goto out;
@@ -2788,6 +3199,7 @@ void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
u32 phy_ctrl;
switch (hw->mac.type) {
+ case e1000_ich8lan:
case e1000_ich9lan:
case e1000_ich10lan:
case e1000_pchlan:
@@ -2796,9 +3208,8 @@ void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
E1000_PHY_CTRL_GBE_DISABLE;
E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
- /* Workaround SWFLAG unexpectedly set during S0->Sx */
if (hw->mac.type == e1000_pchlan)
- usec_delay(500);
+ e1000_phy_hw_reset_ich8lan(hw);
default:
break;
}
@@ -2982,18 +3393,17 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
if (hw->mac.type >= e1000_pchlan) {
u32 status = E1000_READ_REG(hw, E1000_STATUS);
- if (status & E1000_STATUS_PHYRA) {
+ if (status & E1000_STATUS_PHYRA)
E1000_WRITE_REG(hw, E1000_STATUS, status &
~E1000_STATUS_PHYRA);
- } else
+ else
DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
}
e1000_get_cfg_done_generic(hw);
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
- if ((hw->mac.type != e1000_ich10lan) &&
- (hw->mac.type != e1000_pchlan)) {
+ if (hw->mac.type <= e1000_ich9lan) {
if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
(hw->phy.type == e1000_phy_igp_3)) {
e1000_phy_init_script_igp3(hw);
diff --git a/sys/dev/e1000/e1000_ich8lan.h b/sys/dev/e1000/e1000_ich8lan.h
index 5416eeb..33398c4 100644
--- a/sys/dev/e1000/e1000_ich8lan.h
+++ b/sys/dev/e1000/e1000_ich8lan.h
@@ -140,6 +140,31 @@
#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */
#define HV_TNCRS_LOWER PHY_REG(778, 30)
+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_PEC_EN 0x0200
+#define HV_SMB_ADDR_VALID 0x0080
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP 0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+#define LCD_CFG_PHY_ADDR_BIT 0x0020 /* Phy address bit from LCD Config word */
+
+#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
+
/*
* Additional interrupts need to be handled for ICH family:
* DSW = The FW changed the status of the DISSW bit in FWSM
@@ -169,6 +194,7 @@ void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw);
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_config);
s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
-
#endif
diff --git a/sys/dev/e1000/e1000_mac.c b/sys/dev/e1000/e1000_mac.c
index db6e5f52..f311565 100644
--- a/sys/dev/e1000/e1000_mac.c
+++ b/sys/dev/e1000/e1000_mac.c
@@ -230,7 +230,6 @@ s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
struct e1000_bus_info *bus = &hw->bus;
-
s32 ret_val;
u16 pcie_link_status;
@@ -408,6 +407,11 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
if (hw->bus.func == E1000_FUNC_1)
nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ if (hw->bus.func == E1000_FUNC_2)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
+
+ if (hw->bus.func == E1000_FUNC_3)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
for (i = 0; i < ETH_ADDR_LEN; i += 2) {
offset = nvm_alt_mac_addr_offset + (i >> 1);
ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
@@ -750,12 +754,6 @@ s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
mac->get_link_status = FALSE;
- if (hw->phy.type == e1000_phy_82578) {
- ret_val = e1000_link_stall_workaround_hv(hw);
- if (ret_val)
- goto out;
- }
-
/*
* Check if there was DownShift, must be checked
* immediately after link-up
@@ -994,9 +992,8 @@ s32 e1000_setup_link_generic(struct e1000_hw *hw)
* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
- if (hw->phy.ops.check_reset_block)
- if (hw->phy.ops.check_reset_block(hw))
- goto out;
+ if (e1000_check_reset_block(hw))
+ goto out;
/*
* If requested flow control is set to default, set flow control
@@ -1512,7 +1509,7 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
/*
* Now we need to check if the user selected Rx ONLY
* of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
+ * FULL flow control because we could not advertise Rx
* ONLY. Hence, we must now check to see if we need to
* turn OFF the TRANSMISSION of PAUSE frames.
*/
@@ -2033,7 +2030,7 @@ out:
* e1000_disable_pcie_master_generic - Disables PCI-express master access
* @hw: pointer to the HW structure
*
- * Returns 0 (E1000_SUCCESS) if successful, else returns -10
+ * Returns E1000_SUCCESS if successful, else returns -10
* (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
* the master requests to be disabled.
*
@@ -2151,7 +2148,7 @@ out:
* Verify that when not using auto-negotiation that MDI/MDIx is correctly
* set, which is forced to MDI mode only.
**/
-s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
+static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
diff --git a/sys/dev/e1000/e1000_manage.c b/sys/dev/e1000/e1000_manage.c
index b1f6541..aa0a0d4 100644
--- a/sys/dev/e1000/e1000_manage.c
+++ b/sys/dev/e1000/e1000_manage.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2008, Intel Corporation
+ Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -123,7 +123,7 @@ bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
}
/**
- * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on TX
+ * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx
* @hw: pointer to the HW structure
*
* Enables packet filtering on transmit packets if manageability is enabled
@@ -159,11 +159,9 @@ bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
/* Read in the header. Length and offset are in dwords. */
len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
- for (i = 0; i < len; i++) {
- *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
- E1000_HOST_IF,
+ for (i = 0; i < len; i++)
+ *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
offset + i);
- }
hdr_csum = hdr->checksum;
hdr->checksum = 0;
csum = e1000_calculate_checksum((u8 *)hdr,
diff --git a/sys/dev/e1000/e1000_osdep.h b/sys/dev/e1000/e1000_osdep.h
index b478f29..fcfe8f5 100644
--- a/sys/dev/e1000/e1000_osdep.h
+++ b/sys/dev/e1000/e1000_osdep.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2008, Intel Corporation
+ Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -39,6 +39,8 @@
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
@@ -57,10 +59,8 @@
#define ASSERT(x) if(!(x)) panic("EM: x")
-/* The happy-fun DELAY macro is defined in /usr/src/sys/i386/include/clock.h */
#define usec_delay(x) DELAY(x)
#define msec_delay(x) DELAY(1000*(x))
-/* TODO: Should we be paranoid about delaying in interrupt context? */
#define msec_delay_irq(x) DELAY(1000*(x))
#define MSGOUT(S, A, B) printf(S "\n", A, B)
@@ -73,16 +73,22 @@
#define STATIC static
#define FALSE 0
-#define false FALSE /* shared code stupidity */
+#define false FALSE
#define TRUE 1
#define true TRUE
#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
#define PCI_COMMAND_REGISTER PCIR_COMMAND
-/*
-** These typedefs are necessary due to the new
-** shared code, they are native to Linux.
-*/
+/* Mutex used in the shared code */
+#define E1000_MUTEX struct mtx
+#define E1000_MUTEX_INIT(mutex) mtx_init((mutex), #mutex, \
+ MTX_NETWORK_LOCK, \
+ MTX_DEF | MTX_SPIN)
+#define E1000_MUTEX_DESTROY(mutex) mtx_destroy(mutex)
+#define E1000_MUTEX_LOCK(mutex) mtx_lock(mutex)
+#define E1000_MUTEX_TRYLOCK(mutex) mtx_trylock(mutex)
+#define E1000_MUTEX_UNLOCK(mutex) mtx_unlock(mutex)
+
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
@@ -97,6 +103,28 @@ typedef boolean_t bool;
#define __le32 u32
#define __le64 u64
+#if __FreeBSD_version < 800000 /* Now in HEAD */
+#if defined(__i386__) || defined(__amd64__)
+#define mb() __asm volatile("mfence" ::: "memory")
+#define wmb() __asm volatile("sfence" ::: "memory")
+#define rmb() __asm volatile("lfence" ::: "memory")
+#else
+#define mb()
+#define rmb()
+#define wmb()
+#endif
+#endif /*__FreeBSD_version < 800000 */
+
+#if defined(__i386__) || defined(__amd64__)
+static __inline
+void prefetch(void *x)
+{
+ __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+}
+#else
+#define prefetch(x)
+#endif
+
struct e1000_osdep
{
bus_space_tag_t mem_bus_space_tag;
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index 513f2e6..51504e2 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -83,11 +83,13 @@ void e1000_init_phy_ops_generic(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_null_ops_generic;
phy->ops.get_info = e1000_null_ops_generic;
phy->ops.read_reg = e1000_null_read_reg;
+ phy->ops.read_reg_locked = e1000_null_read_reg;
phy->ops.release = e1000_null_phy_generic;
phy->ops.reset = e1000_null_ops_generic;
phy->ops.set_d0_lplu_state = e1000_null_lplu_state;
phy->ops.set_d3_lplu_state = e1000_null_lplu_state;
phy->ops.write_reg = e1000_null_write_reg;
+ phy->ops.write_reg_locked = e1000_null_write_reg;
phy->ops.power_up = e1000_null_phy_generic;
phy->ops.power_down = e1000_null_phy_generic;
phy->ops.cfg_on_link_up = e1000_null_ops_generic;
@@ -190,20 +192,30 @@ s32 e1000_get_phy_id(struct e1000_hw *hw)
goto out;
/*
- * If the PHY ID is still unknown, we may have an 82577 without link.
- * We will try again after setting Slow MDIC mode. No harm in trying
- * again in this case since the PHY ID is unknown at this point anyway
+ * If the PHY ID is still unknown, we may have an 82577
+ * without link. We will try again after setting Slow MDIC
+ * mode. No harm in trying again in this case since the PHY
+ * ID is unknown at this point anyway.
*/
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+ goto out;
ret_val = e1000_set_mdio_slow_mode_hv(hw, TRUE);
if (ret_val)
goto out;
+ phy->ops.release(hw);
retry_count++;
}
out:
/* Revert to MDIO fast mode, if applicable */
- if (retry_count)
+ if (retry_count) {
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
ret_val = e1000_set_mdio_slow_mode_hv(hw, FALSE);
+ phy->ops.release(hw);
+ }
return ret_val;
}
@@ -351,6 +363,105 @@ out:
}
/**
+ * e1000_read_phy_reg_i2c - Read PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the i2c interface and stores the
+ * retrieved information in data.
+ **/
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+
+ DEBUGFUNC("e1000_read_phy_reg_i2c");
+
+ /*
+ * Set up Op-code, Phy Address, and register address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ (E1000_I2CCMD_OPCODE_READ));
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+ /* Need to byte-swap the 16-bit value. */
+ *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_i2c - Write PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the i2c interface.
+ **/
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+ u16 phy_data_swapped;
+
+ DEBUGFUNC("e1000_write_phy_reg_i2c");
+
+ /* Swap the data bytes for the I2C interface */
+ phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+ /*
+ * Set up Op-code, Phy Address, and register address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_WRITE |
+ phy_data_swapped);
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
* e1000_read_phy_reg_m88 - Read m88 PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
@@ -414,111 +525,179 @@ out:
}
/**
- * e1000_read_phy_reg_igp - Read igp PHY register
+ * __e1000_read_phy_reg_igp - Read igp PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary, then reads the PHY register at offset
- * and storing the retrieved information in data. Release any acquired
+ * and stores the retrieved information in data. Release any acquired
* semaphores before exiting.
**/
-s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
{
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_read_phy_reg_igp");
+ DEBUGFUNC("__e1000_read_phy_reg_igp");
- if (!(hw->phy.ops.acquire))
- goto out;
+ if (!locked) {
+ if (!(hw->phy.ops.acquire))
+ goto out;
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ }
if (offset > MAX_PHY_MULTI_PAGE_REG) {
ret_val = e1000_write_phy_reg_mdic(hw,
IGP01E1000_PHY_PAGE_SELECT,
(u16)offset);
- if (ret_val) {
- hw->phy.ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
}
ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
- hw->phy.ops.release(hw);
-
+release:
+ if (!locked)
+ hw->phy.ops.release(hw);
out:
return ret_val;
}
/**
+ * e1000_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores the
+ * retrieved information in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_igp(hw, offset, data, FALSE);
+}
+
+/**
+ * e1000_read_phy_reg_igp_locked - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_igp(hw, offset, data, TRUE);
+}
+
+/**
* e1000_write_phy_reg_igp - Write igp PHY register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
**/
-s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
{
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_write_phy_reg_igp");
- if (!(hw->phy.ops.acquire))
- goto out;
+ if (!locked) {
+ if (!(hw->phy.ops.acquire))
+ goto out;
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ }
if (offset > MAX_PHY_MULTI_PAGE_REG) {
ret_val = e1000_write_phy_reg_mdic(hw,
IGP01E1000_PHY_PAGE_SELECT,
(u16)offset);
- if (ret_val) {
- hw->phy.ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
}
ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
- hw->phy.ops.release(hw);
+release:
+ if (!locked)
+ hw->phy.ops.release(hw);
out:
return ret_val;
}
/**
- * e1000_read_kmrn_reg_generic - Read kumeran register
+ * e1000_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_igp(hw, offset, data, FALSE);
+}
+
+/**
+ * e1000_write_phy_reg_igp_locked - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_igp(hw, offset, data, TRUE);
+}
+
+/**
+ * __e1000_read_kmrn_reg - Read kumeran register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary. Then reads the PHY register at offset
* using the kumeran interface. The information retrieved is stored in data.
* Release any acquired semaphores before exiting.
**/
-s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
{
u32 kmrnctrlsta;
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_read_kmrn_reg_generic");
+ DEBUGFUNC("__e1000_read_kmrn_reg");
- if (!(hw->phy.ops.acquire))
- goto out;
+ if (!locked) {
+ if (!(hw->phy.ops.acquire))
+ goto out;
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ }
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
@@ -529,48 +708,113 @@ s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
*data = (u16)kmrnctrlsta;
- hw->phy.ops.release(hw);
+ if (!locked)
+ hw->phy.ops.release(hw);
out:
return ret_val;
}
/**
- * e1000_write_kmrn_reg_generic - Write kumeran register
+ * e1000_read_kmrn_reg_generic - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset using the
+ * kumeran interface. The information retrieved is stored in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, FALSE);
+}
+
+/**
+ * e1000_read_kmrn_reg_locked - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the kumeran interface. The
+ * information retrieved is stored in data.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, TRUE);
+}
+
+/**
+ * __e1000_write_kmrn_reg - Write kumeran register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary. Then write the data to PHY register
* at the offset using the kumeran interface. Release any acquired semaphores
* before exiting.
**/
-s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
{
u32 kmrnctrlsta;
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_write_kmrn_reg_generic");
- if (!(hw->phy.ops.acquire))
- goto out;
+ if (!locked) {
+ if (!(hw->phy.ops.acquire))
+ goto out;
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ }
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | data;
E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
usec_delay(2);
- hw->phy.ops.release(hw);
+
+ if (!locked)
+ hw->phy.ops.release(hw);
out:
return ret_val;
}
/**
+ * e1000_write_kmrn_reg_generic - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to the PHY register at the offset
+ * using the kumeran interface. Release the acquired semaphore before exiting.
+ **/
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, FALSE);
+}
+
+/**
+ * e1000_write_kmrn_reg_locked - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Write the data to PHY register at the offset using the kumeran interface.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, TRUE);
+}
+
+/**
* e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
* @hw: pointer to the HW structure
*
@@ -589,6 +833,14 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
goto out;
}
+ if (phy->type == e1000_phy_82580) {
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ goto out;
+ }
+ }
+
/* Enable CRS on TX. This must be set for half-duplex operation. */
ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
if (ret_val)
@@ -600,15 +852,6 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
- if (ret_val)
- goto out;
-
- /* Set number of link attempts before downshift */
- ret_val = phy->ops.read_reg(hw, I82577_CTRL_REG, &phy_data);
- if (ret_val)
- goto out;
- phy_data &= ~I82577_CTRL_DOWNSHIFT_MASK;
- ret_val = phy->ops.write_reg(hw, I82577_CTRL_REG, phy_data);
out:
return ret_val;
@@ -1326,18 +1569,22 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
goto out;
if (!link) {
- /*
- * We didn't get link.
- * Reset the DSP and cross our fingers.
- */
- ret_val = phy->ops.write_reg(hw,
- M88E1000_PHY_PAGE_SELECT,
- 0x001d);
- if (ret_val)
- goto out;
- ret_val = e1000_phy_reset_dsp_generic(hw);
- if (ret_val)
- goto out;
+ if (hw->phy.type != e1000_phy_m88) {
+ DEBUGOUT("Link taking longer than expected.\n");
+ } else {
+ /*
+ * We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = phy->ops.write_reg(hw,
+ M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
+ if (ret_val)
+ goto out;
+ ret_val = e1000_phy_reset_dsp_generic(hw);
+ if (ret_val)
+ goto out;
+ }
}
/* Try once more */
@@ -1347,6 +1594,9 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
goto out;
}
+ if (hw->phy.type != e1000_phy_m88)
+ goto out;
+
ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
goto out;
@@ -1393,11 +1643,6 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
- if (phy->type != e1000_phy_ife) {
- ret_val = e1000_phy_force_speed_duplex_igp(hw);
- goto out;
- }
-
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
if (ret_val)
goto out;
@@ -1625,12 +1870,11 @@ s32 e1000_check_downshift_generic(struct e1000_hw *hw)
case e1000_phy_gg82563:
case e1000_phy_bm:
case e1000_phy_82578:
- case e1000_phy_82577:
offset = M88E1000_PHY_SPEC_STATUS;
mask = M88E1000_PSSR_DOWNSHIFT;
break;
- case e1000_phy_igp_2:
case e1000_phy_igp:
+ case e1000_phy_igp_2:
case e1000_phy_igp_3:
offset = IGP01E1000_PHY_LINK_HEALTH;
mask = IGP01E1000_PLHR_SS_DOWNGRADE;
@@ -1825,16 +2069,14 @@ s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
* it across the board.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
- if (ret_val) {
+ if (ret_val)
/*
* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
usec_delay(usec_interval);
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
- &phy_status);
- }
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
if (phy_status & MII_SR_LINK_STATUS)
@@ -1879,13 +2121,13 @@ s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE + 1) {
- ret_val = E1000_ERR_PHY;
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+ ret_val = -E1000_ERR_PHY;
goto out;
}
phy->min_cable_length = e1000_m88_cable_length_table[index];
- phy->max_cable_length = e1000_m88_cable_length_table[index+1];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
@@ -1986,7 +2228,7 @@ s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
DEBUGFUNC("e1000_get_phy_info_m88");
- if (hw->phy.media_type != e1000_media_type_copper) {
+ if (phy->media_type != e1000_media_type_copper) {
DEBUGOUT("Phy info is only valid for copper media\n");
ret_val = -E1000_ERR_CONFIG;
goto out;
@@ -2088,7 +2330,7 @@ s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
- ret_val = hw->phy.ops.get_cable_length(hw);
+ ret_val = phy->ops.get_cable_length(hw);
if (ret_val)
goto out;
@@ -2114,6 +2356,63 @@ out:
}
/**
+ * e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ * @hw: pointer to the HW structure
+ *
+ * Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_ife");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+ if (ret_val)
+ goto out;
+ phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
+ ? FALSE : TRUE;
+
+ if (phy->polarity_correction) {
+ ret_val = e1000_check_polarity_ife(hw);
+ if (ret_val)
+ goto out;
+ } else {
+ /* Polarity is forced */
+ phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+ }
+
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ goto out;
+
+ phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? TRUE : FALSE;
+
+ /* The following parameters are undefined for 10/100 operation. */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_phy_sw_reset_generic - PHY software reset
* @hw: pointer to the HW structure
*
@@ -2302,7 +2601,7 @@ enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
{
enum e1000_phy_type phy_type = e1000_phy_unknown;
- switch (phy_id) {
+ switch (phy_id) {
case M88E1000_I_PHY_ID:
case M88E1000_E_PHY_ID:
case M88E1111_I_PHY_ID:
@@ -2333,6 +2632,9 @@ enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
case I82577_E_PHY_ID:
phy_type = e1000_phy_82577;
break;
+ case I82580_I_PHY_ID:
+ phy_type = e1000_phy_82580;
+ break;
default:
phy_type = e1000_phy_unknown;
break;
@@ -2416,6 +2718,10 @@ s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
DEBUGFUNC("e1000_write_phy_reg_bm");
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
@@ -2423,10 +2729,6 @@ s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
goto out;
}
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
-
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2446,18 +2748,15 @@ s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000_write_phy_reg_mdic(hw, page_select,
(page << page_shift));
- if (ret_val) {
- hw->phy.ops.release(hw);
+ if (ret_val)
goto out;
- }
}
ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
- hw->phy.ops.release(hw);
-
out:
+ hw->phy.ops.release(hw);
return ret_val;
}
@@ -2480,6 +2779,10 @@ s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
DEBUGFUNC("e1000_read_phy_reg_bm");
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
@@ -2487,10 +2790,6 @@ s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
goto out;
}
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
-
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2510,17 +2809,14 @@ s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000_write_phy_reg_mdic(hw, page_select,
(page << page_shift));
- if (ret_val) {
- hw->phy.ops.release(hw);
+ if (ret_val)
goto out;
- }
}
ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
- hw->phy.ops.release(hw);
-
out:
+ hw->phy.ops.release(hw);
return ret_val;
}
@@ -2541,6 +2837,10 @@ s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
DEBUGFUNC("e1000_write_phy_reg_bm2");
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
@@ -2548,10 +2848,6 @@ s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
goto out;
}
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
-
hw->phy.addr = 1;
if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2560,17 +2856,14 @@ s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
page);
- if (ret_val) {
- hw->phy.ops.release(hw);
+ if (ret_val)
goto out;
- }
}
ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
- hw->phy.ops.release(hw);
-
out:
+ hw->phy.ops.release(hw);
return ret_val;
}
@@ -2590,6 +2883,10 @@ s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
DEBUGFUNC("e1000_write_phy_reg_bm2");
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
@@ -2597,10 +2894,6 @@ s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
goto out;
}
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
-
hw->phy.addr = 1;
if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2608,18 +2901,15 @@ s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
page);
- if (ret_val) {
- hw->phy.ops.release(hw);
+ if (ret_val)
goto out;
- }
}
ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
- hw->phy.ops.release(hw);
-
out:
+ hw->phy.ops.release(hw);
return ret_val;
}
@@ -2639,6 +2929,8 @@ out:
* 3) Write the address using the address opcode (0x11)
* 4) Read or write the data using the data opcode (0x12)
* 5) Restore 769_17.2 to its original value
+ *
+ * Assumes semaphore already acquired.
**/
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
u16 *data, bool read)
@@ -2646,7 +2938,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u16 reg = BM_PHY_REG_NUM(offset);
u16 phy_reg = 0;
- u8 phy_acquired = 1;
DEBUGFUNC("e1000_access_phy_wakeup_reg_bm");
@@ -2655,13 +2946,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
(!(E1000_READ_REG(hw, E1000_PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
DEBUGOUT("Attempting to access page 800 while gig enabled.\n");
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val) {
- DEBUGOUT("Could not acquire PHY\n");
- phy_acquired = 0;
- goto out;
- }
-
/* All operations in this function are phy address 1 */
hw->phy.addr = 1;
@@ -2733,8 +3017,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
}
out:
- if (phy_acquired == 1)
- hw->phy.ops.release(hw);
return ret_val;
}
@@ -2775,46 +3057,51 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
msec_delay(1);
}
+/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ * @slow: TRUE for slow mode, FALSE for normal mode
+ *
+ * Assumes semaphore already acquired.
+ **/
s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
{
s32 ret_val = E1000_SUCCESS;
u16 data = 0;
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
/* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
hw->phy.addr = 1;
ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
(BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
- if (ret_val) {
- hw->phy.ops.release(hw);
- return ret_val;
- }
+ if (ret_val)
+ goto out;
+
ret_val = e1000_write_phy_reg_mdic(hw, BM_CS_CTRL1,
(0x2180 | (slow << 10)));
+ if (ret_val)
+ goto out;
/* dummy read when reverting to fast mode - throw away result */
if (!slow)
- e1000_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
-
- hw->phy.ops.release(hw);
+ ret_val = e1000_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
+out:
return ret_val;
}
/**
- * e1000_read_phy_reg_hv - Read HV PHY register
+ * __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary, then reads the PHY register at offset
- * and storing the retrieved information in data. Release any acquired
+ * and stores the retrieved information in data. Release any acquired
* semaphore before exiting.
**/
-s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
{
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
@@ -2823,6 +3110,12 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
DEBUGFUNC("e1000_read_phy_reg_hv");
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Workaround failure in MDIO access while cable is disconnected */
if ((hw->phy.type == e1000_phy_82577) &&
!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
@@ -2846,57 +3139,80 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
goto out;
}
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
-
hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
if (page == HV_INTC_FC_PAGE_START)
page = 0;
if (reg > MAX_PHY_MULTI_PAGE_REG) {
- if ((hw->phy.type != e1000_phy_82578) ||
- ((reg != I82578_ADDR_REG) &&
- (reg != I82578_ADDR_REG + 1))) {
- u32 phy_addr = hw->phy.addr;
-
- hw->phy.addr = 1;
-
- /* Page is shifted left, PHY expects (page x 32) */
- ret_val = e1000_write_phy_reg_mdic(hw,
- IGP01E1000_PHY_PAGE_SELECT,
- (page << IGP_PAGE_SHIFT));
- if (ret_val) {
- hw->phy.ops.release(hw);
- goto out;
- }
- hw->phy.addr = phy_addr;
- }
+ u32 phy_addr = hw->phy.addr;
+
+ hw->phy.addr = 1;
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (page << IGP_PAGE_SHIFT));
+ hw->phy.addr = phy_addr;
+
+ if (ret_val)
+ goto out;
}
ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
- hw->phy.ops.release(hw);
-
out:
/* Revert to MDIO fast mode, if applicable */
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val = e1000_set_mdio_slow_mode_hv(hw, FALSE);
+ ret_val |= e1000_set_mdio_slow_mode_hv(hw, FALSE);
+
+ if (!locked)
+ hw->phy.ops.release(hw);
return ret_val;
}
/**
- * e1000_write_phy_reg_hv - Write HV PHY register
+ * e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores
+ * the retrieved information in data. Release the acquired semaphore
+ * before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, FALSE);
+}
+
+/**
+ * e1000_read_phy_reg_hv_locked - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, TRUE);
+}
+
+/**
+ * __e1000_write_phy_reg_hv - Write HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
*
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
**/
-s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
{
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
@@ -2905,6 +3221,12 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
DEBUGFUNC("e1000_write_phy_reg_hv");
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Workaround failure in MDIO access while cable is disconnected */
if ((hw->phy.type == e1000_phy_82577) &&
!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
@@ -2928,10 +3250,6 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
goto out;
}
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
-
hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
if (page == HV_INTC_FC_PAGE_START)
@@ -2947,50 +3265,70 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
((MAX_PHY_REG_ADDRESS & reg) == 0) &&
(data & (1 << 11))) {
u16 data2 = 0x7EFF;
- hw->phy.ops.release(hw);
ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
&data2, FALSE);
if (ret_val)
goto out;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- goto out;
}
if (reg > MAX_PHY_MULTI_PAGE_REG) {
- if ((hw->phy.type != e1000_phy_82578) ||
- ((reg != I82578_ADDR_REG) &&
- (reg != I82578_ADDR_REG + 1))) {
- u32 phy_addr = hw->phy.addr;
-
- hw->phy.addr = 1;
-
- /* Page is shifted left, PHY expects (page x 32) */
- ret_val = e1000_write_phy_reg_mdic(hw,
- IGP01E1000_PHY_PAGE_SELECT,
- (page << IGP_PAGE_SHIFT));
- if (ret_val) {
- hw->phy.ops.release(hw);
- goto out;
- }
- hw->phy.addr = phy_addr;
- }
+ u32 phy_addr = hw->phy.addr;
+
+ hw->phy.addr = 1;
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (page << IGP_PAGE_SHIFT));
+ hw->phy.addr = phy_addr;
+
+ if (ret_val)
+ goto out;
}
ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
- hw->phy.ops.release(hw);
out:
/* Revert to MDIO fast mode, if applicable */
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val = e1000_set_mdio_slow_mode_hv(hw, FALSE);
+ ret_val |= e1000_set_mdio_slow_mode_hv(hw, FALSE);
+
+ if (!locked)
+ hw->phy.ops.release(hw);
return ret_val;
}
/**
+ * e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register at the offset.
+ * Release the acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, FALSE);
+}
+
+/**
+ * e1000_write_phy_reg_hv_locked - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired.
+ **/
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, TRUE);
+}
+
+/**
* e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
* @page: page to be accessed
**/
@@ -3011,10 +3349,9 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
* @data: pointer to the data to be read or written
* @read: determines if operation is read or written
*
- * Acquires semaphore, if necessary, then reads the PHY register at offset
- * and storing the retreived information in data. Release any acquired
- * semaphores before exiting. Note that the procedure to read these regs
- * uses the address port and data port to read/write.
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired. Note that the procedure
+ * to read these regs uses the address port and data port to read/write.
**/
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
u16 *data, bool read)
@@ -3022,7 +3359,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u32 addr_reg = 0;
u32 data_reg = 0;
- u8 phy_acquired = 1;
DEBUGFUNC("e1000_access_phy_debug_regs_hv");
@@ -3031,13 +3367,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
I82578_ADDR_REG : I82577_ADDR_REG;
data_reg = addr_reg + 1;
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val) {
- DEBUGOUT("Could not acquire PHY\n");
- phy_acquired = 0;
- goto out;
- }
-
/* All operations in this function are phy address 2 */
hw->phy.addr = 2;
@@ -3060,8 +3389,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
}
out:
- if (phy_acquired == 1)
- hw->phy.ops.release(hw);
return ret_val;
}
@@ -3090,7 +3417,7 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
if (data & PHY_CONTROL_LB)
goto out;
-
+
/* check if link is up and at 1Gbps */
ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
if (ret_val)
@@ -3309,7 +3636,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
I82577_DSTATUS_CABLE_LENGTH_SHIFT;
if (length == E1000_CABLE_LENGTH_UNDEFINED)
- ret_val = E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
phy->cable_length = length;
diff --git a/sys/dev/e1000/e1000_phy.h b/sys/dev/e1000/e1000_phy.h
index 28ed0c1..01e91d6 100644
--- a/sys/dev/e1000/e1000_phy.h
+++ b/sys/dev/e1000/e1000_phy.h
@@ -45,6 +45,7 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw);
s32 e1000_check_polarity_igp(struct e1000_hw *hw);
s32 e1000_check_polarity_ife(struct e1000_hw *hw);
s32 e1000_check_reset_block_generic(struct e1000_hw *hw);
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
s32 e1000_copper_link_autoneg(struct e1000_hw *hw);
s32 e1000_copper_link_setup_igp(struct e1000_hw *hw);
s32 e1000_copper_link_setup_m88(struct e1000_hw *hw);
@@ -57,19 +58,23 @@ s32 e1000_get_cfg_done_generic(struct e1000_hw *hw);
s32 e1000_get_phy_id(struct e1000_hw *hw);
s32 e1000_get_phy_info_igp(struct e1000_hw *hw);
s32 e1000_get_phy_info_m88(struct e1000_hw *hw);
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw);
void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw);
s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
-s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
s32 e1000_setup_copper_link_generic(struct e1000_hw *hw);
s32 e1000_wait_autoneg_generic(struct e1000_hw *hw);
s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
@@ -85,8 +90,12 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw);
void e1000_power_down_phy_copper(struct e1000_hw *hw);
s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
@@ -140,7 +149,6 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
#define I82577_CTRL_REG 23
-#define I82577_CTRL_DOWNSHIFT_MASK (7 << 10)
/* 82577 specific PHY registers */
#define I82577_PHY_CTRL_2 18
@@ -175,6 +183,13 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define BM_CS_STATUS_SPEED_MASK 0xC000
#define BM_CS_STATUS_SPEED_1000 0x8000
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS 26
+#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
+#define HV_M_STATUS_SPEED_MASK 0x0300
+#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_LINK_UP 0x0040
+
#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
#define IGP01E1000_PHY_POLARITY_MASK 0x0078
@@ -220,6 +235,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
diff --git a/sys/dev/e1000/e1000_regs.h b/sys/dev/e1000/e1000_regs.h
index 3a62d0a..9b0bb91 100644
--- a/sys/dev/e1000/e1000_regs.h
+++ b/sys/dev/e1000/e1000_regs.h
@@ -43,6 +43,12 @@
#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define E1000_FLA 0x0001C /* Flash Access - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
+#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */
+#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */
+#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */
+#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */
+#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
@@ -121,11 +127,7 @@
#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */
#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */
#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
-#define E1000_RXCTL(_n) (0x0C014 + (0x40 * (_n)))
-#define E1000_RQDPC(_n) (0x0C030 + (0x40 * (_n)))
-#define E1000_TXCTL(_n) (0x0E014 + (0x40 * (_n)))
-#define E1000_RXCTL(_n) (0x0C014 + (0x40 * (_n)))
-#define E1000_RQDPC(_n) (0x0C030 + (0x40 * (_n)))
+#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer adapters - RW */
#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
/*
@@ -146,10 +148,15 @@
(0x0C00C + ((_n) * 0x40)))
#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
(0x0C010 + ((_n) * 0x40)))
+#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+ (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
(0x0C018 + ((_n) * 0x40)))
#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
(0x0C028 + ((_n) * 0x40)))
+#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
+ (0x0C030 + ((_n) * 0x40)))
#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
(0x0E000 + ((_n) * 0x40)))
#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
@@ -158,17 +165,18 @@
(0x0E008 + ((_n) * 0x40)))
#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
(0x0E010 + ((_n) * 0x40)))
+#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+ (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
(0x0E018 + ((_n) * 0x40)))
#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
(0x0E028 + ((_n) * 0x40)))
-#define E1000_TARC(_n) (0x03840 + (_n << 8))
-#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
-#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
(0x0E038 + ((_n) * 0x40)))
#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
(0x0E03C + ((_n) * 0x40)))
+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */
#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */
@@ -187,6 +195,7 @@
#define E1000_PBSLAC 0x03100 /* Packet Buffer Slave Access Control */
#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Packet Buffer DWORD (_n) */
#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+#define E1000_ITPBS	0x03404  /* Same as TXPBS, renamed for newer adapters - RW */
#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
@@ -271,6 +280,7 @@
#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */
#define E1000_LSECTXUT 0x04300 /* LinkSec Tx Untagged Packet Count - OutPktsUntagged */
#define E1000_LSECTXPKTE 0x04304 /* LinkSec Encrypted Tx Packets Count - OutPktsEncrypted */
@@ -392,6 +402,7 @@
#define E1000_SWSM2 0x05B58 /* Driver-only SW semaphore (not used by BOOT agents) */
#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
+#define E1000_UFUSE 0x05B78 /* UFUSE - RO */
#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
#define E1000_HICR 0x08F00 /* Host Interface Control */
@@ -437,6 +448,7 @@
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
* Filter - RW */
+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
/* Time Sync */
#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
@@ -450,6 +462,8 @@
#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
@@ -493,4 +507,16 @@
#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */
#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */
+/* DMA Coalescing registers */
+#define E1000_DMACR 0x02508 /* Control Register */
+#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT 0x05DD4 /* Current RX Count */
+#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
+
+/* PCIe Parity Status Register */
+#define E1000_PCIEERRSTS 0x05BA8
+#define E1000_ERFUSE 0x00000400
#endif
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 7a2dbad..8756b0c 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -94,7 +94,7 @@ int em_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char em_driver_version[] = "6.9.14";
+char em_driver_version[] = "6.9.24";
/*********************************************************************
@@ -194,7 +194,7 @@ static em_vendor_info_t em_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
-
+ { 0x8086, E1000_DEV_ID_ICH8_82567V_3, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
@@ -211,6 +211,10 @@ static em_vendor_info_t em_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_PCH_M_HV_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_PCH_M_HV_LC, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_PCH_D_HV_DM, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_PCH_D_HV_DC, PCI_ANY_ID, PCI_ANY_ID, 0},
/* required last entry */
{ 0, 0, 0, 0, 0}
};
@@ -240,7 +244,6 @@ static int em_mq_start_locked(struct ifnet *, struct mbuf *);
static void em_qflush(struct ifnet *);
#endif
static int em_ioctl(struct ifnet *, u_long, caddr_t);
-static void em_watchdog(struct adapter *);
static void em_init(void *);
static void em_init_locked(struct adapter *);
static void em_stop(void *);
@@ -315,7 +318,9 @@ static void em_init_manageability(struct adapter *);
static void em_release_manageability(struct adapter *);
static void em_get_hw_control(struct adapter *);
static void em_release_hw_control(struct adapter *);
+static void em_get_wakeup(device_t);
static void em_enable_wakeup(device_t);
+static int em_enable_phy_wakeup(struct adapter *);
#ifdef EM_LEGACY_IRQ
static void em_intr(void *);
@@ -490,7 +495,6 @@ em_attach(device_t dev)
struct adapter *adapter;
int tsize, rsize;
int error = 0;
- u16 eeprom_data, device_id;
INIT_DEBUGOUT("em_attach: begin");
@@ -531,6 +535,7 @@ em_attach(device_t dev)
** identified
*/
if ((adapter->hw.mac.type == e1000_ich8lan) ||
+ (adapter->hw.mac.type == e1000_pchlan) ||
(adapter->hw.mac.type == e1000_ich9lan) ||
(adapter->hw.mac.type == e1000_ich10lan)) {
int rid = EM_BAR_TYPE_FLASH;
@@ -728,6 +733,11 @@ em_attach(device_t dev)
if (error)
goto err_rx_struct;
+ /*
+ * Get Wake-on-Lan and Management info for later use
+ */
+ em_get_wakeup(dev);
+
/* Setup OS specific network interface */
em_setup_interface(dev, adapter);
@@ -742,69 +752,6 @@ em_attach(device_t dev)
device_printf(dev,
"PHY reset is blocked due to SOL/IDER session.\n");
- /* Determine if we have to control management hardware */
- adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
-
- /*
- * Setup Wake-on-Lan
- */
- switch (adapter->hw.mac.type) {
-
- case e1000_82542:
- case e1000_82543:
- break;
- case e1000_82546:
- case e1000_82546_rev_3:
- case e1000_82571:
- case e1000_80003es2lan:
- if (adapter->hw.bus.func == 1)
- e1000_read_nvm(&adapter->hw,
- NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
- else
- e1000_read_nvm(&adapter->hw,
- NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- eeprom_data &= EM_EEPROM_APME;
- break;
- default:
- /* APME bit in EEPROM is mapped to WUC.APME */
- eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) &
- E1000_WUC_APME;
- break;
- }
- if (eeprom_data)
- adapter->wol = E1000_WUFC_MAG;
- /*
- * We have the eeprom settings, now apply the special cases
- * where the eeprom may be wrong or the board won't support
- * wake on lan on a particular port
- */
- device_id = pci_get_device(dev);
- switch (device_id) {
- case E1000_DEV_ID_82546GB_PCIE:
- adapter->wol = 0;
- break;
- case E1000_DEV_ID_82546EB_FIBER:
- case E1000_DEV_ID_82546GB_FIBER:
- case E1000_DEV_ID_82571EB_FIBER:
- /* Wake events only supported on port A for dual fiber
- * regardless of eeprom setting */
- if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
- E1000_STATUS_FUNC_1)
- adapter->wol = 0;
- break;
- case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
- case E1000_DEV_ID_82571EB_QUAD_COPPER:
- case E1000_DEV_ID_82571EB_QUAD_FIBER:
- case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
- /* if quad port adapter, disable WoL on all but port A */
- if (global_quad_port_a != 0)
- adapter->wol = 0;
- /* Reset for multiple quad port adapters */
- if (++global_quad_port_a == 4)
- global_quad_port_a = 0;
- break;
- }
-
/* Do we need workaround for 82544 PCI-X adapter? */
if (adapter->hw.bus.type == e1000_bus_type_pcix &&
adapter->hw.mac.type == e1000_82544)
@@ -820,6 +767,10 @@ em_attach(device_t dev)
em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif
+ /* Non-AMT based hardware can now take control from firmware */
+ if (adapter->has_manage && !adapter->has_amt)
+ em_get_hw_control(adapter);
+
/* Tell the stack that the interface is not active */
adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
@@ -886,20 +837,6 @@ em_detach(device_t dev)
em_release_manageability(adapter);
- if (((adapter->hw.mac.type == e1000_82573) ||
- (adapter->hw.mac.type == e1000_82583) ||
- (adapter->hw.mac.type == e1000_ich8lan) ||
- (adapter->hw.mac.type == e1000_ich10lan) ||
- (adapter->hw.mac.type == e1000_ich9lan)) &&
- e1000_check_mng_mode(&adapter->hw))
- em_release_hw_control(adapter);
-
- if (adapter->wol) {
- E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
- E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
- em_enable_wakeup(dev);
- }
-
EM_TX_UNLOCK(adapter);
EM_CORE_UNLOCK(adapter);
@@ -934,6 +871,7 @@ em_detach(device_t dev)
adapter->rx_desc_base = NULL;
}
+ em_release_hw_control(adapter);
EM_TX_LOCK_DESTROY(adapter);
EM_RX_LOCK_DESTROY(adapter);
EM_CORE_LOCK_DESTROY(adapter);
@@ -963,25 +901,9 @@ em_suspend(device_t dev)
EM_CORE_LOCK(adapter);
- EM_TX_LOCK(adapter);
- em_stop(adapter);
- EM_TX_UNLOCK(adapter);
-
em_release_manageability(adapter);
-
- if (((adapter->hw.mac.type == e1000_82573) ||
- (adapter->hw.mac.type == e1000_82583) ||
- (adapter->hw.mac.type == e1000_ich8lan) ||
- (adapter->hw.mac.type == e1000_ich10lan) ||
- (adapter->hw.mac.type == e1000_ich9lan)) &&
- e1000_check_mng_mode(&adapter->hw))
- em_release_hw_control(adapter);
-
- if (adapter->wol) {
- E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
- E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
- em_enable_wakeup(dev);
- }
+ em_release_hw_control(adapter);
+ em_enable_wakeup(dev);
EM_CORE_UNLOCK(adapter);
@@ -1035,7 +957,7 @@ em_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
} else if (drbr_empty(ifp, adapter->br) &&
(adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
if ((error = em_xmit(adapter, &m)) != 0) {
- if (m != NULL)
+ if (m)
error = drbr_enqueue(ifp, adapter->br, m);
return (error);
} else {
@@ -1049,7 +971,7 @@ em_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
** listener and set the watchdog on.
*/
ETHER_BPF_MTAP(ifp, m);
- adapter->watchdog_timer = EM_TX_TIMEOUT;
+ adapter->watchdog_check = TRUE;
}
} else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
return (error);
@@ -1072,7 +994,7 @@ process:
drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
ETHER_BPF_MTAP(ifp, next);
/* Set the watchdog */
- adapter->watchdog_timer = EM_TX_TIMEOUT;
+ adapter->watchdog_check = TRUE;
}
if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
@@ -1151,7 +1073,7 @@ em_start_locked(struct ifnet *ifp)
ETHER_BPF_MTAP(ifp, m_head);
/* Set timeout in case hardware has problems transmitting. */
- adapter->watchdog_timer = EM_TX_TIMEOUT;
+ adapter->watchdog_check = TRUE;
}
if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
@@ -1209,8 +1131,7 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
em_init_locked(adapter);
EM_CORE_UNLOCK(adapter);
}
- if (!(ifp->if_flags & IFF_NOARP))
- arp_ifinit(ifp, ifa);
+ arp_ifinit(ifp, ifa);
} else
#endif
error = ether_ioctl(ifp, command, data);
@@ -1244,6 +1165,9 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
case e1000_80003es2lan: /* Limit Jumbo Frame size */
max_frame_size = 9234;
break;
+ case e1000_pchlan:
+ max_frame_size = 4096;
+ break;
/* Adapters that do not support jumbo frames */
case e1000_82542:
case e1000_82583:
@@ -1359,11 +1283,17 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
reinit = 1;
}
#endif
-
if (mask & IFCAP_VLAN_HWTAGGING) {
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
reinit = 1;
}
+ if ((mask & IFCAP_WOL) &&
+ (ifp->if_capabilities & IFCAP_WOL) != 0) {
+ if (mask & IFCAP_WOL_MCAST)
+ ifp->if_capenable ^= IFCAP_WOL_MCAST;
+ if (mask & IFCAP_WOL_MAGIC)
+ ifp->if_capenable ^= IFCAP_WOL_MAGIC;
+ }
if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
em_init(adapter);
#if __FreeBSD_version >= 700000
@@ -1380,53 +1310,6 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
return (error);
}
-/*********************************************************************
- * Watchdog timer:
- *
- * This routine is called from the local timer every second.
- * As long as transmit descriptors are being cleaned the value
- * is non-zero and we do nothing. Reaching 0 indicates a tx hang
- * and we then reset the device.
- *
- **********************************************************************/
-
-static void
-em_watchdog(struct adapter *adapter)
-{
-
- EM_CORE_LOCK_ASSERT(adapter);
-
- /*
- ** The timer is set to 5 every time start queues a packet.
- ** Then txeof keeps resetting it as long as it cleans at
- ** least one descriptor.
- ** Finally, anytime all descriptors are clean the timer is
- ** set to 0.
- */
- EM_TX_LOCK(adapter);
- if ((adapter->watchdog_timer == 0) || (--adapter->watchdog_timer)) {
- EM_TX_UNLOCK(adapter);
- return;
- }
-
- /* If we are in this routine because of pause frames, then
- * don't reset the hardware.
- */
- if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
- E1000_STATUS_TXOFF) {
- adapter->watchdog_timer = EM_TX_TIMEOUT;
- EM_TX_UNLOCK(adapter);
- return;
- }
-
- if (e1000_check_for_link(&adapter->hw) == 0)
- device_printf(adapter->dev, "watchdog timeout -- resetting\n");
- adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- adapter->watchdog_events++;
- EM_TX_UNLOCK(adapter);
-
- em_init_locked(adapter);
-}
/*********************************************************************
* Init entry point
@@ -1492,6 +1375,9 @@ em_init_locked(struct adapter *adapter)
break;
case e1000_ich9lan:
case e1000_ich10lan:
+ case e1000_pchlan:
+ pba = E1000_PBA_10K;
+ break;
case e1000_ich8lan:
pba = E1000_PBA_8K;
break;
@@ -1614,6 +1500,10 @@ em_init_locked(struct adapter *adapter)
#endif /* DEVICE_POLLING */
em_enable_intr(adapter);
+ /* AMT based hardware can now take control from firmware */
+ if (adapter->has_manage && adapter->has_amt)
+ em_get_hw_control(adapter);
+
/* Don't reset the phy next time init gets called */
adapter->hw.phy.reset_disable = TRUE;
}
@@ -1667,7 +1557,7 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
if (!drbr_empty(ifp, adapter->br))
em_mq_start_locked(ifp, NULL);
#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->snd))
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp);
#endif
EM_TX_UNLOCK(adapter);
@@ -1769,7 +1659,7 @@ em_handle_rxtx(void *context, int pending)
if (!drbr_empty(ifp, adapter->br))
em_mq_start_locked(ifp, NULL);
#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->snd))
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp);
#endif
EM_TX_UNLOCK(adapter);
@@ -1933,7 +1823,7 @@ em_handle_tx(void *context, int pending)
if (!drbr_empty(ifp, adapter->br))
em_mq_start_locked(ifp, NULL);
#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->snd))
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp);
#endif
EM_TX_UNLOCK(adapter);
@@ -2541,7 +2431,11 @@ em_set_multi(struct adapter *adapter)
if (mta == NULL)
panic("em_set_multi memory failure\n");
+#if __FreeBSD_version < 800000
+ IF_ADDR_LOCK(ifp);
+#else
if_maddr_rlock(ifp);
+#endif
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
@@ -2553,8 +2447,11 @@ em_set_multi(struct adapter *adapter)
&mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
mcnt++;
}
+#if __FreeBSD_version < 800000
+ IF_ADDR_UNLOCK(ifp);
+#else
if_maddr_runlock(ifp);
-
+#endif
if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
reg_rctl |= E1000_RCTL_MPE;
@@ -2605,13 +2502,21 @@ em_local_timer(void *arg)
em_smartspeed(adapter);
/*
- * Each second we check the watchdog to
- * protect against hardware hangs.
+ * We check the watchdog: the time since
+ * the last TX descriptor was cleaned.
+ * This implies a functional TX engine.
*/
- em_watchdog(adapter);
+ if ((adapter->watchdog_check == TRUE) &&
+ (ticks - adapter->watchdog_time > EM_WATCHDOG))
+ goto hung;
callout_reset(&adapter->timer, hz, em_local_timer, adapter);
-
+ return;
+hung:
+ device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
+ adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ adapter->watchdog_events++;
+ em_init_locked(adapter);
}
static void
@@ -2677,7 +2582,7 @@ em_update_link_status(struct adapter *adapter)
device_printf(dev, "Link is Down\n");
adapter->link_active = 0;
/* Link down, disable watchdog */
- adapter->watchdog_timer = FALSE;
+ adapter->watchdog_check = FALSE;
if_link_state_change(ifp, LINK_STATE_DOWN);
}
}
@@ -3093,15 +2998,6 @@ em_hardware_init(struct adapter *adapter)
/* Issue a global reset */
e1000_reset_hw(&adapter->hw);
- /* Get control from any management/hw control */
- if (((adapter->hw.mac.type == e1000_82573) ||
- (adapter->hw.mac.type == e1000_82583) ||
- (adapter->hw.mac.type == e1000_ich8lan) ||
- (adapter->hw.mac.type == e1000_ich10lan) ||
- (adapter->hw.mac.type == e1000_ich9lan)) &&
- e1000_check_mng_mode(&adapter->hw))
- em_get_hw_control(adapter);
-
/* When hardware is reset, fifo_head is also reset */
adapter->tx_fifo_head = 0;
@@ -3151,6 +3047,9 @@ em_hardware_init(struct adapter *adapter)
else
adapter->hw.fc.requested_mode = e1000_fc_none;
+ /* Override - workaround for PCHLAN issue */
+ if (adapter->hw.mac.type == e1000_pchlan)
+ adapter->hw.fc.requested_mode = e1000_fc_rx_pause;
if (e1000_init_hw(&adapter->hw) < 0) {
device_printf(dev, "Hardware Initialization Failed\n");
@@ -3233,6 +3132,12 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities |= IFCAP_POLLING;
#endif
+ /* Enable All WOL methods by default */
+ if (adapter->wol) {
+ ifp->if_capabilities |= IFCAP_WOL;
+ ifp->if_capenable |= IFCAP_WOL;
+ }
+
/*
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
@@ -3299,7 +3204,7 @@ em_smartspeed(struct adapter *adapter)
PHY_1000T_CTRL, phy_tmp);
adapter->smartspeed++;
if(adapter->hw.mac.autoneg &&
- !e1000_phy_setup_autoneg(&adapter->hw) &&
+ !e1000_copper_link_autoneg(&adapter->hw) &&
!e1000_read_phy_reg(&adapter->hw,
PHY_CONTROL, &phy_tmp)) {
phy_tmp |= (MII_CR_AUTO_NEG_EN |
@@ -3316,7 +3221,7 @@ em_smartspeed(struct adapter *adapter)
phy_tmp |= CR_1000T_MS_ENABLE;
e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
if(adapter->hw.mac.autoneg &&
- !e1000_phy_setup_autoneg(&adapter->hw) &&
+ !e1000_copper_link_autoneg(&adapter->hw) &&
!e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
phy_tmp |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
@@ -3975,7 +3880,6 @@ static void
em_txeof(struct adapter *adapter)
{
int first, last, done, num_avail;
- u32 cleaned = 0;
struct em_buffer *tx_buffer;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct ifnet *ifp = adapter->ifp;
@@ -4011,7 +3915,7 @@ em_txeof(struct adapter *adapter)
tx_desc->upper.data = 0;
tx_desc->lower.data = 0;
tx_desc->buffer_addr = 0;
- ++num_avail; ++cleaned;
+ ++num_avail;
if (tx_buffer->m_head) {
ifp->if_opackets++;
@@ -4025,6 +3929,7 @@ em_txeof(struct adapter *adapter)
tx_buffer->m_head = NULL;
}
tx_buffer->next_eop = -1;
+ adapter->watchdog_time = ticks;
if (++first == adapter->num_tx_desc)
first = 0;
@@ -4050,20 +3955,17 @@ em_txeof(struct adapter *adapter)
/*
* If we have enough room, clear IFF_DRV_OACTIVE to
* tell the stack that it is OK to send packets.
- * If there are no pending descriptors, clear the timeout.
+ * If there are no pending descriptors, clear the watchdog.
*/
if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if (num_avail == adapter->num_tx_desc) {
- adapter->watchdog_timer = 0;
+ adapter->watchdog_check = FALSE;
adapter->num_tx_desc_avail = num_avail;
return;
}
}
- /* If any descriptors cleaned, reset the watchdog */
- if (cleaned)
- adapter->watchdog_timer = EM_TX_TIMEOUT;
adapter->num_tx_desc_avail = num_avail;
return;
}
@@ -4071,7 +3973,7 @@ em_txeof(struct adapter *adapter)
/*********************************************************************
*
* When Link is lost sometimes there is work still in the TX ring
- * which will result in a watchdog, rather than allow that do an
+ * which may result in a watchdog; rather than allow that, we do an
* attempted cleanup and then reinit here. Note that this has been
* seens mostly with fiber adapters.
*
@@ -4079,14 +3981,12 @@ em_txeof(struct adapter *adapter)
static void
em_tx_purge(struct adapter *adapter)
{
- if ((!adapter->link_active) && (adapter->watchdog_timer)) {
+ if ((!adapter->link_active) && (adapter->watchdog_check)) {
EM_TX_LOCK(adapter);
em_txeof(adapter);
EM_TX_UNLOCK(adapter);
- if (adapter->watchdog_timer) { /* Still not clean? */
- adapter->watchdog_timer = 0;
+ if (adapter->watchdog_check) /* Still outstanding? */
em_init_locked(adapter);
- }
}
}
@@ -4877,72 +4777,54 @@ em_release_manageability(struct adapter *adapter)
}
/*
- * em_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded. For AMT version (only with 82573)
- * of the f/w this means that the network i/f is open.
- *
+ * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means
+ * that the driver is loaded. For AMT version type f/w
+ * this means that the network i/f is open.
*/
static void
em_get_hw_control(struct adapter *adapter)
{
u32 ctrl_ext, swsm;
- /* Let firmware know the driver has taken over */
- switch (adapter->hw.mac.type) {
- case e1000_82573:
+ if (adapter->hw.mac.type == e1000_82573) {
swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
swsm | E1000_SWSM_DRV_LOAD);
- break;
- case e1000_82571:
- case e1000_82572:
- case e1000_80003es2lan:
- case e1000_ich8lan:
- case e1000_ich9lan:
- case e1000_ich10lan:
- ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
- E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
- ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
- break;
- default:
- break;
+ return;
}
+ /* else */
+ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ return;
}
/*
* em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that the
- * driver is no longer loaded. For AMT version (only with 82573) i
- * of the f/w this means that the network i/f is closed.
- *
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is no longer loaded. For AMT versions of the
+ * f/w this means that the network i/f is closed.
*/
static void
em_release_hw_control(struct adapter *adapter)
{
u32 ctrl_ext, swsm;
- /* Let firmware taken over control of h/w */
- switch (adapter->hw.mac.type) {
- case e1000_82573:
+ if (!adapter->has_manage)
+ return;
+
+ if (adapter->hw.mac.type == e1000_82573) {
swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
swsm & ~E1000_SWSM_DRV_LOAD);
- break;
- case e1000_82571:
- case e1000_82572:
- case e1000_80003es2lan:
- case e1000_ich8lan:
- case e1000_ich9lan:
- case e1000_ich10lan:
- ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
- E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
- ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
- break;
- default:
- break;
-
+ return;
}
+ /* else */
+ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ return;
}
static int
@@ -4958,29 +4840,251 @@ em_is_valid_ether_addr(u8 *addr)
}
/*
+** Parse the interface capabilities with regard
+** to both system management and wake-on-lan for
+** later use.
+*/
+static void
+em_get_wakeup(device_t dev)
+{
+ struct adapter *adapter = device_get_softc(dev);
+ u16 eeprom_data = 0, device_id, apme_mask;
+
+ adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
+ apme_mask = EM_EEPROM_APME;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82542:
+ case e1000_82543:
+ break;
+ case e1000_82544:
+ e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
+ apme_mask = EM_82544_APME;
+ break;
+ case e1000_82573:
+ case e1000_82583:
+ adapter->has_amt = TRUE;
+ /* Falls thru */
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ if (adapter->hw.bus.func == 1) {
+ e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+ break;
+ } else
+ e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ break;
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ case e1000_pchlan:
+ apme_mask = E1000_WUC_APME;
+ adapter->has_amt = TRUE;
+ eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
+ break;
+ default:
+ e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ break;
+ }
+ if (eeprom_data & apme_mask)
+ adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
+ /*
+ * We have the eeprom settings, now apply the special cases
+ * where the eeprom may be wrong or the board won't support
+ * wake on lan on a particular port
+ */
+ device_id = pci_get_device(dev);
+ switch (device_id) {
+ case E1000_DEV_ID_82546GB_PCIE:
+ adapter->wol = 0;
+ break;
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ /* Wake events only supported on port A for dual fiber
+ * regardless of eeprom setting */
+ if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+ E1000_STATUS_FUNC_1)
+ adapter->wol = 0;
+ break;
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+ /* if quad port adapter, disable WoL on all but port A */
+ if (global_quad_port_a != 0)
+ adapter->wol = 0;
+ /* Reset for multiple quad port adapters */
+ if (++global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ }
+ return;
+}
+
+
+/*
* Enable PCI Wake On Lan capability
*/
void
em_enable_wakeup(device_t dev)
{
- u16 cap, status;
- u8 id;
-
- /* First find the capabilities pointer*/
- cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
- /* Read the PM Capabilities */
- id = pci_read_config(dev, cap, 1);
- if (id != PCIY_PMG) /* Something wrong */
+ struct adapter *adapter = device_get_softc(dev);
+ struct ifnet *ifp = adapter->ifp;
+ u32 pmc, ctrl, ctrl_ext, rctl;
+ u16 status;
+
+ if ((pci_find_extcap(dev, PCIY_PMG, &pmc) != 0))
return;
- /* OK, we have the power capabilities, so
- now get the status register */
- cap += PCIR_POWER_STATUS;
- status = pci_read_config(dev, cap, 2);
- status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(dev, cap, status, 2);
+
+ /* Advertise the wakeup capability */
+ ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
+ E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+ E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
+
+ /* ICH workaround code */
+ if ((adapter->hw.mac.type == e1000_ich8lan) ||
+ (adapter->hw.mac.type == e1000_pchlan) ||
+ (adapter->hw.mac.type == e1000_ich9lan) ||
+ (adapter->hw.mac.type == e1000_ich10lan)) {
+ e1000_disable_gig_wol_ich8lan(&adapter->hw);
+ e1000_hv_phy_powerdown_workaround_ich8lan(&adapter->hw);
+ }
+
+ /* Keep the laser running on Fiber adapters */
+ if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
+ adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
+ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
+ }
+
+ /*
+ ** Determine type of Wakeup: note that wol
+ ** is set with all bits on by default.
+ */
+ if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
+ adapter->wol &= ~E1000_WUFC_MAG;
+
+ if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
+ adapter->wol &= ~E1000_WUFC_MC;
+ else {
+ rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+ rctl |= E1000_RCTL_MPE;
+ E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+ }
+
+ if (adapter->hw.mac.type == e1000_pchlan) {
+ if (em_enable_phy_wakeup(adapter))
+ return;
+ } else {
+ E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
+ E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
+ }
+
+ if (adapter->hw.phy.type == e1000_phy_igp_3)
+ e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+
+ /* Request PME */
+ status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
+ status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
+ if (ifp->if_capenable & IFCAP_WOL)
+ status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
+ pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
+
return;
}
+/*
+** WOL in the newer chipset interfaces (pchlan)
+** requires things to be copied into the PHY
+*/
+static int
+em_enable_phy_wakeup(struct adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mreg, ret = 0;
+ u16 preg;
+
+ /* copy MAC RARs to PHY RARs */
+ for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
+ mreg = E1000_READ_REG(hw, E1000_RAL(i));
+ e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
+ e1000_write_phy_reg(hw, BM_RAR_M(i),
+ (u16)((mreg >> 16) & 0xFFFF));
+ mreg = E1000_READ_REG(hw, E1000_RAH(i));
+ e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
+ e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
+ (u16)((mreg >> 16) & 0xFFFF));
+ }
+
+ /* copy MAC MTA to PHY MTA */
+ for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
+ mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
+ e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
+ e1000_write_phy_reg(hw, BM_MTA(i) + 1,
+ (u16)((mreg >> 16) & 0xFFFF));
+ }
+
+ /* configure PHY Rx Control register */
+ e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
+ mreg = E1000_READ_REG(hw, E1000_RCTL);
+ if (mreg & E1000_RCTL_UPE)
+ preg |= BM_RCTL_UPE;
+ if (mreg & E1000_RCTL_MPE)
+ preg |= BM_RCTL_MPE;
+ preg &= ~(BM_RCTL_MO_MASK);
+ if (mreg & E1000_RCTL_MO_3)
+ preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
+ << BM_RCTL_MO_SHIFT);
+ if (mreg & E1000_RCTL_BAM)
+ preg |= BM_RCTL_BAM;
+ if (mreg & E1000_RCTL_PMCF)
+ preg |= BM_RCTL_PMCF;
+ mreg = E1000_READ_REG(hw, E1000_CTRL);
+ if (mreg & E1000_CTRL_RFCE)
+ preg |= BM_RCTL_RFCE;
+ e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
+
+ /* enable PHY wakeup in MAC register */
+ E1000_WRITE_REG(hw, E1000_WUC,
+ E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+ E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
+
+ /* configure and enable PHY wakeup in PHY registers */
+ e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
+ e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+
+ /* activate PHY wakeup */
+ ret = hw->phy.ops.acquire(hw);
+ if (ret) {
+ printf("Could not acquire PHY\n");
+ return ret;
+ }
+ e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
+ ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
+ if (ret) {
+ printf("Could not read PHY page 769\n");
+ goto out;
+ }
+ preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
+ ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
+ if (ret)
+ printf("Could not set PHY Host Wakeup bit\n");
+out:
+ hw->phy.ops.release(hw);
+
+ return ret;
+}
+
/*********************************************************************
* 82544 Coexistence issue workaround.
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index 7487a89..5019e0a 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -135,9 +135,9 @@
#define EM_RADV 64
/*
- * This parameter controls the duration of transmit watchdog timer.
+ * This parameter controls the max duration of transmit watchdog.
*/
-#define EM_TX_TIMEOUT 5
+#define EM_WATCHDOG (5 * hz)
/*
* This parameter controls when the driver calls the routine to reclaim
@@ -189,6 +189,7 @@
#define ETHER_ALIGN 2
#define EM_FC_PAUSE_TIME 0x0680
#define EM_EEPROM_APME 0x400;
+#define EM_82544_APME 0x0004;
/* Code compatilbility between 6 and 7 */
#ifndef ETHER_BPF_MTAP
@@ -308,7 +309,8 @@ struct adapter {
struct ifmedia media;
struct callout timer;
struct callout tx_fifo_timer;
- int watchdog_timer;
+ bool watchdog_check;
+ int watchdog_time;
int msi;
int if_flags;
int max_frame_size;
@@ -332,8 +334,9 @@ struct adapter {
#endif
/* Management and WOL features */
- int wol;
- int has_manage;
+ u32 wol;
+ bool has_manage;
+ bool has_amt;
/* Info about the board itself */
uint8_t link_active;
diff --git a/sys/dev/e1000/if_igb.c b/sys/dev/e1000/if_igb.c
index 61743df..dc2e85f 100644
--- a/sys/dev/e1000/if_igb.c
+++ b/sys/dev/e1000/if_igb.c
@@ -101,7 +101,7 @@ int igb_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char igb_driver_version[] = "version - 1.7.3";
+char igb_driver_version[] = "version - 1.8.4";
/*********************************************************************
@@ -123,12 +123,19 @@ static igb_vendor_info_t igb_vendor_info_array[] =
PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82576, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82576_NS, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82576_NS_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82576_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82576_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82576_SERDES_QUAD,
PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82576_QUAD_COPPER,
PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82580_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82580_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82580_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82580_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82580_COPPER_DUAL,
+ PCI_ANY_ID, PCI_ANY_ID, 0},
/* required last entry */
{ 0, 0, 0, 0, 0}
};
@@ -159,7 +166,6 @@ static int igb_mq_start_locked(struct ifnet *,
static void igb_qflush(struct ifnet *);
#endif
static int igb_ioctl(struct ifnet *, u_long, caddr_t);
-static void igb_watchdog(struct adapter *);
static void igb_init(void *);
static void igb_init_locked(struct adapter *);
static void igb_stop(void *);
@@ -172,7 +178,7 @@ static int igb_allocate_legacy(struct adapter *);
static int igb_setup_msix(struct adapter *);
static void igb_free_pci_resources(struct adapter *);
static void igb_local_timer(void *);
-static int igb_hardware_init(struct adapter *);
+static void igb_reset(struct adapter *);
static void igb_setup_interface(device_t, struct adapter *);
static int igb_allocate_queues(struct adapter *);
static void igb_configure_queues(struct adapter *);
@@ -204,7 +210,7 @@ static void igb_disable_promisc(struct adapter *);
static void igb_set_multi(struct adapter *);
static void igb_print_hw_stats(struct adapter *);
static void igb_update_link_status(struct adapter *);
-static int igb_get_buf(struct rx_ring *, int, u8);
+static int igb_get_buf(struct rx_ring *, int, int);
static void igb_register_vlan(void *, struct ifnet *, u16);
static void igb_unregister_vlan(void *, struct ifnet *, u16);
@@ -296,11 +302,27 @@ static int igb_bulk_latency = IGB_BULK_LATENCY;
TUNABLE_INT("hw.igb.bulk_latency", &igb_bulk_latency);
/*
+ * MSIX should be the default for best performance,
+ * but this allows it to be forced off for testing.
+ */
+static int igb_enable_msix = 1;
+TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix);
+
+/*
+ * Header split has seemed to be beneficial in
+ * all circumstances tested, so it's on by default;
+ * however, this variable will allow it to be disabled
+ * for some debug purposes.
+ */
+static bool igb_header_split = TRUE;
+TUNABLE_INT("hw.igb.hdr_split", &igb_header_split);
+
+/*
** This will autoconfigure based on the number
-** of CPUs if set to 0. Only a matched pair of
+** of CPUs if left at 0. Only a matched pair of
** TX and RX rings are allowed.
*/
-static int igb_num_queues = 1;
+static int igb_num_queues = 0;
TUNABLE_INT("hw.igb.num_queues", &igb_num_queues);
/* How many packets rxeof tries to clean at a time */
@@ -540,17 +562,10 @@ igb_attach(device_t dev)
goto err_late;
}
- /* Now Initialize the hardware */
- if (igb_hardware_init(adapter)) {
- device_printf(dev, "Unable to initialize the hardware\n");
- error = EIO;
- goto err_late;
- }
-
/*
** Configure Interrupts
*/
- if (adapter->msix > 1) /* MSIX */
+ if ((adapter->msix > 1) && (igb_enable_msix))
error = igb_allocate_msix(adapter);
else /* MSI or Legacy */
error = igb_allocate_legacy(adapter);
@@ -560,6 +575,9 @@ igb_attach(device_t dev)
/* Setup OS specific network interface */
igb_setup_interface(dev, adapter);
+ /* Now get a good starting state */
+ igb_reset(adapter);
+
#ifdef IGB_IEEE1588
/*
** Setup the timer: IEEE 1588 support
@@ -787,8 +805,8 @@ igb_start_locked(struct tx_ring *txr, struct ifnet *ifp)
/* Send a copy of the frame to the BPF listener */
ETHER_BPF_MTAP(ifp, m_head);
- /* Set timeout in case hardware has problems transmitting. */
- txr->watchdog_timer = IGB_TX_TIMEOUT;
+ /* Set watchdog on */
+ txr->watchdog_check = TRUE;
}
}
@@ -854,17 +872,16 @@ igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
/* If nothing queued go right to xmit */
if (drbr_empty(ifp, txr->br)) {
- if ((err = igb_xmit(txr, &m)) != 0) {
- if (m != NULL)
- err = drbr_enqueue(ifp, txr->br, m);
- return (err);
+ if (igb_xmit(txr, &m)) {
+ if (m && (err = drbr_enqueue(ifp, txr->br, m)) != 0)
+ return (err);
} else {
/* Success, update stats */
drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
/* Send a copy of the frame to the BPF listener */
ETHER_BPF_MTAP(ifp, m);
/* Set the watchdog */
- txr->watchdog_timer = IGB_TX_TIMEOUT;
+ txr->watchdog_check = TRUE;
}
} else if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
@@ -881,15 +898,11 @@ process:
next = drbr_dequeue(ifp, txr->br);
if (next == NULL)
break;
- if ((err = igb_xmit(txr, &next)) != 0) {
- if (next != NULL)
- err = drbr_enqueue(ifp, txr->br, next);
+ if (igb_xmit(txr, &next))
break;
- }
- drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
ETHER_BPF_MTAP(ifp, next);
/* Set the watchdog */
- txr->watchdog_timer = IGB_TX_TIMEOUT;
+ txr->watchdog_check = TRUE;
}
if (txr->tx_avail <= IGB_TX_OP_THRESHOLD)
@@ -957,8 +970,7 @@ igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
igb_init_locked(adapter);
IGB_CORE_UNLOCK(adapter);
}
- if (!(ifp->if_flags & IFF_NOARP))
- arp_ifinit(ifp, ifa);
+ arp_ifinit(ifp, ifa);
} else
#endif
error = ether_ioctl(ifp, command, data);
@@ -1049,7 +1061,7 @@ igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
reinit = 1;
}
- if (mask & IFCAP_LRO) {
+ if ((mask & IFCAP_LRO) && (igb_header_split)) {
ifp->if_capenable ^= IFCAP_LRO;
reinit = 1;
}
@@ -1076,80 +1088,6 @@ igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
return (error);
}
-/*********************************************************************
- * Watchdog timer:
- *
- * This routine is called from the local timer every second.
- * As long as transmit descriptors are being cleaned the value
- * is non-zero and we do nothing. Reaching 0 indicates a tx hang
- * and we then reset the device.
- *
- **********************************************************************/
-
-static void
-igb_watchdog(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
- bool tx_hang = FALSE;
-
- IGB_CORE_LOCK_ASSERT(adapter);
-
- /*
- ** The timer is set to 5 every time start() queues a packet.
- ** Then txeof keeps resetting it as long as it cleans at
- ** least one descriptor.
- ** Finally, anytime all descriptors are clean the timer is
- ** set to 0.
- **
- ** With TX Multiqueue we need to check every queue's timer,
- ** if any time out we do the reset.
- */
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IGB_TX_LOCK(txr);
- if (txr->watchdog_timer == 0 ||
- (--txr->watchdog_timer)) {
- IGB_TX_UNLOCK(txr);
- continue;
- } else {
- tx_hang = TRUE;
- IGB_TX_UNLOCK(txr);
- break;
- }
- }
- if (tx_hang == FALSE)
- return;
-
- /* If we are in this routine because of pause frames, then
- * don't reset the hardware.
- */
- if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
- E1000_STATUS_TXOFF) {
- txr = adapter->tx_rings; /* reset pointer */
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IGB_TX_LOCK(txr);
- txr->watchdog_timer = IGB_TX_TIMEOUT;
- IGB_TX_UNLOCK(txr);
- }
- return;
- }
-
- if (e1000_check_for_link(&adapter->hw) == 0)
- device_printf(adapter->dev, "watchdog timeout -- resetting\n");
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- device_printf(adapter->dev, "Queue(%d) tdh = %d, tdt = %d\n",
- i, E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
- E1000_READ_REG(&adapter->hw, E1000_TDT(i)));
- device_printf(adapter->dev, "Queue(%d) desc avail = %d,"
- " Next Desc to Clean = %d\n", i, txr->tx_avail,
- txr->next_to_clean);
- }
-
- adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- adapter->watchdog_events++;
-
- igb_init_locked(adapter);
-}
/*********************************************************************
* Init entry point
@@ -1165,29 +1103,16 @@ igb_watchdog(struct adapter *adapter)
static void
igb_init_locked(struct adapter *adapter)
{
- struct rx_ring *rxr = adapter->rx_rings;
- struct tx_ring *txr = adapter->tx_rings;
struct ifnet *ifp = adapter->ifp;
device_t dev = adapter->dev;
- u32 pba = 0;
INIT_DEBUGOUT("igb_init: begin");
IGB_CORE_LOCK_ASSERT(adapter);
- igb_stop(adapter);
+ igb_disable_intr(adapter);
+ callout_stop(&adapter->timer);
- /*
- * Packet Buffer Allocation (PBA)
- * Writing PBA sets the receive portion of the buffer
- * the remainder is used for the transmit buffer.
- */
- if (adapter->hw.mac.type == e1000_82575) {
- INIT_DEBUGOUT1("igb_init: pba=%dK",pba);
- pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
- E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
- }
-
/* Get the latest mac address, User can use a LAA */
bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
ETHER_ADDR_LEN);
@@ -1195,15 +1120,9 @@ igb_init_locked(struct adapter *adapter)
/* Put the address into the Receive Address Array */
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
- /* Initialize the hardware */
- if (igb_hardware_init(adapter)) {
- device_printf(dev, "Unable to initialize the hardware\n");
- return;
- }
+ igb_reset(adapter);
igb_update_link_status(adapter);
- E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
-
/* Set hardware offload abilities */
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TXCSUM) {
@@ -1239,7 +1158,6 @@ igb_init_locked(struct adapter *adapter)
/* Prepare receive descriptors and buffers */
if (igb_setup_receive_structures(adapter)) {
device_printf(dev, "Could not setup receive structures\n");
- igb_stop(adapter);
return;
}
igb_initialize_receive_units(adapter);
@@ -1259,26 +1177,10 @@ igb_init_locked(struct adapter *adapter)
/* Set up VLAN tag offload and filter */
igb_setup_vlan_hw_support(adapter);
- /* Set default RX interrupt moderation */
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- E1000_WRITE_REG(&adapter->hw,
- E1000_EITR(rxr->msix), igb_ave_latency);
- rxr->eitr_setting = igb_ave_latency;
- }
-
- /* Set TX interrupt rate & reset TX watchdog */
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- E1000_WRITE_REG(&adapter->hw,
- E1000_EITR(txr->msix), igb_ave_latency);
- txr->watchdog_timer = FALSE;
- }
-
- {
- /* this clears any pending interrupts */
- E1000_READ_REG(&adapter->hw, E1000_ICR);
- igb_enable_intr(adapter);
- E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
- }
+ /* this clears any pending interrupts */
+ E1000_READ_REG(&adapter->hw, E1000_ICR);
+ igb_enable_intr(adapter);
+ E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
/* Don't reset the phy next time init gets called */
adapter->hw.phy.reset_disable = TRUE;
@@ -1329,13 +1231,15 @@ igb_handle_rx(void *context, int pending)
{
struct rx_ring *rxr = context;
struct adapter *adapter = rxr->adapter;
- struct ifnet *ifp = adapter->ifp;
+ u32 loop = IGB_MAX_LOOP;
+ bool more;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- if (igb_rxeof(rxr, adapter->rx_process_limit) != 0)
- /* More to clean, schedule another task */
- taskqueue_enqueue(adapter->tq, &rxr->rx_task);
+ do {
+ more = igb_rxeof(rxr, -1);
+ } while (loop-- && more);
+ /* Reenable this interrupt */
+ E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxr->eims);
}
static void
@@ -1343,20 +1247,24 @@ igb_handle_tx(void *context, int pending)
{
struct tx_ring *txr = context;
struct adapter *adapter = txr->adapter;
- struct ifnet *ifp = adapter->ifp;
+ struct ifnet *ifp = adapter->ifp;
+ u32 loop = IGB_MAX_LOOP;
+ bool more;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IGB_TX_LOCK(txr);
- igb_txeof(txr);
+ IGB_TX_LOCK(txr);
+ do {
+ more = igb_txeof(txr);
+ } while (loop-- && more);
#if __FreeBSD_version >= 800000
- if (!drbr_empty(ifp, txr->br))
- igb_mq_start_locked(ifp, txr, NULL);
+ if (!drbr_empty(ifp, txr->br))
+ igb_mq_start_locked(ifp, txr, NULL);
#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- igb_start_locked(txr, ifp);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ igb_start_locked(txr, ifp);
#endif
- IGB_TX_UNLOCK(txr);
- }
+ IGB_TX_UNLOCK(txr);
+ /* Reenable this interrupt */
+ E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txr->eims);
}
@@ -1416,23 +1324,21 @@ igb_msix_tx(void *arg)
{
struct tx_ring *txr = arg;
struct adapter *adapter = txr->adapter;
- u32 loop = IGB_MAX_LOOP;
bool more;
- ++txr->tx_irq;
- IGB_TX_LOCK(txr);
-
- do {
- more = igb_txeof(txr);
- } while (loop-- && more);
+ E1000_WRITE_REG(&adapter->hw, E1000_EIMC, txr->eims);
+ IGB_TX_LOCK(txr);
+ ++txr->tx_irq;
+ more = igb_txeof(txr);
IGB_TX_UNLOCK(txr);
- /* Schedule a clean task */
- taskqueue_enqueue(adapter->tq, &txr->tx_task);
-
- /* Reenable this interrupt */
- E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txr->eims);
+ /* Schedule a clean task if needed*/
+ if (more)
+ taskqueue_enqueue(txr->tq, &txr->tx_task);
+ else
+ /* Reenable this interrupt */
+ E1000_WRITE_REG(&adapter->hw, E1000_EIMS, txr->eims);
return;
}
@@ -1447,23 +1353,23 @@ igb_msix_rx(void *arg)
{
struct rx_ring *rxr = arg;
struct adapter *adapter = rxr->adapter;
- u32 loop = IGB_MAX_LOOP;
bool more;
+ E1000_WRITE_REG(&adapter->hw, E1000_EIMC, rxr->eims);
+
++rxr->rx_irq;
- do {
- more = igb_rxeof(rxr, adapter->rx_process_limit);
- } while (loop-- && more);
+ more = igb_rxeof(rxr, adapter->rx_process_limit);
/* Update interrupt rate */
if (igb_enable_aim == TRUE)
igb_update_aim(rxr);
/* Schedule another clean */
- taskqueue_enqueue(adapter->tq, &rxr->rx_task);
-
- /* Reenable this interrupt */
- E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxr->eims);
+ if (more)
+ taskqueue_enqueue(rxr->tq, &rxr->rx_task);
+ else
+ /* Reenable this interrupt */
+ E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxr->eims);
return;
}
@@ -1536,11 +1442,8 @@ igb_update_aim(struct rx_ring *rxr)
if (olditr != newitr) {
/* Change interrupt rate */
rxr->eitr_setting = newitr;
- if (adapter->hw.mac.type == e1000_82575)
- newitr |= newitr << 16;
- else
- newitr |= 0x8000000;
- E1000_WRITE_REG(&adapter->hw, E1000_EITR(rxr->me), newitr);
+ E1000_WRITE_REG(&adapter->hw, E1000_EITR(rxr->me),
+ newitr | (newitr << 16));
}
rxr->bytes = 0;
@@ -1789,6 +1692,10 @@ igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
<< E1000_ADVTXD_PAYLEN_SHIFT);
+ /* 82575 needs the queue index added */
+ if (adapter->hw.mac.type == e1000_82575)
+ olinfo_status |= txr->me << 4;
+
/* Set up our transmit descriptors */
i = txr->next_avail_desc;
for (j = 0; j < nsegs; j++) {
@@ -1896,7 +1803,11 @@ igb_set_multi(struct adapter *adapter)
IOCTL_DEBUGOUT("igb_set_multi: begin");
+#if __FreeBSD_version < 800000
+ IF_ADDR_LOCK(ifp);
+#else
if_maddr_rlock(ifp);
+#endif
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
@@ -1908,8 +1819,11 @@ igb_set_multi(struct adapter *adapter)
&mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
mcnt++;
}
+#if __FreeBSD_version < 800000
+ IF_ADDR_UNLOCK(ifp);
+#else
if_maddr_runlock(ifp);
-
+#endif
if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
reg_rctl |= E1000_RCTL_MPE;
@@ -1920,17 +1834,20 @@ igb_set_multi(struct adapter *adapter)
/*********************************************************************
- * Timer routine
- *
- * This routine checks for link status and updates statistics.
+ * Timer routine:
+ * This routine checks for link status,
+ * updates statistics, and does the watchdog.
*
**********************************************************************/
static void
igb_local_timer(void *arg)
{
- struct adapter *adapter = arg;
- struct ifnet *ifp = adapter->ifp;
+ struct adapter *adapter = arg;
+ struct ifnet *ifp = adapter->ifp;
+ device_t dev = adapter->dev;
+ struct tx_ring *txr = adapter->tx_rings;
+
IGB_CORE_LOCK_ASSERT(adapter);
@@ -1940,17 +1857,32 @@ igb_local_timer(void *arg)
if (igb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
igb_print_hw_stats(adapter);
- /*
- * Each second we check the watchdog to
- * protect against hardware hangs.
- */
- igb_watchdog(adapter);
+ /*
+ ** Watchdog: check for time since any descriptor was cleaned
+ */
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ if (txr->watchdog_check == FALSE)
+ continue;
+ if ((ticks - txr->watchdog_time) > IGB_WATCHDOG)
+ goto timeout;
+ }
/* Trigger an RX interrupt on all queues */
E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->rx_mask);
-
callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
+ return;
+timeout:
+ device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
+ device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
+ E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)),
+ E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me)));
+ device_printf(dev,"TX(%d) desc avail = %d,"
+ "Next TX to Clean = %d\n",
+ txr->me, txr->tx_avail, txr->next_to_clean);
+ adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ adapter->watchdog_events++;
+ igb_init_locked(adapter);
}
static void
@@ -2007,7 +1939,7 @@ igb_update_link_status(struct adapter *adapter)
if_link_state_change(ifp, LINK_STATE_DOWN);
/* Turn off watchdogs */
for (int i = 0; i < adapter->num_queues; i++, txr++)
- txr->watchdog_timer = FALSE;
+ txr->watchdog_check = FALSE;
}
}
@@ -2192,8 +2124,6 @@ igb_allocate_msix(struct adapter *adapter)
device_printf(dev, "Failed to register TX handler");
return (error);
}
- /* Make tasklet for deferred handling - one per queue */
- TASK_INIT(&txr->tx_task, 0, igb_handle_tx, txr);
txr->msix = vector;
if (adapter->hw.mac.type == e1000_82575)
txr->eims = E1000_EICR_TX_QUEUE0 << i;
@@ -2205,6 +2135,12 @@ igb_allocate_msix(struct adapter *adapter)
*/
if (adapter->num_queues > 1)
bus_bind_intr(dev, txr->res, i);
+ /* Make tasklet for deferred handling - one per queue */
+ TASK_INIT(&txr->tx_task, 0, igb_handle_tx, txr);
+ txr->tq = taskqueue_create_fast("igb_txq", M_NOWAIT,
+ taskqueue_thread_enqueue, &txr->tq);
+ taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
+ device_get_nameunit(adapter->dev));
}
/* RX Setup */
@@ -2226,8 +2162,6 @@ igb_allocate_msix(struct adapter *adapter)
device_printf(dev, "Failed to register RX handler");
return (error);
}
- /* Make tasklet for deferred handling - one per queue */
- TASK_INIT(&rxr->rx_task, 0, igb_handle_rx, rxr);
rxr->msix = vector;
if (adapter->hw.mac.type == e1000_82575)
rxr->eims = E1000_EICR_RX_QUEUE0 << i;
@@ -2244,6 +2178,13 @@ igb_allocate_msix(struct adapter *adapter)
*/
if (adapter->num_queues > 1)
bus_bind_intr(dev, rxr->res, i);
+
+ /* Make tasklet for deferred handling - one per queue */
+ TASK_INIT(&rxr->rx_task, 0, igb_handle_rx, rxr);
+ rxr->tq = taskqueue_create_fast("igb_rxq", M_NOWAIT,
+ taskqueue_thread_enqueue, &rxr->tq);
+ taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
+ device_get_nameunit(adapter->dev));
}
/* And Link */
@@ -2263,10 +2204,6 @@ igb_allocate_msix(struct adapter *adapter)
return (error);
}
adapter->linkvec = vector;
- adapter->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT,
- taskqueue_thread_enqueue, &adapter->tq);
- taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
- device_get_nameunit(adapter->dev));
return (0);
}
@@ -2278,20 +2215,55 @@ igb_configure_queues(struct adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct tx_ring *txr;
struct rx_ring *rxr;
+ u32 tmp, ivar = 0;
- /* Turn on MSIX */
- /*
- ** 82576 uses IVARs to route MSI/X
- ** interrupts, its not very intuitive,
- ** study the code carefully :)
- */
- if (adapter->hw.mac.type == e1000_82576) {
- u32 ivar = 0;
- /* First turn on the capability */
+ /* First turn on RSS capability */
+ if (adapter->hw.mac.type > e1000_82575)
E1000_WRITE_REG(hw, E1000_GPIE,
- E1000_GPIE_MSIX_MODE |
- E1000_GPIE_EIAME |
+ E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
E1000_GPIE_PBA | E1000_GPIE_NSICR);
+
+ /* Turn on MSIX */
+ switch (adapter->hw.mac.type) {
+ case e1000_82580:
+ /* RX */
+ for (int i = 0; i < adapter->num_queues; i++) {
+ u32 index = i >> 1;
+ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+ rxr = &adapter->rx_rings[i];
+ if (i & 1) {
+ ivar &= 0xFF00FFFF;
+ ivar |= (rxr->msix | E1000_IVAR_VALID) << 16;
+ } else {
+ ivar &= 0xFFFFFF00;
+ ivar |= rxr->msix | E1000_IVAR_VALID;
+ }
+ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+ adapter->eims_mask |= rxr->eims;
+ }
+ /* TX */
+ for (int i = 0; i < adapter->num_queues; i++) {
+ u32 index = i >> 1;
+ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+ txr = &adapter->tx_rings[i];
+ if (i & 1) {
+ ivar &= 0x00FFFFFF;
+ ivar |= (txr->msix | E1000_IVAR_VALID) << 24;
+ } else {
+ ivar &= 0xFFFF00FF;
+ ivar |= (txr->msix | E1000_IVAR_VALID) << 8;
+ }
+ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+ adapter->eims_mask |= txr->eims;
+ }
+
+ /* And for the link interrupt */
+ ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
+ adapter->link_mask = 1 << adapter->linkvec;
+ adapter->eims_mask |= adapter->link_mask;
+ E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
+ break;
+ case e1000_82576:
/* RX */
for (int i = 0; i < adapter->num_queues; i++) {
u32 index = i & 0x7; /* Each IVAR has two entries */
@@ -2328,11 +2300,10 @@ igb_configure_queues(struct adapter *adapter)
adapter->link_mask = 1 << adapter->linkvec;
adapter->eims_mask |= adapter->link_mask;
E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
- } else
- { /* 82575 */
- int tmp;
+ break;
- /* enable MSI-X PBA support*/
+ case e1000_82575:
+ /* enable MSI-X support*/
tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
tmp |= E1000_CTRL_EXT_PBA_CLR;
/* Auto-Mask interrupts upon ICR read. */
@@ -2361,7 +2332,10 @@ igb_configure_queues(struct adapter *adapter)
E1000_EIMS_OTHER);
adapter->link_mask |= E1000_EIMS_OTHER;
adapter->eims_mask |= adapter->link_mask;
+ default:
+ break;
}
+
return;
}
@@ -2444,6 +2418,10 @@ igb_setup_msix(struct adapter *adapter)
device_t dev = adapter->dev;
int rid, want, queues, msgs;
+ /* tuneable override */
+ if (igb_enable_msix == 0)
+ goto msi;
+
/* First try MSI/X */
rid = PCIR_BAR(IGB_MSIX_BAR);
adapter->msix_mem = bus_alloc_resource_any(dev,
@@ -2497,66 +2475,145 @@ msi:
/*********************************************************************
*
- * Initialize the hardware to a configuration
- * as specified by the adapter structure.
+ * Set up a fresh starting state
*
**********************************************************************/
-static int
-igb_hardware_init(struct adapter *adapter)
+static void
+igb_reset(struct adapter *adapter)
{
device_t dev = adapter->dev;
- u32 rx_buffer_size;
-
- INIT_DEBUGOUT("igb_hardware_init: begin");
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_fc_info *fc = &hw->fc;
+ struct ifnet *ifp = adapter->ifp;
+ u32 pba = 0;
+ u16 hwm;
- /* Issue a global reset */
- e1000_reset_hw(&adapter->hw);
+ INIT_DEBUGOUT("igb_reset: begin");
/* Let the firmware know the OS is in control */
igb_get_hw_control(adapter);
/*
+ * Packet Buffer Allocation (PBA)
+ * Writing PBA sets the receive portion of the buffer
+ * the remainder is used for the transmit buffer.
+ */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ pba = E1000_PBA_32K;
+ break;
+ case e1000_82576:
+ pba = E1000_PBA_64K;
+ break;
+ case e1000_82580:
+ pba = E1000_PBA_35K;
+ default:
+ break;
+ }
+
+ /* Special needs in case of Jumbo frames */
+ if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
+ u32 tx_space, min_tx, min_rx;
+ pba = E1000_READ_REG(hw, E1000_PBA);
+ tx_space = pba >> 16;
+ pba &= 0xffff;
+ min_tx = (adapter->max_frame_size +
+ sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
+ min_tx = roundup2(min_tx, 1024);
+ min_tx >>= 10;
+ min_rx = adapter->max_frame_size;
+ min_rx = roundup2(min_rx, 1024);
+ min_rx >>= 10;
+ if (tx_space < min_tx &&
+ ((min_tx - tx_space) < pba)) {
+ pba = pba - (min_tx - tx_space);
+ /*
+ * if short on rx space, rx wins
+ * and must trump tx adjustment
+ */
+ if (pba < min_rx)
+ pba = min_rx;
+ }
+ E1000_WRITE_REG(hw, E1000_PBA, pba);
+ }
+
+ INIT_DEBUGOUT1("igb_init: pba=%dK",pba);
+
+ /*
* These parameters control the automatic generation (Tx) and
* response (Rx) to Ethernet PAUSE frames.
* - High water mark should allow for at least two frames to be
* received after sending an XOFF.
* - Low water mark works best when it is very near the high water mark.
* This allows the receiver to restart by sending XON when it has
- * drained a bit. Here we use an arbitary value of 1500 which will
- * restart after one full frame is pulled from the buffer. There
- * could be several smaller frames in the buffer and if so they will
- * not trigger the XON until their total number reduces the buffer
- * by 1500.
- * - The pause time is fairly large at 1000 x 512ns = 512 usec.
+ * drained a bit.
*/
- if (adapter->hw.mac.type == e1000_82576)
- rx_buffer_size = ((E1000_READ_REG(&adapter->hw,
- E1000_RXPBS) & 0xffff) << 10 );
- else
- rx_buffer_size = ((E1000_READ_REG(&adapter->hw,
- E1000_PBA) & 0xffff) << 10 );
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - 2 * adapter->max_frame_size));
- adapter->hw.fc.high_water = rx_buffer_size -
- roundup2(adapter->max_frame_size, 1024);
- adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
+ if (hw->mac.type < e1000_82576) {
+ fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
+ fc->low_water = fc->high_water - 8;
+ } else {
+ fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
+ fc->low_water = fc->high_water - 16;
+ }
- adapter->hw.fc.pause_time = IGB_FC_PAUSE_TIME;
- adapter->hw.fc.send_xon = TRUE;
+ fc->pause_time = IGB_FC_PAUSE_TIME;
+ fc->send_xon = TRUE;
/* Set Flow control, use the tunable location if sane */
if ((igb_fc_setting >= 0) || (igb_fc_setting < 4))
- adapter->hw.fc.requested_mode = igb_fc_setting;
+ fc->requested_mode = igb_fc_setting;
else
- adapter->hw.fc.requested_mode = e1000_fc_none;
+ fc->requested_mode = e1000_fc_none;
+
+ fc->current_mode = fc->requested_mode;
+
+ /* Issue a global reset */
+ e1000_reset_hw(hw);
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
- if (e1000_init_hw(&adapter->hw) < 0) {
+ if (e1000_init_hw(hw) < 0)
device_printf(dev, "Hardware Initialization Failed\n");
- return (EIO);
- }
- e1000_check_for_link(&adapter->hw);
+ if (hw->mac.type == e1000_82580) {
+ u32 reg;
- return (0);
+ hwm = (pba << 10) - (2 * adapter->max_frame_size);
+ /*
+ * 0x80000000 - enable DMA COAL
+ * 0x10000000 - use L0s as low power
+ * 0x20000000 - use L1 as low power
+ * X << 16 - exit dma coal when rx data exceeds X kB
+ * Y - upper limit to stay in dma coal in units of 32usecs
+ */
+ E1000_WRITE_REG(hw, E1000_DMACR,
+ 0xA0000006 | ((hwm << 6) & 0x00FF0000));
+
+ /* set hwm to PBA - 2 * max frame size */
+ E1000_WRITE_REG(hw, E1000_FCRTC, hwm);
+ /*
+ * This sets the time to wait before requesting transition to
+ * low power state to number of usecs needed to receive 1 512
+ * byte frame at gigabit line rate
+ */
+ E1000_WRITE_REG(hw, E1000_DMCTLX, 4);
+
+ /* free space in tx packet buffer to wake from DMA coal */
+ E1000_WRITE_REG(hw, E1000_DMCTXTH,
+ (20480 - (2 * adapter->max_frame_size)) >> 6);
+
+ /* make low power state decision controlled by DMA coal */
+ reg = E1000_READ_REG(hw, E1000_PCIEMISC);
+ E1000_WRITE_REG(hw, E1000_PCIEMISC,
+ reg | E1000_PCIEMISC_LX_DECISION);
+ }
+
+ E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
+ e1000_get_phy_info(hw);
+ e1000_check_for_link(hw);
+ return;
}
/*********************************************************************
@@ -2585,8 +2642,8 @@ igb_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_transmit = igb_mq_start;
ifp->if_qflush = igb_qflush;
#endif
- IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
- ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
+ IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
+ ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
IFQ_SET_READY(&ifp->if_snd);
ether_ifattach(ifp, adapter->hw.mac.addr);
@@ -2596,6 +2653,9 @@ igb_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
ifp->if_capabilities |= IFCAP_TSO4;
ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+ if (igb_header_split)
+ ifp->if_capabilities |= IFCAP_LRO;
+
ifp->if_capenable = ifp->if_capabilities;
/*
@@ -2866,14 +2926,14 @@ igb_allocate_transmit_buffers(struct tx_ring *txr)
/*
* Setup DMA descriptor areas.
*/
- if ((error = bus_dma_tag_create(NULL, /* parent */
+ if ((error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
IGB_TSO_SIZE, /* maxsize */
IGB_MAX_SCATTER, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
+ IGB_TSO_SEG_SIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
@@ -2977,7 +3037,10 @@ igb_initialize_transmit_units(struct adapter *adapter)
INIT_DEBUGOUT("igb_initialize_transmit_units: begin");
- /* Setup the Base and Length of the Tx Descriptor Rings */
+ /* Setup Transmit Descriptor Base Settings */
+ adapter->txd_cmd = E1000_TXD_CMD_IFCS;
+
+ /* Setup the Tx Descriptor Rings */
for (int i = 0; i < adapter->num_queues; i++, txr++) {
u64 bus_addr = txr->txdma.dma_paddr;
@@ -2996,12 +3059,15 @@ igb_initialize_transmit_units(struct adapter *adapter)
E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)),
E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
- /* Setup Transmit Descriptor Base Settings */
- adapter->txd_cmd = E1000_TXD_CMD_IFCS;
+ txr->watchdog_check = FALSE;
txdctl = E1000_READ_REG(&adapter->hw, E1000_TXDCTL(i));
txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
E1000_WRITE_REG(&adapter->hw, E1000_TXDCTL(i), txdctl);
+
+ /* Default interrupt rate */
+ E1000_WRITE_REG(&adapter->hw, E1000_EITR(txr->msix),
+ igb_ave_latency);
}
/* Program the Transmit Control Register */
@@ -3094,7 +3160,7 @@ igb_free_transmit_buffers(struct tx_ring *txr)
/**********************************************************************
*
* Setup work for hardware segmentation offload (TSO) on
- * adapters using advanced tx descriptors (82575)
+ * adapters using advanced tx descriptors
*
**********************************************************************/
static boolean_t
@@ -3165,6 +3231,9 @@ igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *hdrlen)
/* MSS L4LEN IDX */
mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
+ /* 82575 needs the queue index added */
+ if (adapter->hw.mac.type == e1000_82575)
+ mss_l4len_idx |= txr->me << 4;
TXD->mss_l4len_idx = htole32(mss_l4len_idx);
TXD->seqnum_seed = htole32(0);
@@ -3192,7 +3261,7 @@ igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
struct adapter *adapter = txr->adapter;
struct e1000_adv_tx_context_desc *TXD;
struct igb_tx_buffer *tx_buffer;
- uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+ u32 vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
struct ether_vlan_header *eh;
struct ip *ip = NULL;
struct ip6_hdr *ip6;
@@ -3204,6 +3273,7 @@ igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
offload = FALSE;
+ vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
ctxd = txr->next_avail_desc;
tx_buffer = &txr->tx_buffers[ctxd];
TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];
@@ -3283,11 +3353,15 @@ igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
break;
}
+ /* 82575 needs the queue index added */
+ if (adapter->hw.mac.type == e1000_82575)
+ mss_l4len_idx = txr->me << 4;
+
/* Now copy bits into descriptor */
TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
TXD->seqnum_seed = htole32(0);
- TXD->mss_l4len_idx = htole32(0);
+ TXD->mss_l4len_idx = htole32(mss_l4len_idx);
tx_buffer->m_head = NULL;
tx_buffer->next_eop = -1;
@@ -3314,8 +3388,7 @@ static bool
igb_txeof(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
- int first, last, done, num_avail;
- u32 cleaned = 0;
+ int first, last, done;
struct igb_tx_buffer *tx_buffer;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct ifnet *ifp = adapter->ifp;
@@ -3325,7 +3398,6 @@ igb_txeof(struct tx_ring *txr)
if (txr->tx_avail == adapter->num_tx_desc)
return FALSE;
- num_avail = txr->tx_avail;
first = txr->next_to_clean;
tx_desc = &txr->tx_base[first];
tx_buffer = &txr->tx_buffers[first];
@@ -3343,7 +3415,7 @@ igb_txeof(struct tx_ring *txr)
done = last;
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_POSTREAD);
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
/* We clean the range of the packet */
@@ -3351,7 +3423,7 @@ igb_txeof(struct tx_ring *txr)
tx_desc->upper.data = 0;
tx_desc->lower.data = 0;
tx_desc->buffer_addr = 0;
- ++num_avail; ++cleaned;
+ ++txr->tx_avail;
if (tx_buffer->m_head) {
ifp->if_opackets++;
@@ -3365,6 +3437,7 @@ igb_txeof(struct tx_ring *txr)
tx_buffer->m_head = NULL;
}
tx_buffer->next_eop = -1;
+ txr->watchdog_time = ticks;
if (++first == adapter->num_tx_desc)
first = 0;
@@ -3388,141 +3461,112 @@ igb_txeof(struct tx_ring *txr)
txr->next_to_clean = first;
/*
- * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
- * that it is OK to send packets.
- * If there are no pending descriptors, clear the timeout. Otherwise,
- * if some descriptors have been freed, restart the timeout.
+ * If we have enough room, clear IFF_DRV_OACTIVE
+ * to tell the stack that it is OK to send packets.
*/
- if (num_avail > IGB_TX_CLEANUP_THRESHOLD) {
+ if (txr->tx_avail > IGB_TX_CLEANUP_THRESHOLD) {
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- /* All clean, turn off the timer */
- if (num_avail == adapter->num_tx_desc) {
- txr->watchdog_timer = 0;
- txr->tx_avail = num_avail;
+ /* All clean, turn off the watchdog */
+ if (txr->tx_avail == adapter->num_tx_desc) {
+ txr->watchdog_check = FALSE;
return FALSE;
}
}
- /* Some cleaned, reset the timer */
- if (cleaned)
- txr->watchdog_timer = IGB_TX_TIMEOUT;
- txr->tx_avail = num_avail;
return TRUE;
}
/*********************************************************************
*
- * Setup descriptor buffer(s) from system mbuf buffer pools.
- * i - designates the ring index
- * clean - tells the function whether to update
- * the header, the packet buffer, or both.
+ * Refresh mbuf buffers for a range of descriptors
*
**********************************************************************/
static int
-igb_get_buf(struct rx_ring *rxr, int i, u8 clean)
+igb_get_buf(struct rx_ring *rxr, int first, int limit)
{
struct adapter *adapter = rxr->adapter;
- struct mbuf *mh, *mp;
bus_dma_segment_t seg[2];
+ struct igb_rx_buf *rxbuf;
+ struct mbuf *mh, *mp;
bus_dmamap_t map;
- struct igb_rx_buffer *rx_buffer;
- int error, nsegs;
- int merr = 0;
+ int i, nsegs, error;
+ i = first;
+ while (i != limit) {
+ rxbuf = &rxr->rx_buffers[i];
- rx_buffer = &rxr->rx_buffers[i];
+ if (rxbuf->m_head == NULL) {
+ mh = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (mh == NULL)
+ goto failure;
+ } else /* reuse */
+ mh = rxbuf->m_head;
+
+ mh->m_len = MHLEN;
+ mh->m_flags |= M_PKTHDR;
+
+ if (rxbuf->m_pack == NULL) {
+ mp = m_getjcl(M_DONTWAIT, MT_DATA,
+ M_PKTHDR, adapter->rx_mbuf_sz);
+ if (mp == NULL)
+ goto failure;
+ mp->m_len = adapter->rx_mbuf_sz;
+ mp->m_flags &= ~M_PKTHDR;
+ } else { /* reusing */
+ mp = rxbuf->m_pack;
+ mp->m_len = adapter->rx_mbuf_sz;
+ mp->m_flags &= ~M_PKTHDR;
+ }
- /* First get our header and payload mbuf */
- if (clean & IGB_CLEAN_HEADER) {
- mh = m_gethdr(M_DONTWAIT, MT_DATA);
- if (mh == NULL)
- goto remap;
- } else /* reuse */
- mh = rxr->rx_buffers[i].m_head;
+ /*
+ ** Need to create a chain for the following
+ ** dmamap call at this point.
+ */
+ mh->m_next = mp;
+ mh->m_pkthdr.len = mh->m_len + mp->m_len;
- mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
+ rxr->spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0)
+ panic("igb_get_buf: dmamap load failure\n");
- if (clean & IGB_CLEAN_PAYLOAD) {
- mp = m_getjcl(M_DONTWAIT, MT_DATA,
- M_PKTHDR, adapter->rx_mbuf_sz);
- if (mp == NULL)
- goto remap;
- mp->m_len = adapter->rx_mbuf_sz;
- mp->m_flags &= ~M_PKTHDR;
- } else { /* reusing */
- mp = rxr->rx_buffers[i].m_pack;
- mp->m_len = adapter->rx_mbuf_sz;
- mp->m_flags &= ~M_PKTHDR;
- }
- /*
- ** Need to create a chain for the following
- ** dmamap call at this point.
- */
- mh->m_next = mp;
- mh->m_pkthdr.len = mh->m_len + mp->m_len;
-
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
- rxr->rx_spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("GET BUF: dmamap load failure - %d\n", error);
- m_free(mh);
- return (error);
+ /* Unload old mapping and update buffer struct */
+ if (rxbuf->m_head != NULL)
+ bus_dmamap_unload(rxr->rxtag, rxbuf->map);
+ map = rxbuf->map;
+ rxbuf->map = rxr->spare_map;
+ rxr->spare_map = map;
+ rxbuf->m_head = mh;
+ rxbuf->m_pack = mp;
+ bus_dmamap_sync(rxr->rxtag,
+ rxbuf->map, BUS_DMASYNC_PREREAD);
+
+ /* Update descriptor */
+ rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
+ rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
+
+ /* Calculate next index */
+ if (++i == adapter->num_rx_desc)
+ i = 0;
}
- /* Unload old mapping and update buffer struct */
- if (rx_buffer->m_head != NULL)
- bus_dmamap_unload(rxr->rxtag, rx_buffer->map);
- map = rx_buffer->map;
- rx_buffer->map = rxr->rx_spare_map;
- rxr->rx_spare_map = map;
- rx_buffer->m_head = mh;
- rx_buffer->m_pack = mp;
- bus_dmamap_sync(rxr->rxtag,
- rx_buffer->map, BUS_DMASYNC_PREREAD);
-
- /* Update descriptor */
- rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
- rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
-
return (0);
+failure:
/*
- ** If we get here, we have an mbuf resource
- ** issue, so we discard the incoming packet
- ** and attempt to reuse existing mbufs next
- ** pass thru the ring, but to do so we must
- ** fix up the descriptor which had the address
- ** clobbered with writeback info.
+ ** It's unfortunate to have to panic, but
+ ** with the new design I see no other
+ ** graceful failure mode, this is ONLY
+ ** called in the RX clean path, and the
+ ** old mbuf has been used, it MUST be
+ ** refreshed. This should be avoided by
+ ** proper configuration. -jfv
*/
-remap:
- adapter->mbuf_header_failed++;
- merr = ENOBUFS;
- /* Is there a reusable buffer? */
- mh = rxr->rx_buffers[i].m_head;
- if (mh == NULL) /* Nope, init error */
- return (merr);
- mp = rxr->rx_buffers[i].m_pack;
- if (mp == NULL) /* Nope, init error */
- return (merr);
- /* Get our old mapping */
- rx_buffer = &rxr->rx_buffers[i];
- error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
- rx_buffer->map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- /* We really have a problem */
- m_free(mh);
- return (error);
- }
- /* Now fix the descriptor as needed */
- rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
- rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
- return (merr);
+ panic("igb_get_buf: ENOBUFS\n");
}
-
/*********************************************************************
*
* Allocate memory for rx_buffer structures. Since we use one
@@ -3536,12 +3580,12 @@ igb_allocate_receive_buffers(struct rx_ring *rxr)
{
struct adapter *adapter = rxr->adapter;
device_t dev = adapter->dev;
- struct igb_rx_buffer *rxbuf;
+ struct igb_rx_buf *rxbuf;
int i, bsize, error;
- bsize = sizeof(struct igb_rx_buffer) * adapter->num_rx_desc;
+ bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc;
if (!(rxr->rx_buffers =
- (struct igb_rx_buffer *) malloc(bsize,
+ (struct igb_rx_buf *) malloc(bsize,
M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate rx_buffer memory\n");
error = ENOMEM;
@@ -3553,7 +3597,7 @@ igb_allocate_receive_buffers(struct rx_ring *rxr)
** with packet split (hence the two segments, even though
** it may not always use this.
*/
- if ((error = bus_dma_tag_create(NULL, /* parent */
+ if ((error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
@@ -3571,7 +3615,7 @@ igb_allocate_receive_buffers(struct rx_ring *rxr)
/* Create the spare map (used by getbuf) */
error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
- &rxr->rx_spare_map);
+ &rxr->spare_map);
if (error) {
device_printf(dev,
"%s: bus_dmamap_create header spare failed: %d\n",
@@ -3608,15 +3652,14 @@ igb_setup_receive_ring(struct rx_ring *rxr)
struct adapter *adapter;
struct ifnet *ifp;
device_t dev;
- struct igb_rx_buffer *rxbuf;
+ struct igb_rx_buf *rxbuf;
+ bus_dma_segment_t seg[2];
struct lro_ctrl *lro = &rxr->lro;
- int j, rsize;
+ int rsize, nsegs, error = 0;
adapter = rxr->adapter;
dev = adapter->dev;
ifp = adapter->ifp;
- rxr->lro_enabled = FALSE;
- rxr->hdr_split = FALSE;
/* Clear the ring contents */
rsize = roundup2(adapter->num_rx_desc *
@@ -3639,20 +3682,48 @@ igb_setup_receive_ring(struct rx_ring *rxr)
rxbuf->m_pack = NULL;
}
- /* Next replenish the ring */
- for (j = 0; j < adapter->num_rx_desc; j++) {
- if (igb_get_buf(rxr, j, IGB_CLEAN_BOTH) == ENOBUFS) {
- rxr->rx_buffers[j].m_head = NULL;
- rxr->rx_buffers[j].m_pack = NULL;
- rxr->rx_base[j].read.hdr_addr = 0;
- rxr->rx_base[j].read.pkt_addr = 0;
- goto fail;
- }
+ /* Now replenish the mbufs */
+ for (int j = 0; j != adapter->num_rx_desc; ++j) {
+
+ rxbuf = &rxr->rx_buffers[j];
+ rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (rxbuf->m_head == NULL)
+ panic("RX ring hdr initialization failed!\n");
+ rxbuf->m_head->m_len = MHLEN;
+ rxbuf->m_head->m_flags |= M_PKTHDR;
+ rxbuf->m_head->m_pkthdr.len = rxbuf->m_head->m_len;
+
+ rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
+ M_PKTHDR, adapter->rx_mbuf_sz);
+ if (rxbuf->m_pack == NULL)
+ panic("RX ring pkt initialization failed!\n");
+ rxbuf->m_pack->m_len = adapter->rx_mbuf_sz;
+ rxbuf->m_head->m_next = rxbuf->m_pack;
+ rxbuf->m_head->m_pkthdr.len += rxbuf->m_pack->m_len;
+
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
+ rxbuf->map, rxbuf->m_head, seg,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0)
+ panic("RX ring dma initialization failed!\n");
+ bus_dmamap_sync(rxr->rxtag,
+ rxbuf->map, BUS_DMASYNC_PREREAD);
+
+ /* Update descriptor */
+ rxr->rx_base[j].read.hdr_addr = htole64(seg[0].ds_addr);
+ rxr->rx_base[j].read.pkt_addr = htole64(seg[1].ds_addr);
}
/* Setup our descriptor indices */
rxr->next_to_check = 0;
rxr->last_cleaned = 0;
+ rxr->lro_enabled = FALSE;
+
+ if (igb_header_split)
+ rxr->hdr_split = TRUE;
+ else
+ ifp->if_capabilities &= ~IFCAP_LRO;
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -3663,19 +3734,17 @@ igb_setup_receive_ring(struct rx_ring *rxr)
** is enabled, since so often they
** are undesireable in similar setups.
*/
- if (ifp->if_capenable & IFCAP_LRO) {
+ if ((ifp->if_capenable & IFCAP_LRO) && (rxr->hdr_split)) {
int err = tcp_lro_init(lro);
- if (err) {
- device_printf(dev,"LRO Initialization failed!\n");
- goto fail;
- }
+ if (err)
+ panic("LRO Initialization failed!\n");
INIT_DEBUGOUT("RX LRO Initialized\n");
rxr->lro_enabled = TRUE;
- rxr->hdr_split = TRUE;
lro->ifp = adapter->ifp;
}
return (0);
+#if 0
fail:
/*
* We need to clean up any buffers allocated
@@ -3692,6 +3761,7 @@ fail:
}
}
return (ENOBUFS);
+#endif
}
/*********************************************************************
@@ -3720,7 +3790,7 @@ fail:
rxr = adapter->rx_rings;
for (--i; i > 0; i--, rxr++) {
for (j = 0; j < adapter->num_rx_desc; j++) {
- struct igb_rx_buffer *rxbuf;
+ struct igb_rx_buf *rxbuf;
rxbuf = &rxr->rx_buffers[j];
if (rxbuf->m_head != NULL) {
bus_dmamap_sync(rxr->rxtag, rxbuf->map,
@@ -3806,6 +3876,11 @@ igb_initialize_receive_units(struct adapter *adapter)
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
E1000_WRITE_REG(&adapter->hw, E1000_RXDCTL(i), rxdctl);
+
+ /* Initial RX interrupt moderation */
+ rxr->eitr_setting = igb_ave_latency;
+ E1000_WRITE_REG(&adapter->hw,
+ E1000_EITR(rxr->msix), igb_ave_latency);
}
/*
@@ -3927,14 +4002,14 @@ igb_free_receive_structures(struct adapter *adapter)
static void
igb_free_receive_buffers(struct rx_ring *rxr)
{
- struct adapter *adapter = rxr->adapter;
- struct igb_rx_buffer *rx_buffer;
+ struct adapter *adapter = rxr->adapter;
+ struct igb_rx_buf *rx_buffer;
INIT_DEBUGOUT("free_receive_structures: begin");
- if (rxr->rx_spare_map) {
- bus_dmamap_destroy(rxr->rxtag, rxr->rx_spare_map);
- rxr->rx_spare_map = NULL;
+ if (rxr->spare_map) {
+ bus_dmamap_destroy(rxr->rxtag, rxr->spare_map);
+ rxr->spare_map = NULL;
}
/* Cleanup any existing buffers */
@@ -3980,20 +4055,20 @@ igb_free_receive_buffers(struct rx_ring *rxr)
*
* Return TRUE if more to clean, FALSE otherwise
*********************************************************************/
+
static bool
igb_rxeof(struct rx_ring *rxr, int count)
{
- struct adapter *adapter = rxr->adapter;
- struct ifnet *ifp;
+ struct adapter *adapter = rxr->adapter;
+ struct ifnet *ifp = adapter->ifp;
struct lro_ctrl *lro = &rxr->lro;
struct lro_entry *queued;
- int i;
+ int i, processed = 0;
u32 staterr;
union e1000_adv_rx_desc *cur;
IGB_RX_LOCK(rxr);
- ifp = adapter->ifp;
i = rxr->next_to_check;
cur = &rxr->rx_base[i];
staterr = cur->wb.upper.status_error;
@@ -4007,35 +4082,56 @@ igb_rxeof(struct rx_ring *rxr, int count)
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_POSTREAD);
- /* Main clean loop */
- while ((staterr & E1000_RXD_STAT_DD) &&
- (count != 0) &&
+ while ((staterr & E1000_RXD_STAT_DD) && (count != 0) &&
(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- struct mbuf *sendmp, *mh, *mp;
- u16 hlen, plen, hdr, ptype, len_adj, vtag;
- u8 dopayload, accept_frame, eop;
-
- accept_frame = 1;
- hlen = plen = len_adj = vtag = 0;
- sendmp = mh = mp = NULL;
- ptype = (u16)(cur->wb.lower.lo_dword.data >> 4);
+ struct mbuf *sendmp, *mh, *mp, *nh, *np;
+ struct igb_rx_buf *nxtbuf;
+ u32 ptype;
+ u16 hlen, plen, hdr, nextp, vtag;
+ bool accept_frame, eop, sctp = FALSE;
+
+
+ accept_frame = TRUE;
+ hlen = plen = nextp = 0;
+ sendmp = mh = mp = nh = np = NULL;
+
+ ptype = (le32toh(cur->wb.lower.lo_dword.data) &
+ IGB_PKTTYPE_MASK);
+ if (((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0) &&
+ ((ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0))
+ sctp = TRUE;
/* Sync the buffers */
bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[i].map,
BUS_DMASYNC_POSTREAD);
+ mh = rxr->rx_buffers[i].m_head;
+ mp = rxr->rx_buffers[i].m_pack;
+ vtag = le16toh(cur->wb.upper.vlan);
+ eop = ((staterr & E1000_RXD_STAT_EOP) != 0);
+
+ /* Get the next descriptor we will process */
+ if (!eop) {
+ nextp = i + 1;
+ if (nextp == adapter->num_rx_desc)
+ nextp = 0;
+ nxtbuf = &rxr->rx_buffers[nextp];
+ prefetch(nxtbuf);
+ }
/*
** The way the hardware is configured to
** split, it will ONLY use the header buffer
** when header split is enabled, otherwise we
- ** get normal behavior, ie, both header and
- ** payload are DMA'd into the payload buffer.
+ ** get legacy behavior, ie, both header and
+ ** payload are DMA'd into JUST the payload buffer.
+ **
+ ** Rather than using the fmp/lmp global pointers
+ ** we now keep the head of a packet chain in the
+ ** m_nextpkt pointer and pass this along from one
+ ** descriptor to the next, until we get EOP.
**
- ** The fmp test is to catch the case where a
- ** packet spans multiple descriptors, in that
- ** case only the first header is valid.
*/
- if ((rxr->hdr_split) && (rxr->fmp == NULL)){
+ if ((rxr->hdr_split) && (mh->m_nextpkt == NULL)) {
hdr = le16toh(cur->
wb.lower.lo_dword.hs_rss.hdr_info);
hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
@@ -4043,183 +4139,136 @@ igb_rxeof(struct rx_ring *rxr, int count)
if (hlen > IGB_HDR_BUF)
hlen = IGB_HDR_BUF;
plen = le16toh(cur->wb.upper.length);
- /* Handle the header mbuf */
- mh = rxr->rx_buffers[i].m_head;
mh->m_len = hlen;
- dopayload = IGB_CLEAN_HEADER;
+ mh->m_flags |= M_PKTHDR;
+ mh->m_next = NULL;
+ mh->m_pkthdr.len = mh->m_len;
+ /* Null this so getbuf replenishes */
+ rxr->rx_buffers[i].m_head = NULL;
/*
** Get the payload length, this
** could be zero if its a small
** packet.
*/
if (plen) {
- mp = rxr->rx_buffers[i].m_pack;
mp->m_len = plen;
mp->m_next = NULL;
mp->m_flags &= ~M_PKTHDR;
mh->m_next = mp;
- mh->m_flags |= M_PKTHDR;
- dopayload = IGB_CLEAN_BOTH;
+ mh->m_pkthdr.len += mp->m_len;
+ /* Null this so getbuf replenishes */
+ rxr->rx_buffers[i].m_pack = NULL;
rxr->rx_split_packets++;
- } else { /* small packets */
- mh->m_flags &= ~M_PKTHDR;
- mh->m_next = NULL;
+ }
+ /* Setup the forward chain */
+ if (eop == 0) {
+ nh = rxr->rx_buffers[nextp].m_head;
+ np = rxr->rx_buffers[nextp].m_pack;
+ nh->m_nextpkt = mh;
+ if (plen)
+ mp->m_next = np;
+ else
+ mh->m_next = np;
+ } else {
+ sendmp = mh;
+ if (staterr & E1000_RXD_STAT_VP) {
+ sendmp->m_pkthdr.ether_vtag = vtag;
+ sendmp->m_flags |= M_VLANTAG;
+ }
}
} else {
/*
** Either no header split, or a
** secondary piece of a fragmented
- ** split packet.
- */
- mh = rxr->rx_buffers[i].m_pack;
- mh->m_flags |= M_PKTHDR;
- mh->m_len = le16toh(cur->wb.upper.length);
- dopayload = IGB_CLEAN_PAYLOAD;
- }
-
- if (staterr & E1000_RXD_STAT_EOP) {
- count--;
- eop = 1;
- /*
- ** Strip CRC and account for frag
+ ** packet.
*/
- if (mp) {
- if (mp->m_len < ETHER_CRC_LEN) {
- /* a frag, how much is left? */
- len_adj = ETHER_CRC_LEN - mp->m_len;
- mp->m_len = 0;
- } else
- mp->m_len -= ETHER_CRC_LEN;
- } else { /* not split */
- if (mh->m_len < ETHER_CRC_LEN) {
- len_adj = ETHER_CRC_LEN - mh->m_len;
- mh->m_len = 0;
- } else
- mh->m_len -= ETHER_CRC_LEN;
+ mp->m_len = le16toh(cur->wb.upper.length);
+ rxr->rx_buffers[i].m_pack = NULL;
+ /* stored head pointer */
+ sendmp = mh->m_nextpkt;
+ if (sendmp != NULL) {
+ sendmp->m_pkthdr.len += mp->m_len;
+ sendmp->m_nextpkt = NULL;
+ } else {
+ /* first desc of a non-ps chain */
+ sendmp = mp;
+ sendmp->m_flags |= M_PKTHDR;
+ sendmp->m_pkthdr.len = mp->m_len;
+ if (staterr & E1000_RXD_STAT_VP) {
+ sendmp->m_pkthdr.ether_vtag = vtag;
+ sendmp->m_flags |= M_VLANTAG;
+ }
+ }
+ /* Carry head forward */
+ if (eop == 0) {
+ nh = rxr->rx_buffers[nextp].m_head;
+ np = rxr->rx_buffers[nextp].m_pack;
+ nh->m_nextpkt = sendmp;
+ mp->m_next = np;
+ sendmp = NULL;
}
- } else
- eop = 0;
+ mh->m_nextpkt = NULL;
+ }
if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)
- accept_frame = 0;
-#ifdef IGB_IEEE1588
- This linux code needs to be converted to work here
- -----------------------------------------------------
- if (unlikely(staterr & E1000_RXD_STAT_TS)) {
- u64 regval;
- u64 ns;
-// Create an mtag and set it up
- struct skb_shared_hwtstamps *shhwtstamps =
- skb_hwtstamps(skb);
-
- rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
- "igb: no RX time stamp available for time stamped packet");
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
-// Do time conversion from the register
- ns = timecounter_cyc2time(&adapter->clock, regval);
- clocksync_update(&adapter->sync, ns);
- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
- shhwtstamps->hwtstamp = ns_to_ktime(ns);
- shhwtstamps->syststamp =
- clocksync_hw2sys(&adapter->sync, ns);
- }
-#endif
- if (accept_frame) {
- /*
- ** get_buf will overwrite the writeback
- ** descriptor so save the VLAN tag now.
- */
- vtag = le16toh(cur->wb.upper.vlan);
- if (igb_get_buf(rxr, i, dopayload) != 0) {
- ifp->if_iqdrops++;
- goto discard;
- }
- /* Initial frame - setup */
- if (rxr->fmp == NULL) {
- mh->m_flags |= M_PKTHDR;
- mh->m_pkthdr.len = mh->m_len;
- rxr->fmp = mh; /* Store the first mbuf */
- rxr->lmp = mh;
- if (mp) { /* Add payload if split */
- mh->m_pkthdr.len += mp->m_len;
- rxr->lmp = mh->m_next;
- }
- } else {
- /* Chain mbuf's together */
- mh->m_flags &= ~M_PKTHDR;
- rxr->lmp->m_next = mh;
- rxr->lmp = rxr->lmp->m_next;
- rxr->fmp->m_pkthdr.len += mh->m_len;
- /* Adjust for CRC frag */
- if (len_adj) {
- rxr->lmp->m_len -= len_adj;
- rxr->fmp->m_pkthdr.len -= len_adj;
- }
- }
+ accept_frame = FALSE;
+ if (accept_frame) {
+ ++processed;
if (eop) {
- bool sctp = ((ptype & 0x40) != 0);
- rxr->fmp->m_pkthdr.rcvif = ifp;
+ --count;
+ sendmp->m_pkthdr.rcvif = ifp;
ifp->if_ipackets++;
rxr->rx_packets++;
/* capture data for AIM */
- rxr->bytes += rxr->fmp->m_pkthdr.len;
- rxr->rx_bytes += rxr->fmp->m_pkthdr.len;
-
- igb_rx_checksum(staterr, rxr->fmp, sctp);
- if (staterr & E1000_RXD_STAT_VP) {
- rxr->fmp->m_pkthdr.ether_vtag = vtag;
- rxr->fmp->m_flags |= M_VLANTAG;
- }
+ rxr->bytes += sendmp->m_pkthdr.len;
+ rxr->rx_bytes += rxr->bytes;
+ if (ifp->if_capenable & IFCAP_RXCSUM)
+ igb_rx_checksum(staterr, sendmp, sctp);
+ else
+ sendmp->m_pkthdr.csum_flags = 0;
#if __FreeBSD_version >= 800000
- rxr->fmp->m_pkthdr.flowid = curcpu;
- rxr->fmp->m_flags |= M_FLOWID;
+ /* Get the RSS Hash */
+ sendmp->m_pkthdr.flowid =
+ le32toh(cur->wb.lower.hi_dword.rss);
+ curcpu;
+ sendmp->m_flags |= M_FLOWID;
#endif
- sendmp = rxr->fmp;
- rxr->fmp = NULL;
- rxr->lmp = NULL;
}
} else {
ifp->if_ierrors++;
-discard:
/* Reuse loaded DMA map and just update mbuf chain */
- if (hlen) {
- mh = rxr->rx_buffers[i].m_head;
- mh->m_len = MHLEN;
- mh->m_next = NULL;
- }
- mp = rxr->rx_buffers[i].m_pack;
+ mh->m_len = MHLEN;
+ mh->m_flags |= M_PKTHDR;
+ mh->m_next = NULL;
mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
mp->m_data = mp->m_ext.ext_buf;
+ if (mp->m_next) { /* Free chain */
+ sendmp = mp->m_next;
+ m_free(sendmp);
+ }
mp->m_next = NULL;
if (adapter->max_frame_size <=
(MCLBYTES - ETHER_ALIGN))
m_adj(mp, ETHER_ALIGN);
- if (rxr->fmp != NULL) {
- /* handles the whole chain */
- m_freem(rxr->fmp);
- rxr->fmp = NULL;
- rxr->lmp = NULL;
- }
sendmp = NULL;
}
-
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- rxr->last_cleaned = i; /* For updating tail */
-
- /* Advance our pointers to the next descriptor. */
+ rxr->last_cleaned = i; /* for updating tail */
if (++i == adapter->num_rx_desc)
i = 0;
-
+ /* Prefetch next descriptor */
+ cur = &rxr->rx_base[i];
+ prefetch(cur);
+
/*
- ** Note that we hold the RX lock thru
- ** the following call so this ring's
- ** next_to_check is not gonna change.
+ ** Now send up to the stack,
+ ** note that the RX lock is
+ ** held thru this call.
*/
- if (sendmp != NULL) {
+ if (sendmp != NULL) {
/*
** Send to the stack if:
** - LRO not enabled, or
@@ -4228,17 +4277,31 @@ discard:
*/
if ((!rxr->lro_enabled) ||
((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0))))
- (*ifp->if_input)(ifp, sendmp);
+ (*ifp->if_input)(ifp, sendmp);
}
- /* Get the next descriptor */
- cur = &rxr->rx_base[i];
+ /* Replenish every 4 max */
+ if (processed == 4) {
+ igb_get_buf(rxr, rxr->next_to_check, i);
+ processed = 0;
+ E1000_WRITE_REG(&adapter->hw,
+ E1000_RDT(rxr->me), rxr->last_cleaned);
+ rxr->next_to_check = i;
+ }
+
+ /* Next iteration */
staterr = cur->wb.upper.status_error;
}
- rxr->next_to_check = i;
- /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
- E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), rxr->last_cleaned);
+ /* Replenish remaining */
+ if (processed != 0) {
+ igb_get_buf(rxr, rxr->next_to_check, i);
+ processed = 0;
+ E1000_WRITE_REG(&adapter->hw,
+ E1000_RDT(rxr->me), rxr->last_cleaned);
+ }
+
+ rxr->next_to_check = i;
/*
* Flush any outstanding LRO work
@@ -4252,8 +4315,8 @@ discard:
IGB_RX_UNLOCK(rxr);
/*
- ** We still have cleaning to do?
- ** Schedule another interrupt if so.
+ ** Leaving with more to clean?
+ ** then schedule another interrupt.
*/
if (staterr & E1000_RXD_STAT_DD) {
E1000_WRITE_REG(&adapter->hw, E1000_EICS, rxr->eims);
@@ -4263,7 +4326,6 @@ discard:
return FALSE;
}
-
/*********************************************************************
*
* Verify that the hardware indicated that the checksum is valid.
@@ -4302,7 +4364,7 @@ igb_rx_checksum(u32 staterr, struct mbuf *mp, bool sctp)
/* Did it pass? */
if (!(errors & E1000_RXD_ERR_TCPE)) {
mp->m_pkthdr.csum_flags |= type;
- if (!sctp)
+ if (sctp == FALSE)
mp->m_pkthdr.csum_data = htons(0xffff);
}
}
@@ -4319,7 +4381,7 @@ igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
struct adapter *adapter = ifp->if_softc;
u32 index, bit;
- if (ifp->if_softc != arg) /* Not our event */
+ if (ifp->if_softc != arg) /* Not our event */
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
@@ -4343,7 +4405,7 @@ igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
struct adapter *adapter = ifp->if_softc;
u32 index, bit;
- if (ifp->if_softc != arg)
+ if (ifp->if_softc != arg)
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
diff --git a/sys/dev/e1000/if_igb.h b/sys/dev/e1000/if_igb.h
index ddc4d8a..ce5b726 100644
--- a/sys/dev/e1000/if_igb.h
+++ b/sys/dev/e1000/if_igb.h
@@ -48,7 +48,7 @@
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define IGB_MIN_TXD 80
-#define IGB_DEFAULT_TXD 256
+#define IGB_DEFAULT_TXD 1024
#define IGB_MAX_TXD 4096
/*
@@ -63,7 +63,7 @@
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define IGB_MIN_RXD 80
-#define IGB_DEFAULT_RXD 256
+#define IGB_DEFAULT_RXD 1024
#define IGB_MAX_RXD 4096
/*
@@ -128,7 +128,7 @@
/*
* This parameter controls the duration of transmit watchdog timer.
*/
-#define IGB_TX_TIMEOUT 5 /* set to 5 seconds */
+#define IGB_WATCHDOG (10 * hz)
/*
* This parameter controls when the driver calls the routine to reclaim
@@ -225,6 +225,7 @@
#define IGB_TSO_SIZE (65535 + sizeof(struct ether_vlan_header))
#define IGB_TSO_SEG_SIZE 4096 /* Max dma segment size */
#define IGB_HDR_BUF 128
+#define IGB_PKTTYPE_MASK 0x0000FFF0
#define ETH_ZLEN 60
#define ETH_ADDR_LEN 6
@@ -235,11 +236,6 @@
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif
-/* Header split codes for get_buf */
-#define IGB_CLEAN_HEADER 1
-#define IGB_CLEAN_PAYLOAD 2
-#define IGB_CLEAN_BOTH 3
-
/*
* Interrupt Moderation parameters
*/
@@ -280,6 +276,7 @@ struct tx_ring {
struct igb_dma_alloc txdma; /* bus_dma glue for tx desc */
struct e1000_tx_desc *tx_base;
struct task tx_task; /* cleanup tasklet */
+ struct taskqueue *tq;
u32 next_avail_desc;
u32 next_to_clean;
volatile u16 tx_avail;
@@ -291,7 +288,8 @@ struct tx_ring {
struct resource *res;
void *tag;
- u32 watchdog_timer;
+ bool watchdog_check;
+ int watchdog_time;
u64 no_desc_avail;
u64 tx_irq;
u64 tx_packets;
@@ -311,13 +309,14 @@ struct rx_ring {
bool lro_enabled;
bool hdr_split;
struct task rx_task; /* cleanup tasklet */
+ struct taskqueue *tq;
struct mtx rx_mtx;
char mtx_name[16];
u32 last_cleaned;
u32 next_to_check;
- struct igb_rx_buffer *rx_buffers;
+ struct igb_rx_buf *rx_buffers;
bus_dma_tag_t rxtag; /* dma tag for tx */
- bus_dmamap_t rx_spare_map;
+ bus_dmamap_t spare_map;
/*
* First/last mbuf pointers, for
* collecting multisegment RX packets.
@@ -364,7 +363,6 @@ struct adapter {
int min_frame_size;
struct mtx core_mtx;
int igb_insert_vlan_header;
- struct task link_task;
struct task rxtx_task;
struct taskqueue *tq; /* private task queue */
u16 num_queues;
@@ -445,7 +443,7 @@ struct igb_tx_buffer {
bus_dmamap_t map; /* bus_dma map for packet */
};
-struct igb_rx_buffer {
+struct igb_rx_buf {
struct mbuf *m_head;
struct mbuf *m_pack;
bus_dmamap_t map; /* bus_dma map for packet */
@@ -454,12 +452,12 @@ struct igb_rx_buffer {
#define IGB_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "IGB Core Lock", MTX_DEF)
#define IGB_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
-#define IGB_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
-#define IGB_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
+#define IGB_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
+#define IGB_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
#define IGB_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
-#define IGB_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
-#define IGB_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
-#define IGB_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
+#define IGB_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
+#define IGB_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
+#define IGB_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
#define IGB_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
#define IGB_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
#define IGB_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
OpenPOWER on IntegriCloud