Diffstat (limited to 'drivers/net/e1000e')
-rw-r--r--  drivers/net/e1000e/82571.c   | 273
-rw-r--r--  drivers/net/e1000e/defines.h |   1
-rw-r--r--  drivers/net/e1000e/e1000.h   |   4
-rw-r--r--  drivers/net/e1000e/ethtool.c |   3
-rw-r--r--  drivers/net/e1000e/hw.h      |  11
-rw-r--r--  drivers/net/e1000e/ich8lan.c |  17
-rw-r--r--  drivers/net/e1000e/lib.c     |  66
-rw-r--r--  drivers/net/e1000e/netdev.c  | 261
8 files changed, 407 insertions, 229 deletions
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 0890162..6c01a207 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -40,6 +40,7 @@
* 82573E Gigabit Ethernet Controller (Copper)
* 82573L Gigabit Ethernet Controller
* 82574L Gigabit Network Connection
+ * 82583V Gigabit Network Connection
*/
#include <linux/netdevice.h>
@@ -61,6 +62,7 @@
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
+static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
@@ -99,6 +101,7 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->type = e1000_phy_m88;
break;
case e1000_82574:
+ case e1000_82583:
phy->type = e1000_phy_bm;
break;
default:
@@ -121,6 +124,7 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
return -E1000_ERR_PHY;
break;
case e1000_82574:
+ case e1000_82583:
if (phy->id != BME1000_E_PHY_ID_R2)
return -E1000_ERR_PHY;
break;
@@ -164,6 +168,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
+ case e1000_82583:
if (((eecd >> 15) & 0x3) == 0x3) {
nvm->type = e1000_nvm_flash_hw;
nvm->word_size = 2048;
@@ -250,7 +255,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
case e1000_media_type_internal_serdes:
func->setup_physical_interface =
e1000_setup_fiber_serdes_link_82571;
- func->check_for_link = e1000e_check_for_serdes_link;
+ func->check_for_link = e1000_check_for_serdes_link_82571;
func->get_link_up_info =
e1000e_get_speed_and_duplex_fiber_serdes;
break;
@@ -261,6 +266,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_82574:
+ case e1000_82583:
func->check_mng_mode = e1000_check_mng_mode_82574;
func->led_on = e1000_led_on_82574;
break;
@@ -374,6 +380,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
return e1000e_get_phy_id(hw);
break;
case e1000_82574:
+ case e1000_82583:
ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
if (ret_val)
return ret_val;
@@ -463,8 +470,15 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- if (hw->mac.type != e1000_82573 && hw->mac.type != e1000_82574)
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ break;
+ default:
ret_val = e1000e_acquire_nvm(hw);
+ break;
+ }
if (ret_val)
e1000_put_hw_semaphore_82571(hw);
@@ -504,6 +518,7 @@ static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
+ case e1000_82583:
ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
break;
case e1000_82571:
@@ -778,7 +793,10 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset.
*/
- if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
extcnf_ctrl = er32(EXTCNF_CTRL);
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
@@ -794,6 +812,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
msleep(2);
i++;
} while (i < MDIO_OWNERSHIP_TIMEOUT);
+ break;
+ default:
+ break;
}
ctrl = er32(CTRL);
@@ -819,8 +840,16 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
* Need to wait for Phy configuration completion before accessing
* NVM and Phy.
*/
- if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574)
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
msleep(25);
+ break;
+ default:
+ break;
+ }
/* Clear any pending interrupt events. */
ew32(IMC, 0xffffffff);
@@ -830,6 +859,10 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
hw->dev_spec.e82571.alt_mac_addr_is_present)
e1000e_set_laa_state_82571(hw, true);
+ /* Reinitialize the 82571 serdes link state machine */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes)
+ hw->mac.serdes_link_state = e1000_serdes_link_down;
+
return 0;
}
@@ -886,17 +919,22 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
- if (mac->type != e1000_82573 && mac->type != e1000_82574) {
+ switch (mac->type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ e1000e_enable_tx_pkt_filtering(hw);
+ reg_data = er32(GCR);
+ reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+ ew32(GCR, reg_data);
+ break;
+ default:
reg_data = er32(TXDCTL(1));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC;
ew32(TXDCTL(1), reg_data);
- } else {
- e1000e_enable_tx_pkt_filtering(hw);
- reg_data = er32(GCR);
- reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
- ew32(GCR, reg_data);
+ break;
}
/*
@@ -961,18 +999,30 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
}
/* Device Control */
- if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
reg = er32(CTRL);
reg &= ~(1 << 29);
ew32(CTRL, reg);
+ break;
+ default:
+ break;
}
/* Extended Device Control */
- if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
reg = er32(CTRL_EXT);
reg &= ~(1 << 23);
reg |= (1 << 22);
ew32(CTRL_EXT, reg);
+ break;
+ default:
+ break;
}
if (hw->mac.type == e1000_82571) {
@@ -980,9 +1030,23 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
reg |= E1000_PBA_ECC_CORR_EN;
ew32(PBA_ECC, reg);
}
+ /*
+ * Workaround for hardware errata.
+ * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
+ */
+
+ if ((hw->mac.type == e1000_82571) ||
+ (hw->mac.type == e1000_82572)) {
+ reg = er32(CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ ew32(CTRL_EXT, reg);
+ }
+
/* PCI-Ex Control Registers */
- if (hw->mac.type == e1000_82574) {
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
reg = er32(GCR);
reg |= (1 << 22);
ew32(GCR, reg);
@@ -990,6 +1054,9 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
reg = er32(GCR2);
reg |= 1;
ew32(GCR2, reg);
+ break;
+ default:
+ break;
}
return;
@@ -1009,7 +1076,10 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
u32 vfta_offset = 0;
u32 vfta_bit_in_reg = 0;
- if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
if (hw->mng_cookie.vlan_id != 0) {
/*
* The VFTA is a 4096b bit-field, each identifying
@@ -1024,6 +1094,9 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
+ break;
+ default:
+ break;
}
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/*
@@ -1122,9 +1195,16 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
* the default flow control setting, so we explicitly
* set it to full.
*/
- if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) &&
- hw->fc.requested_mode == e1000_fc_default)
- hw->fc.requested_mode = e1000_fc_full;
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (hw->fc.requested_mode == e1000_fc_default)
+ hw->fc.requested_mode = e1000_fc_full;
+ break;
+ default:
+ break;
+ }
return e1000e_setup_link(hw);
}
@@ -1203,6 +1283,131 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
}
/**
+ * e1000_check_for_serdes_link_82571 - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val = 0;
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
+
+ if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
+
+ /* Receiver is synchronized with no invalid bits. */
+ switch (mac->serdes_link_state) {
+ case e1000_serdes_link_autoneg_complete:
+ if (!(status & E1000_STATUS_LU)) {
+ /*
+ * We have lost link, retry autoneg before
+ * reporting link failure
+ */
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ hw_dbg(hw, "AN_UP -> AN_PROG\n");
+ }
+ break;
+
+ case e1000_serdes_link_forced_up:
+ /*
+ * If we are receiving /C/ ordered sets, re-enable
+ * auto-negotiation in the TXCW register and disable
+ * forced link in the Device Control register in an
+ * attempt to auto-negotiate with our link partner.
+ */
+ if (rxcw & E1000_RXCW_C) {
+ /* Enable autoneg, and unforce link up */
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL,
+ (ctrl & ~E1000_CTRL_SLU));
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ hw_dbg(hw, "FORCED_UP -> AN_PROG\n");
+ }
+ break;
+
+ case e1000_serdes_link_autoneg_progress:
+ /*
+ * If the LU bit is set in the STATUS register,
+ * autoneg has completed successfully. If not,
+ * try forcing the link because the far end may be
+ * available but not capable of autonegotiation.
+ */
+ if (status & E1000_STATUS_LU) {
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_complete;
+ hw_dbg(hw, "AN_PROG -> AN_UP\n");
+ } else {
+ /*
+ * Disable autoneg, force link up and
+ * full duplex, and change state to forced
+ */
+ ew32(TXCW,
+ (mac->txcw & ~E1000_TXCW_ANE));
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after link up. */
+ ret_val =
+ e1000e_config_fc_after_link_up(hw);
+ if (ret_val) {
+ hw_dbg(hw, "Error config flow control\n");
+ break;
+ }
+ mac->serdes_link_state =
+ e1000_serdes_link_forced_up;
+ hw_dbg(hw, "AN_PROG -> FORCED_UP\n");
+ }
+ mac->serdes_has_link = true;
+ break;
+
+ case e1000_serdes_link_down:
+ default:
+ /* The link was down but the receiver has now gained
+ * valid sync, so let's see if we can bring the link
+ * up. */
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL,
+ (ctrl & ~E1000_CTRL_SLU));
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ hw_dbg(hw, "DOWN -> AN_PROG\n");
+ break;
+ }
+ } else {
+ if (!(rxcw & E1000_RXCW_SYNCH)) {
+ mac->serdes_has_link = false;
+ mac->serdes_link_state = e1000_serdes_link_down;
+ hw_dbg(hw, "ANYSTATE -> DOWN\n");
+ } else {
+ /*
+ * We have sync, and can tolerate one
+ * invalid (IV) codeword before declaring
+ * link down, so reread to look again
+ */
+ udelay(10);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_IV) {
+ mac->serdes_link_state = e1000_serdes_link_down;
+ mac->serdes_has_link = false;
+ hw_dbg(hw, "ANYSTATE -> DOWN\n");
+ }
+ }
+ }
+
+ return ret_val;
+}
+
+/**
* e1000_valid_led_default_82571 - Verify a valid default LED config
* @hw: pointer to the HW structure
* @data: pointer to the NVM (EEPROM)
@@ -1220,11 +1425,19 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
return ret_val;
}
- if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) &&
- *data == ID_LED_RESERVED_F746)
- *data = ID_LED_DEFAULT_82573;
- else if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
- *data = ID_LED_DEFAULT;
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (*data == ID_LED_RESERVED_F746)
+ *data = ID_LED_DEFAULT_82573;
+ break;
+ default:
+ if (*data == ID_LED_RESERVED_0000 ||
+ *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT;
+ break;
+ }
return 0;
}
@@ -1517,3 +1730,19 @@ struct e1000_info e1000_82574_info = {
.nvm_ops = &e82571_nvm_ops,
};
+struct e1000_info e1000_82583_info = {
+ .mac = e1000_82583,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_RX_CSUM_ENABLED
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_HAS_AMT
+ | FLAG_HAS_CTRLEXT_ON_LOAD,
+ .pba = 20,
+ .get_variants = e1000_get_variants_82571,
+ .mac_ops = &e82571_mac_ops,
+ .phy_ops = &e82_phy_ops_bm,
+ .nvm_ops = &e82571_nvm_ops,
+};
+
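Note on the hunk above: e1000_check_for_serdes_link_82571() drives a four-state link state machine over the RXCW/STATUS/CTRL registers. The stand-alone C model below is only a sketch of the state transitions, with the register bits reduced to three booleans (sync_ok for RXCW.SYNCH with no invalid codewords, link_up for STATUS.LU, rx_config for RXCW.C); it is illustrative, not driver code.

#include <stdio.h>
#include <stdbool.h>

/* Mirrors enum e1000_serdes_link_state added in hw.h by this patch. */
enum serdes_link_state {
	SERDES_LINK_DOWN = 0,
	SERDES_LINK_AUTONEG_PROGRESS,
	SERDES_LINK_AUTONEG_COMPLETE,
	SERDES_LINK_FORCED_UP
};

/* One pass of the link check, reduced to its state transitions. */
static enum serdes_link_state
serdes_step(enum serdes_link_state s, bool sync_ok, bool link_up, bool rx_config)
{
	if (!sync_ok)
		return SERDES_LINK_DOWN;                    /* ANYSTATE -> DOWN */

	switch (s) {
	case SERDES_LINK_AUTONEG_COMPLETE:                  /* AN_UP -> AN_PROG on link loss */
		return link_up ? s : SERDES_LINK_AUTONEG_PROGRESS;
	case SERDES_LINK_FORCED_UP:                         /* /C/ seen: retry autoneg */
		return rx_config ? SERDES_LINK_AUTONEG_PROGRESS : s;
	case SERDES_LINK_AUTONEG_PROGRESS:                  /* LU set: done; else force link */
		return link_up ? SERDES_LINK_AUTONEG_COMPLETE : SERDES_LINK_FORCED_UP;
	case SERDES_LINK_DOWN:
	default:                                            /* DOWN -> AN_PROG */
		return SERDES_LINK_AUTONEG_PROGRESS;
	}
}

int main(void)
{
	enum serdes_link_state s = SERDES_LINK_DOWN;

	s = serdes_step(s, true, false, false);  /* DOWN -> AN_PROG      */
	s = serdes_step(s, true, false, false);  /* AN_PROG -> FORCED_UP */
	s = serdes_step(s, true, false, true);   /* FORCED_UP -> AN_PROG */
	s = serdes_step(s, true, true, false);   /* AN_PROG -> AN_UP     */
	printf("final state: %d\n", s);          /* 2 = autoneg complete */
	return 0;
}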
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e6caf29..243aa49 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -69,6 +69,7 @@
#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_EIAME 0x01000000
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 37bcb19..f37360a 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -101,6 +101,7 @@ enum e1000_boards {
board_82572,
board_82573,
board_82574,
+ board_82583,
board_80003es2lan,
board_ich8lan,
board_ich9lan,
@@ -195,8 +196,6 @@ struct e1000_adapter {
u16 link_duplex;
u16 eeprom_vers;
- spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
-
/* track device up/down/testing state */
unsigned long state;
@@ -401,6 +400,7 @@ extern struct e1000_info e1000_82571_info;
extern struct e1000_info e1000_82572_info;
extern struct e1000_info e1000_82573_info;
extern struct e1000_info e1000_82574_info;
+extern struct e1000_info e1000_82583_info;
extern struct e1000_info e1000_ich8_info;
extern struct e1000_info e1000_ich9_info;
extern struct e1000_info e1000_ich10_info;
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index e48956d..4d25ede 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -790,6 +790,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
break;
case e1000_82573:
case e1000_82574:
+ case e1000_82583:
case e1000_ich8lan:
case e1000_ich9lan:
case e1000_ich10lan:
@@ -1589,7 +1590,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
- hw->mac.serdes_has_link = 0;
+ hw->mac.serdes_has_link = false;
/*
* On some blade server designs, link establishment
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 2d4ce04..d8b8229 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -339,6 +339,8 @@ enum e1e_registers {
#define E1000_DEV_ID_82573E_IAMT 0x108C
#define E1000_DEV_ID_82573L 0x109A
#define E1000_DEV_ID_82574L 0x10D3
+#define E1000_DEV_ID_82574LA 0x10F6
+#define E1000_DEV_ID_82583V 0x150C
#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
@@ -376,6 +378,7 @@ enum e1000_mac_type {
e1000_82572,
e1000_82573,
e1000_82574,
+ e1000_82583,
e1000_80003es2lan,
e1000_ich8lan,
e1000_ich9lan,
@@ -459,6 +462,13 @@ enum e1000_smart_speed {
e1000_smart_speed_off
};
+enum e1000_serdes_link_state {
+ e1000_serdes_link_down = 0,
+ e1000_serdes_link_autoneg_progress,
+ e1000_serdes_link_autoneg_complete,
+ e1000_serdes_link_forced_up
+};
+
/* Receive Descriptor */
struct e1000_rx_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
@@ -787,6 +797,7 @@ struct e1000_mac_info {
bool in_ifs_mode;
bool serdes_has_link;
bool tx_pkt_filtering;
+ enum e1000_serdes_link_state serdes_link_state;
};
struct e1000_phy_info {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index e415e81..6d1aab6 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -390,8 +390,6 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
}
static DEFINE_MUTEX(nvm_mutex);
-static pid_t nvm_owner_pid = -1;
-static char nvm_owner_name[TASK_COMM_LEN] = "";
/**
* e1000_acquire_swflag_ich8lan - Acquire software control flag
@@ -408,16 +406,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
might_sleep();
- if (!mutex_trylock(&nvm_mutex)) {
- WARN(1, KERN_ERR "e1000e mutex contention. Owned by process "
- "%s (pid %d), required by process %s (pid %d)\n",
- nvm_owner_name, nvm_owner_pid,
- current->comm, current->pid);
-
- mutex_lock(&nvm_mutex);
- }
- nvm_owner_pid = current->pid;
- strncpy(nvm_owner_name, current->comm, TASK_COMM_LEN);
+ mutex_lock(&nvm_mutex);
while (timeout) {
extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -435,8 +424,6 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
- nvm_owner_pid = -1;
- strcpy(nvm_owner_name, "");
mutex_unlock(&nvm_mutex);
return -E1000_ERR_CONFIG;
}
@@ -460,8 +447,6 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
- nvm_owner_pid = -1;
- strcpy(nvm_owner_name, "");
mutex_unlock(&nvm_mutex);
}
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 6674110..18a4f59 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -159,41 +159,6 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
}
/**
- * e1000_mta_set - Set multicast filter table address
- * @hw: pointer to the HW structure
- * @hash_value: determines the MTA register and bit to set
- *
- * The multicast table address is a register array of 32-bit registers.
- * The hash_value is used to determine what register the bit is in, the
- * current value is read, the new bit is OR'd in and the new value is
- * written back into the register.
- **/
-static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
-{
- u32 hash_bit, hash_reg, mta;
-
- /*
- * The MTA is a register array of 32-bit registers. It is
- * treated like an array of (32*mta_reg_count) bits. We want to
- * set bit BitArray[hash_value]. So we figure out what register
- * the bit is in, read it, OR in the new bit, then write
- * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
- * mask to bits 31:5 of the hash value which gives us the
- * register we're modifying. The hash bit within that register
- * is determined by the lower 5 bits of the hash value.
- */
- hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
- hash_bit = hash_value & 0x1F;
-
- mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
-
- mta |= (1 << hash_bit);
-
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
- e1e_flush();
-}
-
-/**
* e1000_hash_mc_addr - Generate a multicast hash value
* @hw: pointer to the HW structure
* @mc_addr: pointer to a multicast address
@@ -281,8 +246,13 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
- u32 hash_value;
u32 i;
+ u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC);
+
+ if (!mcarray) {
+ printk(KERN_ERR "multicast array memory allocation failed\n");
+ return;
+ }
/*
* Load the first set of multicast addresses into the exact
@@ -302,20 +272,24 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
}
}
- /* Clear the old settings from the MTA */
- hw_dbg(hw, "Clearing MTA\n");
- for (i = 0; i < hw->mac.mta_reg_count; i++) {
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
- e1e_flush();
- }
-
/* Load any remaining multicast addresses into the hash table. */
for (; mc_addr_count > 0; mc_addr_count--) {
+ u32 hash_value, hash_reg, hash_bit, mta;
hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
- e1000_mta_set(hw, hash_value);
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+ mta = (1 << hash_bit);
+ mcarray[hash_reg] |= mta;
mc_addr_list += ETH_ALEN;
}
+
+ /* write the hash table completely */
+ for (i = 0; i < hw->mac.mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]);
+
+ e1e_flush();
+ kfree(mcarray);
}
/**
@@ -501,7 +475,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
- mac->serdes_has_link = 1;
+ mac->serdes_has_link = true;
}
return 0;
@@ -566,7 +540,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
- mac->serdes_has_link = 1;
+ mac->serdes_has_link = true;
} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
/*
* If we force link for non-auto-negotiation switch, check
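Note on the e1000e_update_mc_addr_list_generic() rework above: the per-address read-modify-write of the multicast table (the removed e1000_mta_set()) is replaced by a local mcarray[] that is written to the hardware in a single pass. The sketch below shows only the indexing arithmetic, assuming the usual 128-register (4096-bit) MTA on these parts; it is a stand-alone illustration, not the driver helper.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MTA_REG_COUNT 128   /* assumed: 128 x 32-bit registers = 4096 hash bits */

/* Same arithmetic as the loop added above: the upper bits of the hash select
 * the register, the low 5 bits select the bit inside it. */
static void mta_set_bit(uint32_t *mcarray, uint32_t hash_value)
{
	uint32_t hash_reg = (hash_value >> 5) & (MTA_REG_COUNT - 1);
	uint32_t hash_bit = hash_value & 0x1F;

	mcarray[hash_reg] |= 1u << hash_bit;
}

int main(void)
{
	uint32_t mcarray[MTA_REG_COUNT];

	memset(mcarray, 0, sizeof(mcarray));
	mta_set_bit(mcarray, 0x563);                /* example hash value */
	printf("MTA[%u] = 0x%08x\n", 0x563u >> 5, mcarray[0x563 >> 5]);
	/* The driver then writes mcarray[] to the hardware MTA in one pass
	 * (one E1000_WRITE_REG_ARRAY per register, followed by e1e_flush()). */
	return 0;
}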
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 91817d0..bfb2d6c 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -44,10 +44,11 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
+#include <linux/aer.h>
#include "e1000.h"
-#define DRV_VERSION "0.3.3.3-k6"
+#define DRV_VERSION "0.3.3.4-k4"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -56,6 +57,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_82572] = &e1000_82572_info,
[board_82573] = &e1000_82573_info,
[board_82574] = &e1000_82574_info,
+ [board_82583] = &e1000_82583_info,
[board_80003es2lan] = &e1000_es2_info,
[board_ich8lan] = &e1000_ich8_info,
[board_ich9lan] = &e1000_ich9_info,
@@ -99,8 +101,8 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(vlan));
+ vlan_gro_receive(&adapter->napi, adapter->vlgrp,
+ le16_to_cpu(vlan), skb);
else
napi_gro_receive(&adapter->napi, skb);
}
@@ -565,15 +567,14 @@ next_desc:
static void e1000_put_txbuf(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
{
- if (buffer_info->dma) {
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
- buffer_info->dma = 0;
- }
+ buffer_info->dma = 0;
if (buffer_info->skb) {
+ skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
+ buffer_info->time_stamp = 0;
}
static void e1000_print_tx_hang(struct e1000_adapter *adapter)
@@ -620,15 +621,16 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
struct e1000_buffer *buffer_info;
unsigned int i, eop;
unsigned int count = 0;
- bool cleaned = 0;
+ bool cleaned;
unsigned int total_tx_bytes = 0, total_tx_packets = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
- while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
- for (cleaned = 0; !cleaned; ) {
+ while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ (count < tx_ring->count)) {
+ for (cleaned = 0; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
@@ -654,10 +656,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
-#define E1000_TX_WEIGHT 64
- /* weight of a sort for tx, to avoid endless transmit cleanup */
- if (count++ == E1000_TX_WEIGHT)
- break;
}
tx_ring->next_to_clean = i;
@@ -678,13 +676,11 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
if (adapter->detect_tx_hung) {
- /*
- * Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i
- */
+ /* Detect a transmit hang in hardware, this serializes the
+ * check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = 0;
- if (tx_ring->buffer_info[eop].dma &&
- time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
+ if (tx_ring->buffer_info[i].time_stamp &&
+ time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ (adapter->tx_timeout_factor * HZ))
&& !(er32(STATUS) & E1000_STATUS_TXOFF)) {
e1000_print_tx_hang(adapter);
@@ -695,7 +691,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
adapter->total_tx_packets += total_tx_packets;
adapter->net_stats.tx_bytes += total_tx_bytes;
adapter->net_stats.tx_packets += total_tx_packets;
- return cleaned;
+ return (count < tx_ring->count);
}
/**
@@ -1152,7 +1148,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
* read ICR disables interrupts using IAM
*/
- if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = 1;
/*
* ICH8 workaround-- Call gig speed drop workaround on cable
@@ -1179,12 +1175,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (netif_rx_schedule_prep(&adapter->napi)) {
+ if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1218,7 +1214,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
* IMC write
*/
- if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = 1;
/*
* ICH8 workaround-- Call gig speed drop workaround on cable
@@ -1246,12 +1242,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (netif_rx_schedule_prep(&adapter->napi)) {
+ if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1265,7 +1261,8 @@ static irqreturn_t e1000_msix_other(int irq, void *data)
u32 icr = er32(ICR);
if (!(icr & E1000_ICR_INT_ASSERTED)) {
- ew32(IMS, E1000_IMS_OTHER);
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ ew32(IMS, E1000_IMS_OTHER);
return IRQ_NONE;
}
@@ -1282,7 +1279,8 @@ static irqreturn_t e1000_msix_other(int irq, void *data)
}
no_link_interrupt:
- ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
return IRQ_HANDLED;
}
@@ -1320,10 +1318,10 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
adapter->rx_ring->set_itr = 0;
}
- if (netif_rx_schedule_prep(&adapter->napi)) {
+ if (napi_schedule_prep(&adapter->napi)) {
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
}
@@ -1698,7 +1696,6 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- spin_lock_init(&adapter->tx_queue_lock);
return 0;
err:
@@ -2007,32 +2004,25 @@ static int e1000_clean(struct napi_struct *napi, int budget)
!(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
goto clean_rx;
- /*
- * e1000_clean is called per-cpu. This lock protects
- * tx_ring from being cleaned by multiple cpus
- * simultaneously. A failure obtaining the lock means
- * tx_ring is currently being cleaned anyway.
- */
- if (spin_trylock(&adapter->tx_queue_lock)) {
- tx_cleaned = e1000_clean_tx_irq(adapter);
- spin_unlock(&adapter->tx_queue_lock);
- }
+ tx_cleaned = e1000_clean_tx_irq(adapter);
clean_rx:
adapter->clean_rx(adapter, &work_done, budget);
- if (tx_cleaned)
+ if (!tx_cleaned)
work_done = budget;
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
if (adapter->itr_setting & 3)
e1000_set_itr(adapter);
- netif_rx_complete(napi);
- if (adapter->msix_entries)
- ew32(IMS, adapter->rx_ring->ims_val);
- else
- e1000_irq_enable(adapter);
+ napi_complete(napi);
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ ew32(IMS, adapter->rx_ring->ims_val);
+ else
+ e1000_irq_enable(adapter);
+ }
}
return work_done;
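Note on the e1000_clean() hunk above: it keeps the usual NAPI contract after dropping the tx_queue_lock. A poll pass reports the work it did, claims the full budget when TX cleanup did not finish so polling continues, and only completes (re-enabling interrupts only if the adapter is not going down) when it stayed under budget. A minimal user-space model of that decision logic, with no kernel API:

#include <stdio.h>
#include <stdbool.h>

struct poll_result {
	int  work_done;      /* what the poll function returns           */
	bool completed;      /* napi_complete() would be called          */
	bool irqs_reenabled; /* interrupts re-armed (skipped while DOWN) */
};

static struct poll_result poll_once(int budget, int rx_backlog,
				    bool tx_clean_finished, bool going_down)
{
	struct poll_result r = { 0, false, false };

	r.work_done = rx_backlog < budget ? rx_backlog : budget;

	/* Unfinished TX cleanup: report the full budget so NAPI keeps
	 * polling instead of completing. */
	if (!tx_clean_finished)
		r.work_done = budget;

	if (r.work_done < budget) {
		r.completed = true;
		if (!going_down)
			r.irqs_reenabled = true;
	}
	return r;
}

int main(void)
{
	struct poll_result r;

	r = poll_once(64, 10, true, false);
	printf("work=%d completed=%d irqs=%d\n", r.work_done, r.completed, r.irqs_reenabled);
	r = poll_once(64, 10, false, false);   /* TX not finished: keep polling */
	printf("work=%d completed=%d irqs=%d\n", r.work_done, r.completed, r.irqs_reenabled);
	return 0;
}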
@@ -2922,8 +2912,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
if (e1000_alloc_queues(adapter))
return -ENOMEM;
- spin_lock_init(&adapter->tx_queue_lock);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
e1000_irq_disable(adapter);
@@ -3320,7 +3308,7 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.algnerrc += er32(ALGNERRC);
adapter->stats.rxerrc += er32(RXERRC);
- if (hw->mac.type != e1000_82574)
+ if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583))
adapter->stats.tncrs += er32(TNCRS);
adapter->stats.cexterr += er32(CEXTERR);
adapter->stats.tsctc += er32(TSCTC);
@@ -3777,23 +3765,30 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
unsigned int i;
u8 css;
u32 cmd_len = E1000_TXD_CMD_DEXT;
+ __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
+ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ switch (protocol) {
+ case cpu_to_be16(ETH_P_IP):
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
cmd_len |= E1000_TXD_CMD_TCP;
break;
- case __constant_htons(ETH_P_IPV6):
+ case cpu_to_be16(ETH_P_IPV6):
/* XXX not handling all IPV6 headers */
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
cmd_len |= E1000_TXD_CMD_TCP;
break;
default:
if (unlikely(net_ratelimit()))
- e_warn("checksum_partial proto=%x!\n", skb->protocol);
+ e_warn("checksum_partial proto=%x!\n",
+ be16_to_cpu(protocol));
break;
}
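Note on the e1000_tx_csum() change above: when the outer EtherType is 802.1Q, the driver now looks through the tag to the encapsulated protocol (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto) before deciding whether TCP checksum offload applies. A small stand-alone sketch of that lookup, using local stand-ins for the kernel types and constants:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_IP    0x0800
#define ETH_P_8021Q 0x8100

/* Local stand-in for the on-wire 802.1Q header layout; not the kernel's
 * struct vlan_ethhdr, just enough to show the lookup. */
struct vlan_frame_hdr {
	uint8_t  dst[6];
	uint8_t  src[6];
	uint16_t outer_proto;        /* big-endian on the wire */
	uint16_t vlan_tci;
	uint16_t encapsulated_proto; /* big-endian on the wire */
};

static uint16_t l3_protocol(const struct vlan_frame_hdr *h)
{
	if (ntohs(h->outer_proto) == ETH_P_8021Q)
		return ntohs(h->encapsulated_proto);
	return ntohs(h->outer_proto);
}

int main(void)
{
	struct vlan_frame_hdr h = {
		.outer_proto        = htons(ETH_P_8021Q),
		.vlan_tci           = htons(5),
		.encapsulated_proto = htons(ETH_P_IP),
	};

	printf("L3 protocol: 0x%04x\n", l3_protocol(&h)); /* prints 0x0800 */
	return 0;
}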
@@ -3832,42 +3827,40 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
{
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_buffer *buffer_info;
- unsigned int len = skb->len - skb->data_len;
- unsigned int offset = 0, size, count = 0, i;
+ unsigned int len = skb_headlen(skb);
+ unsigned int offset, size, count = 0, i;
unsigned int f;
+ dma_addr_t *map;
i = tx_ring->next_to_use;
+ if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+ dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+ adapter->tx_dma_failed++;
+ return 0;
+ }
+
+ map = skb_shinfo(skb)->dma_maps;
+ offset = 0;
+
while (len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
- /* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (mss && !nr_frags && size == len && size > 8)
- size -= 4;
-
buffer_info->length = size;
- /* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
- buffer_info->dma =
- pci_map_single(adapter->pdev,
- skb->data + offset,
- size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
- dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
- adapter->tx_dma_failed++;
- return -1;
- }
buffer_info->next_to_watch = i;
+ buffer_info->dma = map[0] + offset;
+ count++;
len -= size;
offset += size;
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
+
+ if (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
}
for (f = 0; f < nr_frags; f++) {
@@ -3875,49 +3868,27 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
- offset = frag->page_offset;
+ offset = 0;
while (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
- /* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (mss && f == (nr_frags-1) && size == len && size > 8)
- size -= 4;
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
- buffer_info->dma =
- pci_map_page(adapter->pdev,
- frag->page,
- offset,
- size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(adapter->pdev,
- buffer_info->dma)) {
- dev_err(&adapter->pdev->dev,
- "TX DMA page map failed\n");
- adapter->tx_dma_failed++;
- return -1;
- }
-
buffer_info->next_to_watch = i;
+ buffer_info->dma = map[f + 1] + offset;
len -= size;
offset += size;
count++;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
}
}
- if (i == 0)
- i = tx_ring->count - 1;
- else
- i--;
-
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
@@ -4069,7 +4040,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
unsigned int len = skb->len - skb->data_len;
- unsigned long irq_flags;
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -4138,18 +4108,12 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (adapter->hw.mac.tx_pkt_filtering)
e1000_transfer_dhcp_info(adapter, skb);
- if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
-
/*
* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time
*/
- if (e1000_maybe_stop_tx(netdev, count + 2)) {
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
+ if (e1000_maybe_stop_tx(netdev, count + 2))
return NETDEV_TX_BUSY;
- }
if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
tx_flags |= E1000_TX_FLAGS_VLAN;
@@ -4161,7 +4125,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = e1000_tso(adapter, skb);
if (tso < 0) {
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
return NETDEV_TX_OK;
}
@@ -4178,22 +4141,20 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (skb->protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;
+ /* if count is 0 then a mapping error has occurred */
count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
- if (count < 0) {
- /* handle pci_map_single() error in e1000_tx_map */
+ if (count) {
+ e1000_tx_queue(adapter, tx_flags, count);
+ netdev->trans_start = jiffies;
+ /* Make sure there is space in the ring for the next send. */
+ e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
+
+ } else {
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
- return NETDEV_TX_OK;
+ tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
}
- e1000_tx_queue(adapter, tx_flags, count);
-
- netdev->trans_start = jiffies;
-
- /* Make sure there is space in the ring for the next send. */
- e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
-
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
return NETDEV_TX_OK;
}
@@ -4543,6 +4504,14 @@ static int e1000_resume(struct pci_dev *pdev)
return err;
}
+ /* AER (Advanced Error Reporting) hooks */
+ err = pci_enable_pcie_error_reporting(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
+ "0x%x\n", err);
+ /* non-fatal, continue */
+ }
+
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -4637,24 +4606,29 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int err;
+ pci_ers_result_t result;
e1000e_disable_l1aspm(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pci_set_master(pdev);
- pci_restore_state(pdev);
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
- e1000e_reset(adapter);
- ew32(WUS, ~0);
+ e1000e_reset(adapter);
+ ew32(WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
- return PCI_ERS_RESULT_RECOVERED;
+ return result;
}
/**
@@ -4922,12 +4896,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- /*
- * We should not be using LLTX anymore, but we are still Tx faster with
- * it.
- */
- netdev->features |= NETIF_F_LLTX;
-
if (e1000e_enable_mng_pass_thru(&adapter->hw))
adapter->flags |= FLAG_MNG_PT_ENABLED;
@@ -5091,6 +5059,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ int err;
/*
* flush_scheduled work may reschedule our watchdog task, so
@@ -5125,6 +5094,12 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
free_netdev(netdev);
+ /* AER disable */
+ err = pci_disable_pcie_error_reporting(pdev);
+ if (err)
+ dev_err(&pdev->dev,
+ "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+
pci_disable_device(pdev);
}
@@ -5156,6 +5131,8 @@ static struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
board_80003es2lan },