author     jfv <jfv@FreeBSD.org>    2011-03-18 18:54:00 +0000
committer  jfv <jfv@FreeBSD.org>    2011-03-18 18:54:00 +0000
commit     51a535dc50d49b8d56b485c6a76c90178a62acce (patch)
tree       01d2201de78ddabb2a291d0d3f1d2d09564237d5
parent     a49f2105409febc6cbd31d723137adaad8135189 (diff)
This delta updates the em driver to version 7.2.2, which has been under
test for some weeks. It improves the RX mbuf handling to avoid a system
hang caused by mbuf depletion. Thanks to all those who have been testing
the code, and to Beezar Liu for the design changes.

The igb driver is updated with similar RX changes, and also adds feature
support for the upcoming i350 family of adapters.

MFC after:	1 week
-rw-r--r--  sys/dev/e1000/e1000_82575.c    47
-rw-r--r--  sys/dev/e1000/e1000_82575.h     2
-rw-r--r--  sys/dev/e1000/e1000_defines.h  19
-rw-r--r--  sys/dev/e1000/e1000_phy.c      12
-rw-r--r--  sys/dev/e1000/if_em.c         249
-rw-r--r--  sys/dev/e1000/if_em.h           4
-rw-r--r--  sys/dev/e1000/if_igb.c        405
-rw-r--r--  sys/dev/e1000/if_igb.h          3
-rw-r--r--  sys/dev/e1000/if_lem.h          4
9 files changed, 404 insertions(+), 341 deletions(-)
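
The RX fix the log message refers to shows up in the diff as a two-index
refresh loop: 'i' is the slot being refilled and 'j' runs one slot ahead
as the loop control, so next_to_refresh can advance up to, but never onto,
next_to_check, and the hardware tail is only bumped when at least one
buffer was actually replenished. The following is a minimal, self-contained
sketch of that pattern, not the driver code itself; struct ring,
alloc_cluster(), write_tail() and NUM_DESC are illustrative stand-ins for
the driver's rx_ring, m_getjcl(), the E1000_RDT() write, and the
descriptor count.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_DESC 8                       /* tiny ring, just for the example */

struct ring {
	void	*buf[NUM_DESC];
	int	next_to_check;           /* next slot the host will examine */
	int	next_to_refresh;         /* next slot to refill; mirrors the hw tail */
};

/* Stand-ins for the driver's mbuf allocation and tail-register write. */
static void *alloc_cluster(void) { return (malloc(2048)); }
static void  write_tail(int idx) { printf("tail -> %d\n", idx); }

static void
ring_refresh(struct ring *r, int limit)
{
	bool refreshed = false;
	int i, j;

	i = j = r->next_to_refresh;
	/* j runs one slot beyond the work mark and controls the loop. */
	if (++j == NUM_DESC)
		j = 0;

	while (j != limit) {
		if (r->buf[i] == NULL) {
			r->buf[i] = alloc_cluster();
			if (r->buf[i] == NULL)
				break;   /* out of buffers: stop now, retry later */
		}
		refreshed = true;
		i = j;                   /* next index was precalculated */
		r->next_to_refresh = i;
		if (++j == NUM_DESC)
			j = 0;
	}

	if (refreshed)                   /* bump the tail only if work was done */
		write_tail(r->next_to_refresh);
}

int
main(void)
{
	struct ring r = { .next_to_check = 5, .next_to_refresh = 2 };

	/* Refill up to, but never onto, next_to_check. */
	ring_refresh(&r, r.next_to_check);
	return (0);
}

The same i/j shape appears in em_refresh_mbufs(), em_setup_receive_ring(),
igb_refresh_mbufs() and igb_setup_receive_ring() in the diff below, and
em_local_timer()/igb_msix_que() gain a check that triggers a refresh when
next_to_check == next_to_refresh, i.e. when the ring has been fully
depleted.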
diff --git a/sys/dev/e1000/e1000_82575.c b/sys/dev/e1000/e1000_82575.c
index db5609b..7257d15 100644
--- a/sys/dev/e1000/e1000_82575.c
+++ b/sys/dev/e1000/e1000_82575.c
@@ -36,7 +36,6 @@
* 82575EB Gigabit Network Connection
* 82575EB Gigabit Backplane Connection
* 82575GB Gigabit Network Connection
- * 82575GB Gigabit Network Connection
* 82576 Gigabit Network Connection
* 82576 Quad Port Gigabit Mezzanine Adapter
*/
@@ -44,7 +43,6 @@
#include "e1000_api.h"
static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
-static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
static void e1000_release_phy_82575(struct e1000_hw *hw);
@@ -197,12 +195,14 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
switch (phy->id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
case M88E1111_I_PHY_ID:
phy->type = e1000_phy_m88;
phy->ops.check_polarity = e1000_check_polarity_m88;
phy->ops.get_info = e1000_get_phy_info_m88;
if (phy->id == I347AT4_E_PHY_ID ||
- phy->id == M88E1112_E_PHY_ID)
+ phy->id == M88E1112_E_PHY_ID ||
+ phy->id == M88E1340M_E_PHY_ID)
phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
else
phy->ops.get_cable_length = e1000_get_cable_length_m88;
@@ -241,7 +241,7 @@ out:
* e1000_init_nvm_params_82575 - Init NVM func ptrs.
* @hw: pointer to the HW structure
**/
-static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = E1000_READ_REG(hw, E1000_EECD);
@@ -258,7 +258,6 @@ static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
size += NVM_WORD_SIZE_BASE_SHIFT;
nvm->word_size = 1 << size;
-
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
switch (nvm->override) {
@@ -278,20 +277,23 @@ static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
nvm->type = e1000_nvm_eeprom_spi;
- if (nvm->word_size == (1 << 15)) {
+ if (nvm->word_size == (1 << 15))
nvm->page_size = 128;
- }
-
/* Function Pointers */
- nvm->ops.acquire = e1000_acquire_nvm_82575;
- if (nvm->word_size < (1 << 15)) {
- nvm->ops.read = e1000_read_nvm_eerd;
- } else {
- nvm->ops.read = e1000_read_nvm_spi;
- }
- nvm->ops.release = e1000_release_nvm_82575;
- nvm->ops.valid_led_default = e1000_valid_led_default_82575;
+ nvm->ops.acquire = e1000_acquire_nvm_82575;
+ nvm->ops.release = e1000_release_nvm_82575;
+ if (nvm->word_size < (1 << 15))
+ nvm->ops.read = e1000_read_nvm_eerd;
+ else
+ nvm->ops.read = e1000_read_nvm_spi;
+
+ nvm->ops.write = e1000_write_nvm_spi;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_82575;
+
+ /* override genric family function pointers for specific descendants */
switch (hw->mac.type) {
case e1000_82580:
nvm->ops.validate = e1000_validate_nvm_checksum_82580;
@@ -302,10 +304,8 @@ static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
nvm->ops.update = e1000_update_nvm_checksum_i350;
break;
default:
- nvm->ops.validate = e1000_validate_nvm_checksum_generic;
- nvm->ops.update = e1000_update_nvm_checksum_generic;
+ break;
}
- nvm->ops.write = e1000_write_nvm_spi;
return E1000_SUCCESS;
}
@@ -889,9 +889,7 @@ static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
if (ret_val)
goto out;
-
ret_val = e1000_acquire_nvm_generic(hw);
-
if (ret_val)
e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
@@ -910,7 +908,6 @@ static void e1000_release_nvm_82575(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_release_nvm_82575");
- e1000_release_nvm_generic(hw);
e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
}
@@ -1365,7 +1362,8 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
switch (hw->phy.type) {
case e1000_phy_m88:
if (hw->phy.id == I347AT4_E_PHY_ID ||
- hw->phy.id == M88E1112_E_PHY_ID)
+ hw->phy.id == M88E1112_E_PHY_ID ||
+ hw->phy.id == M88E1340M_E_PHY_ID)
ret_val = e1000_copper_link_setup_m88_gen2(hw);
else
ret_val = e1000_copper_link_setup_m88(hw);
@@ -1840,7 +1838,6 @@ out:
return ret_val;
}
-
/**
* e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
* @hw: pointer to the hardware struct
@@ -1986,7 +1983,7 @@ out:
* e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
* @hw: pointer to the HW structure
*
- * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
* the values found in the EEPROM. This addresses an issue in which these
* bits are not restored from EEPROM after reset.
**/
diff --git a/sys/dev/e1000/e1000_82575.h b/sys/dev/e1000/e1000_82575.h
index e895d58..529325b 100644
--- a/sys/dev/e1000/e1000_82575.h
+++ b/sys/dev/e1000/e1000_82575.h
@@ -469,6 +469,8 @@ struct e1000_adv_tx_context_desc {
void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
+
enum e1000_promisc_type {
e1000_promisc_disabled = 0, /* all promisc modes disabled */
e1000_promisc_unicast = 1, /* unicast promiscuous enabled */
diff --git a/sys/dev/e1000/e1000_defines.h b/sys/dev/e1000/e1000_defines.h
index a1eec00..fd6a128 100644
--- a/sys/dev/e1000/e1000_defines.h
+++ b/sys/dev/e1000/e1000_defines.h
@@ -76,8 +76,8 @@
#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
#define E1000_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
#define E1000_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
-#define E1000_WUFC_FLX6 0x00400000 /* Flexible Filter 6 Enable */
-#define E1000_WUFC_FLX7 0x00800000 /* Flexible Filter 7 Enable */
+#define E1000_WUFC_FLX6 0x00400000 /* Flexible Filter 6 Enable */
+#define E1000_WUFC_FLX7 0x00800000 /* Flexible Filter 7 Enable */
#define E1000_WUFC_FW_RST 0x80000000 /* Wake on FW Reset Enable */
#define E1000_WUFC_ALL_FILTERS_PHY_4 0x0000F0FF /*Mask for all wakeup filters*/
#define E1000_WUFC_FLX_OFFSET_PHY 12 /* Offset to the Flexible Filters bits */
@@ -249,6 +249,7 @@
#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
#define E1000_RXD_SPC_CFI_SHIFT 12
+#define E1000_RXDEXT_STATERR_LB 0x00040000
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
@@ -1478,7 +1479,8 @@
#define M88E1011_I_REV_4 0x04
#define M88E1111_I_PHY_ID 0x01410CC0
#define M88E1112_E_PHY_ID 0x01410C90
-#define I347AT4_E_PHY_ID 0x01410DC0
+#define I347AT4_E_PHY_ID 0x01410DC0
+#define M88E1340M_E_PHY_ID 0x01410DF0
#define GG82563_E_PHY_ID 0x01410CA0
#define IGP03E1000_E_PHY_ID 0x02A80390
#define IFE_E_PHY_ID 0x02A80330
@@ -1764,11 +1766,10 @@
#define E1000_RTTBCNRC_RF_INT_MASK \
(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
-
/* DMA Coalescing register fields */
#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
* Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Rx
* Threshold */
#define E1000_DMACR_DMACTHR_SHIFT 16
#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
@@ -1781,15 +1782,15 @@
#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate
* Threshold */
-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
+#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx packet rate in
* current window */
-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Traffic
* Current Cnt */
-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rx Threshold
* High val */
#define E1000_FCRTC_RTH_COAL_SHIFT 4
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index f01fde4..40fd2bd 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -255,10 +255,6 @@ s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
E1000_WRITE_REG(hw, E1000_MDIC, mdic);
- /* Workaround for Si errata */
- if ((hw->phy.type == e1000_phy_82577) && (hw->revision_id <= 2))
- msec_delay(10);
-
/*
* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
@@ -326,10 +322,6 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
E1000_WRITE_REG(hw, E1000_MDIC, mdic);
- /* Workaround for Si errata */
- if ((hw->phy.type == e1000_phy_82577) && (hw->revision_id <= 2))
- msec_delay(10);
-
/*
* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
@@ -1656,6 +1648,7 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (!link) {
if (hw->phy.type != e1000_phy_m88 ||
hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1340M_E_PHY_ID ||
hw->phy.id == M88E1112_E_PHY_ID) {
DEBUGOUT("Link taking longer than expected.\n");
} else {
@@ -1683,6 +1676,7 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (hw->phy.type != e1000_phy_m88 ||
hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1340M_E_PHY_ID ||
hw->phy.id == M88E1112_E_PHY_ID)
goto out;
@@ -2233,6 +2227,7 @@ s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
DEBUGFUNC("e1000_get_cable_length_m88_gen2");
switch (hw->phy.id) {
+ case M88E1340M_E_PHY_ID:
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -2787,6 +2782,7 @@ enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
case M88E1011_I_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
phy_type = e1000_phy_m88;
break;
case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 6508ae2..cbf0bbb 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -93,7 +93,7 @@ int em_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char em_driver_version[] = "7.1.9";
+char em_driver_version[] = "7.2.2";
/*********************************************************************
* PCI Device ID Table
@@ -284,9 +284,7 @@ static void em_handle_tx(void *context, int pending);
static void em_handle_rx(void *context, int pending);
static void em_handle_link(void *context, int pending);
-static void em_add_rx_process_limit(struct adapter *, const char *,
- const char *, int *, int);
-static void em_set_flow_cntrl(struct adapter *, const char *,
+static void em_set_sysctl_value(struct adapter *, const char *,
const char *, int *, int);
static __inline void em_rx_discard(struct rx_ring *, int);
@@ -365,6 +363,10 @@ TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
static int em_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.em.fc_setting", &em_fc_setting);
+/* Energy efficient ethernet - default to OFF */
+static int eee_setting = 0;
+TUNABLE_INT("hw.em.eee_setting", &eee_setting);
+
/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
@@ -433,12 +435,14 @@ static int
em_attach(device_t dev)
{
struct adapter *adapter;
+ struct e1000_hw *hw;
int error = 0;
INIT_DEBUGOUT("em_attach: begin");
adapter = device_get_softc(dev);
adapter->dev = adapter->osdep.dev = dev;
+ hw = &adapter->hw;
EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
/* SYSCTL stuff */
@@ -470,11 +474,11 @@ em_attach(device_t dev)
** must happen after the MAC is
** identified
*/
- if ((adapter->hw.mac.type == e1000_ich8lan) ||
- (adapter->hw.mac.type == e1000_ich9lan) ||
- (adapter->hw.mac.type == e1000_ich10lan) ||
- (adapter->hw.mac.type == e1000_pchlan) ||
- (adapter->hw.mac.type == e1000_pch2lan)) {
+ if ((hw->mac.type == e1000_ich8lan) ||
+ (hw->mac.type == e1000_ich9lan) ||
+ (hw->mac.type == e1000_ich10lan) ||
+ (hw->mac.type == e1000_pchlan) ||
+ (hw->mac.type == e1000_pch2lan)) {
int rid = EM_BAR_TYPE_FLASH;
adapter->flash = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
@@ -484,7 +488,7 @@ em_attach(device_t dev)
goto err_pci;
}
/* This is used in the shared code */
- adapter->hw.flash_address = (u8 *)adapter->flash;
+ hw->flash_address = (u8 *)adapter->flash;
adapter->osdep.flash_bus_space_tag =
rman_get_bustag(adapter->flash);
adapter->osdep.flash_bus_space_handle =
@@ -492,39 +496,39 @@ em_attach(device_t dev)
}
/* Do Shared Code initialization */
- if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
+ if (e1000_setup_init_funcs(hw, TRUE)) {
device_printf(dev, "Setup of Shared code failed\n");
error = ENXIO;
goto err_pci;
}
- e1000_get_bus_info(&adapter->hw);
+ e1000_get_bus_info(hw);
/* Set up some sysctls for the tunable interrupt delays */
em_add_int_delay_sysctl(adapter, "rx_int_delay",
"receive interrupt delay in usecs", &adapter->rx_int_delay,
- E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
+ E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt);
em_add_int_delay_sysctl(adapter, "tx_int_delay",
"transmit interrupt delay in usecs", &adapter->tx_int_delay,
- E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
+ E1000_REGISTER(hw, E1000_TIDV), em_tx_int_delay_dflt);
em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
"receive interrupt delay limit in usecs",
&adapter->rx_abs_int_delay,
- E1000_REGISTER(&adapter->hw, E1000_RADV),
+ E1000_REGISTER(hw, E1000_RADV),
em_rx_abs_int_delay_dflt);
em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
"transmit interrupt delay limit in usecs",
&adapter->tx_abs_int_delay,
- E1000_REGISTER(&adapter->hw, E1000_TADV),
+ E1000_REGISTER(hw, E1000_TADV),
em_tx_abs_int_delay_dflt);
/* Sysctl for limiting the amount of work done in the taskqueue */
- em_add_rx_process_limit(adapter, "rx_processing_limit",
+ em_set_sysctl_value(adapter, "rx_processing_limit",
"max number of rx packets to process", &adapter->rx_process_limit,
em_rx_process_limit);
/* Sysctl for setting the interface flow control */
- em_set_flow_cntrl(adapter, "flow_control",
+ em_set_sysctl_value(adapter, "flow_control",
"configure flow control",
&adapter->fc_setting, em_fc_setting);
@@ -549,15 +553,15 @@ em_attach(device_t dev)
} else
adapter->num_rx_desc = em_rxd;
- adapter->hw.mac.autoneg = DO_AUTO_NEG;
- adapter->hw.phy.autoneg_wait_to_complete = FALSE;
- adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+ hw->mac.autoneg = DO_AUTO_NEG;
+ hw->phy.autoneg_wait_to_complete = FALSE;
+ hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* Copper options */
- if (adapter->hw.phy.media_type == e1000_media_type_copper) {
- adapter->hw.phy.mdix = AUTO_ALL_MODES;
- adapter->hw.phy.disable_polarity_correction = FALSE;
- adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ hw->phy.mdix = AUTO_ALL_MODES;
+ hw->phy.disable_polarity_correction = FALSE;
+ hw->phy.ms_type = EM_MASTER_SLAVE;
}
/*
@@ -571,7 +575,7 @@ em_attach(device_t dev)
* This controls when hardware reports transmit completion
* status.
*/
- adapter->hw.mac.report_tx_early = 1;
+ hw->mac.report_tx_early = 1;
/*
** Get queue/ring memory
@@ -591,25 +595,31 @@ em_attach(device_t dev)
}
/* Check SOL/IDER usage */
- if (e1000_check_reset_block(&adapter->hw))
+ if (e1000_check_reset_block(hw))
device_printf(dev, "PHY reset is blocked"
" due to SOL/IDER session.\n");
+ /* Sysctl for setting Energy Efficient Ethernet */
+ em_set_sysctl_value(adapter, "eee_control",
+ "enable Energy Efficient Ethernet",
+ &hw->dev_spec.ich8lan.eee_disable, eee_setting);
+
/*
** Start from a known state, this is
** important in reading the nvm and
** mac from that.
*/
- e1000_reset_hw(&adapter->hw);
+ e1000_reset_hw(hw);
+
/* Make sure we have a good EEPROM before we read from it */
- if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
+ if (e1000_validate_nvm_checksum(hw) < 0) {
/*
** Some PCI-E parts fail the first check due to
** the link being in sleep state, call it again,
** if it fails a second time its a real issue.
*/
- if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
+ if (e1000_validate_nvm_checksum(hw) < 0) {
device_printf(dev,
"The EEPROM Checksum Is Not Valid\n");
error = EIO;
@@ -618,14 +628,14 @@ em_attach(device_t dev)
}
/* Copy the permanent MAC address out of the EEPROM */
- if (e1000_read_mac_addr(&adapter->hw) < 0) {
+ if (e1000_read_mac_addr(hw) < 0) {
device_printf(dev, "EEPROM read error while reading MAC"
" address\n");
error = EIO;
goto err_late;
}
- if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
+ if (!em_is_valid_ether_addr(hw->mac.addr)) {
device_printf(dev, "Invalid MAC address\n");
error = EIO;
goto err_late;
@@ -655,7 +665,7 @@ em_attach(device_t dev)
/* Initialize statistics */
em_update_stats_counters(adapter);
- adapter->hw.mac.get_link_status = 1;
+ hw->mac.get_link_status = 1;
em_update_link_status(adapter);
/* Register for VLAN events */
@@ -927,11 +937,10 @@ em_start_locked(struct ifnet *ifp, struct tx_ring *txr)
if (!adapter->link_active)
return;
- /* Call cleanup if number of TX descriptors low */
- if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD)
- em_txeof(txr);
-
while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+ /* Call cleanup if number of TX descriptors low */
+ if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD)
+ em_txeof(txr);
if (txr->tx_avail < EM_MAX_SCATTER) {
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
@@ -1411,8 +1420,7 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- em_start_locked(ifp, txr);
+ em_start_locked(ifp, txr);
#endif
EM_TX_UNLOCK(txr);
@@ -1475,24 +1483,20 @@ em_handle_que(void *context, int pending)
struct ifnet *ifp = adapter->ifp;
struct tx_ring *txr = adapter->tx_rings;
struct rx_ring *rxr = adapter->rx_rings;
- bool more;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
-
+ bool more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
EM_TX_LOCK(txr);
em_txeof(txr);
#ifdef EM_MULTIQUEUE
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- em_start_locked(ifp, txr);
+ em_start_locked(ifp, txr);
#endif
- em_txeof(txr);
EM_TX_UNLOCK(txr);
- if (more) {
+ if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
taskqueue_enqueue(adapter->tq, &adapter->que_task);
return;
}
@@ -1601,10 +1605,8 @@ em_handle_tx(void *context, int pending)
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- em_start_locked(ifp, txr);
+ em_start_locked(ifp, txr);
#endif
- em_txeof(txr);
E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
EM_TX_UNLOCK(txr);
}
@@ -2179,6 +2181,7 @@ em_local_timer(void *arg)
struct adapter *adapter = arg;
struct ifnet *ifp = adapter->ifp;
struct tx_ring *txr = adapter->tx_rings;
+ struct rx_ring *rxr = adapter->rx_rings;
EM_CORE_LOCK_ASSERT(adapter);
@@ -2190,6 +2193,13 @@ em_local_timer(void *arg)
e1000_get_laa_state_82571(&adapter->hw))
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+ /* trigger tq to refill rx ring queue if it is empty */
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+ if (rxr->next_to_check == rxr->next_to_refresh) {
+ taskqueue_enqueue(rxr->tq, &rxr->rx_task);
+ }
+ }
+
/*
** Don't do TX watchdog check if we've been paused
*/
@@ -3730,17 +3740,17 @@ em_txeof(struct tx_ring *txr)
txr->queue_status = EM_QUEUE_HUNG;
/*
- * If we have enough room, clear IFF_DRV_OACTIVE
+ * If we have a minimum free, clear IFF_DRV_OACTIVE
* to tell the stack that it is OK to send packets.
*/
- if (txr->tx_avail > EM_TX_CLEANUP_THRESHOLD) {
+ if (txr->tx_avail > EM_MAX_SCATTER)
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- /* Disable watchdog if all clean */
- if (txr->tx_avail == adapter->num_tx_desc) {
- txr->queue_status = EM_QUEUE_IDLE;
- return (FALSE);
- }
- }
+
+ /* Disable watchdog if all clean */
+ if (txr->tx_avail == adapter->num_tx_desc) {
+ txr->queue_status = EM_QUEUE_IDLE;
+ return (FALSE);
+ }
return (TRUE);
}
@@ -3758,11 +3768,19 @@ em_refresh_mbufs(struct rx_ring *rxr, int limit)
struct mbuf *m;
bus_dma_segment_t segs[1];
struct em_buffer *rxbuf;
- int i, error, nsegs, cleaned;
+ int i, j, error, nsegs;
+ bool cleaned = FALSE;
+
+ i = j = rxr->next_to_refresh;
+ /*
+ ** Get one descriptor beyond
+ ** our work mark to control
+ ** the loop.
+ */
+ if (++j == adapter->num_rx_desc)
+ j = 0;
- i = rxr->next_to_refresh;
- cleaned = -1;
- while (i != limit) {
+ while (j != limit) {
rxbuf = &rxr->rx_buffers[i];
if (rxbuf->m_head == NULL) {
m = m_getjcl(M_DONTWAIT, MT_DATA,
@@ -3796,21 +3814,22 @@ em_refresh_mbufs(struct rx_ring *rxr, int limit)
bus_dmamap_sync(rxr->rxtag,
rxbuf->map, BUS_DMASYNC_PREREAD);
rxr->rx_base[i].buffer_addr = htole64(segs[0].ds_addr);
+ cleaned = TRUE;
- cleaned = i;
- /* Calculate next index */
- if (++i == adapter->num_rx_desc)
- i = 0;
+ i = j; /* Next is precalulated for us */
rxr->next_to_refresh = i;
+ /* Calculate next controlling index */
+ if (++j == adapter->num_rx_desc)
+ j = 0;
}
update:
/*
** Update the tail pointer only if,
** and as far as we have refreshed.
*/
- if (cleaned != -1) /* Update tail index */
+ if (cleaned)
E1000_WRITE_REG(&adapter->hw,
- E1000_RDT(rxr->me), cleaned);
+ E1000_RDT(rxr->me), rxr->next_to_refresh);
return;
}
@@ -3888,36 +3907,32 @@ em_setup_receive_ring(struct rx_ring *rxr)
struct adapter *adapter = rxr->adapter;
struct em_buffer *rxbuf;
bus_dma_segment_t seg[1];
- int rsize, nsegs, error;
+ int i, j, nsegs, error;
/* Clear the ring contents */
EM_RX_LOCK(rxr);
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
- bzero((void *)rxr->rx_base, rsize);
- /*
- ** Free current RX buffer structs and their mbufs
- */
- for (int i = 0; i < adapter->num_rx_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->rxtag, rxbuf->map,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxtag, rxbuf->map);
- m_freem(rxbuf->m_head);
- }
+ /* Invalidate all descriptors */
+ for (i = 0; i < adapter->num_rx_desc; i++) {
+ struct e1000_rx_desc* cur;
+ cur = &rxr->rx_base[i];
+ cur->status = 0;
}
/* Now replenish the mbufs */
- for (int j = 0; j != adapter->num_rx_desc; ++j) {
+ i = j = rxr->next_to_refresh;
+ if (++j == adapter->num_rx_desc)
+ j = 0;
- rxbuf = &rxr->rx_buffers[j];
+ while(j != rxr->next_to_check) {
+ rxbuf = &rxr->rx_buffers[i];
rxbuf->m_head = m_getjcl(M_DONTWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
- if (rxbuf->m_head == NULL)
- return (ENOBUFS);
+ if (rxbuf->m_head == NULL) {
+ error = ENOBUFS;
+ goto fail;
+ }
rxbuf->m_head->m_len = adapter->rx_mbuf_sz;
rxbuf->m_head->m_flags &= ~M_HASFCS; /* we strip it */
rxbuf->m_head->m_pkthdr.len = adapter->rx_mbuf_sz;
@@ -3929,25 +3944,24 @@ em_setup_receive_ring(struct rx_ring *rxr)
if (error != 0) {
m_freem(rxbuf->m_head);
rxbuf->m_head = NULL;
- return (error);
+ goto fail;
}
bus_dmamap_sync(rxr->rxtag,
rxbuf->map, BUS_DMASYNC_PREREAD);
/* Update descriptor */
- rxr->rx_base[j].buffer_addr = htole64(seg[0].ds_addr);
+ rxr->rx_base[i].buffer_addr = htole64(seg[0].ds_addr);
+ i = j;
+ if (++j == adapter->num_rx_desc)
+ j = 0;
}
-
- /* Setup our descriptor indices */
- rxr->next_to_check = 0;
- rxr->next_to_refresh = 0;
-
+fail:
+ rxr->next_to_refresh = i;
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
EM_RX_UNLOCK(rxr);
- return (0);
+ return (error);
}
/*********************************************************************
@@ -3959,9 +3973,9 @@ static int
em_setup_receive_structures(struct adapter *adapter)
{
struct rx_ring *rxr = adapter->rx_rings;
- int j;
+ int q;
- for (j = 0; j < adapter->num_queues; j++, rxr++)
+ for (q = 0; q < adapter->num_queues; q++, rxr++)
if (em_setup_receive_ring(rxr))
goto fail;
@@ -3970,11 +3984,12 @@ fail:
/*
* Free RX buffers allocated so far, we will only handle
* the rings that completed, the failing case will have
- * cleaned up for itself. 'j' failed, so its the terminus.
+ * cleaned up for itself. 'q' failed, so its the terminus.
*/
- for (int i = 0; i < j; ++i) {
+ for (int i = 0, n = 0; i < q; ++i) {
rxr = &adapter->rx_rings[i];
- for (int n = 0; n < adapter->num_rx_desc; n++) {
+ n = rxr->next_to_check;
+ while(n != rxr->next_to_refresh) {
struct em_buffer *rxbuf;
rxbuf = &rxr->rx_buffers[n];
if (rxbuf->m_head != NULL) {
@@ -3984,7 +3999,11 @@ fail:
m_freem(rxbuf->m_head);
rxbuf->m_head = NULL;
}
+ if (++n == adapter->num_rx_desc)
+ n = 0;
}
+ rxr->next_to_check = 0;
+ rxr->next_to_refresh = 0;
}
return (ENOBUFS);
@@ -4025,7 +4044,8 @@ em_free_receive_buffers(struct rx_ring *rxr)
INIT_DEBUGOUT("free_receive_buffers: begin");
if (rxr->rx_buffers != NULL) {
- for (int i = 0; i < adapter->num_rx_desc; i++) {
+ int i = rxr->next_to_check;
+ while(i != rxr->next_to_refresh) {
rxbuf = &rxr->rx_buffers[i];
if (rxbuf->map != NULL) {
bus_dmamap_sync(rxr->rxtag, rxbuf->map,
@@ -4037,9 +4057,13 @@ em_free_receive_buffers(struct rx_ring *rxr)
m_freem(rxbuf->m_head);
rxbuf->m_head = NULL;
}
+ if (++i == adapter->num_rx_desc)
+ i = 0;
}
free(rxr->rx_buffers, M_DEVBUF);
rxr->rx_buffers = NULL;
+ rxr->next_to_check = 0;
+ rxr->next_to_refresh = 0;
}
if (rxr->rxtag != NULL) {
@@ -4122,8 +4146,8 @@ em_initialize_receive_unit(struct adapter *adapter)
E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32));
E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr);
/* Setup the Head and Tail Descriptor Pointers */
- E1000_WRITE_REG(hw, E1000_RDH(i), 0);
- E1000_WRITE_REG(hw, E1000_RDT(i), adapter->num_rx_desc - 1);
+ E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
+ E1000_WRITE_REG(hw, E1000_RDT(i), rxr->next_to_refresh);
}
/* Set early receive threshold on appropriate hw */
@@ -4303,7 +4327,8 @@ next_desc:
}
/* Catch any remaining refresh work */
- em_refresh_mbufs(rxr, i);
+ if (processed != 0 || i == rxr->next_to_refresh)
+ em_refresh_mbufs(rxr, i);
rxr->next_to_check = i;
if (done != NULL)
@@ -4743,10 +4768,8 @@ em_enable_wakeup(device_t dev)
if ((adapter->hw.mac.type == e1000_ich8lan) ||
(adapter->hw.mac.type == e1000_pchlan) ||
(adapter->hw.mac.type == e1000_ich9lan) ||
- (adapter->hw.mac.type == e1000_ich10lan)) {
+ (adapter->hw.mac.type == e1000_ich10lan))
e1000_disable_gig_wol_ich8lan(&adapter->hw);
- e1000_hv_phy_powerdown_workaround_ich8lan(&adapter->hw);
- }
/* Keep the laser running on Fiber adapters */
if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
@@ -5442,17 +5465,7 @@ em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
}
static void
-em_add_rx_process_limit(struct adapter *adapter, const char *name,
- const char *description, int *limit, int value)
-{
- *limit = value;
- SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
- OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
-}
-
-static void
-em_set_flow_cntrl(struct adapter *adapter, const char *name,
+em_set_sysctl_value(struct adapter *adapter, const char *name,
const char *description, int *limit, int value)
{
*limit = value;
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index 8bfd600..4f12aba 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -212,6 +212,10 @@
#define EM_BAR_MEM_TYPE_64BIT 0x00000004
#define EM_MSIX_BAR 3 /* On 82575 */
+#if !defined(SYSTCL_ADD_UQUAD)
+#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
+#endif
+
/* Defines for printing debug information */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
diff --git a/sys/dev/e1000/if_igb.c b/sys/dev/e1000/if_igb.c
index 55e9be5..b9ab0a3 100644
--- a/sys/dev/e1000/if_igb.c
+++ b/sys/dev/e1000/if_igb.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2011, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -99,7 +99,7 @@ int igb_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char igb_driver_version[] = "version - 2.1.4";
+char igb_driver_version[] = "version - 2.1.7";
/*********************************************************************
@@ -255,14 +255,13 @@ static void igb_enable_wakeup(device_t);
static void igb_led_func(void *, int);
static int igb_irq_fast(void *);
-static void igb_add_rx_process_limit(struct adapter *, const char *,
- const char *, int *, int);
+static void igb_msix_que(void *);
+static void igb_msix_link(void *);
static void igb_handle_que(void *context, int pending);
static void igb_handle_link(void *context, int pending);
-/* These are MSIX only irq handlers */
-static void igb_msix_que(void *);
-static void igb_msix_link(void *);
+static void igb_set_sysctl_value(struct adapter *, const char *,
+ const char *, int *, int);
#ifdef DEVICE_POLLING
static poll_handler_t igb_poll;
@@ -350,6 +349,17 @@ TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit);
static int igb_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
+/* Energy Efficient Ethernet - default to off */
+static int igb_eee_setting = FALSE;
+TUNABLE_INT("hw.igb.ee_setting", &igb_eee_setting);
+
+/*
+** DMA Coalescing, only for i350 - default to off,
+** this feature is for power savings
+*/
+static int igb_dma_coalesce = FALSE;
+TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
+
/*********************************************************************
* Device identification routine
*
@@ -430,11 +440,6 @@ igb_attach(device_t dev)
OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
igb_sysctl_nvm_info, "I", "NVM Information");
- SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
- OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
- &igb_fc_setting, 0, "Flow Control");
-
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
@@ -461,11 +466,16 @@ igb_attach(device_t dev)
e1000_get_bus_info(&adapter->hw);
- /* Sysctls for limiting the amount of work done in the taskqueue */
- igb_add_rx_process_limit(adapter, "rx_processing_limit",
+ /* Sysctl for limiting the amount of work done in the taskqueue */
+ igb_set_sysctl_value(adapter, "rx_processing_limit",
"max number of rx packets to process", &adapter->rx_process_limit,
igb_rx_process_limit);
+ /* Sysctl for setting the interface flow control */
+ igb_set_sysctl_value(adapter, "flow_control",
+ "configure flow control",
+ &adapter->fc_setting, igb_fc_setting);
+
/*
* Validate number of transmit and receive descriptors. It
* must not exceed hardware maximum, and must be multiple
@@ -537,6 +547,18 @@ igb_attach(device_t dev)
goto err_late;
}
+ /* Some adapter-specific advanced features */
+ if (adapter->hw.mac.type >= e1000_i350) {
+ igb_set_sysctl_value(adapter, "dma_coalesce",
+ "configure dma coalesce",
+ &adapter->dma_coalesce, igb_dma_coalesce);
+ igb_set_sysctl_value(adapter, "eee_control",
+ "enable Energy Efficient Ethernet",
+ &adapter->hw.dev_spec._82575.eee_disable,
+ igb_eee_setting);
+ e1000_set_eee_i350(&adapter->hw);
+ }
+
/*
** Start from a known state, this is
** important in reading the nvm and
@@ -1436,6 +1458,10 @@ igb_msix_que(void *arg)
more_tx = igb_txeof(txr);
IGB_TX_UNLOCK(txr);
+ /* If RX ring is depleted do refresh first */
+ if (rxr->next_to_check == rxr->next_to_refresh)
+ igb_refresh_mbufs(rxr, rxr->next_to_check);
+
more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL);
if (igb_enable_aim == FALSE)
@@ -1938,7 +1964,7 @@ igb_local_timer(void *arg)
out:
callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
#ifndef DEVICE_POLLING
- /* Fire off all queue interrupts - deadlock protection */
+ /* Schedule all queue interrupts - deadlock protection */
E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->que_mask);
#endif
return;
@@ -1963,7 +1989,9 @@ igb_update_link_status(struct adapter *adapter)
struct ifnet *ifp = adapter->ifp;
device_t dev = adapter->dev;
struct tx_ring *txr = adapter->tx_rings;
- u32 link_check = 0;
+ u32 link_check, thstat, ctrl;
+
+ link_check = thstat = ctrl = 0;
/* Get the cached link value or read for real */
switch (hw->phy.media_type) {
@@ -1993,6 +2021,12 @@ igb_update_link_status(struct adapter *adapter)
break;
}
+ /* Check for thermal downshift or shutdown */
+ if (hw->mac.type == e1000_i350) {
+ thstat = E1000_READ_REG(hw, E1000_THSTAT);
+ ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ }
+
/* Now we check if a transition has happened */
if (link_check && (adapter->link_active == 0)) {
e1000_get_speed_and_duplex(&adapter->hw,
@@ -2004,6 +2038,9 @@ igb_update_link_status(struct adapter *adapter)
"Full Duplex" : "Half Duplex"));
adapter->link_active = 1;
ifp->if_baudrate = adapter->link_speed * 1000000;
+ if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
+ (thstat & E1000_THSTAT_LINK_THROTTLE))
+ device_printf(dev, "Link: thermal downshift\n");
/* This can sleep */
if_link_state_change(ifp, LINK_STATE_UP);
} else if (!link_check && (adapter->link_active == 1)) {
@@ -2011,6 +2048,9 @@ igb_update_link_status(struct adapter *adapter)
adapter->link_duplex = 0;
if (bootverbose)
device_printf(dev, "Link is Down\n");
+ if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
+ (thstat & E1000_THSTAT_PWR_DOWN))
+ device_printf(dev, "Link: thermal shutdown\n");
adapter->link_active = 0;
/* This can sleep */
if_link_state_change(ifp, LINK_STATE_DOWN);
@@ -2632,7 +2672,7 @@ igb_reset(struct adapter *adapter)
/* Set Flow control, use the tunable location if sane */
if ((igb_fc_setting >= 0) && (igb_fc_setting < 4))
- fc->requested_mode = igb_fc_setting;
+ fc->requested_mode = adapter->fc_setting;
else
fc->requested_mode = e1000_fc_none;
@@ -2646,28 +2686,31 @@ igb_reset(struct adapter *adapter)
device_printf(dev, "Hardware Initialization Failed\n");
/* Setup DMA Coalescing */
- if (hw->mac.type == e1000_i350) {
+ if ((hw->mac.type == e1000_i350) &&
+ (adapter->dma_coalesce == TRUE)) {
u32 reg;
- hwm = (pba << 10) - (2 * adapter->max_frame_size);
- /*
- * 0x80000000 - enable DMA COAL
- * 0x10000000 - use L0s as low power
- * 0x20000000 - use L1 as low power
- * X << 16 - exit dma coal when rx data exceeds X kB
- * Y - upper limit to stay in dma coal in units of 32usecs
- */
- E1000_WRITE_REG(hw, E1000_DMACR,
- 0xA0000006 | ((hwm << 6) & 0x00FF0000));
+ hwm = (pba - 4) << 10;
+ reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
+ & E1000_DMACR_DMACTHR_MASK);
+
+ /* transition to L0x or L1 if available..*/
+ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+ /* timer = +-1000 usec in 32usec intervals */
+ reg |= (1000 >> 5);
+ E1000_WRITE_REG(hw, E1000_DMACR, reg);
+
+ /* No lower threshold */
+ E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
/* set hwm to PBA - 2 * max frame size */
E1000_WRITE_REG(hw, E1000_FCRTC, hwm);
- /*
- * This sets the time to wait before requesting transition to
- * low power state to number of usecs needed to receive 1 512
- * byte frame at gigabit line rate
- */
- E1000_WRITE_REG(hw, E1000_DMCTLX, 4);
+
+ /* Set the interval before transition */
+ reg = E1000_READ_REG(hw, E1000_DMCTLX);
+ reg |= 0x800000FF; /* 255 usec */
+ E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
/* free space in tx packet buffer to wake from DMA coal */
E1000_WRITE_REG(hw, E1000_DMCTXTH,
@@ -2677,6 +2720,7 @@ igb_reset(struct adapter *adapter)
reg = E1000_READ_REG(hw, E1000_PCIEMISC);
E1000_WRITE_REG(hw, E1000_PCIEMISC,
reg | E1000_PCIEMISC_LX_DECISION);
+ device_printf(dev, "DMA Coalescing enabled\n");
}
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
@@ -3617,22 +3661,27 @@ igb_refresh_mbufs(struct rx_ring *rxr, int limit)
bus_dma_segment_t pseg[1];
struct igb_rx_buf *rxbuf;
struct mbuf *mh, *mp;
- int i, nsegs, error, cleaned;
+ int i, j, nsegs, error;
+ bool refreshed = FALSE;
- i = rxr->next_to_refresh;
- rxr->needs_refresh = FALSE;
- cleaned = -1; /* Signify no completions */
- while (i != limit) {
+ i = j = rxr->next_to_refresh;
+ /*
+ ** Get one descriptor beyond
+ ** our work mark to control
+ ** the loop.
+ */
+ if (++j == adapter->num_rx_desc)
+ j = 0;
+
+ while (j != limit) {
rxbuf = &rxr->rx_buffers[i];
/* No hdr mbuf used with header split off */
if (rxr->hdr_split == FALSE)
goto no_split;
if (rxbuf->m_head == NULL) {
mh = m_gethdr(M_DONTWAIT, MT_DATA);
- if (mh == NULL) {
- rxr->needs_refresh = TRUE;
+ if (mh == NULL)
goto update;
- }
} else
mh = rxbuf->m_head;
@@ -3658,10 +3707,8 @@ no_split:
if (rxbuf->m_pack == NULL) {
mp = m_getjcl(M_DONTWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
- if (mp == NULL) {
- rxr->needs_refresh = TRUE;
+ if (mp == NULL)
goto update;
- }
} else
mp = rxbuf->m_pack;
@@ -3681,18 +3728,17 @@ no_split:
BUS_DMASYNC_PREREAD);
rxr->rx_base[i].read.pkt_addr =
htole64(pseg[0].ds_addr);
+ refreshed = TRUE; /* I feel wefreshed :) */
- cleaned = i;
- /* Calculate next index */
- if (++i == adapter->num_rx_desc)
- i = 0;
- /* This is the work marker for refresh */
+ i = j; /* our next is precalculated */
rxr->next_to_refresh = i;
+ if (++j == adapter->num_rx_desc)
+ j = 0;
}
update:
- if (cleaned != -1) /* If we refreshed some, bump tail */
+ if (refreshed) /* update tail */
E1000_WRITE_REG(&adapter->hw,
- E1000_RDT(rxr->me), cleaned);
+ E1000_RDT(rxr->me), rxr->next_to_refresh);
return;
}
@@ -3789,7 +3835,8 @@ igb_free_receive_ring(struct rx_ring *rxr)
int i;
adapter = rxr->adapter;
- for (i = 0; i < adapter->num_rx_desc; i++) {
+ i = rxr->next_to_check;
+ while (i != rxr->next_to_refresh) {
rxbuf = &rxr->rx_buffers[i];
if (rxbuf->m_head != NULL) {
bus_dmamap_sync(rxr->htag, rxbuf->hmap,
@@ -3807,7 +3854,12 @@ igb_free_receive_ring(struct rx_ring *rxr)
}
rxbuf->m_head = NULL;
rxbuf->m_pack = NULL;
+
+ if (++i == adapter->num_rx_desc)
+ i = 0;
}
+ rxr->next_to_check = 0;
+ rxr->next_to_refresh = 0;
}
@@ -3825,32 +3877,33 @@ igb_setup_receive_ring(struct rx_ring *rxr)
struct igb_rx_buf *rxbuf;
bus_dma_segment_t pseg[1], hseg[1];
struct lro_ctrl *lro = &rxr->lro;
- int rsize, nsegs, error = 0;
+ int i, j, nsegs, error = 0;
adapter = rxr->adapter;
dev = adapter->dev;
ifp = adapter->ifp;
- /* Clear the ring contents */
IGB_RX_LOCK(rxr);
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
- bzero((void *)rxr->rx_base, rsize);
-
- /*
- ** Free current RX buffer structures and their mbufs
- */
- igb_free_receive_ring(rxr);
+ /* Invalidate all descriptors */
+ for (i = 0; i < adapter->num_rx_desc; i++) {
+ union e1000_adv_rx_desc* cur;
+ cur = &rxr->rx_base[i];
+ cur->wb.upper.status_error = 0;
+ }
/* Configure for header split? */
if (igb_header_split)
rxr->hdr_split = TRUE;
+ /* Get our indices */
+ i = j = rxr->next_to_refresh;
+ if (++j == adapter->num_rx_desc)
+ j = 0;
/* Now replenish the ring mbufs */
- for (int j = 0; j < adapter->num_rx_desc; ++j) {
+ while (j != rxr->next_to_check) {
struct mbuf *mh, *mp;
- rxbuf = &rxr->rx_buffers[j];
+ rxbuf = &rxr->rx_buffers[i];
if (rxr->hdr_split == FALSE)
goto skip_head;
@@ -3873,7 +3926,7 @@ igb_setup_receive_ring(struct rx_ring *rxr)
bus_dmamap_sync(rxr->htag,
rxbuf->hmap, BUS_DMASYNC_PREREAD);
/* Update descriptor */
- rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
+ rxr->rx_base[i].read.hdr_addr = htole64(hseg[0].ds_addr);
skip_head:
/* Now the payload cluster */
@@ -3894,12 +3947,16 @@ skip_head:
bus_dmamap_sync(rxr->ptag,
rxbuf->pmap, BUS_DMASYNC_PREREAD);
/* Update descriptor */
- rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
+ rxr->rx_base[i].read.pkt_addr = htole64(pseg[0].ds_addr);
+
+ /* Setup for next loop */
+ i = j;
+ if (++j == adapter->num_rx_desc)
+ j = 0;
}
/* Setup our descriptor indices */
- rxr->next_to_check = 0;
- rxr->next_to_refresh = 0;
+ rxr->next_to_refresh = i;
rxr->lro_enabled = FALSE;
rxr->rx_split_packets = 0;
rxr->rx_bytes = 0;
@@ -3932,6 +3989,7 @@ skip_head:
return (0);
fail:
+ rxr->next_to_refresh = i;
igb_free_receive_ring(rxr);
IGB_RX_UNLOCK(rxr);
return (error);
@@ -3994,7 +4052,7 @@ igb_initialize_receive_units(struct adapter *adapter)
/*
** Set up for header split
*/
- if (rxr->hdr_split) {
+ if (igb_header_split) {
/* Use a standard mbuf for the header */
srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
@@ -4132,9 +4190,9 @@ igb_initialize_receive_units(struct adapter *adapter)
* - needs to be after enable
*/
for (int i = 0; i < adapter->num_queues; i++) {
- E1000_WRITE_REG(hw, E1000_RDH(i), 0);
- E1000_WRITE_REG(hw, E1000_RDT(i),
- adapter->num_rx_desc - 1);
+ rxr = &adapter->rx_rings[i];
+ E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
+ E1000_WRITE_REG(hw, E1000_RDT(i), rxr->next_to_refresh);
}
return;
}
@@ -4312,10 +4370,6 @@ igb_rxeof(struct igb_queue *que, int count, int *done)
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- /* Try outstanding refresh first */
- if (rxr->needs_refresh == TRUE)
- igb_refresh_mbufs(rxr, rxr->next_to_check);
-
/* Main clean loop */
for (i = rxr->next_to_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;
@@ -4335,7 +4389,11 @@ igb_rxeof(struct igb_queue *que, int count, int *done)
rxbuf = &rxr->rx_buffers[i];
plen = le16toh(cur->wb.upper.length);
ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
- vtag = le16toh(cur->wb.upper.vlan);
+ if ((adapter->hw.mac.type == e1000_i350) &&
+ (staterr & E1000_RXDEXT_STATERR_LB))
+ vtag = be16toh(cur->wb.upper.vlan);
+ else
+ vtag = le16toh(cur->wb.upper.vlan);
hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);
@@ -4470,10 +4528,8 @@ next_desc:
}
/* Catch any remainders */
- if (processed != 0) {
+ if (processed != 0 || i == rxr->next_to_refresh)
igb_refresh_mbufs(rxr, i);
- processed = 0;
- }
rxr->next_to_check = i;
@@ -4485,19 +4541,11 @@ next_desc:
tcp_lro_flush(lro, queued);
}
- IGB_RX_UNLOCK(rxr);
-
if (done != NULL)
*done = rxdone;
- /*
- ** We still have cleaning to do?
- ** Schedule another interrupt if so.
- */
- if ((staterr & E1000_RXD_STAT_DD) != 0)
- return (TRUE);
-
- return (FALSE);
+ IGB_RX_UNLOCK(rxr);
+ return ((staterr & E1000_RXD_STAT_DD) ? TRUE : FALSE);
}
/*********************************************************************
@@ -5076,7 +5124,7 @@ igb_add_hw_stats(struct adapter *adapter)
char namebuf[QUEUE_NAME_LEN];
/* Driver Statistics */
- SYSCTL_ADD_INT(ctx, child, OID_AUTO, "link_irq",
+ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "link_irq",
CTLFLAG_RD, &adapter->link_irq, 0,
"Link MSIX IRQ Handled");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
@@ -5126,48 +5174,44 @@ igb_add_hw_stats(struct adapter *adapter)
queue_list = SYSCTL_CHILDREN(queue_node);
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
- CTLTYPE_UINT | CTLFLAG_RD, &adapter->queues[i],
+ CTLFLAG_RD, &adapter->queues[i],
sizeof(&adapter->queues[i]),
igb_sysctl_interrupt_rate_handler,
"IU", "Interrupt Rate");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
- CTLTYPE_UINT | CTLFLAG_RD, adapter,
- E1000_TDH(txr->me),
+ CTLFLAG_RD, adapter, E1000_TDH(txr->me),
igb_sysctl_reg_handler, "IU",
"Transmit Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
- CTLTYPE_UINT | CTLFLAG_RD, adapter,
- E1000_TDT(txr->me),
+ CTLFLAG_RD, adapter, E1000_TDT(txr->me),
igb_sysctl_reg_handler, "IU",
"Transmit Descriptor Tail");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD, &txr->no_desc_avail,
"Queue No Descriptor Available");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
CTLFLAG_RD, &txr->tx_packets,
"Queue Packets Transmitted");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
- CTLTYPE_UINT | CTLFLAG_RD, adapter,
- E1000_RDH(rxr->me),
+ CTLFLAG_RD, adapter, E1000_RDH(rxr->me),
igb_sysctl_reg_handler, "IU",
"Receive Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
- CTLTYPE_UINT | CTLFLAG_RD, adapter,
- E1000_RDT(rxr->me),
+ CTLFLAG_RD, adapter, E1000_RDT(rxr->me),
igb_sysctl_reg_handler, "IU",
"Receive Descriptor Tail");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
CTLFLAG_RD, &rxr->rx_packets,
"Queue Packets Received");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &rxr->rx_bytes,
"Queue Bytes Received");
- SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_queued",
CTLFLAG_RD, &lro->lro_queued, 0,
"LRO Queued");
- SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_flushed",
CTLFLAG_RD, &lro->lro_flushed, 0,
"LRO Flushed");
}
@@ -5183,164 +5227,164 @@ igb_add_hw_stats(struct adapter *adapter)
** since its not managing the metal, so to speak.
*/
if (adapter->vf_ifp) {
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
CTLFLAG_RD, &stats->gprc,
"Good Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
CTLFLAG_RD, &stats->gptc,
"Good Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
CTLFLAG_RD, &stats->gorc,
"Good Octets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
CTLFLAG_RD, &stats->gotc,
"Good Octets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
CTLFLAG_RD, &stats->mprc,
"Multicast Packets Received");
return;
}
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll",
CTLFLAG_RD, &stats->ecol,
"Excessive collisions");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll",
CTLFLAG_RD, &stats->scc,
"Single collisions");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
CTLFLAG_RD, &stats->mcc,
"Multiple collisions");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll",
CTLFLAG_RD, &stats->latecol,
"Late collisions");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count",
CTLFLAG_RD, &stats->colc,
"Collision Count");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
CTLFLAG_RD, &stats->symerrs,
"Symbol Errors");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
CTLFLAG_RD, &stats->sec,
"Sequence Errors");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count",
CTLFLAG_RD, &stats->dc,
"Defer Count");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets",
CTLFLAG_RD, &stats->mpc,
"Missed Packets");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
CTLFLAG_RD, &stats->rnbc,
"Receive No Buffers");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
CTLFLAG_RD, &stats->ruc,
"Receive Undersize");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
CTLFLAG_RD, &stats->rfc,
"Fragmented Packets Received ");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
CTLFLAG_RD, &stats->roc,
"Oversized Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
CTLFLAG_RD, &stats->rjc,
"Recevied Jabber");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs",
CTLFLAG_RD, &stats->rxerrc,
"Receive Errors");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs",
CTLFLAG_RD, &stats->crcerrs,
"CRC errors");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
CTLFLAG_RD, &stats->algnerrc,
"Alignment Errors");
/* On 82575 these are collision counts */
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
CTLFLAG_RD, &stats->cexterr,
"Collision/Carrier extension errors");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
CTLFLAG_RD, &stats->xonrxc,
"XON Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd",
CTLFLAG_RD, &stats->xontxc,
"XON Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
CTLFLAG_RD, &stats->xoffrxc,
"XOFF Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
CTLFLAG_RD, &stats->xofftxc,
"XOFF Transmitted");
/* Packet Reception Stats */
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
CTLFLAG_RD, &stats->tpr,
"Total Packets Received ");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
CTLFLAG_RD, &stats->gprc,
"Good Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
CTLFLAG_RD, &stats->bprc,
"Broadcast Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
CTLFLAG_RD, &stats->mprc,
"Multicast Packets Received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
CTLFLAG_RD, &stats->prc64,
"64 byte frames received ");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
CTLFLAG_RD, &stats->prc127,
"65-127 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
CTLFLAG_RD, &stats->prc255,
"128-255 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
CTLFLAG_RD, &stats->prc511,
"256-511 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
CTLFLAG_RD, &stats->prc1023,
"512-1023 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
CTLFLAG_RD, &stats->prc1522,
"1023-1522 byte frames received");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
CTLFLAG_RD, &stats->gorc,
"Good Octets Received");
/* Packet Transmission Stats */
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
CTLFLAG_RD, &stats->gotc,
"Good Octets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
CTLFLAG_RD, &stats->tpt,
"Total Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
CTLFLAG_RD, &stats->gptc,
"Good Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
CTLFLAG_RD, &stats->bptc,
"Broadcast Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
CTLFLAG_RD, &stats->mptc,
"Multicast Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
CTLFLAG_RD, &stats->ptc64,
"64 byte frames transmitted ");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
CTLFLAG_RD, &stats->ptc127,
"65-127 byte frames transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
CTLFLAG_RD, &stats->ptc255,
"128-255 byte frames transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
CTLFLAG_RD, &stats->ptc511,
"256-511 byte frames transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
CTLFLAG_RD, &stats->ptc1023,
"512-1023 byte frames transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
CTLFLAG_RD, &stats->ptc1522,
"1024-1522 byte frames transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd",
CTLFLAG_RD, &stats->tsctc,
"TSO Contexts Transmitted");
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
CTLFLAG_RD, &stats->tsctfc,
"TSO Contexts Failed");
@@ -5351,39 +5395,39 @@ igb_add_hw_stats(struct adapter *adapter)
CTLFLAG_RD, NULL, "Interrupt Statistics");
int_list = SYSCTL_CHILDREN(int_node);
- SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
+ SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "asserts",
CTLFLAG_RD, &stats->iac,
"Interrupt Assertion Count");
- SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
+ SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
CTLFLAG_RD, &stats->icrxptc,
"Interrupt Cause Rx Pkt Timer Expire Count");
- SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
+ SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
CTLFLAG_RD, &stats->icrxatc,
"Interrupt Cause Rx Abs Timer Expire Count");
- SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
+ SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
CTLFLAG_RD, &stats->ictxptc,
"Interrupt Cause Tx Pkt Timer Expire Count");
- SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
+ SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
CTLFLAG_RD, &stats->ictxatc,
"Interrupt Cause Tx Abs Timer Expire Count");
- SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
+ SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
CTLFLAG_RD, &stats->ictxqec,
"Interrupt Cause Tx Queue Empty Count");
- SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
+ SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
CTLFLAG_RD, &stats->ictxqmtc,
"Interrupt Cause Tx Queue Min Thresh Count");
- SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
+ SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
CTLFLAG_RD, &stats->icrxdmtc,
"Interrupt Cause Rx Desc Min Thresh Count");
- SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun",
+ SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_overrun",
CTLFLAG_RD, &stats->icrxoc,
"Interrupt Cause Receiver Overrun Count");
@@ -5395,51 +5439,51 @@ igb_add_hw_stats(struct adapter *adapter)
host_list = SYSCTL_CHILDREN(host_node);
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt",
CTLFLAG_RD, &stats->cbtmpc,
"Circuit Breaker Tx Packet Count");
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard",
CTLFLAG_RD, &stats->htdpmc,
"Host Transmit Discarded Packets");
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "rx_pkt",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_pkt",
CTLFLAG_RD, &stats->rpthc,
"Rx Packets To Host");
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts",
CTLFLAG_RD, &stats->cbrmpc,
"Circuit Breaker Rx Packet Count");
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop",
CTLFLAG_RD, &stats->cbrdpc,
"Circuit Breaker Rx Dropped Count");
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "tx_good_pkt",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_pkt",
CTLFLAG_RD, &stats->hgptc,
"Host Good Packets Tx Count");
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop",
CTLFLAG_RD, &stats->htcbdpc,
"Host Tx Circuit Breaker Dropped Count");
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "rx_good_bytes",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_good_bytes",
CTLFLAG_RD, &stats->hgorc,
"Host Good Octets Received Count");
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "tx_good_bytes",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_bytes",
CTLFLAG_RD, &stats->hgotc,
"Host Good Octets Transmit Count");
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "length_errors",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "length_errors",
CTLFLAG_RD, &stats->lenerrs,
"Length Errors");
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt",
CTLFLAG_RD, &stats->scvpc,
"SerDes/SGMII Code Violation Pkt Count");
- SYSCTL_ADD_UQUAD(ctx, host_list, OID_AUTO, "header_redir_missed",
+ SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "header_redir_missed",
CTLFLAG_RD, &stats->hrmpc,
"Header Redirection Missed Packet Count");
}
@@ -5499,7 +5543,7 @@ igb_print_nvm_info(struct adapter *adapter)
}
static void
-igb_add_rx_process_limit(struct adapter *adapter, const char *name,
+igb_set_sysctl_value(struct adapter *adapter, const char *name,
const char *description, int *limit, int value)
{
*limit = value;
@@ -5507,3 +5551,4 @@ igb_add_rx_process_limit(struct adapter *adapter, const char *name,
SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
+
diff --git a/sys/dev/e1000/if_igb.h b/sys/dev/e1000/if_igb.h
index d9f1cc3..b5a74a0 100644
--- a/sys/dev/e1000/if_igb.h
+++ b/sys/dev/e1000/if_igb.h
@@ -322,7 +322,6 @@ struct rx_ring {
bool lro_enabled;
bool hdr_split;
bool discard;
- bool needs_refresh;
struct mtx rx_mtx;
char mtx_name[16];
u32 next_to_refresh;
@@ -401,6 +400,8 @@ struct adapter {
u16 link_speed;
u16 link_duplex;
u32 smartspeed;
+ u32 fc_setting;
+ u32 dma_coalesce;
/* Interface queues */
struct igb_queue *queues;
diff --git a/sys/dev/e1000/if_lem.h b/sys/dev/e1000/if_lem.h
index e866d07e..f37a42e 100644
--- a/sys/dev/e1000/if_lem.h
+++ b/sys/dev/e1000/if_lem.h
@@ -217,6 +217,10 @@
#define EM_BAR_MEM_TYPE_64BIT 0x00000004
#define EM_MSIX_BAR 3 /* On 82575 */
+#if !defined(SYSTCL_ADD_UQUAD)
+#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
+#endif
+
/* Defines for printing debug information */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0