-rw-r--r--  share/man/man4/em.4                |  12
-rw-r--r--  sys/dev/e1000/e1000_80003es2lan.c  |   2
-rw-r--r--  sys/dev/e1000/e1000_80003es2lan.h  |   2
-rw-r--r--  sys/dev/e1000/e1000_82540.c        |   2
-rw-r--r--  sys/dev/e1000/e1000_82541.c        | 292
-rw-r--r--  sys/dev/e1000/e1000_82541.h        |  76
-rw-r--r--  sys/dev/e1000/e1000_82542.c        |  40
-rw-r--r--  sys/dev/e1000/e1000_82543.c        | 149
-rw-r--r--  sys/dev/e1000/e1000_82543.h        |  28
-rw-r--r--  sys/dev/e1000/e1000_82571.c        |   2
-rw-r--r--  sys/dev/e1000/e1000_82571.h        |   7
-rw-r--r--  sys/dev/e1000/e1000_82575.c        |   6
-rw-r--r--  sys/dev/e1000/e1000_82575.h        |   2
-rw-r--r--  sys/dev/e1000/e1000_api.c          |   2
-rw-r--r--  sys/dev/e1000/e1000_api.h          |  18
-rw-r--r--  sys/dev/e1000/e1000_defines.h      |   2
-rw-r--r--  sys/dev/e1000/e1000_hw.h           |   4
-rw-r--r--  sys/dev/e1000/e1000_i210.c         |   2
-rw-r--r--  sys/dev/e1000/e1000_i210.h         |   2
-rw-r--r--  sys/dev/e1000/e1000_ich8lan.c      |  24
-rw-r--r--  sys/dev/e1000/e1000_ich8lan.h      |   5
-rw-r--r--  sys/dev/e1000/e1000_mac.c          |   2
-rw-r--r--  sys/dev/e1000/e1000_mac.h          |   2
-rw-r--r--  sys/dev/e1000/e1000_manage.c       |   3
-rw-r--r--  sys/dev/e1000/e1000_manage.h       |   2
-rw-r--r--  sys/dev/e1000/e1000_mbx.c          |   2
-rw-r--r--  sys/dev/e1000/e1000_mbx.h          |   2
-rw-r--r--  sys/dev/e1000/e1000_nvm.c          |   2
-rw-r--r--  sys/dev/e1000/e1000_nvm.h          |   2
-rw-r--r--  sys/dev/e1000/e1000_osdep.c        |   2
-rw-r--r--  sys/dev/e1000/e1000_osdep.h        |   2
-rw-r--r--  sys/dev/e1000/e1000_phy.c          |  13
-rw-r--r--  sys/dev/e1000/e1000_phy.h          |   2
-rw-r--r--  sys/dev/e1000/e1000_regs.h         |   6
-rw-r--r--  sys/dev/e1000/e1000_vf.c           |   2
-rw-r--r--  sys/dev/e1000/e1000_vf.h           |   2
-rw-r--r--  sys/dev/e1000/if_em.c              | 476
-rw-r--r--  sys/dev/e1000/if_em.h              |  29
-rw-r--r--  sys/dev/e1000/if_igb.c             |  25
-rw-r--r--  sys/dev/e1000/if_igb.h             |  19
-rw-r--r--  sys/dev/e1000/if_lem.c             |  26
-rw-r--r--  sys/dev/e1000/if_lem.h             |  48
-rw-r--r--  sys/dev/ixgb/if_ixgb.c             |   1
-rw-r--r--  sys/dev/netmap/if_em_netmap.h      |  16
44 files changed, 710 insertions(+), 655 deletions(-)
diff --git a/share/man/man4/em.4 b/share/man/man4/em.4
index a06261c..7901331 100644
--- a/share/man/man4/em.4
+++ b/share/man/man4/em.4
@@ -31,7 +31,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd October 11, 2011
+.Dd August 16, 2015
.Dt EM 4
.Os
.Sh NAME
@@ -205,6 +205,11 @@ Tunables can be set at the
prompt before booting the kernel or stored in
.Xr loader.conf 5 .
.Bl -tag -width indent
+.It Va hw.em.disable_crc_stripping
+Disable or enable hardware stripping of CRC field.
+This is mostly useful on BMC/IPMI shared interfaces where stripping the CRC
+causes remote access over IPMI to fail.
+Default 0 (enabled).
.It Va hw.em.eee_setting
Disable or enable Energy Efficient Ethernet.
Default 1 (disabled).
@@ -307,10 +312,5 @@ The
driver was written by
.An Intel Corporation Aq freebsd@intel.com .
.Sh BUGS
-Hardware-assisted VLAN processing is disabled by default.
-You can enable it on an
-.Nm
-interface using
-.Xr ifconfig 8 .
.Pp
Activating EM_MULTIQUEUE support requires MSI-X features.
diff --git a/sys/dev/e1000/e1000_80003es2lan.c b/sys/dev/e1000/e1000_80003es2lan.c
index 076e02b..b948bb4 100644
--- a/sys/dev/e1000/e1000_80003es2lan.c
+++ b/sys/dev/e1000/e1000_80003es2lan.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_80003es2lan.h b/sys/dev/e1000/e1000_80003es2lan.h
index 3807e46..89b1551 100644
--- a/sys/dev/e1000/e1000_80003es2lan.h
+++ b/sys/dev/e1000/e1000_80003es2lan.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_82540.c b/sys/dev/e1000/e1000_82540.c
index 141b92e..68f92c6 100644
--- a/sys/dev/e1000/e1000_82540.c
+++ b/sys/dev/e1000/e1000_82540.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2011, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_82541.c b/sys/dev/e1000/e1000_82541.c
index 781aa93..5961555 100644
--- a/sys/dev/e1000/e1000_82541.c
+++ b/sys/dev/e1000/e1000_82541.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2011, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -49,34 +49,34 @@ static s32 e1000_init_mac_params_82541(struct e1000_hw *hw);
static s32 e1000_reset_hw_82541(struct e1000_hw *hw);
static s32 e1000_init_hw_82541(struct e1000_hw *hw);
static s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed,
- u16 *duplex);
+ u16 *duplex);
static s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_82541(struct e1000_hw *hw);
static s32 e1000_check_for_link_82541(struct e1000_hw *hw);
static s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw);
static s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw,
- bool active);
+ bool active);
static s32 e1000_setup_led_82541(struct e1000_hw *hw);
static s32 e1000_cleanup_led_82541(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw);
static s32 e1000_read_mac_addr_82541(struct e1000_hw *hw);
static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
- bool link_up);
+ bool link_up);
static s32 e1000_phy_init_script_82541(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82541(struct e1000_hw *hw);
-static const u16 e1000_igp_cable_length_table[] =
- { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
- 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
- 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
- 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
- 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
- 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
- 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
+static const u16 e1000_igp_cable_length_table[] = {
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10,
+ 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, 25, 25, 25, 25, 30, 30, 30, 30,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 50, 50, 50, 50, 50, 50, 50, 60, 60,
+ 60, 60, 60, 60, 60, 60, 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80,
+ 80, 90, 90, 90, 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 110, 110, 110, 110, 110, 110,
+ 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 120, 120,
+ 120, 120, 120, 120, 120, 120, 120, 120};
#define IGP01E1000_AGC_LENGTH_TABLE_SIZE \
- (sizeof(e1000_igp_cable_length_table) / \
- sizeof(e1000_igp_cable_length_table[0]))
+ (sizeof(e1000_igp_cable_length_table) / \
+ sizeof(e1000_igp_cable_length_table[0]))
/**
* e1000_init_phy_params_82541 - Init PHY func ptrs.
@@ -89,23 +89,23 @@ static s32 e1000_init_phy_params_82541(struct e1000_hw *hw)
DEBUGFUNC("e1000_init_phy_params_82541");
- phy->addr = 1;
- phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- phy->reset_delay_us = 10000;
- phy->type = e1000_phy_igp;
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 10000;
+ phy->type = e1000_phy_igp;
/* Function Pointers */
- phy->ops.check_polarity = e1000_check_polarity_igp;
- phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
- phy->ops.get_cable_length = e1000_get_cable_length_igp_82541;
- phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
- phy->ops.get_info = e1000_get_phy_info_igp;
- phy->ops.read_reg = e1000_read_phy_reg_igp;
- phy->ops.reset = e1000_phy_hw_reset_82541;
- phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82541;
- phy->ops.write_reg = e1000_write_phy_reg_igp;
- phy->ops.power_up = e1000_power_up_phy_copper;
- phy->ops.power_down = e1000_power_down_phy_copper_82541;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_82541;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.reset = e1000_phy_hw_reset_82541;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82541;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82541;
ret_val = e1000_get_phy_id(hw);
if (ret_val)
@@ -127,8 +127,8 @@ out:
**/
static s32 e1000_init_nvm_params_82541(struct e1000_hw *hw)
{
- struct e1000_nvm_info *nvm = &hw->nvm;
- s32 ret_val = E1000_SUCCESS;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = E1000_SUCCESS;
u32 eecd = E1000_READ_REG(hw, E1000_EECD);
u16 size;
@@ -152,28 +152,25 @@ static s32 e1000_init_nvm_params_82541(struct e1000_hw *hw)
eecd &= ~E1000_EECD_SIZE;
break;
default:
- nvm->type = eecd & E1000_EECD_TYPE
- ? e1000_nvm_eeprom_spi
- : e1000_nvm_eeprom_microwire;
+ nvm->type = eecd & E1000_EECD_TYPE ? e1000_nvm_eeprom_spi
+ : e1000_nvm_eeprom_microwire;
break;
}
if (nvm->type == e1000_nvm_eeprom_spi) {
- nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS)
- ? 16 : 8;
- nvm->delay_usec = 1;
- nvm->opcode_bits = 8;
- nvm->page_size = (eecd & E1000_EECD_ADDR_BITS)
- ? 32 : 8;
+ nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 16 : 8;
+ nvm->delay_usec = 1;
+ nvm->opcode_bits = 8;
+ nvm->page_size = (eecd & E1000_EECD_ADDR_BITS) ? 32 : 8;
/* Function Pointers */
- nvm->ops.acquire = e1000_acquire_nvm_generic;
- nvm->ops.read = e1000_read_nvm_spi;
- nvm->ops.release = e1000_release_nvm_generic;
- nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.acquire = e1000_acquire_nvm_generic;
+ nvm->ops.read = e1000_read_nvm_spi;
+ nvm->ops.release = e1000_release_nvm_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
nvm->ops.valid_led_default = e1000_valid_led_default_generic;
- nvm->ops.validate = e1000_validate_nvm_checksum_generic;
- nvm->ops.write = e1000_write_nvm_spi;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_spi;
/*
* nvm->word_size must be discovered after the pointers
@@ -196,21 +193,19 @@ static s32 e1000_init_nvm_params_82541(struct e1000_hw *hw)
nvm->word_size = 1 << size;
}
} else {
- nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS)
- ? 8 : 6;
- nvm->delay_usec = 50;
- nvm->opcode_bits = 3;
- nvm->word_size = (eecd & E1000_EECD_ADDR_BITS)
- ? 256 : 64;
+ nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 8 : 6;
+ nvm->delay_usec = 50;
+ nvm->opcode_bits = 3;
+ nvm->word_size = (eecd & E1000_EECD_ADDR_BITS) ? 256 : 64;
/* Function Pointers */
- nvm->ops.acquire = e1000_acquire_nvm_generic;
- nvm->ops.read = e1000_read_nvm_microwire;
- nvm->ops.release = e1000_release_nvm_generic;
- nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.acquire = e1000_acquire_nvm_generic;
+ nvm->ops.read = e1000_read_nvm_microwire;
+ nvm->ops.release = e1000_release_nvm_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
nvm->ops.valid_led_default = e1000_valid_led_default_generic;
- nvm->ops.validate = e1000_validate_nvm_checksum_generic;
- nvm->ops.write = e1000_write_nvm_microwire;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_microwire;
}
out:
@@ -390,11 +385,10 @@ static s32 e1000_init_hw_82541(struct e1000_hw *hw)
DEBUGOUT("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
}
-
+
/* Storing the Speed Power Down value for later use */
- ret_val = hw->phy.ops.read_reg(hw,
- IGP01E1000_GMII_FIFO,
- &dev_spec->spd_default);
+ ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO,
+ &dev_spec->spd_default);
if (ret_val)
goto out;
@@ -423,7 +417,7 @@ static s32 e1000_init_hw_82541(struct e1000_hw *hw)
txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB;
+ E1000_TXDCTL_FULL_TX_DESC_WB;
E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
/*
@@ -447,7 +441,7 @@ out:
* Retrieve the current speed and duplex configuration.
**/
static s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed,
- u16 *duplex)
+ u16 *duplex)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
@@ -549,6 +543,7 @@ static s32 e1000_setup_copper_link_82541(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
/* Earlier revs of the IGP phy require us to force MDI. */
if (hw->mac.type == e1000_82541 || hw->mac.type == e1000_82547) {
dev_spec->dsp_config = e1000_dsp_config_disabled;
@@ -651,9 +646,8 @@ static s32 e1000_check_for_link_82541(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000_config_fc_after_link_up_generic(hw);
- if (ret_val) {
+ if (ret_val)
DEBUGOUT("Error configuring flow control\n");
- }
out:
return ret_val;
@@ -671,7 +665,7 @@ out:
* gigabit link is achieved to improve link quality.
**/
static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
- bool link_up)
+ bool link_up)
{
struct e1000_phy_info *phy = &hw->phy;
struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
@@ -679,11 +673,11 @@ static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
u32 idle_errs = 0;
u16 phy_data, phy_saved_data, speed, duplex, i;
u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
- u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
- {IGP01E1000_PHY_AGC_PARAM_A,
- IGP01E1000_PHY_AGC_PARAM_B,
- IGP01E1000_PHY_AGC_PARAM_C,
- IGP01E1000_PHY_AGC_PARAM_D};
+ u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D};
DEBUGFUNC("e1000_config_dsp_after_link_change_82541");
@@ -708,16 +702,16 @@ static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
ret_val = phy->ops.read_reg(hw,
- dsp_reg_array[i],
- &phy_data);
+ dsp_reg_array[i],
+ &phy_data);
if (ret_val)
goto out;
phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
ret_val = phy->ops.write_reg(hw,
- dsp_reg_array[i],
- phy_data);
+ dsp_reg_array[i],
+ phy_data);
if (ret_val)
goto out;
}
@@ -737,9 +731,8 @@ static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
for (i = 0; i < ffe_idle_err_timeout; i++) {
usec_delay(1000);
- ret_val = phy->ops.read_reg(hw,
- PHY_1000T_STATUS,
- &phy_data);
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
if (ret_val)
goto out;
@@ -748,8 +741,8 @@ static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
dev_spec->ffe_config = e1000_ffe_config_active;
ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_DSP_FFE,
- IGP01E1000_PHY_DSP_FFE_CM_CP);
+ IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_CM_CP);
if (ret_val)
goto out;
break;
@@ -757,7 +750,7 @@ static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
if (idle_errs)
ffe_idle_err_timeout =
- FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+ FFE_IDLE_ERR_COUNT_TIMEOUT_100;
}
} else {
if (dev_spec->dsp_config == e1000_dsp_config_activated) {
@@ -765,9 +758,8 @@ static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
* Save off the current value of register 0x2F5B
* to be restored at the end of the routines.
*/
- ret_val = phy->ops.read_reg(hw,
- 0x2F5B,
- &phy_saved_data);
+ ret_val = phy->ops.read_reg(hw, 0x2F5B,
+ &phy_saved_data);
if (ret_val)
goto out;
@@ -778,15 +770,14 @@ static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
msec_delay_irq(20);
- ret_val = phy->ops.write_reg(hw,
- 0x0000,
- IGP01E1000_IEEE_FORCE_GIG);
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIG);
if (ret_val)
goto out;
for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
ret_val = phy->ops.read_reg(hw,
- dsp_reg_array[i],
- &phy_data);
+ dsp_reg_array[i],
+ &phy_data);
if (ret_val)
goto out;
@@ -794,24 +785,22 @@ static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
ret_val = phy->ops.write_reg(hw,
- dsp_reg_array[i],
- phy_data);
+ dsp_reg_array[i],
+ phy_data);
if (ret_val)
goto out;
}
- ret_val = phy->ops.write_reg(hw,
- 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
goto out;
msec_delay_irq(20);
/* Now enable the transmitter */
- ret_val = phy->ops.write_reg(hw,
- 0x2F5B,
- phy_saved_data);
+ ret_val = phy->ops.write_reg(hw, 0x2F5B,
+ phy_saved_data);
if (ret_val)
goto out;
@@ -838,21 +827,18 @@ static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
msec_delay_irq(20);
- ret_val = phy->ops.write_reg(hw,
- 0x0000,
- IGP01E1000_IEEE_FORCE_GIG);
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIG);
if (ret_val)
goto out;
- ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_DSP_FFE,
- IGP01E1000_PHY_DSP_FFE_DEFAULT);
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_DEFAULT);
if (ret_val)
goto out;
- ret_val = phy->ops.write_reg(hw,
- 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
goto out;
@@ -889,11 +875,10 @@ static s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw)
u16 i, data;
u16 cur_agc_value, agc_value = 0;
u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
- u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
- {IGP01E1000_PHY_AGC_A,
- IGP01E1000_PHY_AGC_B,
- IGP01E1000_PHY_AGC_C,
- IGP01E1000_PHY_AGC_D};
+ u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D};
DEBUGFUNC("e1000_get_cable_length_igp_82541");
@@ -929,12 +914,12 @@ static s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw)
}
phy->min_cable_length = (e1000_igp_cable_length_table[agc_value] >
- IGP01E1000_AGC_RANGE)
- ? (e1000_igp_cable_length_table[agc_value] -
- IGP01E1000_AGC_RANGE)
- : 0;
+ IGP01E1000_AGC_RANGE)
+ ? (e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE)
+ : 0;
phy->max_cable_length = e1000_igp_cable_length_table[agc_value] +
- IGP01E1000_AGC_RANGE;
+ IGP01E1000_AGC_RANGE;
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
@@ -992,50 +977,48 @@ static s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active)
*/
if (phy->smart_speed == e1000_smart_speed_on) {
ret_val = phy->ops.read_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- &data);
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
if (ret_val)
goto out;
data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- data);
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
if (ret_val)
goto out;
} else if (phy->smart_speed == e1000_smart_speed_off) {
ret_val = phy->ops.read_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- &data);
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
if (ret_val)
goto out;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- data);
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
if (ret_val)
goto out;
}
} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
- (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
- (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
data |= IGP01E1000_GMII_FLEX_SPD;
ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data);
if (ret_val)
goto out;
/* When LPLU is enabled, we should disable SmartSpeed */
- ret_val = phy->ops.read_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- &data);
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
if (ret_val)
goto out;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- data);
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
}
out:
@@ -1056,16 +1039,14 @@ static s32 e1000_setup_led_82541(struct e1000_hw *hw)
DEBUGFUNC("e1000_setup_led_82541");
- ret_val = hw->phy.ops.read_reg(hw,
- IGP01E1000_GMII_FIFO,
- &dev_spec->spd_default);
+ ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO,
+ &dev_spec->spd_default);
if (ret_val)
goto out;
- ret_val = hw->phy.ops.write_reg(hw,
- IGP01E1000_GMII_FIFO,
- (u16)(dev_spec->spd_default &
- ~IGP01E1000_GMII_SPD));
+ ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO,
+ (u16)(dev_spec->spd_default &
+ ~IGP01E1000_GMII_SPD));
if (ret_val)
goto out;
@@ -1089,9 +1070,8 @@ static s32 e1000_cleanup_led_82541(struct e1000_hw *hw)
DEBUGFUNC("e1000_cleanup_led_82541");
- ret_val = hw->phy.ops.write_reg(hw,
- IGP01E1000_GMII_FIFO,
- dev_spec->spd_default);
+ ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO,
+ dev_spec->spd_default);
if (ret_val)
goto out;
@@ -1178,14 +1158,12 @@ static s32 e1000_phy_init_script_82541(struct e1000_hw *hw)
u16 fused, fine, coarse;
/* Move to analog registers page */
- hw->phy.ops.read_reg(hw,
- IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
- &fused);
+ hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
+ &fused);
if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
- hw->phy.ops.read_reg(hw,
- IGP01E1000_ANALOG_FUSE_STATUS,
- &fused);
+ hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS,
+ &fused);
fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
@@ -1194,19 +1172,19 @@ static s32 e1000_phy_init_script_82541(struct e1000_hw *hw)
coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
} else if (coarse ==
- IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+ IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
- (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
- (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+ (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+ (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
hw->phy.ops.write_reg(hw,
- IGP01E1000_ANALOG_FUSE_CONTROL,
- fused);
+ IGP01E1000_ANALOG_FUSE_CONTROL,
+ fused);
hw->phy.ops.write_reg(hw,
- IGP01E1000_ANALOG_FUSE_BYPASS,
- IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+ IGP01E1000_ANALOG_FUSE_BYPASS,
+ IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
}
}
diff --git a/sys/dev/e1000/e1000_82541.h b/sys/dev/e1000/e1000_82541.h
index 3b6b961..7a01fd4 100644
--- a/sys/dev/e1000/e1000_82541.h
+++ b/sys/dev/e1000/e1000_82541.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2008, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -37,55 +37,55 @@
#define NVM_WORD_SIZE_BASE_SHIFT_82541 (NVM_WORD_SIZE_BASE_SHIFT + 1)
-#define IGP01E1000_PHY_CHANNEL_NUM 4
+#define IGP01E1000_PHY_CHANNEL_NUM 4
-#define IGP01E1000_PHY_AGC_A 0x1172
-#define IGP01E1000_PHY_AGC_B 0x1272
-#define IGP01E1000_PHY_AGC_C 0x1472
-#define IGP01E1000_PHY_AGC_D 0x1872
+#define IGP01E1000_PHY_AGC_A 0x1172
+#define IGP01E1000_PHY_AGC_B 0x1272
+#define IGP01E1000_PHY_AGC_C 0x1472
+#define IGP01E1000_PHY_AGC_D 0x1872
-#define IGP01E1000_PHY_AGC_PARAM_A 0x1171
-#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
-#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
-#define IGP01E1000_PHY_AGC_PARAM_D 0x1871
+#define IGP01E1000_PHY_AGC_PARAM_A 0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D 0x1871
-#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000
-#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
-#define IGP01E1000_PHY_DSP_RESET 0x1F33
+#define IGP01E1000_PHY_DSP_RESET 0x1F33
-#define IGP01E1000_PHY_DSP_FFE 0x1F35
-#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069
-#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A
+#define IGP01E1000_PHY_DSP_FFE 0x1F35
+#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A
-#define IGP01E1000_IEEE_FORCE_GIG 0x0140
-#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
+#define IGP01E1000_IEEE_FORCE_GIG 0x0140
+#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
-#define IGP01E1000_AGC_LENGTH_SHIFT 7
-#define IGP01E1000_AGC_RANGE 10
+#define IGP01E1000_AGC_LENGTH_SHIFT 7
+#define IGP01E1000_AGC_RANGE 10
-#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20
-#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
-#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0
-#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1
-#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC
-#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE
+#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1
+#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE
-#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100
-#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80
-#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070
-#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040
-#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010
-#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
-#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
-#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000
#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002
-#define IGP01E1000_MSE_CHANNEL_D 0x000F
-#define IGP01E1000_MSE_CHANNEL_C 0x00F0
-#define IGP01E1000_MSE_CHANNEL_B 0x0F00
-#define IGP01E1000_MSE_CHANNEL_A 0xF000
+#define IGP01E1000_MSE_CHANNEL_D 0x000F
+#define IGP01E1000_MSE_CHANNEL_C 0x00F0
+#define IGP01E1000_MSE_CHANNEL_B 0x0F00
+#define IGP01E1000_MSE_CHANNEL_A 0xF000
void e1000_init_script_state_82541(struct e1000_hw *hw, bool state);
diff --git a/sys/dev/e1000/e1000_82542.c b/sys/dev/e1000/e1000_82542.c
index 19d5402..b2d676e 100644
--- a/sys/dev/e1000/e1000_82542.c
+++ b/sys/dev/e1000/e1000_82542.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -62,7 +62,7 @@ static s32 e1000_init_phy_params_82542(struct e1000_hw *hw)
DEBUGFUNC("e1000_init_phy_params_82542");
- phy->type = e1000_phy_none;
+ phy->type = e1000_phy_none;
return ret_val;
}
@@ -77,18 +77,18 @@ static s32 e1000_init_nvm_params_82542(struct e1000_hw *hw)
DEBUGFUNC("e1000_init_nvm_params_82542");
- nvm->address_bits = 6;
- nvm->delay_usec = 50;
- nvm->opcode_bits = 3;
- nvm->type = e1000_nvm_eeprom_microwire;
- nvm->word_size = 64;
+ nvm->address_bits = 6;
+ nvm->delay_usec = 50;
+ nvm->opcode_bits = 3;
+ nvm->type = e1000_nvm_eeprom_microwire;
+ nvm->word_size = 64;
/* Function Pointers */
- nvm->ops.read = e1000_read_nvm_microwire;
- nvm->ops.release = e1000_stop_nvm;
- nvm->ops.write = e1000_write_nvm_microwire;
- nvm->ops.update = e1000_update_nvm_checksum_generic;
- nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.read = e1000_read_nvm_microwire;
+ nvm->ops.release = e1000_stop_nvm;
+ nvm->ops.write = e1000_write_nvm_microwire;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
return E1000_SUCCESS;
}
@@ -124,7 +124,8 @@ static s32 e1000_init_mac_params_82542(struct e1000_hw *hw)
/* link setup */
mac->ops.setup_link = e1000_setup_link_82542;
/* phy/fiber/serdes setup */
- mac->ops.setup_physical_interface = e1000_setup_fiber_serdes_link_generic;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_generic;
/* check for link */
mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
/* multicast address update */
@@ -143,7 +144,8 @@ static s32 e1000_init_mac_params_82542(struct e1000_hw *hw)
/* clear hardware counters */
mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82542;
/* link info */
- mac->ops.get_link_up_info = e1000_get_speed_and_duplex_fiber_serdes_generic;
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_fiber_serdes_generic;
return E1000_SUCCESS;
}
@@ -325,7 +327,7 @@ static s32 e1000_setup_link_82542(struct e1000_hw *hw)
hw->fc.requested_mode &= ~e1000_fc_tx_pause;
- if (mac->report_tx_early == 1)
+ if (mac->report_tx_early)
hw->fc.requested_mode &= ~e1000_fc_rx_pause;
/*
@@ -335,7 +337,7 @@ static s32 e1000_setup_link_82542(struct e1000_hw *hw)
hw->fc.current_mode = hw->fc.requested_mode;
DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
- hw->fc.current_mode);
+ hw->fc.current_mode);
/* Call the necessary subroutine to configure the link. */
ret_val = mac->ops.setup_physical_interface(hw);
@@ -419,9 +421,8 @@ static int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index)
* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
- rar_low = ((u32) addr[0] |
- ((u32) addr[1] << 8) |
- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
@@ -431,6 +432,7 @@ static int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index)
E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+
return E1000_SUCCESS;
}
diff --git a/sys/dev/e1000/e1000_82543.c b/sys/dev/e1000/e1000_82543.c
index 1c01658..b9a53bd 100644
--- a/sys/dev/e1000/e1000_82543.c
+++ b/sys/dev/e1000/e1000_82543.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2011, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -47,9 +47,9 @@ static s32 e1000_init_phy_params_82543(struct e1000_hw *hw);
static s32 e1000_init_nvm_params_82543(struct e1000_hw *hw);
static s32 e1000_init_mac_params_82543(struct e1000_hw *hw);
static s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset,
- u16 *data);
+ u16 *data);
static s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset,
- u16 data);
+ u16 data);
static s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw);
static s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw);
static s32 e1000_reset_hw_82543(struct e1000_hw *hw);
@@ -62,7 +62,7 @@ static s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw);
static s32 e1000_led_on_82543(struct e1000_hw *hw);
static s32 e1000_led_off_82543(struct e1000_hw *hw);
static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset,
- u32 value);
+ u32 value);
static void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw);
static s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw);
static bool e1000_init_phy_disabled_82543(struct e1000_hw *hw);
@@ -71,7 +71,7 @@ static s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw);
static void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl);
static u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw);
static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
- u16 count);
+ u16 count);
static bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw);
static void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state);
static s32 e1000_read_mac_addr_82543(struct e1000_hw *hw);
@@ -89,34 +89,34 @@ static s32 e1000_init_phy_params_82543(struct e1000_hw *hw)
DEBUGFUNC("e1000_init_phy_params_82543");
if (hw->phy.media_type != e1000_media_type_copper) {
- phy->type = e1000_phy_none;
+ phy->type = e1000_phy_none;
goto out;
} else {
- phy->ops.power_up = e1000_power_up_phy_copper;
- phy->ops.power_down = e1000_power_down_phy_copper;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper;
}
- phy->addr = 1;
- phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- phy->reset_delay_us = 10000;
- phy->type = e1000_phy_m88;
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 10000;
+ phy->type = e1000_phy_m88;
/* Function Pointers */
- phy->ops.check_polarity = e1000_check_polarity_m88;
- phy->ops.commit = e1000_phy_sw_reset_generic;
- phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82543;
- phy->ops.get_cable_length = e1000_get_cable_length_m88;
- phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
- phy->ops.read_reg = (hw->mac.type == e1000_82543)
- ? e1000_read_phy_reg_82543
- : e1000_read_phy_reg_m88;
- phy->ops.reset = (hw->mac.type == e1000_82543)
- ? e1000_phy_hw_reset_82543
- : e1000_phy_hw_reset_generic;
- phy->ops.write_reg = (hw->mac.type == e1000_82543)
- ? e1000_write_phy_reg_82543
- : e1000_write_phy_reg_m88;
- phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82543;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.read_reg = (hw->mac.type == e1000_82543)
+ ? e1000_read_phy_reg_82543
+ : e1000_read_phy_reg_m88;
+ phy->ops.reset = (hw->mac.type == e1000_82543)
+ ? e1000_phy_hw_reset_82543
+ : e1000_phy_hw_reset_generic;
+ phy->ops.write_reg = (hw->mac.type == e1000_82543)
+ ? e1000_write_phy_reg_82543
+ : e1000_write_phy_reg_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
/*
* The external PHY of the 82543 can be in a funky state.
@@ -170,18 +170,18 @@ static s32 e1000_init_nvm_params_82543(struct e1000_hw *hw)
DEBUGFUNC("e1000_init_nvm_params_82543");
- nvm->type = e1000_nvm_eeprom_microwire;
- nvm->word_size = 64;
- nvm->delay_usec = 50;
- nvm->address_bits = 6;
- nvm->opcode_bits = 3;
+ nvm->type = e1000_nvm_eeprom_microwire;
+ nvm->word_size = 64;
+ nvm->delay_usec = 50;
+ nvm->address_bits = 6;
+ nvm->opcode_bits = 3;
/* Function Pointers */
- nvm->ops.read = e1000_read_nvm_microwire;
- nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.read = e1000_read_nvm_microwire;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
nvm->ops.valid_led_default = e1000_valid_led_default_generic;
- nvm->ops.validate = e1000_validate_nvm_checksum_generic;
- nvm->ops.write = e1000_write_nvm_microwire;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_microwire;
return E1000_SUCCESS;
}
@@ -226,19 +226,18 @@ static s32 e1000_init_mac_params_82543(struct e1000_hw *hw)
mac->ops.setup_link = e1000_setup_link_82543;
/* physical interface setup */
mac->ops.setup_physical_interface =
- (hw->phy.media_type == e1000_media_type_copper)
- ? e1000_setup_copper_link_82543
- : e1000_setup_fiber_link_82543;
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_setup_copper_link_82543 : e1000_setup_fiber_link_82543;
/* check for link */
mac->ops.check_for_link =
- (hw->phy.media_type == e1000_media_type_copper)
- ? e1000_check_for_copper_link_82543
- : e1000_check_for_fiber_link_82543;
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_check_for_copper_link_82543
+ : e1000_check_for_fiber_link_82543;
/* link info */
mac->ops.get_link_up_info =
- (hw->phy.media_type == e1000_media_type_copper)
- ? e1000_get_speed_and_duplex_copper_generic
- : e1000_get_speed_and_duplex_fiber_serdes_generic;
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_get_speed_and_duplex_copper_generic
+ : e1000_get_speed_and_duplex_fiber_serdes_generic;
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
/* writing VFTA */
@@ -295,8 +294,7 @@ static bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw)
goto out;
}
- state = (dev_spec->tbi_compatibility & TBI_COMPAT_ENABLED)
- ? TRUE : FALSE;
+ state = !!(dev_spec->tbi_compatibility & TBI_COMPAT_ENABLED);
out:
return state;
@@ -348,8 +346,7 @@ bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw)
goto out;
}
- state = (dev_spec->tbi_compatibility & TBI_SBP_ENABLED)
- ? TRUE : FALSE;
+ state = !!(dev_spec->tbi_compatibility & TBI_SBP_ENABLED);
out:
return state;
@@ -412,8 +409,8 @@ out:
* Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
**/
void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
- struct e1000_hw_stats *stats, u32 frame_len,
- u8 *mac_addr, u32 max_frame_size)
+ struct e1000_hw_stats *stats, u32 frame_len,
+ u8 *mac_addr, u32 max_frame_size)
{
if (!(e1000_tbi_sbp_enabled_82543(hw)))
goto out;
@@ -425,12 +422,12 @@ void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
* counters overcount this packet as a CRC error and undercount
* the packet as a good packet
*/
- /* This packet should not be counted as a CRC error. */
+ /* This packet should not be counted as a CRC error. */
stats->crcerrs--;
- /* This packet does count as a Good Packet Received. */
+ /* This packet does count as a Good Packet Received. */
stats->gprc++;
- /* Adjust the Good Octets received counters */
+ /* Adjust the Good Octets received counters */
stats->gorc += frame_len;
/*
@@ -446,7 +443,7 @@ void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
stats->mprc++;
/*
- * In this case, the hardware has overcounted the number of
+ * In this case, the hardware has over counted the number of
* oversize frames.
*/
if ((frame_len == max_frame_size) && (stats->roc > 0))
@@ -513,7 +510,7 @@ static s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 *data)
* e1000_shift_out_mdi_bits routine five different times. The format
* of an MII read instruction consists of a shift out of 14 bits and
* is defined as follows:
- * <Preamble><SOF><Op Code><Phy Addr><Offset>
+ * <Preamble><SOF><Op Code><Phy Addr><Offset>
* followed by a shift in of 18 bits. This first two bits shifted in
* are TurnAround bits used to avoid contention on the MDIO pin when a
* READ operation is performed. These two bits are thrown away
@@ -572,9 +569,9 @@ static s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 data)
* <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
*/
mdic = ((PHY_TURNAROUND) | (offset << 2) | (hw->phy.addr << 7) |
- (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
mdic <<= 16;
- mdic |= (u32) data;
+ mdic |= (u32)data;
e1000_shift_out_mdi_bits_82543(hw, mdic, 32);
@@ -631,7 +628,7 @@ static void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
* In order to do this, "data" must be broken down into bits.
**/
static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
- u16 count)
+ u16 count)
{
u32 ctrl, mask;
@@ -642,7 +639,7 @@ static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
* into bits.
*/
mask = 0x01;
- mask <<= (count -1);
+ mask <<= (count - 1);
ctrl = E1000_READ_REG(hw, E1000_CTRL);
@@ -656,8 +653,10 @@ static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
* A "0" is shifted out to the PHY by setting the MDIO bit to
* "0" and then raising and lowering the clock.
*/
- if (data & mask) ctrl |= E1000_CTRL_MDIO;
- else ctrl &= ~E1000_CTRL_MDIO;
+ if (data & mask)
+ ctrl |= E1000_CTRL_MDIO;
+ else
+ ctrl &= ~E1000_CTRL_MDIO;
E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
E1000_WRITE_FLUSH(hw);
@@ -749,8 +748,8 @@ static s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw)
if (ret_val)
goto out;
- if (!hw->mac.autoneg &&
- (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED))
+ if (!hw->mac.autoneg && (hw->mac.forced_speed_duplex &
+ E1000_ALL_10_SPEED))
ret_val = e1000_polarity_reversal_workaround_82543(hw);
out:
@@ -808,7 +807,7 @@ static s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw)
if (ret_val)
goto out;
- if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
+ if (!(mii_status_reg & ~MII_SR_LINK_STATUS))
break;
msec_delay_irq(100);
}
@@ -1040,7 +1039,7 @@ static s32 e1000_setup_link_82543(struct e1000_hw *hw)
goto out;
}
ctrl_ext = ((data & NVM_WORD0F_SWPDIO_EXT_MASK) <<
- NVM_SWDPIO_EXT_SHIFT);
+ NVM_SWDPIO_EXT_SHIFT);
E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
}
@@ -1114,10 +1113,8 @@ static s32 e1000_setup_copper_link_82543(struct e1000_hw *hw)
* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
- ret_val = e1000_phy_has_link_generic(hw,
- COPPER_LINK_UP_LIMIT,
- 10,
- &link);
+ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+ &link);
if (ret_val)
goto out;
@@ -1177,11 +1174,10 @@ static s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw)
* optics detect a signal. If we have a signal, then poll for a
* "Link-Up" indication.
*/
- if (!(E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+ if (!(E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1))
ret_val = e1000_poll_fiber_serdes_link_generic(hw);
- } else {
+ else
DEBUGOUT("No signal detected\n");
- }
out:
return ret_val;
@@ -1275,9 +1271,8 @@ static s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000_config_fc_after_link_up_generic(hw);
- if (ret_val) {
+ if (ret_val)
DEBUGOUT("Error configuring flow control\n");
- }
/*
* At this point we know that we are on copper and we have
@@ -1359,8 +1354,8 @@ static s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw)
if ((!(ctrl & E1000_CTRL_SWDPIN1)) &&
(!(status & E1000_STATUS_LU)) &&
(!(rxcw & E1000_RXCW_C))) {
- if (mac->autoneg_failed == 0) {
- mac->autoneg_failed = 1;
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = TRUE;
ret_val = 0;
goto out;
}
diff --git a/sys/dev/e1000/e1000_82543.h b/sys/dev/e1000/e1000_82543.h
index 60e5c15..e8edda5 100644
--- a/sys/dev/e1000/e1000_82543.h
+++ b/sys/dev/e1000/e1000_82543.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2008, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,23 +35,23 @@
#ifndef _E1000_82543_H_
#define _E1000_82543_H_
-#define PHY_PREAMBLE 0xFFFFFFFF
-#define PHY_PREAMBLE_SIZE 32
-#define PHY_SOF 0x1
-#define PHY_OP_READ 0x2
-#define PHY_OP_WRITE 0x1
-#define PHY_TURNAROUND 0x2
+#define PHY_PREAMBLE 0xFFFFFFFF
+#define PHY_PREAMBLE_SIZE 32
+#define PHY_SOF 0x1
+#define PHY_OP_READ 0x2
+#define PHY_OP_WRITE 0x1
+#define PHY_TURNAROUND 0x2
-#define TBI_COMPAT_ENABLED 0x1 /* Global "knob" for the workaround */
+#define TBI_COMPAT_ENABLED 0x1 /* Global "knob" for the workaround */
/* If TBI_COMPAT_ENABLED, then this is the current state (on/off) */
-#define TBI_SBP_ENABLED 0x2
-
+#define TBI_SBP_ENABLED 0x2
+
void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
- struct e1000_hw_stats *stats,
- u32 frame_len, u8 *mac_addr,
- u32 max_frame_size);
+ struct e1000_hw_stats *stats,
+ u32 frame_len, u8 *mac_addr,
+ u32 max_frame_size);
void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw,
- bool state);
+ bool state);
bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw);
#endif
diff --git a/sys/dev/e1000/e1000_82571.c b/sys/dev/e1000/e1000_82571.c
index e209d43..a64ef56 100644
--- a/sys/dev/e1000/e1000_82571.c
+++ b/sys/dev/e1000/e1000_82571.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_82571.h b/sys/dev/e1000/e1000_82571.h
index 41d5df0..1d7718e 100644
--- a/sys/dev/e1000/e1000_82571.h
+++ b/sys/dev/e1000/e1000_82571.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -50,10 +50,9 @@
#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
#define E1000_EIAC_MASK_82574 0x01F00000
-#define E1000_IVAR_INT_ALLOC_VALID 0x8
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
-/* Manageability Operation Mode mask */
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
+#define E1000_RXCFGL 0x0B634 /* TimeSync Rx EtherType & Msg Type Reg - RW */
#define E1000_BASE1000T_STATUS 10
#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
diff --git a/sys/dev/e1000/e1000_82575.c b/sys/dev/e1000/e1000_82575.c
index d79db67..10653f8 100644
--- a/sys/dev/e1000/e1000_82575.c
+++ b/sys/dev/e1000/e1000_82575.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -1051,7 +1051,7 @@ static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 ret_val = E1000_SUCCESS;
- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+ s32 i = 0, timeout = 200;
DEBUGFUNC("e1000_acquire_swfw_sync_82575");
@@ -2126,7 +2126,7 @@ static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
* e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
* @hw: pointer to the HW structure
*
- * After rx enable if managability is enabled then there is likely some
+ * After Rx enable, if manageability is enabled then there is likely some
* bad data at the start of the fifo and possibly in the DMA fifo. This
* function clears the fifos and flushes any packets that came in as rx was
* being enabled.
diff --git a/sys/dev/e1000/e1000_82575.h b/sys/dev/e1000/e1000_82575.h
index 6569b98..503fdce 100644
--- a/sys/dev/e1000/e1000_82575.h
+++ b/sys/dev/e1000/e1000_82575.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_api.c b/sys/dev/e1000/e1000_api.c
index 374ffa6..5db22db 100644
--- a/sys/dev/e1000/e1000_api.c
+++ b/sys/dev/e1000/e1000_api.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_api.h b/sys/dev/e1000/e1000_api.h
index a2ffa16..074197b 100644
--- a/sys/dev/e1000/e1000_api.h
+++ b/sys/dev/e1000/e1000_api.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -124,14 +124,14 @@ u32 e1000_translate_register_82542(u32 reg);
* TBI_ACCEPT macro definition:
*
* This macro requires:
- * adapter = a pointer to struct e1000_hw
+ * a = a pointer to struct e1000_hw
* status = the 8 bit status field of the Rx descriptor with EOP set
- * error = the 8 bit error field of the Rx descriptor with EOP set
+ * errors = the 8 bit error field of the Rx descriptor with EOP set
* length = the sum of all the length fields of the Rx descriptors that
* make up the current frame
* last_byte = the last byte of the frame DMAed by the hardware
- * max_frame_length = the maximum frame length we want to accept.
- * min_frame_length = the minimum frame length we want to accept.
+ * min_frame_size = the minimum frame length we want to accept.
+ * max_frame_size = the maximum frame length we want to accept.
*
* This macro is a conditional that should be used in the interrupt
* handler's Rx processing routine when RxErrors have been detected.
@@ -157,10 +157,10 @@ u32 e1000_translate_register_82542(u32 reg);
(((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
((last_byte) == CARRIER_EXTENSION) && \
(((status) & E1000_RXD_STAT_VP) ? \
- (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
- ((length) <= (max_frame_size + 1))) : \
- (((length) > min_frame_size) && \
- ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
+ (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \
+ ((length) <= ((max_frame_size) + 1))) : \
+ (((length) > (min_frame_size)) && \
+ ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1)))))
#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
diff --git a/sys/dev/e1000/e1000_defines.h b/sys/dev/e1000/e1000_defines.h
index 5deada2..9472ca4 100644
--- a/sys/dev/e1000/e1000_defines.h
+++ b/sys/dev/e1000/e1000_defines.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_hw.h b/sys/dev/e1000/e1000_hw.h
index faf64a3..8ae4b20 100644
--- a/sys/dev/e1000/e1000_hw.h
+++ b/sys/dev/e1000/e1000_hw.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -787,7 +787,7 @@ struct e1000_mac_info {
u16 uta_reg_count;
/* Maximum size of the MTA register table in all supported adapters */
- #define MAX_MTA_REG 128
+#define MAX_MTA_REG 128
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
diff --git a/sys/dev/e1000/e1000_i210.c b/sys/dev/e1000/e1000_i210.c
index f12c13f..563f11a 100644
--- a/sys/dev/e1000/e1000_i210.c
+++ b/sys/dev/e1000/e1000_i210.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_i210.h b/sys/dev/e1000/e1000_i210.h
index 2a20ca1..f940915 100644
--- a/sys/dev/e1000/e1000_i210.h
+++ b/sys/dev/e1000/e1000_i210.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_ich8lan.c b/sys/dev/e1000/e1000_ich8lan.c
index 1e5d9dc..75fcade 100644
--- a/sys/dev/e1000/e1000_ich8lan.c
+++ b/sys/dev/e1000/e1000_ich8lan.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -681,6 +681,9 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
+ u16 pci_cfg;
+#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
DEBUGFUNC("e1000_init_mac_params_ich8lan");
@@ -753,6 +756,11 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.update_mc_addr_list =
e1000_update_mc_addr_list_pch2lan;
case e1000_pchlan:
+#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
+ /* save PCH revision_id */
+ e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
+ hw->revision_id = (u8)(pci_cfg &= 0x000F);
+#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
/* ID LED init */
@@ -1079,7 +1087,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
u16 speed, duplex, scale = 0;
u16 max_snoop, max_nosnoop;
u16 max_ltr_enc; /* max LTR latency encoded */
- s64 lat_ns; /* latency (ns) */
+ s64 lat_ns;
s64 value;
u32 rxa;
@@ -1111,8 +1119,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
lat_ns = 0;
else
lat_ns /= speed;
-
value = lat_ns;
+
while (value > E1000_LTRV_VALUE_MASK) {
scale++;
value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
@@ -1297,9 +1305,9 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
release:
hw->phy.ops.release(hw);
out:
- if (ret_val) {
+ if (ret_val)
DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
- } else
+ else
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
return ret_val;
@@ -1440,9 +1448,9 @@ release:
msec_delay(50);
}
out:
- if (ret_val) {
+ if (ret_val)
DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
- } else
+ else
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
return ret_val;
@@ -2989,7 +2997,6 @@ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
u16 oem_reg;
DEBUGFUNC("e1000_set_lplu_state_pchlan");
-
ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
if (ret_val)
return ret_val;
@@ -4990,7 +4997,6 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
s32 ret_val;
DEBUGFUNC("e1000_resume_workarounds_pchlan");
-
if (hw->mac.type < e1000_pch2lan)
return;
diff --git a/sys/dev/e1000/e1000_ich8lan.h b/sys/dev/e1000/e1000_ich8lan.h
index 999e856..9cb79c0 100644
--- a/sys/dev/e1000/e1000_ich8lan.h
+++ b/sys/dev/e1000/e1000_ich8lan.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -302,6 +302,9 @@
#define E1000_SVCR_OFF_TIMER_SHIFT 16
#define E1000_SVT_OFF_HWM_MASK 0x0000001F
+#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
+#define E1000_PCI_REVISION_ID_REG 0x08
+#endif /* defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) */
void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
diff --git a/sys/dev/e1000/e1000_mac.c b/sys/dev/e1000/e1000_mac.c
index b888b34..1c86307 100644
--- a/sys/dev/e1000/e1000_mac.c
+++ b/sys/dev/e1000/e1000_mac.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_mac.h b/sys/dev/e1000/e1000_mac.h
index 2c1bfe3..1daed9b 100644
--- a/sys/dev/e1000/e1000_mac.h
+++ b/sys/dev/e1000/e1000_mac.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_manage.c b/sys/dev/e1000/e1000_manage.c
index 8087e65..f8290a2 100644
--- a/sys/dev/e1000/e1000_manage.c
+++ b/sys/dev/e1000/e1000_manage.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,6 @@
/*$FreeBSD$*/
#include "e1000_api.h"
-
/**
* e1000_calculate_checksum - Calculate checksum for buffer
* @buffer: pointer to EEPROM
diff --git a/sys/dev/e1000/e1000_manage.h b/sys/dev/e1000/e1000_manage.h
index 51f17671..303e99e 100644
--- a/sys/dev/e1000/e1000_manage.h
+++ b/sys/dev/e1000/e1000_manage.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_mbx.c b/sys/dev/e1000/e1000_mbx.c
index 55477b2..d9fb9ac 100644
--- a/sys/dev/e1000/e1000_mbx.c
+++ b/sys/dev/e1000/e1000_mbx.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_mbx.h b/sys/dev/e1000/e1000_mbx.h
index d2aea5c4..fadd849 100644
--- a/sys/dev/e1000/e1000_mbx.h
+++ b/sys/dev/e1000/e1000_mbx.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_nvm.c b/sys/dev/e1000/e1000_nvm.c
index f702f71..0a1a18d 100644
--- a/sys/dev/e1000/e1000_nvm.c
+++ b/sys/dev/e1000/e1000_nvm.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_nvm.h b/sys/dev/e1000/e1000_nvm.h
index 34077b2..31f2180 100644
--- a/sys/dev/e1000/e1000_nvm.h
+++ b/sys/dev/e1000/e1000_nvm.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_osdep.c b/sys/dev/e1000/e1000_osdep.c
index 75a7b79..2987cda 100644
--- a/sys/dev/e1000/e1000_osdep.c
+++ b/sys/dev/e1000/e1000_osdep.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_osdep.h b/sys/dev/e1000/e1000_osdep.h
index 1324110..fc46f48 100644
--- a/sys/dev/e1000/e1000_osdep.h
+++ b/sys/dev/e1000/e1000_osdep.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index f27889c..0c8ccd2 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -3122,7 +3122,7 @@ s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
- FALSE, FALSE);
+ FALSE, false);
goto release;
}
@@ -3286,7 +3286,7 @@ s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
- FALSE, FALSE);
+ FALSE, false);
goto release;
}
@@ -3433,6 +3433,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
u16 phy_reg = 0;
DEBUGFUNC("e1000_access_phy_wakeup_reg_bm");
+
reg = BM_PHY_REG_NUM(offset);
page = BM_PHY_REG_PAGE(offset);
@@ -3544,7 +3545,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
if (ret_val)
return ret_val;
}
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
@@ -3598,7 +3598,7 @@ out:
**/
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
{
- return __e1000_read_phy_reg_hv(hw, offset, data, FALSE, FALSE);
+ return __e1000_read_phy_reg_hv(hw, offset, data, FALSE, false);
}
/**
@@ -3654,7 +3654,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if (ret_val)
return ret_val;
}
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
@@ -3724,7 +3723,7 @@ out:
**/
s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
{
- return __e1000_write_phy_reg_hv(hw, offset, data, FALSE, FALSE);
+ return __e1000_write_phy_reg_hv(hw, offset, data, FALSE, false);
}
/**
diff --git a/sys/dev/e1000/e1000_phy.h b/sys/dev/e1000/e1000_phy.h
index 0e5b2e6..d3d563f 100644
--- a/sys/dev/e1000/e1000_phy.h
+++ b/sys/dev/e1000/e1000_phy.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_regs.h b/sys/dev/e1000/e1000_regs.h
index 952a7dc..7c81a89 100644
--- a/sys/dev/e1000/e1000_regs.h
+++ b/sys/dev/e1000/e1000_regs.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -202,7 +202,7 @@
/* Queues fetch arbitration priority control register */
#define E1000_I210_TQAVARBCTRL 0x3574
/* Queues priority masks where _n and _p can be 0-3. */
-#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * _n))
+#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n)))
/* QAV Tx mode control registers where _n can be 0 or 1. */
#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n))
@@ -215,7 +215,7 @@
#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */
-#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * _n))
+#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n)))
#define E1000_MMDAC 13 /* MMD Access Control */
#define E1000_MMDAAD 14 /* MMD Access Address/Data */
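The extra parentheses around _n in the two shift macros above guard against C operator-precedence surprises when a caller passes an expression rather than a bare constant. A minimal sketch, using a hypothetical queue index q:

	/* old body ((_p) << (2 * _n)): _n is substituted textually */
	E1000_TQAVARBCTRL_QUEUE_PRI(q + 1, 3)	/* -> ((3) << (2 * q + 1)), wrong shift */
	/* new body ((_p) << (2 * (_n))) */
	E1000_TQAVARBCTRL_QUEUE_PRI(q + 1, 3)	/* -> ((3) << (2 * (q + 1))), as intended */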
diff --git a/sys/dev/e1000/e1000_vf.c b/sys/dev/e1000/e1000_vf.c
index 2cabac9..4af985b 100644
--- a/sys/dev/e1000/e1000_vf.c
+++ b/sys/dev/e1000/e1000_vf.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/e1000_vf.h b/sys/dev/e1000/e1000_vf.h
index 2a780741..e6f834e 100644
--- a/sys/dev/e1000/e1000_vf.h
+++ b/sys/dev/e1000/e1000_vf.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2014, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 9ed3f36..31a4ca8 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -258,7 +258,9 @@ static bool em_rxeof(struct rx_ring *, int, int *);
#ifndef __NO_STRICT_ALIGNMENT
static int em_fixup_rx(struct rx_ring *);
#endif
-static void em_receive_checksum(struct e1000_rx_desc *, struct mbuf *);
+static void em_setup_rxdesc(union e1000_rx_desc_extended *,
+ const struct em_rxbuffer *rxbuf);
+static void em_receive_checksum(uint32_t status, struct mbuf *);
static void em_transmit_checksum_setup(struct tx_ring *, struct mbuf *, int,
struct ip *, u32 *, u32 *);
static void em_tso_setup(struct tx_ring *, struct mbuf *, int, struct ip *,
@@ -359,8 +361,14 @@ MODULE_DEPEND(em, ether, 1, 1, 1);
#define CSUM_TSO 0
#endif
+#define TSO_WORKAROUND 4
+
static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters");
+static int em_disable_crc_stripping = 0;
+SYSCTL_INT(_hw_em, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
+ &em_disable_crc_stripping, 0, "Disable CRC Stripping");
+
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
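The hw.em.disable_crc_stripping knob added above is registered with CTLFLAG_RDTUN, so it is a boot-time tunable rather than a runtime switch. A minimal usage sketch, assuming the usual loader.conf(5) mechanism:

	# /boot/loader.conf
	hw.em.disable_crc_stripping="1"	# keep the CRC so a shared BMC/IPMI port keeps working

	# after reboot, confirm what the driver picked up
	sysctl hw.em.disable_crc_stripping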
@@ -632,7 +640,7 @@ em_attach(device_t dev)
} else
adapter->num_tx_desc = em_txd;
- if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
+ if (((em_rxd * sizeof(union e1000_rx_desc_extended)) % EM_DBA_ALIGN) != 0 ||
(em_rxd > EM_MAX_RXD) || (em_rxd < EM_MIN_RXD)) {
device_printf(dev, "Using %d RX descriptors instead of %d!\n",
EM_DEFAULT_RXD, em_rxd);
@@ -1873,20 +1881,21 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
struct adapter *adapter = txr->adapter;
bus_dma_segment_t segs[EM_MAX_SCATTER];
bus_dmamap_t map;
- struct em_buffer *tx_buffer, *tx_buffer_mapped;
+ struct em_txbuffer *tx_buffer, *tx_buffer_mapped;
struct e1000_tx_desc *ctxd = NULL;
struct mbuf *m_head;
struct ether_header *eh;
struct ip *ip = NULL;
struct tcphdr *tp = NULL;
- u32 txd_upper, txd_lower, txd_used, txd_saved;
+ u32 txd_upper = 0, txd_lower = 0;
int ip_off, poff;
int nsegs, i, j, first, last = 0;
- int error, do_tso, tso_desc = 0, remap = 1;
+ int error;
+ bool do_tso, tso_desc, remap = TRUE;
m_head = *m_headp;
- txd_upper = txd_lower = txd_used = txd_saved = 0;
- do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
+ do_tso = (m_head->m_pkthdr.csum_flags & CSUM_TSO);
+ tso_desc = FALSE;
ip_off = poff = 0;
/*
@@ -1922,74 +1931,82 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
* for IPv6 yet.
*/
ip_off = sizeof(struct ether_header);
- m_head = m_pullup(m_head, ip_off);
- if (m_head == NULL) {
- *m_headp = NULL;
- return (ENOBUFS);
+ if (m_head->m_len < ip_off) {
+ m_head = m_pullup(m_head, ip_off);
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
}
eh = mtod(m_head, struct ether_header *);
if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
ip_off = sizeof(struct ether_vlan_header);
- m_head = m_pullup(m_head, ip_off);
+ if (m_head->m_len < ip_off) {
+ m_head = m_pullup(m_head, ip_off);
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ }
+ }
+ if (m_head->m_len < ip_off + sizeof(struct ip)) {
+ m_head = m_pullup(m_head, ip_off + sizeof(struct ip));
if (m_head == NULL) {
*m_headp = NULL;
return (ENOBUFS);
}
}
- m_head = m_pullup(m_head, ip_off + sizeof(struct ip));
- if (m_head == NULL) {
- *m_headp = NULL;
- return (ENOBUFS);
- }
ip = (struct ip *)(mtod(m_head, char *) + ip_off);
poff = ip_off + (ip->ip_hl << 2);
- if (do_tso) {
- m_head = m_pullup(m_head, poff + sizeof(struct tcphdr));
- if (m_head == NULL) {
- *m_headp = NULL;
- return (ENOBUFS);
+
+ if (do_tso || (m_head->m_pkthdr.csum_flags & CSUM_TCP)) {
+ if (m_head->m_len < poff + sizeof(struct tcphdr)) {
+ m_head = m_pullup(m_head, poff +
+ sizeof(struct tcphdr));
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
}
tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
/*
* TSO workaround:
* pull 4 more bytes of data into it.
*/
- m_head = m_pullup(m_head, poff + (tp->th_off << 2) + 4);
- if (m_head == NULL) {
- *m_headp = NULL;
- return (ENOBUFS);
+ if (m_head->m_len < poff + (tp->th_off << 2)) {
+ m_head = m_pullup(m_head, poff +
+ (tp->th_off << 2) +
+ TSO_WORKAROUND);
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
}
ip = (struct ip *)(mtod(m_head, char *) + ip_off);
- ip->ip_len = 0;
- ip->ip_sum = 0;
- /*
- * The pseudo TCP checksum does not include TCP payload
- * length so driver should recompute the checksum here
- * what hardware expect to see. This is adherence of
- * Microsoft's Large Send specification.
- */
tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
- tp->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP));
- } else if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
- m_head = m_pullup(m_head, poff + sizeof(struct tcphdr));
- if (m_head == NULL) {
- *m_headp = NULL;
- return (ENOBUFS);
+ if (do_tso) {
+ ip->ip_len = htons(m_head->m_pkthdr.tso_segsz +
+ (ip->ip_hl << 2) +
+ (tp->th_off << 2));
+ ip->ip_sum = 0;
+ /*
+ * The pseudo TCP checksum does not include TCP
+			 * payload length, so the driver should recompute
+			 * the checksum here to match what the hardware
+			 * expects to see. This adheres to Microsoft's
+			 * Large Send specification.
+ */
+ tp->th_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, htons(IPPROTO_TCP));
}
- tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
- m_head = m_pullup(m_head, poff + (tp->th_off << 2));
- if (m_head == NULL) {
- *m_headp = NULL;
- return (ENOBUFS);
- }
- ip = (struct ip *)(mtod(m_head, char *) + ip_off);
- tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
} else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
- m_head = m_pullup(m_head, poff + sizeof(struct udphdr));
- if (m_head == NULL) {
- *m_headp = NULL;
- return (ENOBUFS);
+ if (m_head->m_len < poff + sizeof(struct udphdr)) {
+ m_head = m_pullup(m_head, poff +
+ sizeof(struct udphdr));
+ if (m_head == NULL) {
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
}
ip = (struct ip *)(mtod(m_head, char *) + ip_off);
}
@@ -2025,9 +2042,9 @@ retry:
if (error == EFBIG && remap) {
struct mbuf *m;
- m = m_defrag(*m_headp, M_NOWAIT);
+ m = m_collapse(*m_headp, M_NOWAIT, EM_MAX_SCATTER);
if (m == NULL) {
- adapter->mbuf_alloc_failed++;
+ adapter->mbuf_defrag_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (ENOBUFS);
@@ -2035,11 +2052,8 @@ retry:
*m_headp = m;
/* Try it again, but only once */
- remap = 0;
+ remap = FALSE;
goto retry;
- } else if (error == ENOMEM) {
- adapter->no_tx_dma_setup++;
- return (error);
} else if (error != 0) {
adapter->no_tx_dma_setup++;
m_freem(*m_headp);
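The EFBIG retry above now coalesces the chain with m_collapse(9), bounded by the EM_MAX_SCATTER segment count the DMA tag can accept, instead of fully defragmenting it with m_defrag(9), and a failure is now accounted as mbuf_defrag_failed. A one-line usage sketch:

	m = m_collapse(*m_headp, M_NOWAIT, EM_MAX_SCATTER);	/* NULL on failure; at most EM_MAX_SCATTER segments on success */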
@@ -2053,13 +2067,13 @@ retry:
* it follows a TSO burst, then we need to add a
* sentinel descriptor to prevent premature writeback.
*/
- if ((do_tso == 0) && (txr->tx_tso == TRUE)) {
+ if ((!do_tso) && (txr->tx_tso == TRUE)) {
if (nsegs == 1)
tso_desc = TRUE;
txr->tx_tso = FALSE;
}
- if (nsegs > (txr->tx_avail - 2)) {
+ if (nsegs > (txr->tx_avail - EM_MAX_SCATTER)) {
txr->no_desc_avail++;
bus_dmamap_unload(txr->txtag, map);
return (ENOBUFS);
@@ -2100,23 +2114,23 @@ retry:
** If this is the last descriptor, we want to
** split it so we have a small final sentinel
*/
- if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
- seg_len -= 4;
+ if (tso_desc && (j == (nsegs - 1)) && (seg_len > 8)) {
+ seg_len -= TSO_WORKAROUND;
ctxd->buffer_addr = htole64(seg_addr);
ctxd->lower.data = htole32(
- adapter->txd_cmd | txd_lower | seg_len);
- ctxd->upper.data =
- htole32(txd_upper);
+ adapter->txd_cmd | txd_lower | seg_len);
+ ctxd->upper.data = htole32(txd_upper);
if (++i == adapter->num_tx_desc)
i = 0;
+
/* Now make the sentinel */
- ++txd_used; /* using an extra txd */
+ txr->tx_avail--;
ctxd = &txr->tx_base[i];
tx_buffer = &txr->tx_buffers[i];
ctxd->buffer_addr =
htole64(seg_addr + seg_len);
ctxd->lower.data = htole32(
- adapter->txd_cmd | txd_lower | 4);
+ adapter->txd_cmd | txd_lower | TSO_WORKAROUND);
ctxd->upper.data =
htole32(txd_upper);
last = i;
@@ -2126,8 +2140,7 @@ retry:
ctxd->buffer_addr = htole64(seg_addr);
ctxd->lower.data = htole32(
adapter->txd_cmd | txd_lower | seg_len);
- ctxd->upper.data =
- htole32(txd_upper);
+ ctxd->upper.data = htole32(txd_upper);
last = i;
if (++i == adapter->num_tx_desc)
i = 0;
@@ -2138,8 +2151,6 @@ retry:
txr->next_avail_desc = i;
txr->tx_avail -= nsegs;
- if (tso_desc) /* TSO used an extra for sentinel */
- txr->tx_avail -= txd_used;
tx_buffer->m_head = m_head;
/*
@@ -3078,6 +3089,11 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_softc = adapter;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = em_ioctl;
+ /* TSO parameters */
+ ifp->if_hw_tsomax = IP_MAXPACKET;
+ ifp->if_hw_tsomaxsegcount = EM_MAX_SCATTER;
+ ifp->if_hw_tsomaxsegsize = EM_TSO_SEG_SIZE;
+
#ifdef EM_MULTIQUEUE
/* Multiqueue stack interface */
ifp->if_transmit = em_mq_start;
@@ -3223,7 +3239,6 @@ fail_2:
bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
bus_dma_tag_destroy(dma->dma_tag);
fail_0:
- dma->dma_map = NULL;
dma->dma_tag = NULL;
return (error);
@@ -3234,12 +3249,15 @@ em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
{
if (dma->dma_tag == NULL)
return;
- if (dma->dma_map != NULL) {
+ if (dma->dma_paddr != 0) {
bus_dmamap_sync(dma->dma_tag, dma->dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+ dma->dma_paddr = 0;
+ }
+ if (dma->dma_vaddr != NULL) {
bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
- dma->dma_map = NULL;
+ dma->dma_vaddr = NULL;
}
bus_dma_tag_destroy(dma->dma_tag);
dma->dma_tag = NULL;
@@ -3325,7 +3343,7 @@ em_allocate_queues(struct adapter *adapter)
* Next the RX queues...
*/
rsize = roundup2(adapter->num_rx_desc *
- sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
+ sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN);
for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
rxr = &adapter->rx_rings[i];
rxr->adapter = adapter;
@@ -3343,7 +3361,7 @@ em_allocate_queues(struct adapter *adapter)
error = ENOMEM;
goto err_rx_desc;
}
- rxr->rx_base = (struct e1000_rx_desc *)rxr->rxdma.dma_vaddr;
+ rxr->rx_base = (union e1000_rx_desc_extended *)rxr->rxdma.dma_vaddr;
bzero((void *)rxr->rx_base, rsize);
/* Allocate receive buffers for the ring*/
@@ -3386,7 +3404,7 @@ em_allocate_transmit_buffers(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
device_t dev = adapter->dev;
- struct em_buffer *txbuf;
+ struct em_txbuffer *txbuf;
int error, i;
/*
@@ -3409,7 +3427,7 @@ em_allocate_transmit_buffers(struct tx_ring *txr)
}
if (!(txr->tx_buffers =
- (struct em_buffer *) malloc(sizeof(struct em_buffer) *
+ (struct em_txbuffer *) malloc(sizeof(struct em_txbuffer) *
adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
error = ENOMEM;
@@ -3442,7 +3460,7 @@ static void
em_setup_transmit_ring(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
- struct em_buffer *txbuf;
+ struct em_txbuffer *txbuf;
int i;
#ifdef DEV_NETMAP
struct netmap_adapter *na = NA(adapter->ifp);
@@ -3661,7 +3679,7 @@ static void
em_free_transmit_buffers(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
- struct em_buffer *txbuf;
+ struct em_txbuffer *txbuf;
INIT_DEBUGOUT("free_transmit_ring: begin");
@@ -3728,7 +3746,7 @@ em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
{
struct adapter *adapter = txr->adapter;
struct e1000_context_desc *TXD = NULL;
- struct em_buffer *tx_buffer;
+ struct em_txbuffer *tx_buffer;
int cur, hdr_len;
u32 cmd = 0;
u16 offload = 0;
@@ -3762,29 +3780,38 @@ em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
offload |= CSUM_TCP;
tucss = hdr_len;
tucso = hdr_len + offsetof(struct tcphdr, th_sum);
- /*
- * Setting up new checksum offload context for every frames
- * takes a lot of processing time for hardware. This also
- * reduces performance a lot for small sized frames so avoid
- * it if driver can use previously configured checksum
- * offload context.
- */
- if (txr->last_hw_offload == offload) {
- if (offload & CSUM_IP) {
- if (txr->last_hw_ipcss == ipcss &&
- txr->last_hw_ipcso == ipcso &&
- txr->last_hw_tucss == tucss &&
- txr->last_hw_tucso == tucso)
- return;
- } else {
- if (txr->last_hw_tucss == tucss &&
- txr->last_hw_tucso == tucso)
- return;
- }
- }
- txr->last_hw_offload = offload;
- txr->last_hw_tucss = tucss;
- txr->last_hw_tucso = tucso;
+ /*
+ * The 82574L can only remember the *last* context used
+	 * regardless of the queue it was used for.  We cannot reuse
+ * contexts on this hardware platform and must generate a new
+ * context every time. 82574L hardware spec, section 7.2.6,
+ * second note.
+ */
+ if (adapter->num_queues < 2) {
+ /*
+		 * Setting up a new checksum offload context for every
+		 * frame takes a lot of processing time for hardware.
+		 * This also reduces performance a lot for small-sized
+		 * frames, so avoid it if the driver can reuse a
+		 * previously configured checksum offload context.
+ */
+ if (txr->last_hw_offload == offload) {
+ if (offload & CSUM_IP) {
+ if (txr->last_hw_ipcss == ipcss &&
+ txr->last_hw_ipcso == ipcso &&
+ txr->last_hw_tucss == tucss &&
+ txr->last_hw_tucso == tucso)
+ return;
+ } else {
+ if (txr->last_hw_tucss == tucss &&
+ txr->last_hw_tucso == tucso)
+ return;
+ }
+ }
+ txr->last_hw_offload = offload;
+ txr->last_hw_tucss = tucss;
+ txr->last_hw_tucso = tucso;
+ }
/*
* Start offset for payload checksum calculation.
* End offset for payload checksum calculation.
@@ -3800,29 +3827,38 @@ em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
tucss = hdr_len;
tucso = hdr_len + offsetof(struct udphdr, uh_sum);
- /*
- * Setting up new checksum offload context for every frames
- * takes a lot of processing time for hardware. This also
- * reduces performance a lot for small sized frames so avoid
- * it if driver can use previously configured checksum
- * offload context.
- */
- if (txr->last_hw_offload == offload) {
- if (offload & CSUM_IP) {
- if (txr->last_hw_ipcss == ipcss &&
- txr->last_hw_ipcso == ipcso &&
- txr->last_hw_tucss == tucss &&
- txr->last_hw_tucso == tucso)
- return;
- } else {
- if (txr->last_hw_tucss == tucss &&
- txr->last_hw_tucso == tucso)
- return;
+ /*
+ * The 82574L can only remember the *last* context used
+	 * regardless of the queue it was used for.  We cannot reuse
+ * contexts on this hardware platform and must generate a new
+ * context every time. 82574L hardware spec, section 7.2.6,
+ * second note.
+ */
+ if (adapter->num_queues < 2) {
+ /*
+		 * Setting up a new checksum offload context for every
+		 * frame takes a lot of processing time for hardware.
+		 * This also reduces performance a lot for small-sized
+		 * frames, so avoid it if the driver can reuse a
+		 * previously configured checksum offload context.
+ */
+ if (txr->last_hw_offload == offload) {
+ if (offload & CSUM_IP) {
+ if (txr->last_hw_ipcss == ipcss &&
+ txr->last_hw_ipcso == ipcso &&
+ txr->last_hw_tucss == tucss &&
+ txr->last_hw_tucso == tucso)
+ return;
+ } else {
+ if (txr->last_hw_tucss == tucss &&
+ txr->last_hw_tucso == tucso)
+ return;
+ }
}
- }
- txr->last_hw_offload = offload;
- txr->last_hw_tucss = tucss;
- txr->last_hw_tucso = tucso;
+ txr->last_hw_offload = offload;
+ txr->last_hw_tucss = tucss;
+ txr->last_hw_tucso = tucso;
+ }
/*
* Start offset for header checksum calculation.
* End offset for header checksum calculation.
@@ -3865,7 +3901,7 @@ em_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
{
struct adapter *adapter = txr->adapter;
struct e1000_context_desc *TXD;
- struct em_buffer *tx_buffer;
+ struct em_txbuffer *tx_buffer;
int cur, hdr_len;
/*
@@ -3943,7 +3979,7 @@ em_txeof(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
int first, last, done, processed;
- struct em_buffer *tx_buffer;
+ struct em_txbuffer *tx_buffer;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct ifnet *ifp = adapter->ifp;
@@ -4049,7 +4085,6 @@ em_txeof(struct tx_ring *txr)
txr->busy = EM_TX_IDLE;
}
-
/*********************************************************************
*
* Refresh RX descriptor mbufs from system mbuf buffer pool.
@@ -4060,8 +4095,8 @@ em_refresh_mbufs(struct rx_ring *rxr, int limit)
{
struct adapter *adapter = rxr->adapter;
struct mbuf *m;
- bus_dma_segment_t segs[1];
- struct em_buffer *rxbuf;
+ bus_dma_segment_t segs;
+ struct em_rxbuffer *rxbuf;
int i, j, error, nsegs;
bool cleaned = FALSE;
@@ -4096,7 +4131,7 @@ em_refresh_mbufs(struct rx_ring *rxr, int limit)
/* Use bus_dma machinery to setup the memory mapping */
error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map,
- m, segs, &nsegs, BUS_DMA_NOWAIT);
+ m, &segs, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
printf("Refresh mbufs: hdr dmamap load"
" failure - %d\n", error);
@@ -4105,9 +4140,10 @@ em_refresh_mbufs(struct rx_ring *rxr, int limit)
goto update;
}
rxbuf->m_head = m;
+ rxbuf->paddr = segs.ds_addr;
bus_dmamap_sync(rxr->rxtag,
rxbuf->map, BUS_DMASYNC_PREREAD);
- rxr->rx_base[i].buffer_addr = htole64(segs[0].ds_addr);
+ em_setup_rxdesc(&rxr->rx_base[i], rxbuf);
cleaned = TRUE;
 		i = j; /* Next is precalculated for us */
@@ -4142,10 +4178,10 @@ em_allocate_receive_buffers(struct rx_ring *rxr)
{
struct adapter *adapter = rxr->adapter;
device_t dev = adapter->dev;
- struct em_buffer *rxbuf;
+ struct em_rxbuffer *rxbuf;
int error;
- rxr->rx_buffers = malloc(sizeof(struct em_buffer) *
+ rxr->rx_buffers = malloc(sizeof(struct em_rxbuffer) *
adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
if (rxr->rx_buffers == NULL) {
device_printf(dev, "Unable to allocate rx_buffer memory\n");
@@ -4198,7 +4234,7 @@ static int
em_setup_receive_ring(struct rx_ring *rxr)
{
struct adapter *adapter = rxr->adapter;
- struct em_buffer *rxbuf;
+ struct em_rxbuffer *rxbuf;
bus_dma_segment_t seg[1];
int rsize, nsegs, error = 0;
#ifdef DEV_NETMAP
@@ -4210,7 +4246,7 @@ em_setup_receive_ring(struct rx_ring *rxr)
/* Clear the ring contents */
EM_RX_LOCK(rxr);
rsize = roundup2(adapter->num_rx_desc *
- sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
+ sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN);
bzero((void *)rxr->rx_base, rsize);
#ifdef DEV_NETMAP
slot = netmap_reset(na, NR_RX, 0, 0);
@@ -4241,8 +4277,7 @@ em_setup_receive_ring(struct rx_ring *rxr)
addr = PNMB(na, slot + si, &paddr);
netmap_load_map(na, rxr->rxtag, rxbuf->map, addr);
- /* Update descriptor */
- rxr->rx_base[j].buffer_addr = htole64(paddr);
+ em_setup_rxdesc(&rxr->rx_base[j], rxbuf);
continue;
}
#endif /* DEV_NETMAP */
@@ -4268,8 +4303,8 @@ em_setup_receive_ring(struct rx_ring *rxr)
bus_dmamap_sync(rxr->rxtag,
rxbuf->map, BUS_DMASYNC_PREREAD);
- /* Update descriptor */
- rxr->rx_base[j].buffer_addr = htole64(seg[0].ds_addr);
+ rxbuf->paddr = seg[0].ds_addr;
+ em_setup_rxdesc(&rxr->rx_base[j], rxbuf);
}
rxr->next_to_check = 0;
rxr->next_to_refresh = 0;
@@ -4306,7 +4341,7 @@ fail:
for (int i = 0; i < q; ++i) {
rxr = &adapter->rx_rings[i];
for (int n = 0; n < adapter->num_rx_desc; n++) {
- struct em_buffer *rxbuf;
+ struct em_rxbuffer *rxbuf;
rxbuf = &rxr->rx_buffers[n];
if (rxbuf->m_head != NULL) {
bus_dmamap_sync(rxr->rxtag, rxbuf->map,
@@ -4353,7 +4388,7 @@ static void
em_free_receive_buffers(struct rx_ring *rxr)
{
struct adapter *adapter = rxr->adapter;
- struct em_buffer *rxbuf = NULL;
+ struct em_rxbuffer *rxbuf = NULL;
INIT_DEBUGOUT("free_receive_buffers: begin");
@@ -4395,11 +4430,10 @@ em_free_receive_buffers(struct rx_ring *rxr)
static void
em_initialize_receive_unit(struct adapter *adapter)
{
- struct rx_ring *rxr = adapter->rx_rings;
+ struct rx_ring *rxr = adapter->rx_rings;
struct ifnet *ifp = adapter->ifp;
struct e1000_hw *hw = &adapter->hw;
- u64 bus_addr;
- u32 rctl, rxcsum;
+ u32 rctl, rxcsum, rfctl;
INIT_DEBUGOUT("em_initialize_receive_units: begin");
@@ -4412,6 +4446,25 @@ em_initialize_receive_unit(struct adapter *adapter)
if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583))
E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+ /* Setup the Receive Control Register */
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ /* Do not store bad packets */
+ rctl &= ~E1000_RCTL_SBP;
+
+ /* Enable Long Packet receive */
+ if (ifp->if_mtu > ETHERMTU)
+ rctl |= E1000_RCTL_LPE;
+ else
+ rctl &= ~E1000_RCTL_LPE;
+
+ /* Strip the CRC */
+ if (!em_disable_crc_stripping)
+ rctl |= E1000_RCTL_SECRC;
+
E1000_WRITE_REG(&adapter->hw, E1000_RADV,
adapter->rx_abs_int_delay.value);
@@ -4423,20 +4476,21 @@ em_initialize_receive_unit(struct adapter *adapter)
*/
E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR);
+ /* Use extended rx descriptor formats */
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ rfctl |= E1000_RFCTL_EXTEN;
/*
** When using MSIX interrupts we need to throttle
** using the EITR register (82574 only)
*/
if (hw->mac.type == e1000_82574) {
- u32 rfctl;
for (int i = 0; i < 4; i++)
E1000_WRITE_REG(hw, E1000_EITR_82574(i),
DEFAULT_ITR);
/* Disable accelerated acknowledge */
- rfctl = E1000_READ_REG(hw, E1000_RFCTL);
rfctl |= E1000_RFCTL_ACK_DIS;
- E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
}
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
if (ifp->if_capenable & IFCAP_RXCSUM) {
@@ -4453,38 +4507,44 @@ em_initialize_receive_unit(struct adapter *adapter)
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
#ifdef EM_MULTIQUEUE
+#define RSSKEYLEN 10
if (adapter->num_queues > 1) {
- uint32_t rss_key[10];
- uint32_t reta;
+ uint8_t rss_key[4 * RSSKEYLEN];
+ uint32_t reta = 0;
int i;
/*
* Configure RSS key
*/
arc4rand(rss_key, sizeof(rss_key), 0);
- for (i = 0; i < 10; ++i)
- E1000_WRITE_REG_ARRAY(hw,E1000_RSSRK(0), i, rss_key[i]);
+ for (i = 0; i < RSSKEYLEN; ++i) {
+ uint32_t rssrk = 0;
+
+ rssrk = EM_RSSRK_VAL(rss_key, i);
+			E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk);
+ }
/*
* Configure RSS redirect table in following fashion:
* (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
*/
- reta = 0;
- for (i = 0; i < 4; ++i) {
+ for (i = 0; i < sizeof(reta); ++i) {
uint32_t q;
+
q = (i % adapter->num_queues) << 7;
reta |= q << (8 * i);
}
- for (i = 0; i < 32; ++i)
+
+ for (i = 0; i < 32; ++i) {
E1000_WRITE_REG(hw, E1000_RETA(i), reta);
+ }
E1000_WRITE_REG(hw, E1000_MRQC, E1000_MRQC_RSS_ENABLE_2Q |
E1000_MRQC_RSS_FIELD_IPV4_TCP |
E1000_MRQC_RSS_FIELD_IPV4 |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX |
E1000_MRQC_RSS_FIELD_IPV6_EX |
- E1000_MRQC_RSS_FIELD_IPV6 |
- E1000_MRQC_RSS_FIELD_IPV6_TCP);
+ E1000_MRQC_RSS_FIELD_IPV6);
}
#endif
/*
@@ -4499,11 +4559,11 @@ em_initialize_receive_unit(struct adapter *adapter)
for (int i = 0; i < adapter->num_queues; i++, rxr++) {
/* Setup the Base and Length of the Rx Descriptor Ring */
+ u64 bus_addr = rxr->rxdma.dma_paddr;
u32 rdt = adapter->num_rx_desc - 1; /* default */
- bus_addr = rxr->rxdma.dma_paddr;
E1000_WRITE_REG(hw, E1000_RDLEN(i),
- adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
+ adapter->num_rx_desc * sizeof(union e1000_rx_desc_extended));
E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32));
E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr);
/* Setup the Head and Tail Descriptor Pointers */
@@ -4532,14 +4592,13 @@ em_initialize_receive_unit(struct adapter *adapter)
(ifp->if_mtu > ETHERMTU)) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
- } else if ((adapter->hw.mac.type == e1000_82574) &&
- (ifp->if_mtu > ETHERMTU)) {
+ } else if (adapter->hw.mac.type == e1000_82574) {
for (int i = 0; i < adapter->num_queues; i++) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
- rxdctl |= 0x20; /* PTHRESH */
- rxdctl |= 4 << 8; /* HTHRESH */
- rxdctl |= 4 << 16;/* WTHRESH */
+ rxdctl |= 0x20; /* PTHRESH */
+ rxdctl |= 4 << 8; /* HTHRESH */
+ rxdctl |= 4 << 16;/* WTHRESH */
rxdctl |= 1 << 24; /* Switch to granularity */
E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
}
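In the 82574 branch just above, the per-queue RXDCTL value is assembled from the threshold fields named in the in-line comments; the constants OR together into a single register write. A worked expansion of the arithmetic (field names per those comments; the 82574 datasheet is authoritative for the exact bit widths):

	rxdctl |= 0x20;		/* PTHRESH contribution	0x00000020 */
	rxdctl |= 4 << 8;	/* HTHRESH contribution	0x00000400 */
	rxdctl |= 4 << 16;	/* WTHRESH contribution	0x00040000 */
	rxdctl |= 1 << 24;	/* granularity switch	0x01000000 */
				/* combined OR value	0x01040420 */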
@@ -4552,18 +4611,8 @@ em_initialize_receive_unit(struct adapter *adapter)
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
}
- /* Setup the Receive Control Register */
- rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
- rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-
- /* Strip the CRC */
- rctl |= E1000_RCTL_SECRC;
-
/* Make sure VLAN Filters are off */
rctl &= ~E1000_RCTL_VFE;
- rctl &= ~E1000_RCTL_SBP;
if (adapter->rx_mbuf_sz == MCLBYTES)
rctl |= E1000_RCTL_SZ_2048;
@@ -4572,11 +4621,8 @@ em_initialize_receive_unit(struct adapter *adapter)
else if (adapter->rx_mbuf_sz > MJUMPAGESIZE)
rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
- if (ifp->if_mtu > ETHERMTU)
- rctl |= E1000_RCTL_LPE;
- else
- rctl &= ~E1000_RCTL_LPE;
-
+	/* Ensure we use a DTYPE of 00 here by clearing these bits */
+ rctl &= ~0x00000C00;
/* Write out the settings */
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
@@ -4601,11 +4647,11 @@ em_rxeof(struct rx_ring *rxr, int count, int *done)
struct adapter *adapter = rxr->adapter;
struct ifnet *ifp = adapter->ifp;
struct mbuf *mp, *sendmp;
- u8 status = 0;
+ u32 status = 0;
u16 len;
int i, processed, rxdone = 0;
bool eop;
- struct e1000_rx_desc *cur;
+ union e1000_rx_desc_extended *cur;
EM_RX_LOCK(rxr);
@@ -4622,21 +4668,20 @@ em_rxeof(struct rx_ring *rxr, int count, int *done)
#endif /* DEV_NETMAP */
for (i = rxr->next_to_check, processed = 0; count != 0;) {
-
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
break;
cur = &rxr->rx_base[i];
- status = cur->status;
+ status = le32toh(cur->wb.upper.status_error);
mp = sendmp = NULL;
if ((status & E1000_RXD_STAT_DD) == 0)
break;
- len = le16toh(cur->length);
+ len = le16toh(cur->wb.upper.length);
eop = (status & E1000_RXD_STAT_EOP) != 0;
- if ((cur->errors & E1000_RXD_ERR_FRAME_ERR_MASK) ||
+ if ((status & E1000_RXDEXT_ERR_FRAME_ERR_MASK) ||
(rxr->discard == TRUE)) {
adapter->dropped_pkts++;
++rxr->rx_discarded;
@@ -4673,7 +4718,7 @@ em_rxeof(struct rx_ring *rxr, int count, int *done)
sendmp = rxr->fmp;
sendmp->m_pkthdr.rcvif = ifp;
ifp->if_ipackets++;
- em_receive_checksum(cur, sendmp);
+ em_receive_checksum(status, sendmp);
#ifndef __NO_STRICT_ALIGNMENT
if (adapter->hw.mac.max_frame_size >
(MCLBYTES - ETHER_ALIGN) &&
@@ -4682,7 +4727,7 @@ em_rxeof(struct rx_ring *rxr, int count, int *done)
#endif
if (status & E1000_RXD_STAT_VP) {
sendmp->m_pkthdr.ether_vtag =
- le16toh(cur->special);
+ le16toh(cur->wb.upper.vlan);
sendmp->m_flags |= M_VLANTAG;
}
#ifndef __NO_STRICT_ALIGNMENT
@@ -4696,7 +4741,7 @@ next_desc:
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* Zero out the receive descriptors status. */
- cur->status = 0;
+ cur->wb.upper.status_error &= htole32(~0xFF);
++rxdone; /* cumulative for POLL */
++processed;
@@ -4735,7 +4780,7 @@ next_desc:
static __inline void
em_rx_discard(struct rx_ring *rxr, int i)
{
- struct em_buffer *rbuf;
+ struct em_rxbuffer *rbuf;
rbuf = &rxr->rx_buffers[i];
bus_dmamap_unload(rxr->rxtag, rbuf->map);
@@ -4807,6 +4852,14 @@ em_fixup_rx(struct rx_ring *rxr)
}
#endif
+static void
+em_setup_rxdesc(union e1000_rx_desc_extended *rxd, const struct em_rxbuffer *rxbuf)
+{
+ rxd->read.buffer_addr = htole64(rxbuf->paddr);
+ /* DD bits must be cleared */
+	rxd->wb.upper.status_error = 0;
+}
+
/*********************************************************************
*
* Verify that the hardware indicated that the checksum is valid.
@@ -4815,23 +4868,27 @@ em_fixup_rx(struct rx_ring *rxr)
*
*********************************************************************/
static void
-em_receive_checksum(struct e1000_rx_desc *rx_desc, struct mbuf *mp)
+em_receive_checksum(uint32_t status, struct mbuf *mp)
{
mp->m_pkthdr.csum_flags = 0;
/* Ignore Checksum bit is set */
- if (rx_desc->status & E1000_RXD_STAT_IXSM)
- return;
-
- if (rx_desc->errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE))
+ if (status & E1000_RXD_STAT_IXSM)
return;
- /* IP Checksum Good? */
- if (rx_desc->status & E1000_RXD_STAT_IPCS)
+ /* If the IP checksum exists and there is no IP Checksum error */
+ if ((status & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
+ E1000_RXD_STAT_IPCS) {
mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
+ }
/* TCP or UDP checksum */
- if (rx_desc->status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
+ if ((status & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
+ E1000_RXD_STAT_TCPCS) {
+ mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+ mp->m_pkthdr.csum_data = htons(0xffff);
+ }
+ if (status & E1000_RXD_STAT_UDPCS) {
mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
mp->m_pkthdr.csum_data = htons(0xffff);
}
@@ -4933,8 +4990,8 @@ em_enable_intr(struct adapter *adapter)
u32 ims_mask = IMS_ENABLE_MASK;
if (hw->mac.type == e1000_82574) {
- E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
- ims_mask |= EM_MSIX_MASK;
+ E1000_WRITE_REG(hw, EM_EIAC, adapter->ims);
+ ims_mask |= adapter->ims;
}
E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
}
@@ -5470,18 +5527,15 @@ em_add_hw_stats(struct adapter *adapter)
char namebuf[QUEUE_NAME_LEN];
/* Driver Statistics */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
- CTLFLAG_RD, &adapter->link_irq,
- "Link MSIX IRQ Handled");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
- CTLFLAG_RD, &adapter->mbuf_alloc_failed,
- "Std mbuf failed");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
- CTLFLAG_RD, &adapter->mbuf_cluster_failed,
- "Std mbuf cluster failed");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
CTLFLAG_RD, &adapter->dropped_pkts,
"Driver dropped packets");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
+ CTLFLAG_RD, &adapter->link_irq,
+ "Link MSIX IRQ Handled");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail",
+ CTLFLAG_RD, &adapter->mbuf_defrag_failed,
+ "Defragmenting mbuf chain failed");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
CTLFLAG_RD, &adapter->no_tx_dma_setup,
"Driver tx dma failure in xmit");
@@ -6030,7 +6084,9 @@ DB_COMMAND(em_reset_dev, em_ddb_reset_dev)
dev = devclass_get_device(dc, index);
if (device_get_driver(dev) == &em_driver) {
struct adapter *adapter = device_get_softc(dev);
+ EM_CORE_LOCK(adapter);
em_init_locked(adapter);
+ EM_CORE_UNLOCK(adapter);
}
}
}
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index beb078d..b1664fe 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -266,7 +266,7 @@
#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
-#define EM_MAX_SCATTER 32
+#define EM_MAX_SCATTER 64
#define EM_VFTA_SIZE 128
#define EM_TSO_SIZE (65535 + sizeof(struct ether_vlan_header))
#define EM_TSO_SEG_SIZE 4096 /* Max dma segment size */
@@ -330,7 +330,7 @@ struct tx_ring {
struct taskqueue *tq;
u32 next_avail_desc;
u32 next_to_clean;
- struct em_buffer *tx_buffers;
+ struct em_txbuffer *tx_buffers;
volatile u16 tx_avail;
u32 tx_tso; /* last tx was tso */
u16 last_hw_offload;
@@ -362,11 +362,11 @@ struct rx_ring {
u32 payload;
struct task rx_task;
struct taskqueue *tq;
- struct e1000_rx_desc *rx_base;
+ union e1000_rx_desc_extended *rx_base;
struct em_dma_alloc rxdma;
u32 next_to_refresh;
u32 next_to_check;
- struct em_buffer *rx_buffers;
+ struct em_rxbuffer *rx_buffers;
struct mbuf *fmp;
struct mbuf *lmp;
@@ -473,13 +473,12 @@ struct adapter {
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
- unsigned long mbuf_alloc_failed;
- unsigned long mbuf_cluster_failed;
+ unsigned long link_irq;
+ unsigned long mbuf_defrag_failed;
+ unsigned long no_tx_dma_setup;
unsigned long no_tx_map_avail;
- unsigned long no_tx_dma_setup;
unsigned long rx_overruns;
unsigned long watchdog_events;
- unsigned long link_irq;
struct e1000_hw_stats stats;
};
@@ -499,10 +498,17 @@ typedef struct _em_vendor_info_t {
unsigned int index;
} em_vendor_info_t;
-struct em_buffer {
+struct em_txbuffer {
+ int next_eop; /* Index of the desc to watch */
+ struct mbuf *m_head;
+ bus_dmamap_t map; /* bus_dma map for packet */
+};
+
+struct em_rxbuffer {
int next_eop; /* Index of the desc to watch */
struct mbuf *m_head;
bus_dmamap_t map; /* bus_dma map for packet */
+ bus_addr_t paddr;
};
@@ -541,4 +547,9 @@ e1000_rx_unrefreshed(struct rx_ring *rxr)
#define EM_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
#define EM_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_mtx, MA_OWNED)
+#define EM_RSSRK_SIZE 4
+#define EM_RSSRK_VAL(key, i) (key[(i) * EM_RSSRK_SIZE] | \
+ key[(i) * EM_RSSRK_SIZE + 1] << 8 | \
+ key[(i) * EM_RSSRK_SIZE + 2] << 16 | \
+ key[(i) * EM_RSSRK_SIZE + 3] << 24)
#endif /* _EM_H_DEFINED_ */
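The EM_RSSRK_VAL() helper added above packs four consecutive bytes of the randomly generated RSS key into one little-endian 32-bit value for the E1000_RSSRK(i) write loop in em_initialize_receive_unit(). A small worked example with an illustrative key:

	uint8_t rss_key[4 * RSSKEYLEN] = { 0x01, 0x02, 0x03, 0x04, /* ... */ };

	EM_RSSRK_VAL(rss_key, 0);	/* 0x04030201: key[0] | key[1]<<8 | key[2]<<16 | key[3]<<24 */
	EM_RSSRK_VAL(rss_key, 1);	/* next four key bytes, same layout */

For the redirection table built in the same RSS hunk, two queues produce a per-register value of 0x80008000 (bytes 0x00, 0x80, 0x00, 0x80), so consecutive hash buckets alternate between queue 0 and queue 1 across all 32 RETA registers.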
diff --git a/sys/dev/e1000/if_igb.c b/sys/dev/e1000/if_igb.c
index 2e23d9d..620fbad 100644
--- a/sys/dev/e1000/if_igb.c
+++ b/sys/dev/e1000/if_igb.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -1889,7 +1889,8 @@ retry:
/* Try it again? - one try */
if (remap == TRUE) {
remap = FALSE;
- m = m_defrag(*m_headp, M_NOWAIT);
+ m = m_collapse(*m_headp, M_NOWAIT,
+ IGB_MAX_SCATTER);
if (m == NULL) {
adapter->mbuf_defrag_failed++;
m_freem(*m_headp);
@@ -1900,9 +1901,6 @@ retry:
goto retry;
} else
return (error);
- case ENOMEM:
- txr->no_tx_dma_setup++;
- return (error);
default:
txr->no_tx_dma_setup++;
m_freem(*m_headp);
@@ -3270,7 +3268,6 @@ fail_2:
bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
bus_dma_tag_destroy(dma->dma_tag);
fail_0:
- dma->dma_map = NULL;
dma->dma_tag = NULL;
return (error);
@@ -3281,12 +3278,15 @@ igb_dma_free(struct adapter *adapter, struct igb_dma_alloc *dma)
{
if (dma->dma_tag == NULL)
return;
- if (dma->dma_map != NULL) {
+ if (dma->dma_paddr != 0) {
bus_dmamap_sync(dma->dma_tag, dma->dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+ dma->dma_paddr = 0;
+ }
+ if (dma->dma_vaddr != NULL) {
bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
- dma->dma_map = NULL;
+ dma->dma_vaddr = NULL;
}
bus_dma_tag_destroy(dma->dma_tag);
dma->dma_tag = NULL;
@@ -5639,12 +5639,15 @@ igb_add_hw_stats(struct adapter *adapter)
char namebuf[QUEUE_NAME_LEN];
/* Driver Statistics */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
- CTLFLAG_RD, &adapter->link_irq,
- "Link MSIX IRQ Handled");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
CTLFLAG_RD, &adapter->dropped_pkts,
"Driver dropped packets");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
+ CTLFLAG_RD, &adapter->link_irq,
+ "Link MSIX IRQ Handled");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail",
+ CTLFLAG_RD, &adapter->mbuf_defrag_failed,
+ "Defragmenting mbuf chain failed");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
CTLFLAG_RD, &adapter->no_tx_dma_setup,
"Driver tx dma failure in xmit");
diff --git a/sys/dev/e1000/if_igb.h b/sys/dev/e1000/if_igb.h
index d41d8b0..2708a7c 100644
--- a/sys/dev/e1000/if_igb.h
+++ b/sys/dev/e1000/if_igb.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -457,20 +457,19 @@ struct adapter {
u8 *mta;
/* Misc stats maintained by the driver */
+ unsigned long device_control;
unsigned long dropped_pkts;
+ unsigned long eint_mask;
+ unsigned long int_mask;
+ unsigned long link_irq;
unsigned long mbuf_defrag_failed;
- unsigned long mbuf_header_failed;
- unsigned long mbuf_packet_failed;
unsigned long no_tx_dma_setup;
- unsigned long watchdog_events;
- unsigned long link_irq;
- unsigned long rx_overruns;
- unsigned long device_control;
- unsigned long rx_control;
- unsigned long int_mask;
- unsigned long eint_mask;
unsigned long packet_buf_alloc_rx;
unsigned long packet_buf_alloc_tx;
+ unsigned long rx_control;
+ unsigned long rx_overruns;
+ unsigned long watchdog_events;
+
/* Used in pf and vf */
void *stats;
diff --git a/sys/dev/e1000/if_lem.c b/sys/dev/e1000/if_lem.c
index 903cc28..9d3be4c 100644
--- a/sys/dev/e1000/if_lem.c
+++ b/sys/dev/e1000/if_lem.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -95,7 +95,7 @@
/*********************************************************************
* Legacy Em Driver version:
*********************************************************************/
-char lem_driver_version[] = "1.0.6";
+char lem_driver_version[] = "1.1.0";
/*********************************************************************
* PCI Device ID Table
@@ -1670,9 +1670,9 @@ lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
if (error == EFBIG) {
struct mbuf *m;
- m = m_defrag(*m_headp, M_NOWAIT);
+ m = m_collapse(*m_headp, M_NOWAIT, EM_MAX_SCATTER);
if (m == NULL) {
- adapter->mbuf_alloc_failed++;
+ adapter->mbuf_defrag_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (ENOBUFS);
@@ -2699,7 +2699,6 @@ fail_2:
bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
bus_dma_tag_destroy(dma->dma_tag);
fail_0:
- dma->dma_map = NULL;
dma->dma_tag = NULL;
return (error);
@@ -2710,12 +2709,15 @@ lem_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
{
if (dma->dma_tag == NULL)
return;
- if (dma->dma_map != NULL) {
+ if (dma->dma_paddr != 0) {
bus_dmamap_sync(dma->dma_tag, dma->dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+ dma->dma_paddr = 0;
+ }
+ if (dma->dma_vaddr != NULL) {
bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
- dma->dma_map = NULL;
+ dma->dma_vaddr = NULL;
}
bus_dma_tag_destroy(dma->dma_tag);
dma->dma_tag = NULL;
@@ -2942,10 +2944,6 @@ lem_free_transmit_structures(struct adapter *adapter)
bus_dma_tag_destroy(adapter->txtag);
adapter->txtag = NULL;
}
-#if __FreeBSD_version >= 800000
- if (adapter->br != NULL)
- buf_ring_free(adapter->br, M_DEVBUF);
-#endif
}
/*********************************************************************
@@ -4548,12 +4546,12 @@ lem_add_hw_stats(struct adapter *adapter)
struct sysctl_oid_list *stat_list;
/* Driver Statistics */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_fail",
- CTLFLAG_RD, &adapter->mbuf_alloc_failed,
- "Std mbuf failed");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "cluster_alloc_fail",
CTLFLAG_RD, &adapter->mbuf_cluster_failed,
"Std mbuf cluster failed");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail",
+ CTLFLAG_RD, &adapter->mbuf_defrag_failed,
+ "Defragmenting mbuf chain failed");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
CTLFLAG_RD, &adapter->dropped_pkts,
"Driver dropped packets");
diff --git a/sys/dev/e1000/if_lem.h b/sys/dev/e1000/if_lem.h
index c67a761..14fb1aa 100644
--- a/sys/dev/e1000/if_lem.h
+++ b/sys/dev/e1000/if_lem.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2011, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -266,11 +266,11 @@
#define TSYNC_PORT 319 /* UDP port for the protocol */
#ifdef NIC_PARAVIRT
-#define E1000_PARA_SUBDEV 0x1101 /* special id */
-#define E1000_CSBAL 0x02830 /* csb phys. addr. low */
-#define E1000_CSBAH 0x02834 /* csb phys. addr. hi */
+#define E1000_PARA_SUBDEV 0x1101 /* special id */
+#define E1000_CSBAL 0x02830 /* csb phys. addr. low */
+#define E1000_CSBAH 0x02834 /* csb phys. addr. hi */
#include <net/paravirt.h>
-#endif
+#endif /* NIC_PARAVIRT */
/*
* Bus dma allocation structure used by
@@ -296,9 +296,6 @@ struct em_int_delay_info {
/* Our adapter structure */
struct adapter {
struct ifnet *ifp;
-#if __FreeBSD_version >= 800000
- struct buf_ring *br;
-#endif
struct e1000_hw hw;
/* FreeBSD operating-system-specific structures. */
@@ -420,17 +417,17 @@ struct adapter {
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
- unsigned long mbuf_alloc_failed;
+ unsigned long link_irq;
unsigned long mbuf_cluster_failed;
+ unsigned long mbuf_defrag_failed;
unsigned long no_tx_desc_avail1;
unsigned long no_tx_desc_avail2;
+ unsigned long no_tx_dma_setup;
unsigned long no_tx_map_avail;
- unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
- unsigned long rx_overruns;
unsigned long rx_irq;
+ unsigned long rx_overruns;
unsigned long tx_irq;
- unsigned long link_irq;
/* 82547 workaround */
uint32_t tx_fifo_size;
@@ -444,15 +441,26 @@ struct adapter {
boolean_t pcix_82544;
boolean_t in_detach;
+#ifdef NIC_SEND_COMBINING
+ /* 0 = idle; 1xxxx int-pending; 3xxxx int + d pending + tdt */
+#define MIT_PENDING_INT 0x10000 /* pending interrupt */
+#define MIT_PENDING_TDT 0x30000 /* both intr and tdt write are pending */
+ uint32_t shadow_tdt;
+ uint32_t sc_enable;
+#endif /* NIC_SEND_COMBINING */
+#ifdef BATCH_DISPATCH
+ uint32_t batch_enable;
+#endif /* BATCH_DISPATCH */
+
#ifdef NIC_PARAVIRT
- struct em_dma_alloc csb_mem;
- struct paravirt_csb *csb;
- uint32_t rx_retries;
- uint32_t tdt_csb_count;
- uint32_t tdt_reg_count;
- uint32_t tdt_int_count;
- uint32_t guest_need_kick_count;
-#endif
+ struct em_dma_alloc csb_mem; /* phys address */
+ struct paravirt_csb *csb; /* virtual addr */
+ uint32_t rx_retries; /* optimize rx loop */
+	uint32_t tdt_csb_count;		// XXX stat
+	uint32_t tdt_reg_count;		// XXX stat
+	uint32_t tdt_int_count;		// XXX stat
+	uint32_t guest_need_kick_count;	// XXX stat
+#endif /* NIC_PARAVIRT */
struct e1000_hw_stats stats;
};
diff --git a/sys/dev/ixgb/if_ixgb.c b/sys/dev/ixgb/if_ixgb.c
index 2e62c1c..f01e673 100644
--- a/sys/dev/ixgb/if_ixgb.c
+++ b/sys/dev/ixgb/if_ixgb.c
@@ -1459,7 +1459,6 @@ fail_2:
fail_1:
bus_dma_tag_destroy(dma->dma_tag);
fail_0:
- dma->dma_map = NULL;
dma->dma_tag = NULL;
return (r);
}
diff --git a/sys/dev/netmap/if_em_netmap.h b/sys/dev/netmap/if_em_netmap.h
index 99eaa6f..3a8ccab 100644
--- a/sys/dev/netmap/if_em_netmap.h
+++ b/sys/dev/netmap/if_em_netmap.h
@@ -148,7 +148,7 @@ em_netmap_txsync(struct netmap_kring *kring, int flags)
/* device-specific */
struct e1000_tx_desc *curr = &txr->tx_base[nic_i];
- struct em_buffer *txbuf = &txr->tx_buffers[nic_i];
+ struct em_txbuffer *txbuf = &txr->tx_buffers[nic_i];
int flags = (slot->flags & NS_REPORT ||
nic_i == 0 || nic_i == report_frequency) ?
E1000_TXD_CMD_RS : 0;
@@ -241,12 +241,12 @@ em_netmap_rxsync(struct netmap_kring *kring, int flags)
nm_i = netmap_idx_n2k(kring, nic_i);
for (n = 0; ; n++) { // XXX no need to count
- struct e1000_rx_desc *curr = &rxr->rx_base[nic_i];
- uint32_t staterr = le32toh(curr->status);
+ union e1000_rx_desc_extended *curr = &rxr->rx_base[nic_i];
+ uint32_t staterr = le32toh(curr->wb.upper.status_error);
if ((staterr & E1000_RXD_STAT_DD) == 0)
break;
- ring->slot[nm_i].len = le16toh(curr->length);
+ ring->slot[nm_i].len = le16toh(curr->wb.upper.length);
ring->slot[nm_i].flags = slot_flags;
bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[nic_i].map,
BUS_DMASYNC_POSTREAD);
@@ -273,19 +273,19 @@ em_netmap_rxsync(struct netmap_kring *kring, int flags)
uint64_t paddr;
void *addr = PNMB(na, slot, &paddr);
- struct e1000_rx_desc *curr = &rxr->rx_base[nic_i];
- struct em_buffer *rxbuf = &rxr->rx_buffers[nic_i];
+ union e1000_rx_desc_extended *curr = &rxr->rx_base[nic_i];
+ struct em_rxbuffer *rxbuf = &rxr->rx_buffers[nic_i];
if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
goto ring_reset;
if (slot->flags & NS_BUF_CHANGED) {
/* buffer has changed, reload map */
- curr->buffer_addr = htole64(paddr);
+ curr->read.buffer_addr = htole64(paddr);
netmap_reload_map(na, rxr->rxtag, rxbuf->map, addr);
slot->flags &= ~NS_BUF_CHANGED;
}
- curr->status = 0;
+ curr->wb.upper.status_error = 0;
bus_dmamap_sync(rxr->rxtag, rxbuf->map,
BUS_DMASYNC_PREREAD);
nm_i = nm_next(nm_i, lim);