path: root/sys/dev/em/if_em.c
author    jfv <jfv@FreeBSD.org>    2007-05-04 00:00:12 +0000
committer jfv <jfv@FreeBSD.org>    2007-05-04 00:00:12 +0000
commit    0eab9692b55624688d6a271bf08293f710b7bb9d (patch)
tree      241fab48e3dc6309360be4b31eb5c20f54432389 /sys/dev/em/if_em.c
parent    25f570bc482092b6c3eab07fc4881fdd1b8b8f05 (diff)
Merge in Intel's new driver (6.5.0). This has a new
shared code infrastructure that is family specific and modular. There is also support for our latest gigabit NIC, the 82575, which is MSI-X and multiqueue capable. The new shared code changes some interfaces to the core code, but testing at Intel has been going on for months and it is fairly stable. I have attempted to be careful in retaining any fixes that CURRENT had and we did not; I apologize in advance if anything gets clobbered, I'm sure I'll hear about it :)

Approved by: pdeuskar
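In outline, em_attach() now brings up the family-specific shared code before any NVM or MAC access. The following is a condensed sketch of the attach-path hunks in the diff below (the ICH8/ICH9 flash mapping and the goto-based error unwinding are omitted, and error returns are simplified for illustration):

	/* Condensed from the em_attach() changes in this diff */
	em_identify_hardware(adapter);
	if (em_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		return (ENXIO);
	}
	/* Bind the family-specific shared code function tables */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		return (ENXIO);
	}
	e1000_get_bus_info(&adapter->hw);
	/* NVM checksum is retried once; a PCI-E link may still be asleep */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0 &&
	    e1000_validate_nvm_checksum(&adapter->hw) < 0)
		return (EIO);
	if (e1000_read_mac_addr(&adapter->hw) < 0)
		return (EIO);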
Diffstat (limited to 'sys/dev/em/if_em.c')
-rw-r--r--    sys/dev/em/if_em.c    2415
1 file changed, 1614 insertions(+), 801 deletions(-)
diff --git a/sys/dev/em/if_em.c b/sys/dev/em/if_em.c
index 570b30d..68ee526 100644
--- a/sys/dev/em/if_em.c
+++ b/sys/dev/em/if_em.c
@@ -1,6 +1,6 @@
/**************************************************************************
-Copyright (c) 2001-2006, Intel Corporation
+Copyright (c) 2001-2007, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -31,7 +31,7 @@ POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
-/*$FreeBSD$*/
+$FreeBSD$
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
@@ -76,8 +76,10 @@ POSSIBILITY OF SUCH DAMAGE.
#include <machine/in_cksum.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
-#include <dev/em/if_em_hw.h>
-#include <dev/em/if_em.h>
+
+#include "e1000_api.h"
+#include "e1000_82575.h"
+#include "if_em.h"
/*********************************************************************
* Set this to one to display debug statistics
@@ -85,17 +87,16 @@ POSSIBILITY OF SUCH DAMAGE.
int em_display_debug_stats = 0;
/*********************************************************************
- * Driver version
+ * Driver version:
*********************************************************************/
-
-char em_driver_version[] = "Version - 6.2.9";
+char em_driver_version[] = "Version - 6.5.0";
/*********************************************************************
* PCI Device ID Table
*
* Used by probe to select devices to load on
- * Last field stores an index into em_strings
+ * Last field stores an index into e1000_strings
* Last entry must be all 0s
*
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
@@ -154,9 +155,10 @@ static em_vendor_info_t em_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE,
+ { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
+ PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
PCI_ANY_ID, PCI_ANY_ID, 0},
-
{ 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
@@ -181,6 +183,20 @@ static em_vendor_info_t em_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
+
+ { 0x8086, E1000_DEV_ID_82575EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
+ PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82575EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82575EM_FIBER_SERDES,
+ PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
+ PCI_ANY_ID, PCI_ANY_ID, 0},
/* required last entry */
{ 0, 0, 0, 0, 0}
};
@@ -219,7 +235,7 @@ static void em_free_pci_resources(struct adapter *);
static void em_local_timer(void *);
static int em_hardware_init(struct adapter *);
static void em_setup_interface(device_t, struct adapter *);
-static void em_setup_transmit_structures(struct adapter *);
+static int em_setup_transmit_structures(struct adapter *);
static void em_initialize_transmit_unit(struct adapter *);
static int em_setup_receive_structures(struct adapter *);
static void em_initialize_receive_unit(struct adapter *);
@@ -235,12 +251,13 @@ static int em_rxeof(struct adapter *, int);
#ifndef __NO_STRICT_ALIGNMENT
static int em_fixup_rx(struct adapter *);
#endif
-static void em_receive_checksum(struct adapter *, struct em_rx_desc *,
+static void em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
struct mbuf *);
static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
uint32_t *, uint32_t *);
-static boolean_t em_tso_setup(struct adapter *, struct mbuf *,
- uint32_t *, uint32_t *);
+static boolean_t em_tso_setup(struct adapter *, struct mbuf *, uint32_t *,
+ uint32_t *);
+static boolean_t em_tso_adv_setup(struct adapter *, struct mbuf *, uint32_t *);
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
@@ -249,13 +266,15 @@ static void em_update_link_status(struct adapter *);
static int em_get_buf(struct adapter *, int);
static void em_enable_vlans(struct adapter *);
static int em_encap(struct adapter *, struct mbuf **);
+static int em_adv_encap(struct adapter *, struct mbuf **);
+static void em_tx_adv_ctx_setup(struct adapter *, struct mbuf *);
static void em_smartspeed(struct adapter *);
static int em_82547_fifo_workaround(struct adapter *, int);
static void em_82547_update_fifo_head(struct adapter *, int);
static int em_82547_tx_fifo_reset(struct adapter *);
-static void em_82547_move_tail(void *arg);
+static void em_82547_move_tail(void *);
static int em_dma_malloc(struct adapter *, bus_size_t,
- struct em_dma_alloc *, int);
+ struct em_dma_alloc *, int);
static void em_dma_free(struct adapter *, struct em_dma_alloc *);
static void em_print_debug_info(struct adapter *);
static int em_is_valid_ether_addr(uint8_t *);
@@ -265,19 +284,21 @@ static uint32_t em_fill_descriptors (bus_addr_t address, uint32_t length,
PDESC_ARRAY desc_array);
static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
- const char *, struct em_int_delay_info *, int, int);
+ const char *, struct em_int_delay_info *, int, int);
+/* Management and WOL Support */
+static void em_init_manageability(struct adapter *);
+static void em_release_manageability(struct adapter *);
+static void em_get_hw_control(struct adapter *);
+static void em_release_hw_control(struct adapter *);
+static void em_enable_wakeup(device_t);
-/*
- * Fast interrupt handler and legacy ithread/polling modes are
- * mutually exclusive.
- */
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
static void em_intr(void *);
#else
static int em_intr_fast(void *);
-static void em_add_int_process_limit(struct adapter *, const char *,
- const char *, int *, int);
+static void em_add_rx_process_limit(struct adapter *, const char *,
+ const char *, int *, int);
static void em_handle_rxtx(void *context, int pending);
static void em_handle_link(void *context, int pending);
#endif
@@ -310,14 +331,14 @@ MODULE_DEPEND(em, ether, 1, 1, 1);
* Tunable default values.
*********************************************************************/
-#define E1000_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
-#define E1000_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
+#define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
+#define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
#define M_TSO_LEN 66
-static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
-static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
-static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
-static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
+static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
+static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
+static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
+static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
static int em_rxd = EM_DEFAULT_RXD;
static int em_txd = EM_DEFAULT_TXD;
static int em_smart_pwr_down = FALSE;
@@ -330,9 +351,12 @@ TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
#ifndef DEVICE_POLLING
+/* How many packets rxeof tries to clean at a time */
static int em_rx_process_limit = 100;
TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
#endif
+/* Global used in WOL setup with multiport cards */
+static int global_quad_port_a = 0;
/*********************************************************************
* Device identification routine
@@ -401,6 +425,7 @@ em_attach(device_t dev)
struct adapter *adapter;
int tsize, rsize;
int error = 0;
+ u16 eeprom_data, device_id;
INIT_DEBUGOUT("em_attach: begin");
@@ -425,29 +450,64 @@ em_attach(device_t dev)
/* Determine hardware revision */
em_identify_hardware(adapter);
+ /* Setup PCI resources */
+ if (em_allocate_pci_resources(adapter)) {
+ device_printf(dev, "Allocation of PCI resources failed\n");
+ error = ENXIO;
+ goto err_pci;
+ }
+
+ /*
+ ** For ICH8 and family we need to
+ ** map the flash memory, and this
+ ** must happen after the MAC is
+ ** identified
+ */
+ if ((adapter->hw.mac.type == e1000_ich8lan) ||
+ (adapter->hw.mac.type == e1000_ich9lan)) {
+ int rid = EM_BAR_TYPE_FLASH;
+ adapter->flash_mem = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ /* This is used in the shared code */
+ adapter->hw.flash_address = (u8 *)adapter->flash_mem;
+ adapter->osdep.flash_bus_space_tag =
+ rman_get_bustag(adapter->flash_mem);
+ adapter->osdep.flash_bus_space_handle =
+ rman_get_bushandle(adapter->flash_mem);
+ }
+
+ /* Do Shared Code initialization */
+ if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
+ device_printf(dev, "Setup of Shared code failed\n");
+ error = ENXIO;
+ goto err_pci;
+ }
+
+ e1000_get_bus_info(&adapter->hw);
+
/* Set up some sysctls for the tunable interrupt delays */
em_add_int_delay_sysctl(adapter, "rx_int_delay",
"receive interrupt delay in usecs", &adapter->rx_int_delay,
- E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt);
+ E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
em_add_int_delay_sysctl(adapter, "tx_int_delay",
"transmit interrupt delay in usecs", &adapter->tx_int_delay,
- E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt);
- if (adapter->hw.mac_type >= em_82540) {
+ E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
+ if (adapter->hw.mac.type >= e1000_82540) {
em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
"receive interrupt delay limit in usecs",
&adapter->rx_abs_int_delay,
- E1000_REG_OFFSET(&adapter->hw, RADV),
+ E1000_REGISTER(&adapter->hw, E1000_RADV),
em_rx_abs_int_delay_dflt);
em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
"transmit interrupt delay limit in usecs",
&adapter->tx_abs_int_delay,
- E1000_REG_OFFSET(&adapter->hw, TADV),
+ E1000_REGISTER(&adapter->hw, E1000_TADV),
em_tx_abs_int_delay_dflt);
}
#ifndef DEVICE_POLLING
/* Sysctls for limiting the amount of work done in the taskqueue */
- em_add_int_process_limit(adapter, "rx_processing_limit",
+ em_add_rx_process_limit(adapter, "rx_processing_limit",
"max number of rx packets to process", &adapter->rx_process_limit,
em_rx_process_limit);
#endif
@@ -455,20 +515,20 @@ em_attach(device_t dev)
/*
* Validate number of transmit and receive descriptors. It
* must not exceed hardware maximum, and must be multiple
- * of EM_DBA_ALIGN.
+ * of E1000_DBA_ALIGN.
*/
- if (((em_txd * sizeof(struct em_tx_desc)) % EM_DBA_ALIGN) != 0 ||
- (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
- (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
+ if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
+ (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
+ (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
(em_txd < EM_MIN_TXD)) {
device_printf(dev, "Using %d TX descriptors instead of %d!\n",
EM_DEFAULT_TXD, em_txd);
adapter->num_tx_desc = EM_DEFAULT_TXD;
} else
adapter->num_tx_desc = em_txd;
- if (((em_rxd * sizeof(struct em_rx_desc)) % EM_DBA_ALIGN) != 0 ||
- (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
- (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
+ if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
+ (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
+ (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
(em_rxd < EM_MIN_RXD)) {
device_printf(dev, "Using %d RX descriptors instead of %d!\n",
EM_DEFAULT_RXD, em_rxd);
@@ -476,43 +536,37 @@ em_attach(device_t dev)
} else
adapter->num_rx_desc = em_rxd;
- adapter->hw.autoneg = DO_AUTO_NEG;
- adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
- adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
- adapter->hw.tbi_compatibility_en = TRUE;
- adapter->rx_buffer_len = EM_RXBUFFER_2048;
+ adapter->hw.mac.autoneg = DO_AUTO_NEG;
+ adapter->hw.phy.wait_for_link = FALSE;
+ adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+ adapter->rx_buffer_len = 2048;
- adapter->hw.phy_init_script = 1;
- adapter->hw.phy_reset_disable = FALSE;
+ e1000_init_script_state_82541(&adapter->hw, TRUE);
+ e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
+
+ /* Copper options */
+ if (adapter->hw.media_type == e1000_media_type_copper) {
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+ adapter->hw.phy.disable_polarity_correction = FALSE;
+ adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
+ }
-#ifndef EM_MASTER_SLAVE
- adapter->hw.master_slave = em_ms_hw_default;
-#else
- adapter->hw.master_slave = EM_MASTER_SLAVE;
-#endif
/*
* Set the max frame size assuming standard ethernet
* sized frames.
*/
- adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ adapter->hw.mac.max_frame_size =
+ ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
- adapter->hw.min_frame_size = MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
+ adapter->hw.mac.min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
/*
* This controls when hardware reports transmit completion
* status.
*/
- adapter->hw.report_tx_early = 1;
- if (em_allocate_pci_resources(adapter)) {
- device_printf(dev, "Allocation of PCI resources failed\n");
- error = ENXIO;
- goto err_pci;
- }
-
- /* Initialize eeprom parameters */
- em_init_eeprom_params(&adapter->hw);
+ adapter->hw.mac.report_tx_early = 1;
- tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
+ tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
EM_DBA_ALIGN);
/* Allocate Transmit Descriptor ring */
@@ -521,9 +575,10 @@ em_attach(device_t dev)
error = ENOMEM;
goto err_tx_desc;
}
- adapter->tx_desc_base = (struct em_tx_desc *)adapter->txdma.dma_vaddr;
+ adapter->tx_desc_base =
+ (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
- rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
+ rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
EM_DBA_ALIGN);
/* Allocate Receive Descriptor ring */
@@ -532,7 +587,30 @@ em_attach(device_t dev)
error = ENOMEM;
goto err_rx_desc;
}
- adapter->rx_desc_base = (struct em_rx_desc *)adapter->rxdma.dma_vaddr;
+ adapter->rx_desc_base =
+ (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
+
+ /* Make sure we have a good EEPROM before we read from it */
+ if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
+ /*
+ ** Some PCI-E parts fail the first check due to
+ ** the link being in sleep state, call it again,
+ ** if it fails a second time its a real issue.
+ */
+ if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
+ device_printf(dev,
+ "The EEPROM Checksum Is Not Valid\n");
+ error = EIO;
+ goto err_hw_init;
+ }
+ }
+
+ if (e1000_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
+ device_printf(dev, "EEPROM read error "
+ "reading part number\n");
+ error = EIO;
+ goto err_hw_init;
+ }
/* Initialize the hardware */
if (em_hardware_init(adapter)) {
@@ -542,64 +620,122 @@ em_attach(device_t dev)
}
/* Copy the permanent MAC address out of the EEPROM */
- if (em_read_mac_addr(&adapter->hw) < 0) {
+ if (e1000_read_mac_addr(&adapter->hw) < 0) {
device_printf(dev, "EEPROM read error while reading MAC"
" address\n");
error = EIO;
goto err_hw_init;
}
- if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
+ if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
device_printf(dev, "Invalid MAC address\n");
error = EIO;
goto err_hw_init;
}
- /* Allocate transmit descriptors and buffers */
- if (em_allocate_transmit_structures(adapter)) {
- device_printf(dev, "Could not setup transmit structures\n");
- error = ENOMEM;
- goto err_tx_struct;
- }
-
- /* Allocate receive descriptors and buffers */
- if (em_allocate_receive_structures(adapter)) {
- device_printf(dev, "Could not setup receive structures\n");
- error = ENOMEM;
- goto err_rx_struct;
- }
-
/* Setup OS specific network interface */
em_setup_interface(dev, adapter);
em_allocate_intr(adapter);
/* Initialize statistics */
- em_clear_hw_cntrs(&adapter->hw);
em_update_stats_counters(adapter);
- adapter->hw.get_link_status = 1;
+
+ adapter->hw.mac.get_link_status = 1;
em_update_link_status(adapter);
/* Indicate SOL/IDER usage */
- if (em_check_phy_reset_block(&adapter->hw))
+ if (e1000_check_reset_block(&adapter->hw))
device_printf(dev,
"PHY reset is blocked due to SOL/IDER session.\n");
- /* Identify 82544 on PCIX */
- em_get_bus_info(&adapter->hw);
- if(adapter->hw.bus_type == em_bus_type_pcix && adapter->hw.mac_type == em_82544)
+ /* Determine if we have to control management hardware */
+ adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
+
+ /*
+ * Setup Wake-on-Lan
+ */
+ switch (adapter->hw.mac.type) {
+
+ case e1000_82542:
+ case e1000_82543:
+ break;
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82571:
+ case e1000_80003es2lan:
+ if (adapter->hw.bus.func == 1)
+ e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+ else
+ e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ eeprom_data &= EM_EEPROM_APME;
+ break;
+ default:
+ /* APME bit in EEPROM is mapped to WUC.APME */
+ eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) &
+ E1000_WUC_APME;
+ break;
+ }
+ if (eeprom_data)
+ adapter->wol = E1000_WUFC_MAG;
+ /*
+ * We have the eeprom settings, now apply the special cases
+ * where the eeprom may be wrong or the board won't support
+ * wake on lan on a particular port
+ */
+ device_id = pci_get_device(dev);
+ switch (device_id) {
+ case E1000_DEV_ID_82546GB_PCIE:
+ adapter->wol = 0;
+ break;
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ /* Wake events only supported on port A for dual fiber
+ * regardless of eeprom setting */
+ if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+ E1000_STATUS_FUNC_1)
+ adapter->wol = 0;
+ break;
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+ /* if quad port adapter, disable WoL on all but port A */
+ if (global_quad_port_a != 0)
+ adapter->wol = 0;
+ /* Reset for multiple quad port adapters */
+ if (++global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ }
+
+ /* Do we need workaround for 82544 PCI-X adapter? */
+ if (adapter->hw.bus.type == e1000_bus_type_pcix &&
+ adapter->hw.mac.type == e1000_82544)
adapter->pcix_82544 = TRUE;
else
adapter->pcix_82544 = FALSE;
+ /* Get control from any management/hw control */
+ if (((adapter->hw.mac.type != e1000_82573) &&
+ (adapter->hw.mac.type != e1000_ich8lan) &&
+ (adapter->hw.mac.type != e1000_ich9lan)) ||
+ !e1000_check_mng_mode(&adapter->hw))
+ em_get_hw_control(adapter);
+
+ /* Tell the stack that the interface is not active */
+ adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
INIT_DEBUGOUT("em_attach: end");
return (0);
-err_rx_struct:
- em_free_transmit_structures(adapter);
-err_tx_struct:
err_hw_init:
+ em_release_hw_control(adapter);
+ e1000_remove_device(&adapter->hw);
em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
em_dma_free(adapter, &adapter->txdma);
@@ -635,11 +771,25 @@ em_detach(device_t dev)
ether_poll_deregister(ifp);
#endif
+ em_disable_intr(adapter);
em_free_intr(adapter);
EM_LOCK(adapter);
adapter->in_detach = 1;
em_stop(adapter);
- em_phy_hw_reset(&adapter->hw);
+ e1000_phy_hw_reset(&adapter->hw);
+
+ em_release_manageability(adapter);
+ if (((adapter->hw.mac.type != e1000_82573) &&
+ (adapter->hw.mac.type != e1000_ich8lan) &&
+ (adapter->hw.mac.type != e1000_ich9lan)) ||
+ !e1000_check_mng_mode(&adapter->hw))
+ em_release_hw_control(adapter);
+ if (adapter->wol) {
+ E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
+ E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
+ em_enable_wakeup(dev);
+ }
+
EM_UNLOCK(adapter);
ether_ifdetach(adapter->ifp);
@@ -650,6 +800,7 @@ em_detach(device_t dev)
bus_generic_detach(dev);
if_free(ifp);
+ e1000_remove_device(&adapter->hw);
em_free_transmit_structures(adapter);
em_free_receive_structures(adapter);
@@ -679,11 +830,7 @@ em_detach(device_t dev)
static int
em_shutdown(device_t dev)
{
- struct adapter *adapter = device_get_softc(dev);
- EM_LOCK(adapter);
- em_stop(adapter);
- EM_UNLOCK(adapter);
- return (0);
+ return em_suspend(dev);
}
/*
@@ -696,6 +843,19 @@ em_suspend(device_t dev)
EM_LOCK(adapter);
em_stop(adapter);
+
+ em_release_manageability(adapter);
+ if (((adapter->hw.mac.type != e1000_82573) &&
+ (adapter->hw.mac.type != e1000_ich8lan) &&
+ (adapter->hw.mac.type != e1000_ich9lan)) ||
+ !e1000_check_mng_mode(&adapter->hw))
+ em_release_hw_control(adapter);
+ if (adapter->wol) {
+ E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
+ E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
+ em_enable_wakeup(dev);
+ }
+
EM_UNLOCK(adapter);
return bus_generic_suspend(dev);
@@ -709,9 +869,19 @@ em_resume(device_t dev)
EM_LOCK(adapter);
em_init_locked(adapter);
+
+ /* Get control from any management/hw control */
+ if (((adapter->hw.mac.type != e1000_82573) &&
+ (adapter->hw.mac.type != e1000_ich8lan) &&
+ (adapter->hw.mac.type != e1000_ich9lan)) ||
+ !e1000_check_mng_mode(&adapter->hw))
+ em_get_hw_control(adapter);
+ em_init_manageability(adapter);
+
if ((ifp->if_flags & IFF_UP) &&
(ifp->if_drv_flags & IFF_DRV_RUNNING))
em_start_locked(ifp);
+
EM_UNLOCK(adapter);
return bus_generic_resume(dev);
@@ -748,10 +918,13 @@ em_start_locked(struct ifnet *ifp)
if (m_head == NULL)
break;
/*
- * em_encap() can modify our pointer, and or make it NULL on
- * failure. In that event, we can't requeue.
+ * Encapsulation can modify our pointer, and or make it
+ * NULL on failure. In that event, we can't requeue.
+ *
+ * We now use a pointer to accomodate legacy and
+ * advanced transmit functions.
*/
- if (em_encap(adapter, &m_head)) {
+ if (adapter->em_xmit(adapter, &m_head)) {
if (m_head == NULL)
break;
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
@@ -760,7 +933,7 @@ em_start_locked(struct ifnet *ifp)
}
/* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, m_head);
+ BPF_MTAP(ifp, m_head);
/* Set timeout in case hardware has problems transmitting. */
adapter->watchdog_timer = EM_TX_TIMEOUT;
@@ -827,25 +1000,27 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
EM_LOCK(adapter);
- switch (adapter->hw.mac_type) {
- case em_82573:
+ switch (adapter->hw.mac.type) {
+ case e1000_82573:
/*
* 82573 only supports jumbo frames
* if ASPM is disabled.
*/
- em_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1,
- &eeprom_data);
- if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
+ e1000_read_nvm(&adapter->hw,
+ NVM_INIT_3GIO_3, 1, &eeprom_data);
+ if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
max_frame_size = ETHER_MAX_LEN;
break;
}
/* Allow Jumbo frames - fall thru */
- case em_82571:
- case em_82572:
- case em_80003es2lan: /* Limit Jumbo Frame size */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_ich9lan:
+ case e1000_82575:
+ case e1000_80003es2lan: /* Limit Jumbo Frame size */
max_frame_size = 9234;
break;
- case em_ich8lan:
+ case e1000_ich8lan:
/* ICH8 does not support jumbo frames */
max_frame_size = ETHER_MAX_LEN;
break;
@@ -860,14 +1035,15 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
}
ifp->if_mtu = ifr->ifr_mtu;
- adapter->hw.max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ adapter->hw.mac.max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
em_init_locked(adapter);
EM_UNLOCK(adapter);
break;
}
case SIOCSIFFLAGS:
- IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
+ IOCTL_DEBUGOUT("ioctl rcv'd:\
+ SIOCSIFFLAGS (Set Interface Flags)");
EM_LOCK(adapter);
if (ifp->if_flags & IFF_UP) {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
@@ -878,11 +1054,9 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
}
} else
em_init_locked(adapter);
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ } else
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
em_stop(adapter);
- }
- }
adapter->if_flags = ifp->if_flags;
EM_UNLOCK(adapter);
break;
@@ -893,7 +1067,8 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
EM_LOCK(adapter);
em_disable_intr(adapter);
em_set_multi(adapter);
- if (adapter->hw.mac_type == em_82542_rev2_0) {
+ if (adapter->hw.mac.type == e1000_82542 &&
+ adapter->hw.revision_id == E1000_REVISION_2) {
em_initialize_receive_unit(adapter);
}
#ifdef DEVICE_POLLING
@@ -906,15 +1081,16 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
case SIOCSIFMEDIA:
/* Check SOL/IDER usage */
EM_LOCK(adapter);
- if (em_check_phy_reset_block(&adapter->hw)) {
+ if (e1000_check_reset_block(&adapter->hw)) {
EM_UNLOCK(adapter);
device_printf(adapter->dev, "Media change is"
- "blocked due to SOL/IDER session.\n");
+ " blocked due to SOL/IDER session.\n");
break;
}
EM_UNLOCK(adapter);
case SIOCGIFMEDIA:
- IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
+ IOCTL_DEBUGOUT("ioctl rcv'd: \
+ SIOCxIFMEDIA (Get/Set Interface Media)");
error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
break;
case SIOCSIFCAP:
@@ -986,26 +1162,26 @@ em_watchdog(struct adapter *adapter)
EM_LOCK_ASSERT(adapter);
/*
- * The timer is set to 5 every time em_start() queues a packet.
- * Then em_txeof() keeps resetting to 5 as long as it cleans at
- * least one descriptor.
- * Finally, anytime all descriptors are clean the timer is
- * set to 0.
- */
+ ** The timer is set to 5 every time start queues a packet.
+ ** Then txeof keeps resetting to 5 as long as it cleans at
+ ** least one descriptor.
+ ** Finally, anytime all descriptors are clean the timer is
+ ** set to 0.
+ */
if (adapter->watchdog_timer == 0 || --adapter->watchdog_timer)
return;
/* If we are in this routine because of pause frames, then
* don't reset the hardware.
*/
- if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
+ if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+ E1000_STATUS_TXOFF) {
adapter->watchdog_timer = EM_TX_TIMEOUT;
return;
}
- if (em_check_for_link(&adapter->hw) == 0)
+ if (e1000_check_for_link(&adapter->hw) == 0)
device_printf(adapter->dev, "watchdog timeout -- resetting\n");
-
adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
adapter->watchdog_events++;
@@ -1047,42 +1223,49 @@ em_init_locked(struct adapter *adapter)
* Default allocation: PBA=30K for Rx, leaving 10K for Tx.
* Note: default does not leave enough room for Jumbo Frame >10k.
*/
- switch (adapter->hw.mac_type) {
- case em_82547:
- case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
- if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
+ switch (adapter->hw.mac.type) {
+ case e1000_82547:
+ case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
+ if (adapter->hw.mac.max_frame_size > 8192)
pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
else
pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
adapter->tx_fifo_head = 0;
adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
- adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
+ adapter->tx_fifo_size =
+ (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
break;
- case em_80003es2lan: /* 80003es2lan: Total Packet Buffer is 48K */
- case em_82571: /* 82571: Total Packet Buffer is 48K */
- case em_82572: /* 82572: Total Packet Buffer is 48K */
+ /* Total Packet Buffer on these is 48K */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82575:
+ case e1000_80003es2lan:
pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
break;
- case em_82573: /* 82573: Total Packet Buffer is 32K */
- /* Jumbo frames not supported */
+ case e1000_82573: /* 82573: Total Packet Buffer is 32K */
pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
break;
- case em_ich8lan:
+ case e1000_ich9lan:
+#define E1000_PBA_10K 0x000A
+ pba = E1000_PBA_10K;
+ break;
+ case e1000_ich8lan:
pba = E1000_PBA_8K;
break;
default:
/* Devices before 82547 had a Packet Buffer of 64K. */
- if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
+ if (adapter->hw.mac.max_frame_size > 8192)
pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
else
pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
}
INIT_DEBUGOUT1("em_init: pba=%dK",pba);
- E1000_WRITE_REG(&adapter->hw, PBA, pba);
+ E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
/* Get the latest mac address, User can use a LAA */
- bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr, ETHER_ADDR_LEN);
+ bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
+ ETHER_ADDR_LEN);
/* Initialize the hardware */
if (em_hardware_init(adapter)) {
@@ -1094,16 +1277,24 @@ em_init_locked(struct adapter *adapter)
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
em_enable_vlans(adapter);
+ /* Set hardware offload abilities */
ifp->if_hwassist = 0;
- if (adapter->hw.mac_type >= em_82543) {
+ if (adapter->hw.mac.type >= e1000_82543) {
if (ifp->if_capenable & IFCAP_TXCSUM)
- ifp->if_hwassist = EM_CHECKSUM_FEATURES;
+ ifp->if_hwassist |= EM_CHECKSUM_FEATURES;
if (ifp->if_capenable & IFCAP_TSO)
ifp->if_hwassist |= EM_TCPSEG_FEATURES;
}
+ /* Configure for OS presence */
+ em_init_manageability(adapter);
+
/* Prepare transmit descriptors and buffers */
- em_setup_transmit_structures(adapter);
+ if (em_setup_transmit_structures(adapter)) {
+ device_printf(dev, "Could not setup transmit structures\n");
+ em_stop(adapter);
+ return;
+ }
em_initialize_transmit_unit(adapter);
/* Setup Multicast table */
@@ -1124,7 +1315,8 @@ em_init_locked(struct adapter *adapter)
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
callout_reset(&adapter->timer, hz, em_local_timer, adapter);
- em_clear_hw_cntrs(&adapter->hw);
+ e1000_clear_hw_cntrs_base_generic(&adapter->hw);
+
#ifdef DEVICE_POLLING
/*
* Only enable interrupts if we are not polling, make sure
@@ -1137,7 +1329,7 @@ em_init_locked(struct adapter *adapter)
em_enable_intr(adapter);
/* Don't reset the phy next time init gets called */
- adapter->hw.phy_reset_disable = TRUE;
+ adapter->hw.phy.reset_disable = TRUE;
}
static void
@@ -1154,7 +1346,7 @@ em_init(void *arg)
#ifdef DEVICE_POLLING
/*********************************************************************
*
- * Legacy polling routine
+ * Legacy polling routine
*
*********************************************************************/
static void
@@ -1170,13 +1362,14 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
}
if (cmd == POLL_AND_CHECK_STATUS) {
- reg_icr = E1000_READ_REG(&adapter->hw, ICR);
+ reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
callout_stop(&adapter->timer);
- adapter->hw.get_link_status = 1;
- em_check_for_link(&adapter->hw);
+ adapter->hw.mac.get_link_status = 1;
+ e1000_check_for_link(&adapter->hw);
em_update_link_status(adapter);
- callout_reset(&adapter->timer, hz, em_local_timer, adapter);
+ callout_reset(&adapter->timer, hz,
+ em_local_timer, adapter);
}
}
em_rxeof(adapter, count);
@@ -1189,9 +1382,10 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
/*********************************************************************
*
- * Legacy Interrupt Service routine
+ * Legacy Interrupt Service routine
*
*********************************************************************/
+
static void
em_intr(void *arg)
{
@@ -1200,7 +1394,6 @@ em_intr(void *arg)
uint32_t reg_icr;
EM_LOCK(adapter);
-
ifp = adapter->ifp;
if (ifp->if_capenable & IFCAP_POLLING) {
@@ -1209,9 +1402,10 @@ em_intr(void *arg)
}
for (;;) {
- reg_icr = E1000_READ_REG(&adapter->hw, ICR);
- if (adapter->hw.mac_type >= em_82571 &&
- (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
+ reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
+
+ if (adapter->hw.mac.type >= e1000_82571 &&
+ (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
break;
else if (reg_icr == 0)
break;
@@ -1233,10 +1427,11 @@ em_intr(void *arg)
/* Link status change */
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
callout_stop(&adapter->timer);
- adapter->hw.get_link_status = 1;
- em_check_for_link(&adapter->hw);
+ adapter->hw.mac.get_link_status = 1;
+ e1000_check_for_link(&adapter->hw);
em_update_link_status(adapter);
- callout_reset(&adapter->timer, hz, em_local_timer, adapter);
+ callout_reset(&adapter->timer, hz,
+ em_local_timer, adapter);
}
if (reg_icr & E1000_ICR_RXO)
@@ -1246,11 +1441,10 @@ em_intr(void *arg)
if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp);
-
EM_UNLOCK(adapter);
}
-#else /* if not DEVICE_POLLING, then fast interrupt routines only */
+#else /* if not DEVICE_POLLING, then fast interrupt routines only */
static void
em_handle_link(void *context, int pending)
@@ -1267,8 +1461,8 @@ em_handle_link(void *context, int pending)
}
callout_stop(&adapter->timer);
- adapter->hw.get_link_status = 1;
- em_check_for_link(&adapter->hw);
+ adapter->hw.mac.get_link_status = 1;
+ e1000_check_for_link(&adapter->hw);
em_update_link_status(adapter);
callout_reset(&adapter->timer, hz, em_local_timer, adapter);
EM_UNLOCK(adapter);
@@ -1304,7 +1498,7 @@ em_handle_rxtx(void *context, int pending)
/*********************************************************************
*
- * Fast Interrupt Service routine
+ * Fast Interrupt Service routine
*
*********************************************************************/
static int
@@ -1316,7 +1510,7 @@ em_intr_fast(void *arg)
ifp = adapter->ifp;
- reg_icr = E1000_READ_REG(&adapter->hw, ICR);
+ reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* Hot eject? */
if (reg_icr == 0xffffffff)
@@ -1330,7 +1524,7 @@ em_intr_fast(void *arg)
* Starting with the 82571 chip, bit 31 should be used to
* determine whether the interrupt belongs to us.
*/
- if (adapter->hw.mac_type >= em_82571 &&
+ if (adapter->hw.mac.type >= e1000_82571 &&
(reg_icr & E1000_ICR_INT_ASSERTED) == 0)
return (FILTER_STRAY);
@@ -1364,11 +1558,12 @@ static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct adapter *adapter = ifp->if_softc;
+ u_char fiber_type = IFM_1000_SX;
INIT_DEBUGOUT("em_media_status: begin");
EM_LOCK(adapter);
- em_check_for_link(&adapter->hw);
+ e1000_check_for_link(&adapter->hw);
em_update_link_status(adapter);
ifmr->ifm_status = IFM_AVALID;
@@ -1381,12 +1576,11 @@ em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
ifmr->ifm_status |= IFM_ACTIVE;
- if ((adapter->hw.media_type == em_media_type_fiber) ||
- (adapter->hw.media_type == em_media_type_internal_serdes)) {
- if (adapter->hw.mac_type == em_82545)
- ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
- else
- ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
+ if ((adapter->hw.media_type == e1000_media_type_fiber) ||
+ (adapter->hw.media_type == e1000_media_type_internal_serdes)) {
+ if (adapter->hw.mac.type == e1000_82545)
+ fiber_type = IFM_1000_LX;
+ ifmr->ifm_active |= fiber_type | IFM_FDX;
} else {
switch (adapter->link_speed) {
case 10:
@@ -1429,30 +1623,30 @@ em_media_change(struct ifnet *ifp)
EM_LOCK(adapter);
switch (IFM_SUBTYPE(ifm->ifm_media)) {
case IFM_AUTO:
- adapter->hw.autoneg = DO_AUTO_NEG;
- adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+ adapter->hw.mac.autoneg = DO_AUTO_NEG;
+ adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
break;
case IFM_1000_LX:
case IFM_1000_SX:
case IFM_1000_T:
- adapter->hw.autoneg = DO_AUTO_NEG;
- adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+ adapter->hw.mac.autoneg = DO_AUTO_NEG;
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
break;
case IFM_100_TX:
- adapter->hw.autoneg = FALSE;
- adapter->hw.autoneg_advertised = 0;
+ adapter->hw.mac.autoneg = FALSE;
+ adapter->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
- adapter->hw.forced_speed_duplex = em_100_full;
+ adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
else
- adapter->hw.forced_speed_duplex = em_100_half;
+ adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
break;
case IFM_10_T:
- adapter->hw.autoneg = FALSE;
- adapter->hw.autoneg_advertised = 0;
+ adapter->hw.mac.autoneg = FALSE;
+ adapter->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
- adapter->hw.forced_speed_duplex = em_10_full;
+ adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
else
- adapter->hw.forced_speed_duplex = em_10_half;
+ adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
break;
default:
device_printf(adapter->dev, "Unsupported media type\n");
@@ -1461,7 +1655,7 @@ em_media_change(struct ifnet *ifp)
/* As the speed/duplex settings my have changed we need to
* reset the PHY.
*/
- adapter->hw.phy_reset_disable = FALSE;
+ adapter->hw.phy.reset_disable = FALSE;
em_init_locked(adapter);
EM_UNLOCK(adapter);
@@ -1475,6 +1669,7 @@ em_media_change(struct ifnet *ifp)
*
* return 0 on success, positive on failure
**********************************************************************/
+
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
@@ -1482,45 +1677,45 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
bus_dma_segment_t segs[EM_MAX_SCATTER];
bus_dmamap_t map;
struct em_buffer *tx_buffer, *tx_buffer_mapped;
- struct em_tx_desc *current_tx_desc;
+ struct e1000_tx_desc *ctxd = NULL;
struct mbuf *m_head;
uint32_t txd_upper, txd_lower, txd_used, txd_saved;
int nsegs, i, j, first, last = 0;
int error, do_tso, tso_desc = 0;
m_head = *m_headp;
- current_tx_desc = NULL;
txd_upper = txd_lower = txd_used = txd_saved = 0;
do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
- /*
- * Force a cleanup if number of TX descriptors
- * available hits the threshold.
- */
+ /*
+ * Force a cleanup if number of TX descriptors
+ * available hits the threshold
+ */
if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
em_txeof(adapter);
- if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
+ /* Now do we at least have a minimal? */
+ if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
adapter->no_tx_desc_avail1++;
return (ENOBUFS);
}
}
+
/*
- * TSO workaround:
- * If an mbuf contains only the IP and TCP header we have
- * to pull 4 bytes of data into it.
+ * TSO workaround:
+ * If an mbuf is only header we need
+ * to pull 4 bytes of data into it.
*/
if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
m_head = m_pullup(m_head, M_TSO_LEN + 4);
*m_headp = m_head;
- if (m_head == NULL) {
+ if (m_head == NULL)
return (ENOBUFS);
- }
}
/*
- * Map the packet for DMA.
+ * Map the packet for DMA
*
* Capture the first descriptor index,
* this descriptor will have the index
@@ -1532,8 +1727,8 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
tx_buffer_mapped = tx_buffer;
map = tx_buffer->map;
- error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, *m_headp, segs,
- &nsegs, BUS_DMA_NOWAIT);
+ error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
+ *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
/*
* There are two types of errors we can (try) to handle:
@@ -1549,7 +1744,6 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
m = m_defrag(*m_headp, M_DONTWAIT);
if (m == NULL) {
- /* Assume m_defrag(9) used only m_get(9). */
adapter->mbuf_alloc_failed++;
m_freem(*m_headp);
*m_headp = NULL;
@@ -1557,8 +1751,9 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
}
*m_headp = m;
- error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, *m_headp,
- segs, &nsegs, BUS_DMA_NOWAIT);
+ /* Try it again */
+ error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
+ *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
if (error == ENOMEM) {
adapter->no_tx_dma_setup++;
@@ -1591,17 +1786,17 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
adapter->tx_tso = FALSE;
}
- if (nsegs > adapter->num_tx_desc_avail - 2) {
- adapter->no_tx_desc_avail2++;
+ if (nsegs > (adapter->num_tx_desc_avail - 2)) {
+ adapter->no_tx_desc_avail2++;
bus_dmamap_unload(adapter->txtag, map);
return (ENOBUFS);
- }
+ }
m_head = *m_headp;
/* Do hardware assists */
- if (ifp->if_hwassist) {
- if (do_tso &&
- em_tso_setup(adapter, m_head, &txd_upper, &txd_lower)) {
+ if (ifp->if_hwassist > 0) {
+ if (do_tso && em_tso_setup(adapter, m_head,
+ &txd_upper, &txd_lower)) {
/* we need to make a final sentinel transmit desc */
tso_desc = TRUE;
} else
@@ -1610,17 +1805,17 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
}
i = adapter->next_avail_tx_desc;
- if (adapter->pcix_82544)
+ if (adapter->pcix_82544)
txd_saved = i;
+ /* Set up our transmit descriptors */
for (j = 0; j < nsegs; j++) {
bus_size_t seg_len;
bus_addr_t seg_addr;
- /* If adapter is 82544 and on PCIX bus. */
+ /* If adapter is 82544 and on PCIX bus */
if(adapter->pcix_82544) {
DESC_ARRAY desc_array;
uint32_t array_elements, counter;
-
/*
* Check the Address and Length combination and
* split the data accordingly
@@ -1635,24 +1830,24 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
return (ENOBUFS);
}
tx_buffer = &adapter->tx_buffer_area[i];
- current_tx_desc = &adapter->tx_desc_base[i];
- current_tx_desc->buffer_addr = htole64(
- desc_array.descriptor[counter].address);
- current_tx_desc->lower.data = htole32(
- (adapter->txd_cmd | txd_lower |
- (uint16_t)desc_array.descriptor[counter].length));
- current_tx_desc->upper.data = htole32((txd_upper));
+ ctxd = &adapter->tx_desc_base[i];
+ ctxd->buffer_addr = htole64(
+ desc_array.descriptor[counter].address);
+ ctxd->lower.data = htole32(
+ (adapter->txd_cmd | txd_lower | (uint16_t)
+ desc_array.descriptor[counter].length));
+ ctxd->upper.data =
+ htole32((txd_upper));
last = i;
if (++i == adapter->num_tx_desc)
- i = 0;
-
+ i = 0;
tx_buffer->m_head = NULL;
tx_buffer->next_eop = -1;
txd_used++;
- }
+ }
} else {
tx_buffer = &adapter->tx_buffer_area[i];
- current_tx_desc = &adapter->tx_desc_base[i];
+ ctxd = &adapter->tx_desc_base[i];
seg_addr = segs[j].ds_addr;
seg_len = segs[j].ds_len;
/*
@@ -1662,31 +1857,31 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
*/
if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
seg_len -= 4;
- current_tx_desc->buffer_addr = htole64(seg_addr);
- current_tx_desc->lower.data = htole32(
+ ctxd->buffer_addr = htole64(seg_addr);
+ ctxd->lower.data = htole32(
adapter->txd_cmd | txd_lower | seg_len);
- current_tx_desc->upper.data =
+ ctxd->upper.data =
htole32(txd_upper);
if (++i == adapter->num_tx_desc)
i = 0;
- /* Now make the sentinel */
+ /* Now make the sentinel */
++txd_used; /* using an extra txd */
- current_tx_desc = &adapter->tx_desc_base[i];
+ ctxd = &adapter->tx_desc_base[i];
tx_buffer = &adapter->tx_buffer_area[i];
- current_tx_desc->buffer_addr =
- htole64(seg_addr + seg_len);
- current_tx_desc->lower.data = htole32(
+ ctxd->buffer_addr =
+ htole64(seg_addr + seg_len);
+ ctxd->lower.data = htole32(
adapter->txd_cmd | txd_lower | 4);
- current_tx_desc->upper.data =
+ ctxd->upper.data =
htole32(txd_upper);
last = i;
if (++i == adapter->num_tx_desc)
i = 0;
} else {
- current_tx_desc->buffer_addr = htole64(seg_addr);
- current_tx_desc->lower.data = htole32(
+ ctxd->buffer_addr = seg_addr;
+ ctxd->lower.data = htole32(
adapter->txd_cmd | txd_lower | seg_len);
- current_tx_desc->upper.data =
+ ctxd->upper.data =
htole32(txd_upper);
last = i;
if (++i == adapter->num_tx_desc)
@@ -1708,24 +1903,23 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
if (m_head->m_flags & M_VLANTAG) {
/* Set the vlan id. */
- current_tx_desc->upper.fields.special =
+ ctxd->upper.fields.special =
htole16(m_head->m_pkthdr.ether_vtag);
+ /* Tell hardware to add tag */
+ ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
+ }
- /* Tell hardware to add tag. */
- current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
- }
-
- tx_buffer->m_head = m_head;
+ tx_buffer->m_head = m_head;
tx_buffer_mapped->map = tx_buffer->map;
tx_buffer->map = map;
- bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
- /*
- * Last Descriptor of Packet
+ /*
+ * Last Descriptor of Packet
* needs End Of Packet (EOP)
* and Report Status (RS)
- */
- current_tx_desc->lower.data |=
+ */
+ ctxd->lower.data |=
htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
/*
* Keep track in the first buffer which
@@ -1735,22 +1929,194 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
tx_buffer->next_eop = last;
/*
- * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
+ * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
* that this frame is available to transmit.
*/
bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- if (adapter->hw.mac_type == em_82547 &&
+ if (adapter->hw.mac.type == e1000_82547 &&
adapter->link_duplex == HALF_DUPLEX)
em_82547_move_tail(adapter);
else {
- E1000_WRITE_REG(&adapter->hw, TDT, i);
- if (adapter->hw.mac_type == em_82547)
- em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
+ E1000_WRITE_REG(&adapter->hw, E1000_TDT, i);
+ if (adapter->hw.mac.type == e1000_82547)
+ em_82547_update_fifo_head(adapter,
+ m_head->m_pkthdr.len);
+ }
+
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * This routine maps the mbufs to Advanced TX descriptors.
+ * used by the 82575 adapter. It also needs no workarounds.
+ *
+ **********************************************************************/
+static int
+em_adv_encap(struct adapter *adapter, struct mbuf **m_headp)
+{
+ bus_dma_segment_t segs[EM_MAX_SCATTER];
+ bus_dmamap_t map;
+ struct em_buffer *tx_buffer, *tx_buffer_mapped;
+ union e1000_adv_tx_desc *txd = NULL;
+ struct mbuf *m_head;
+ u32 olinfo_status = 0, cmd_type_len = 0;
+ u32 do_tso, paylen = 0;
+ int nsegs, i, j, error, first, last = 0;
+
+ m_head = *m_headp;
+
+ do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
+
+ /* Set basic descriptor constants */
+ cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
+ cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
+
+ /*
+ * Force a cleanup if number of TX descriptors
+ * available hits the threshold
+ */
+ if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
+ em_txeof(adapter);
+ /* Now do we at least have a minimal? */
+ if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
+ adapter->no_tx_desc_avail1++;
+ return (ENOBUFS);
+ }
}
+ /*
+ * Map the packet for DMA.
+ *
+ * Capture the first descriptor index,
+ * this descriptor will have the index
+ * of the EOP which is the only one that
+ * now gets a DONE bit writeback.
+ */
+ first = adapter->next_avail_tx_desc;
+ tx_buffer = &adapter->tx_buffer_area[first];
+ tx_buffer_mapped = tx_buffer;
+ map = tx_buffer->map;
+
+ error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
+ *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+ if (error == EFBIG) {
+ struct mbuf *m;
+
+ m = m_defrag(*m_headp, M_DONTWAIT);
+ if (m == NULL) {
+ adapter->mbuf_alloc_failed++;
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ *m_headp = m;
+
+ /* Try it again */
+ error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
+ *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+ if (error == ENOMEM) {
+ adapter->no_tx_dma_setup++;
+ return (error);
+ } else if (error != 0) {
+ adapter->no_tx_dma_setup++;
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (error);
+ }
+ } else if (error == ENOMEM) {
+ adapter->no_tx_dma_setup++;
+ return (error);
+ } else if (error != 0) {
+ adapter->no_tx_dma_setup++;
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (error);
+ }
+
+ /* Check again to be sure we have enough descriptors */
+ if (nsegs > (adapter->num_tx_desc_avail - 2)) {
+ adapter->no_tx_desc_avail2++;
+ bus_dmamap_unload(adapter->txtag, map);
+ return (ENOBUFS);
+ }
+ m_head = *m_headp;
+
+ /*
+ * Set up the context descriptor:
+ * used when any hardware offload is done.
+ * This includes CSUM, VLAN, and TSO. It
+ * will use the first descriptor.
+ */
+ if (m_head->m_pkthdr.csum_flags) {
+ /* All offloads set this */
+ olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+ /* First try TSO */
+ if ((do_tso) && em_tso_adv_setup(adapter, m_head, &paylen)) {
+ cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
+ olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
+ olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;
+ } else /* Just checksum offload */
+ em_tx_adv_ctx_setup(adapter, m_head);
+ }
+
+ /* Set up our transmit descriptors */
+ i = adapter->next_avail_tx_desc;
+ for (j = 0; j < nsegs; j++) {
+ bus_size_t seg_len;
+ bus_addr_t seg_addr;
+
+ tx_buffer = &adapter->tx_buffer_area[i];
+ txd = (union e1000_adv_tx_desc *)&adapter->tx_desc_base[i];
+ seg_addr = segs[j].ds_addr;
+ seg_len = segs[j].ds_len;
+
+ txd->read.buffer_addr = htole64(seg_addr);
+ txd->read.cmd_type_len = htole32(
+ adapter->txd_cmd | cmd_type_len | seg_len);
+ txd->read.olinfo_status = htole32(olinfo_status);
+ last = i;
+ if (++i == adapter->num_tx_desc)
+ i = 0;
+ tx_buffer->m_head = NULL;
+ tx_buffer->next_eop = -1;
+ }
+
+ adapter->next_avail_tx_desc = i;
+ adapter->num_tx_desc_avail -= nsegs;
+
+ tx_buffer->m_head = m_head;
+ tx_buffer_mapped->map = tx_buffer->map;
+ tx_buffer->map = map;
+ bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
+
+ /*
+ * Last Descriptor of Packet
+ * needs End Of Packet (EOP)
+ * and Report Status (RS)
+ */
+ txd->read.cmd_type_len |=
+ htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
+ /*
+ * Keep track in the first buffer which
+ * descriptor will be written back
+ */
+ tx_buffer = &adapter->tx_buffer_area[first];
+ tx_buffer->next_eop = last;
+
+ /*
+ * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
+ * that this frame is available to transmit.
+ */
+ bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ E1000_WRITE_REG(&adapter->hw, E1000_TDT, i);
+
return (0);
+
}
/*********************************************************************
@@ -1767,20 +2133,20 @@ em_82547_move_tail(void *arg)
struct adapter *adapter = arg;
uint16_t hw_tdt;
uint16_t sw_tdt;
- struct em_tx_desc *tx_desc;
+ struct e1000_tx_desc *tx_desc;
uint16_t length = 0;
boolean_t eop = 0;
EM_LOCK_ASSERT(adapter);
- hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
+ hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT);
sw_tdt = adapter->next_avail_tx_desc;
while (hw_tdt != sw_tdt) {
tx_desc = &adapter->tx_desc_base[hw_tdt];
length += tx_desc->lower.flags.length;
eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
- if(++hw_tdt == adapter->num_tx_desc)
+ if (++hw_tdt == adapter->num_tx_desc)
hw_tdt = 0;
if (eop) {
@@ -1790,7 +2156,7 @@ em_82547_move_tail(void *arg)
em_82547_move_tail, adapter);
break;
}
- E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
+ E1000_WRITE_REG(&adapter->hw, E1000_TDT, hw_tdt);
em_82547_update_fifo_head(adapter, length);
length = 0;
}
@@ -1833,26 +2199,33 @@ em_82547_update_fifo_head(struct adapter *adapter, int len)
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
-{
+{
uint32_t tctl;
- if ((E1000_READ_REG(&adapter->hw, TDT) == E1000_READ_REG(&adapter->hw, TDH)) &&
- (E1000_READ_REG(&adapter->hw, TDFT) == E1000_READ_REG(&adapter->hw, TDFH)) &&
- (E1000_READ_REG(&adapter->hw, TDFTS) == E1000_READ_REG(&adapter->hw, TDFHS))&&
- (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
-
+ if ((E1000_READ_REG(&adapter->hw, E1000_TDT) ==
+ E1000_READ_REG(&adapter->hw, E1000_TDH)) &&
+ (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
+ E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
+ (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
+ E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
+ (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
/* Disable TX unit */
- tctl = E1000_READ_REG(&adapter->hw, TCTL);
- E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
+ tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
+ E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
+ tctl & ~E1000_TCTL_EN);
/* Reset FIFO pointers */
- E1000_WRITE_REG(&adapter->hw, TDFT, adapter->tx_head_addr);
- E1000_WRITE_REG(&adapter->hw, TDFH, adapter->tx_head_addr);
- E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
- E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
+ E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
+ adapter->tx_head_addr);
+ E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
+ adapter->tx_head_addr);
+ E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
+ adapter->tx_head_addr);
+ E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
+ adapter->tx_head_addr);
/* Re-enable TX unit */
- E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+ E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
E1000_WRITE_FLUSH(&adapter->hw);
adapter->tx_fifo_head = 0;
@@ -1871,15 +2244,15 @@ em_set_promisc(struct adapter *adapter)
struct ifnet *ifp = adapter->ifp;
uint32_t reg_rctl;
- reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
if (ifp->if_flags & IFF_PROMISC) {
reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
- E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
+ E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
} else if (ifp->if_flags & IFF_ALLMULTI) {
reg_rctl |= E1000_RCTL_MPE;
reg_rctl &= ~E1000_RCTL_UPE;
- E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
+ E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
}
}
@@ -1888,11 +2261,11 @@ em_disable_promisc(struct adapter *adapter)
{
uint32_t reg_rctl;
- reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
reg_rctl &= (~E1000_RCTL_UPE);
reg_rctl &= (~E1000_RCTL_MPE);
- E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
+ E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
}
@@ -1909,17 +2282,18 @@ em_set_multi(struct adapter *adapter)
struct ifnet *ifp = adapter->ifp;
struct ifmultiaddr *ifma;
uint32_t reg_rctl = 0;
- uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
+ uint8_t mta[512]; /* Largest MTS is 4096 bits */
int mcnt = 0;
IOCTL_DEBUGOUT("em_set_multi: begin");
- if (adapter->hw.mac_type == em_82542_rev2_0) {
- reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
- if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
- em_pci_clear_mwi(&adapter->hw);
+ if (adapter->hw.mac.type == e1000_82542 &&
+ adapter->hw.revision_id == E1000_REVISION_2) {
+ reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+ if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+ e1000_pci_clear_mwi(&adapter->hw);
reg_rctl |= E1000_RCTL_RST;
- E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
+ E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
msec_delay(5);
}
@@ -1932,25 +2306,27 @@ em_set_multi(struct adapter *adapter)
break;
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
- &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
+ &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
mcnt++;
}
IF_ADDR_UNLOCK(ifp);
if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
- reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
reg_rctl |= E1000_RCTL_MPE;
- E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
+ E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
} else
- em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
+ e1000_mc_addr_list_update(&adapter->hw, mta,
+ mcnt, 1, adapter->hw.mac.rar_entry_count);
- if (adapter->hw.mac_type == em_82542_rev2_0) {
- reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ if (adapter->hw.mac.type == e1000_82542 &&
+ adapter->hw.revision_id == E1000_REVISION_2) {
+ reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
reg_rctl &= ~E1000_RCTL_RST;
- E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
+ E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
msec_delay(5);
- if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
- em_pci_set_mwi(&adapter->hw);
+ if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+ e1000_pci_set_mwi(&adapter->hw);
}
}
@@ -1970,7 +2346,7 @@ em_local_timer(void *arg)
EM_LOCK_ASSERT(adapter);
- em_check_for_link(&adapter->hw);
+ e1000_check_for_link(&adapter->hw);
em_update_link_status(adapter);
em_update_stats_counters(adapter);
if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
@@ -1983,6 +2359,7 @@ em_local_timer(void *arg)
em_watchdog(adapter);
callout_reset(&adapter->timer, hz, em_local_timer, adapter);
+
}
static void
@@ -1991,19 +2368,22 @@ em_update_link_status(struct adapter *adapter)
struct ifnet *ifp = adapter->ifp;
device_t dev = adapter->dev;
- if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
+ if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+ E1000_STATUS_LU) {
if (adapter->link_active == 0) {
- em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
- &adapter->link_duplex);
- /* Check if we may set SPEED_MODE bit on PCI-E */
- if ((adapter->link_speed == SPEED_1000) &&
- ((adapter->hw.mac_type == em_82571) ||
- (adapter->hw.mac_type == em_82572))) {
+ e1000_get_speed_and_duplex(&adapter->hw,
+ &adapter->link_speed, &adapter->link_duplex);
+ /* Check if we must disable SPEED_MODE bit on PCI-E */
+ if ((adapter->link_speed != SPEED_1000) &&
+ ((adapter->hw.mac.type == e1000_82571) ||
+ (adapter->hw.mac.type == e1000_82572))) {
int tarc0;
- tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
- tarc0 |= SPEED_MODE_BIT;
- E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
+ tarc0 = E1000_READ_REG(&adapter->hw,
+ E1000_TARC0);
+ tarc0 &= ~SPEED_MODE_BIT;
+ E1000_WRITE_REG(&adapter->hw,
+ E1000_TARC0, tarc0);
}
if (bootverbose)
device_printf(dev, "Link is up %d Mbps %s\n",
@@ -2045,16 +2425,21 @@ em_stop(void *arg)
INIT_DEBUGOUT("em_stop: begin");
em_disable_intr(adapter);
- em_reset_hw(&adapter->hw);
callout_stop(&adapter->timer);
callout_stop(&adapter->tx_fifo_timer);
+ em_free_transmit_structures(adapter);
+ em_free_receive_structures(adapter);
/* Tell the stack that the interface is no longer active */
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ e1000_reset_hw(&adapter->hw);
+ if (adapter->hw.mac.type >= e1000_82544)
+ E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
}
-/********************************************************************
+/*********************************************************************
*
* Determine hardware revision.
*
@@ -2065,24 +2450,30 @@ em_identify_hardware(struct adapter *adapter)
device_t dev = adapter->dev;
/* Make sure our PCI config space has the necessary stuff set */
- pci_enable_busmaster(dev);
- pci_enable_io(dev, SYS_RES_MEMORY);
- adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ if ((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) == 0 ||
+ (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN) == 0) {
+ device_printf(dev, "Memory Access and/or Bus Master bits "
+ "were not set!\n");
+ adapter->hw.bus.pci_cmd_word |=
+ (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
+ pci_write_config(dev, PCIR_COMMAND,
+ adapter->hw.bus.pci_cmd_word, 2);
+ }
/* Save off the information about this board */
adapter->hw.vendor_id = pci_get_vendor(dev);
adapter->hw.device_id = pci_get_device(dev);
- adapter->hw.revision_id = pci_get_revid(dev);
- adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
- adapter->hw.subsystem_id = pci_get_subdevice(dev);
-
- /* Identify the MAC */
- if (em_set_mac_type(&adapter->hw))
- device_printf(dev, "Unknown MAC Type\n");
-
- if(adapter->hw.mac_type == em_82541 || adapter->hw.mac_type == em_82541_rev_2 ||
- adapter->hw.mac_type == em_82547 || adapter->hw.mac_type == em_82547_rev_2)
- adapter->hw.phy_init_script = TRUE;
+ adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ adapter->hw.subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ adapter->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+ /* Do Shared Code Init and Setup */
+ if (e1000_set_mac_type(&adapter->hw)) {
+ device_printf(dev, "Setup init failure\n");
+ return;
+ }
}
static int
@@ -2099,11 +2490,13 @@ em_allocate_pci_resources(struct adapter *adapter)
return (ENXIO);
}
adapter->osdep.mem_bus_space_tag =
- rman_get_bustag(adapter->res_memory);
- adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->res_memory);
+ rman_get_bustag(adapter->res_memory);
+ adapter->osdep.mem_bus_space_handle =
+ rman_get_bushandle(adapter->res_memory);
adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
- if (adapter->hw.mac_type > em_82543) {
+ /* Only older adapters use IO mapping */
+ if (adapter->hw.mac.type <= e1000_82543) {
/* Figure out where our IO BAR is */
for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
val = pci_read_config(dev, rid, 4);
@@ -2120,38 +2513,47 @@ em_allocate_pci_resources(struct adapter *adapter)
device_printf(dev, "Unable to locate IO BAR\n");
return (ENXIO);
}
- adapter->res_ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
- &adapter->io_rid, RF_ACTIVE);
+ adapter->res_ioport = bus_alloc_resource_any(dev,
+ SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
if (adapter->res_ioport == NULL) {
device_printf(dev, "Unable to allocate bus resource: "
"ioport\n");
return (ENXIO);
}
adapter->hw.io_base = 0;
- adapter->osdep.io_bus_space_tag = rman_get_bustag(adapter->res_ioport);
+ adapter->osdep.io_bus_space_tag =
+ rman_get_bustag(adapter->res_ioport);
adapter->osdep.io_bus_space_handle =
rman_get_bushandle(adapter->res_ioport);
}
- /* For ICH8 we need to find the flash memory. */
- if (adapter->hw.mac_type == em_ich8lan) {
- rid = EM_FLASH;
-
- adapter->flash_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &rid, RF_ACTIVE);
- adapter->osdep.flash_bus_space_tag = rman_get_bustag(adapter->flash_mem);
- adapter->osdep.flash_bus_space_handle =
- rman_get_bushandle(adapter->flash_mem);
- }
-
- val = pci_msi_count(dev);
- if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
- rid = 1;
- adapter->msi = 1;
- } else
- rid = 0;
- adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
+ /*
+ * Setup MSI/X or MSI if PCI Express;
+ * only the newest (82575) adapters can use MSI/X,
+ * and full support for it is forthcoming.
+ */
+ adapter->msi = 0; /* Set defaults */
+ rid = 0x0;
+ if (adapter->hw.mac.type >= e1000_82575) {
+ /*
+ * Eventually this will be used
+ * for multiqueue; for now we
+ * just use one vector.
+ */
+ val = pci_msix_count(dev);
+ if ((val) && pci_alloc_msix(dev, &val) == 0) {
+ rid = 1;
+ adapter->msi = 1;
+ }
+ } else if (adapter->hw.bus.type == e1000_bus_type_pci_express) {
+ val = pci_msi_count(dev);
+ if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
+ rid = 1;
+ adapter->msi = 1;
+ }
+ }
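+ /* If neither MSI/X nor MSI was granted, rid stays 0 and the legacy INTx line is used below */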
+ adapter->res_interrupt = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
if (adapter->res_interrupt == NULL) {
device_printf(dev, "Unable to allocate bus resource: "
"interrupt\n");
@@ -2163,6 +2565,11 @@ em_allocate_pci_resources(struct adapter *adapter)
return (0);
}
+/*********************************************************************
+ *
+ * Setup the appropriate Interrupt handlers.
+ *
+ **********************************************************************/
int
em_allocate_intr(struct adapter *adapter)
{
@@ -2170,15 +2577,18 @@ em_allocate_intr(struct adapter *adapter)
int error;
/* Manually turn off all interrupts */
- E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
+ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
#ifdef DEVICE_POLLING
- if (adapter->int_handler_tag == NULL && (error = bus_setup_intr(dev,
- adapter->res_interrupt, INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr,
- adapter, &adapter->int_handler_tag)) != 0) {
+ /* We do Legacy setup */
+ if (adapter->int_handler_tag == NULL &&
+ (error = bus_setup_intr(dev, adapter->res_interrupt,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
+ &adapter->int_handler_tag)) != 0) {
device_printf(dev, "Failed to register interrupt handler");
return (error);
}
+
#else
/*
* Try allocating a fast interrupt and the associated deferred
@@ -2199,7 +2609,7 @@ em_allocate_intr(struct adapter *adapter)
adapter->tq = NULL;
return (error);
}
-#endif
+#endif
em_enable_intr(adapter);
return (0);
@@ -2210,8 +2620,9 @@ em_free_intr(struct adapter *adapter)
{
device_t dev = adapter->dev;
- if (adapter->int_handler_tag != NULL) {
- bus_teardown_intr(dev, adapter->res_interrupt, adapter->int_handler_tag);
+ if (adapter->res_interrupt != NULL) {
+ bus_teardown_intr(dev, adapter->res_interrupt,
+ adapter->int_handler_tag);
adapter->int_handler_tag = NULL;
}
if (adapter->tq != NULL) {
@@ -2228,31 +2639,29 @@ em_free_pci_resources(struct adapter *adapter)
device_t dev = adapter->dev;
if (adapter->res_interrupt != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, adapter->msi ? 1 : 0,
- adapter->res_interrupt);
+ bus_release_resource(dev, SYS_RES_IRQ,
+ 0, adapter->res_interrupt);
if (adapter->msi)
pci_release_msi(dev);
if (adapter->res_memory != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
- adapter->res_memory);
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(0), adapter->res_memory);
if (adapter->flash_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH,
- adapter->flash_mem);
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ EM_FLASH, adapter->flash_mem);
if (adapter->res_ioport != NULL)
- bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
- adapter->res_ioport);
+ bus_release_resource(dev, SYS_RES_IOPORT,
+ adapter->io_rid, adapter->res_ioport);
}
/*********************************************************************
*
- * Initialize the hardware to a configuration as specified by the
- * adapter structure. The controller is reset, the EEPROM is
- * verified, the MAC address is set, then the shared initialization
- * routines are called.
+ * Initialize the hardware to a configuration
+ * as specified by the adapter structure.
*
**********************************************************************/
static int
@@ -2262,33 +2671,24 @@ em_hardware_init(struct adapter *adapter)
uint16_t rx_buffer_size;
INIT_DEBUGOUT("em_hardware_init: begin");
+
/* Issue a global reset */
- em_reset_hw(&adapter->hw);
+ e1000_reset_hw(&adapter->hw);
/* When hardware is reset, fifo_head is also reset */
adapter->tx_fifo_head = 0;
- /* Make sure we have a good EEPROM before we read from it */
- if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
- device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
- return (EIO);
- }
-
- if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
- device_printf(dev, "EEPROM read error while reading part "
- "number\n");
- return (EIO);
- }
-
/* Set up smart power down as default off on newer adapters. */
- if (!em_smart_pwr_down &&
- (adapter->hw.mac_type == em_82571 || adapter->hw.mac_type == em_82572)) {
+ if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
+ adapter->hw.mac.type == e1000_82572)) {
uint16_t phy_tmp = 0;
/* Speed up time to link by disabling smart power down. */
- em_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
+ e1000_read_phy_reg(&adapter->hw,
+ IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
phy_tmp &= ~IGP02E1000_PM_SPD;
- em_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
+ e1000_write_phy_reg(&adapter->hw,
+ IGP02E1000_PHY_POWER_MGMT, phy_tmp);
}
/*
@@ -2305,24 +2705,25 @@ em_hardware_init(struct adapter *adapter)
* by 1500.
* - The pause time is fairly large at 1000 x 512ns = 512 usec.
*/
- rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10 );
-
- adapter->hw.fc_high_water = rx_buffer_size -
- roundup2(adapter->hw.max_frame_size, 1024);
- adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
- if (adapter->hw.mac_type == em_80003es2lan)
- adapter->hw.fc_pause_time = 0xFFFF;
+ rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
+ 0xffff) << 10 );
+
+ adapter->hw.mac.fc_high_water = rx_buffer_size -
+ roundup2(adapter->hw.mac.max_frame_size, 1024);
+ adapter->hw.mac.fc_low_water = adapter->hw.mac.fc_high_water - 1500;
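+ /* e.g. a 32 KB Rx packet buffer and a 1518-byte max frame give high water 32768 - 2048 = 30720 and low water 29220 */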
+ if (adapter->hw.mac.type == e1000_80003es2lan)
+ adapter->hw.mac.fc_pause_time = 0xFFFF;
else
- adapter->hw.fc_pause_time = 0x1000;
- adapter->hw.fc_send_xon = TRUE;
- adapter->hw.fc = E1000_FC_FULL;
+ adapter->hw.mac.fc_pause_time = EM_FC_PAUSE_TIME;
+ adapter->hw.mac.fc_send_xon = TRUE;
+ adapter->hw.mac.fc = e1000_fc_full;
- if (em_init_hw(&adapter->hw) < 0) {
- device_printf(dev, "Hardware Initialization Failed");
+ if (e1000_init_hw(&adapter->hw) < 0) {
+ device_printf(dev, "Hardware Initialization Failed\n");
return (EIO);
}
- em_check_for_link(&adapter->hw);
+ e1000_check_for_link(&adapter->hw);
return (0);
}
@@ -2336,6 +2737,7 @@ static void
em_setup_interface(device_t dev, struct adapter *adapter)
{
struct ifnet *ifp;
+
INIT_DEBUGOUT("em_setup_interface: begin");
ifp = adapter->ifp = if_alloc(IFT_ETHER);
@@ -2352,18 +2754,18 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
IFQ_SET_READY(&ifp->if_snd);
- ether_ifattach(ifp, adapter->hw.mac_addr);
+ ether_ifattach(ifp, adapter->hw.mac.addr);
ifp->if_capabilities = ifp->if_capenable = 0;
- if (adapter->hw.mac_type >= em_82543) {
+ if (adapter->hw.mac.type >= e1000_82543) {
ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
}
/* Enable TSO if available */
- if ((adapter->hw.mac_type > em_82544) &&
- (adapter->hw.mac_type != em_82547)) {
+ if ((adapter->hw.mac.type > e1000_82544) &&
+ (adapter->hw.mac.type != e1000_82547)) {
ifp->if_capabilities |= IFCAP_TSO4;
ifp->if_capenable |= IFCAP_TSO4;
}
@@ -2373,7 +2775,7 @@ em_setup_interface(device_t dev, struct adapter *adapter)
*/
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
- ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+ ifp->if_capenable |= IFCAP_VLAN_MTU;
#ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING;
@@ -2383,16 +2785,16 @@ em_setup_interface(device_t dev, struct adapter *adapter)
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
*/
- ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
- em_media_status);
- if ((adapter->hw.media_type == em_media_type_fiber) ||
- (adapter->hw.media_type == em_media_type_internal_serdes)) {
- u_char fiber_type = IFM_1000_SX; /* default type; */
+ ifmedia_init(&adapter->media, IFM_IMASK,
+ em_media_change, em_media_status);
+ if ((adapter->hw.media_type == e1000_media_type_fiber) ||
+ (adapter->hw.media_type == e1000_media_type_internal_serdes)) {
+ u_char fiber_type = IFM_1000_SX; /* default type */
- if (adapter->hw.mac_type == em_82545)
+ if (adapter->hw.mac.type == e1000_82545)
fiber_type = IFM_1000_LX;
- ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
- 0, NULL);
+ ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
+ 0, NULL);
ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
} else {
ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
@@ -2402,7 +2804,7 @@ em_setup_interface(device_t dev, struct adapter *adapter)
0, NULL);
ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
0, NULL);
- if (adapter->hw.phy_type != em_phy_ife) {
+ if (adapter->hw.phy.type != e1000_phy_ife) {
ifmedia_add(&adapter->media,
IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
ifmedia_add(&adapter->media,
@@ -2424,32 +2826,32 @@ em_smartspeed(struct adapter *adapter)
{
uint16_t phy_tmp;
- if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
- adapter->hw.autoneg == 0 ||
- (adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
+ if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
+ adapter->hw.mac.autoneg == 0 ||
+ (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
return;
if (adapter->smartspeed == 0) {
/* If Master/Slave config fault is asserted twice,
* we assume back-to-back */
- em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
+ e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
return;
- em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
+ e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
- em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
+ e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
if(phy_tmp & CR_1000T_MS_ENABLE) {
phy_tmp &= ~CR_1000T_MS_ENABLE;
- em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
+ e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
phy_tmp);
adapter->smartspeed++;
- if(adapter->hw.autoneg &&
- !em_phy_setup_autoneg(&adapter->hw) &&
- !em_read_phy_reg(&adapter->hw, PHY_CTRL,
+ if(adapter->hw.mac.autoneg &&
+ !e1000_phy_setup_autoneg(&adapter->hw) &&
+ !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL,
&phy_tmp)) {
phy_tmp |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
- em_write_phy_reg(&adapter->hw, PHY_CTRL,
+ e1000_write_phy_reg(&adapter->hw, PHY_CONTROL,
phy_tmp);
}
}
@@ -2457,15 +2859,15 @@ em_smartspeed(struct adapter *adapter)
return;
} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
/* If still no link, perhaps using 2/3 pair cable */
- em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
+ e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
phy_tmp |= CR_1000T_MS_ENABLE;
- em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
- if(adapter->hw.autoneg &&
- !em_phy_setup_autoneg(&adapter->hw) &&
- !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
+ e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
+ if(adapter->hw.mac.autoneg &&
+ !e1000_phy_setup_autoneg(&adapter->hw) &&
+ !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
phy_tmp |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
- em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
+ e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
}
}
/* Restart process after EM_SMARTSPEED_MAX iterations */
@@ -2486,8 +2888,8 @@ em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
}
static int
-em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma,
- int mapflags)
+em_dma_malloc(struct adapter *adapter, bus_size_t size,
+ struct em_dma_alloc *dma, int mapflags)
{
int error;
@@ -2504,7 +2906,8 @@ em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma
NULL, /* lockarg */
&dma->dma_tag);
if (error) {
- device_printf(adapter->dev, "%s: bus_dma_tag_create failed: %d\n",
+ device_printf(adapter->dev,
+ "%s: bus_dma_tag_create failed: %d\n",
__func__, error);
goto fail_0;
}
@@ -2512,7 +2915,8 @@ em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma
error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
BUS_DMA_NOWAIT, &dma->dma_map);
if (error) {
- device_printf(adapter->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n",
+ device_printf(adapter->dev,
+ "%s: bus_dmamem_alloc(%ju) failed: %d\n",
__func__, (uintmax_t)size, error);
goto fail_2;
}
@@ -2521,7 +2925,8 @@ em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma
error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
if (error || dma->dma_paddr == 0) {
- device_printf(adapter->dev, "%s: bus_dmamap_load failed: %d\n",
+ device_printf(adapter->dev,
+ "%s: bus_dmamap_load failed: %d\n",
__func__, error);
goto fail_3;
}
@@ -2567,30 +2972,43 @@ static int
em_allocate_transmit_structures(struct adapter *adapter)
{
device_t dev = adapter->dev;
+
+ adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
+ adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (adapter->tx_buffer_area == NULL) {
+ device_printf(dev, "Unable to allocate tx_buffer memory\n");
+ return (ENOMEM);
+ }
+
+ bzero(adapter->tx_buffer_area,
+ (sizeof(struct em_buffer)) * adapter->num_tx_desc);
+
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Initialize transmit structures.
+ *
+ **********************************************************************/
+static int
+em_setup_transmit_structures(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
struct em_buffer *tx_buffer;
- bus_size_t size, segsize;
int error, i;
/*
- * Setup DMA descriptor areas.
+ * Create DMA tags for tx descriptors
*/
- segsize = size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
-
- /* Overrides for TSO - want large sizes */
- if ((adapter->hw.mac_type > em_82544) &&
- (adapter->hw.mac_type != em_82547)) {
- size = EM_TSO_SIZE;
- segsize = EM_TSO_PCIE_SEGMENT_SIZE;
- }
-
- if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- size, /* maxsize */
+ EM_TSO_SIZE, /* maxsize */
EM_MAX_SCATTER, /* nsegments */
- segsize, /* maxsegsize */
+ EM_TSO_SEG_SIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
@@ -2599,14 +3017,14 @@ em_allocate_transmit_structures(struct adapter *adapter)
goto fail;
}
- adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
- adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (adapter->tx_buffer_area == NULL) {
- device_printf(dev, "Unable to allocate tx_buffer memory\n");
- error = ENOMEM;
+ if ((error = em_allocate_transmit_structures(adapter)) != 0)
goto fail;
- }
+ /* Clear the old ring contents */
+ bzero(adapter->tx_desc_base,
+ (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
+
+ /* Create the descriptor buffer dma maps */
tx_buffer = adapter->tx_buffer_area;
for (i = 0; i < adapter->num_tx_desc; i++) {
error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
@@ -2614,51 +3032,24 @@ em_allocate_transmit_structures(struct adapter *adapter)
device_printf(dev, "Unable to create TX DMA map\n");
goto fail;
}
+ tx_buffer->next_eop = -1;
tx_buffer++;
}
- return (0);
-
-fail:
- em_free_transmit_structures(adapter);
- return (error);
-}
-
-/*********************************************************************
- *
- * Initialize transmit structures.
- *
- **********************************************************************/
-static void
-em_setup_transmit_structures(struct adapter *adapter)
-{
- struct em_buffer *tx_buffer;
- int i;
-
- bzero(adapter->tx_desc_base, (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
-
adapter->next_avail_tx_desc = 0;
adapter->next_tx_to_clean = 0;
- /* Free any existing tx buffers. */
- tx_buffer = adapter->tx_buffer_area;
- for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
- if (tx_buffer->m_head != NULL) {
- bus_dmamap_sync(adapter->txtag, tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(adapter->txtag, tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- }
- }
-
/* Set number of descriptors available */
adapter->num_tx_desc_avail = adapter->num_tx_desc;
- /* Set checksum context */
- adapter->active_checksum_context = OFFLOAD_NONE;
bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ return (0);
+
+fail:
+ em_free_transmit_structures(adapter);
+ return (error);
}
/*********************************************************************
@@ -2669,72 +3060,92 @@ em_setup_transmit_structures(struct adapter *adapter)
static void
em_initialize_transmit_unit(struct adapter *adapter)
{
- uint32_t reg_tctl;
- uint32_t reg_tipg = 0;
+ uint32_t tctl, tarc, tipg = 0;
uint64_t bus_addr;
INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
/* Setup the Base and Length of the Tx Descriptor Ring */
bus_addr = adapter->txdma.dma_paddr;
- E1000_WRITE_REG(&adapter->hw, TDLEN,
- adapter->num_tx_desc * sizeof(struct em_tx_desc));
- E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
- E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
+ E1000_WRITE_REG(&adapter->hw, E1000_TDLEN,
+ adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
+ E1000_WRITE_REG(&adapter->hw, E1000_TDBAH, (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(&adapter->hw, E1000_TDBAL, (uint32_t)bus_addr);
/* Setup the HW Tx Head and Tail descriptor pointers */
- E1000_WRITE_REG(&adapter->hw, TDT, 0);
- E1000_WRITE_REG(&adapter->hw, TDH, 0);
-
+ E1000_WRITE_REG(&adapter->hw, E1000_TDT, 0);
+ E1000_WRITE_REG(&adapter->hw, E1000_TDH, 0);
- HW_DEBUGOUT2("Base = %x, Length = %x\n", E1000_READ_REG(&adapter->hw, TDBAL),
- E1000_READ_REG(&adapter->hw, TDLEN));
+ HW_DEBUGOUT2("Base = %x, Length = %x\n",
+ E1000_READ_REG(&adapter->hw, E1000_TDBAL),
+ E1000_READ_REG(&adapter->hw, E1000_TDLEN));
/* Set the default values for the Tx Inter Packet Gap timer */
- switch (adapter->hw.mac_type) {
- case em_82542_rev2_0:
- case em_82542_rev2_1:
- reg_tipg = DEFAULT_82542_TIPG_IPGT;
- reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
- reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+ switch (adapter->hw.mac.type) {
+ case e1000_82542:
+ tipg = DEFAULT_82542_TIPG_IPGT;
+ tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
+ tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
break;
- case em_80003es2lan:
- reg_tipg = DEFAULT_82543_TIPG_IPGR1;
- reg_tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
+ case e1000_80003es2lan:
+ tipg = DEFAULT_82543_TIPG_IPGR1;
+ tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
E1000_TIPG_IPGR2_SHIFT;
break;
default:
- if ((adapter->hw.media_type == em_media_type_fiber) ||
- (adapter->hw.media_type == em_media_type_internal_serdes))
- reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+ if ((adapter->hw.media_type == e1000_media_type_fiber) ||
+ (adapter->hw.media_type ==
+ e1000_media_type_internal_serdes))
+ tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
else
- reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
- reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
- reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+ tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+ tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
+ tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
}
- E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
- E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
- if(adapter->hw.mac_type >= em_82540)
- E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay.value);
+ E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
+ E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
+ if(adapter->hw.mac.type >= e1000_82540)
+ E1000_WRITE_REG(&adapter->hw, E1000_TADV,
+ adapter->tx_abs_int_delay.value);
+
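+ /* Device-specific TARC tweaks: 82571/82572 get the speed-mode bit, 80003es2lan sets bit 0 in both TARC registers */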
+ if ((adapter->hw.mac.type == e1000_82571) ||
+ (adapter->hw.mac.type == e1000_82572)) {
+ tarc = E1000_READ_REG(&adapter->hw, E1000_TARC0);
+ tarc |= SPEED_MODE_BIT;
+ E1000_WRITE_REG(&adapter->hw, E1000_TARC0, tarc);
+ } else if (adapter->hw.mac.type == e1000_80003es2lan) {
+ tarc = E1000_READ_REG(&adapter->hw, E1000_TARC0);
+ tarc |= 1;
+ E1000_WRITE_REG(&adapter->hw, E1000_TARC0, tarc);
+ tarc = E1000_READ_REG(&adapter->hw, E1000_TARC1);
+ tarc |= 1;
+ E1000_WRITE_REG(&adapter->hw, E1000_TARC1, tarc);
+ }
/* Program the Transmit Control Register */
- reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
+ tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl = E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
- if (adapter->hw.mac_type >= em_82571)
- reg_tctl |= E1000_TCTL_MULR;
- if (adapter->link_duplex == FULL_DUPLEX) {
- reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
- } else {
- reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
- }
+
+ if (adapter->hw.mac.type >= e1000_82571)
+ tctl |= E1000_TCTL_MULR;
+
/* This write will effectively turn on the transmit unit. */
- E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
+ E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
- /* Setup Transmit Descriptor Base Settings */
+ /* Setup Transmit Descriptor Base Settings */
adapter->txd_cmd = E1000_TXD_CMD_IFCS;
- if (adapter->tx_int_delay.value > 0)
+ if ((adapter->tx_int_delay.value > 0) &&
+ (adapter->hw.mac.type != e1000_82575))
adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+ /* Set the function pointer for the transmit routine */
+ if (adapter->hw.mac.type >= e1000_82575)
+ adapter->em_xmit = em_adv_encap;
+ else
+ adapter->em_xmit = em_encap;
}
/*********************************************************************
@@ -2783,15 +3194,15 @@ em_free_transmit_structures(struct adapter *adapter)
/*********************************************************************
*
* The offload context needs to be set when we transfer the first
- * packet of a particular protocol (TCP/UDP). We change the
- * context only if the protocol type changes.
+ * packet of a particular protocol (TCP/UDP). This routine has been
+ * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
*
**********************************************************************/
static void
em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
uint32_t *txd_upper, uint32_t *txd_lower)
{
- struct em_context_desc *TXD;
+ struct e1000_context_desc *TXD;
struct em_buffer *tx_buffer;
struct ether_vlan_header *eh;
struct ip *ip;
@@ -2805,7 +3216,7 @@ em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
/* Setup checksum offload context. */
curr_txd = adapter->next_avail_tx_desc;
tx_buffer = &adapter->tx_buffer_area[curr_txd];
- TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
+ TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
*txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
E1000_TXD_DTYP_D; /* Data descr */
@@ -2932,7 +3343,7 @@ static boolean_t
em_tso_setup(struct adapter *adapter, struct mbuf *mp, uint32_t *txd_upper,
uint32_t *txd_lower)
{
- struct em_context_desc *TXD;
+ struct e1000_context_desc *TXD;
struct em_buffer *tx_buffer;
struct ether_vlan_header *eh;
struct ip *ip;
@@ -2948,7 +3359,7 @@ em_tso_setup(struct adapter *adapter, struct mbuf *mp, uint32_t *txd_upper,
* in true failure cases as well. Should do -1 (failure), 0 (no)
* and 1 (success).
*/
- if (mp->m_pkthdr.len <= E1000_TX_BUFFER_SIZE)
+ if (mp->m_pkthdr.len <= EM_TX_BUFFER_SIZE)
return FALSE; /* 0 */
/*
@@ -3030,7 +3441,7 @@ em_tso_setup(struct adapter *adapter, struct mbuf *mp, uint32_t *txd_upper,
curr_txd = adapter->next_avail_tx_desc;
tx_buffer = &adapter->tx_buffer_area[curr_txd];
- TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
+ TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
/* IPv6 doesn't have a header checksum. */
if (!isip6) {
@@ -3070,6 +3481,7 @@ em_tso_setup(struct adapter *adapter, struct mbuf *mp, uint32_t *txd_upper,
(mp->m_pkthdr.len - (hdr_len))); /* Total len */
tx_buffer->m_head = NULL;
+ tx_buffer->next_eop = -1;
if (++curr_txd == adapter->num_tx_desc)
curr_txd = 0;
@@ -3081,6 +3493,200 @@ em_tso_setup(struct adapter *adapter, struct mbuf *mp, uint32_t *txd_upper,
return TRUE;
}
+
+/**********************************************************************
+ *
+ * Setup work for hardware segmentation offload (TSO) on
+ * adapters using advanced tx descriptors
+ *
+ **********************************************************************/
+static boolean_t
+em_tso_adv_setup(struct adapter *adapter, struct mbuf *mp, u32 *paylen)
+{
+ struct e1000_adv_tx_context_desc *TXD;
+ struct em_buffer *tx_buffer;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+ u32 mss_l4len_idx = 0;
+ u16 vtag = 0;
+ int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
+ struct ether_vlan_header *eh;
+ struct ip *ip;
+ struct tcphdr *th;
+
+ if (mp->m_pkthdr.len <= EM_TX_BUFFER_SIZE)
+ return FALSE;
+
+ /*
+ * Determine where frame payload starts.
+ * Jump over vlan headers if already present
+ */
+ eh = mtod(mp, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ else
+ ehdrlen = ETHER_HDR_LEN;
+
+ /* Ensure we have at least the IP+TCP header in the first mbuf. */
+ if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
+ return FALSE;
+
+ /* Only supports IPV4 for now */
+ ctxd = adapter->next_avail_tx_desc;
+ tx_buffer = &adapter->tx_buffer_area[ctxd];
+ TXD = (struct e1000_adv_tx_context_desc *) &adapter->tx_desc_base[ctxd];
+
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+ if (ip->ip_p != IPPROTO_TCP)
+ return FALSE; /* 0 */
+ ip->ip_len = 0;
+ ip->ip_sum = 0;
+ ip_hlen = ip->ip_hl << 2;
+ th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
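+ /* Seed th_sum with the pseudo-header checksum (addresses and protocol, length omitted); hardware fills in the rest per segment */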
+ th->th_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+ tcp_hlen = th->th_off << 2;
+ hdrlen = ehdrlen + ip_hlen + tcp_hlen;
+ /* Calculate payload, this is used in the transmit desc in encap */
+ *paylen = mp->m_pkthdr.len - hdrlen;
+
+ /* VLAN MACLEN IPLEN */
+ if (mp->m_flags & M_VLANTAG) {
+ vtag = htole16(mp->m_pkthdr.ether_vtag);
+ vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
+ }
+ vlan_macip_lens |= (ehdrlen << E1000_ADVTXD_MACLEN_SHIFT);
+ vlan_macip_lens |= ip_hlen;
+ TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+
+ /* ADV DTYPE TUCMD */
+ type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
+ TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
+ TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+ TXD->seqnum_seed = htole32(0);
+ tx_buffer->m_head = NULL;
+ tx_buffer->next_eop = -1;
+
+ if (++ctxd == adapter->num_tx_desc)
+ ctxd = 0;
+
+ adapter->num_tx_desc_avail--;
+ adapter->next_avail_tx_desc = ctxd;
+ return TRUE;
+}
+
+
+/*********************************************************************
+ *
+ * Advanced Context Descriptor setup for VLAN or CSUM
+ *
+ **********************************************************************/
+
+static void
+em_tx_adv_ctx_setup(struct adapter *adapter, struct mbuf *mp)
+{
+ struct e1000_adv_tx_context_desc *TXD;
+ struct em_buffer *tx_buffer;
+ uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+ struct ether_vlan_header *eh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ int ehdrlen, ip_hlen;
+ u16 etype;
+ u8 ipproto;
+
+ int ctxd = adapter->next_avail_tx_desc;
+ u16 vtag = 0;
+
+ tx_buffer = &adapter->tx_buffer_area[ctxd];
+ TXD = (struct e1000_adv_tx_context_desc *) &adapter->tx_desc_base[ctxd];
+
+ /*
+ ** In advanced descriptors the vlan tag must
+ ** be placed into the descriptor itself.
+ */
+ if (mp->m_flags & M_VLANTAG) {
+ vtag = htole16(mp->m_pkthdr.ether_vtag);
+ vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
+ }
+
+ /*
+ * Determine where frame payload starts.
+ * Jump over vlan headers if already present,
+ * helpful for QinQ too.
+ */
+ eh = mtod(mp, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ etype = ntohs(eh->evl_proto);
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ } else {
+ etype = ntohs(eh->evl_encap_proto);
+ ehdrlen = ETHER_HDR_LEN;
+ }
+
+ /* Set the ether header length */
+ vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
+
+ switch (etype) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+ ip_hlen = ip->ip_hl << 2;
+ if (mp->m_len < ehdrlen + ip_hlen)
+ return; /* failure */
+ ipproto = ip->ip_p;
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
+ break;
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+ ip_hlen = sizeof(struct ip6_hdr);
+ if (mp->m_len < ehdrlen + ip_hlen)
+ return; /* failure */
+ ipproto = ip6->ip6_nxt;
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
+ break;
+ default:
+ return;
+ }
+
+ vlan_macip_lens |= ip_hlen;
+ type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
+
+ switch (ipproto) {
+ case IPPROTO_TCP:
+ if (mp->m_pkthdr.csum_flags & CSUM_TCP)
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case IPPROTO_UDP:
+ if (mp->m_pkthdr.csum_flags & CSUM_UDP)
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ }
+
+ /* Now copy bits into descriptor */
+ TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+ TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+ TXD->seqnum_seed = htole32(0);
+ TXD->mss_l4len_idx = htole32(0);
+
+ tx_buffer->m_head = NULL;
+ tx_buffer->next_eop = -1;
+
+ /* We've consumed the first desc, adjust counters */
+ if (++ctxd == adapter->num_tx_desc)
+ ctxd = 0;
+ adapter->next_avail_tx_desc = ctxd;
+ --adapter->num_tx_desc_avail;
+
+ return;
+}
+
+
/**********************************************************************
*
* Examine each tx_buffer in the used queue. If the hardware is done
@@ -3091,88 +3697,95 @@ em_tso_setup(struct adapter *adapter, struct mbuf *mp, uint32_t *txd_upper,
static void
em_txeof(struct adapter *adapter)
{
- int first, last, done, num_avail;
- struct em_buffer *tx_buffer;
- struct em_tx_desc *tx_desc, *eop_desc;
+ int first, last, done, num_avail;
+ struct em_buffer *tx_buffer;
+ struct e1000_tx_desc *tx_desc, *eop_desc;
struct ifnet *ifp = adapter->ifp;
EM_LOCK_ASSERT(adapter);
- if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
- return;
+ if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
+ return;
- num_avail = adapter->num_tx_desc_avail;
- first = adapter->next_tx_to_clean;
- tx_desc = &adapter->tx_desc_base[first];
- tx_buffer = &adapter->tx_buffer_area[first];
+ num_avail = adapter->num_tx_desc_avail;
+ first = adapter->next_tx_to_clean;
+ tx_desc = &adapter->tx_desc_base[first];
+ tx_buffer = &adapter->tx_buffer_area[first];
last = tx_buffer->next_eop;
- eop_desc = &adapter->tx_desc_base[last];
+ eop_desc = &adapter->tx_desc_base[last];
/*
- * Now calculate the terminating index
- * for the cleanup loop below.
+ * Get the index of the first descriptor
+ * AFTER the EOP of the first packet, so that
+ * the inner while loop below can use a
+ * simple comparison as its bound.
*/
if (++last == adapter->num_tx_desc)
- last = 0;
+ last = 0;
done = last;
- bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
- BUS_DMASYNC_POSTREAD);
- while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
+ bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+ while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
/* We clean the range of the packet */
while (first != done) {
- tx_desc->upper.data = 0;
- tx_desc->lower.data = 0;
- num_avail++;
+ tx_desc->upper.data = 0;
+ tx_desc->lower.data = 0;
+ tx_desc->buffer_addr = 0;
+ num_avail++;
if (tx_buffer->m_head) {
ifp->if_opackets++;
- bus_dmamap_sync(adapter->txtag, tx_buffer->map,
+ bus_dmamap_sync(adapter->txtag,
+ tx_buffer->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(adapter->txtag,
tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- }
+ m_freem(tx_buffer->m_head);
+ tx_buffer->m_head = NULL;
+ }
tx_buffer->next_eop = -1;
- if (++first == adapter->num_tx_desc)
+ if (++first == adapter->num_tx_desc)
first = 0;
- tx_buffer = &adapter->tx_buffer_area[first];
+ tx_buffer = &adapter->tx_buffer_area[first];
tx_desc = &adapter->tx_desc_base[first];
}
/* See if we can continue to the next packet */
last = tx_buffer->next_eop;
if (last != -1) {
- eop_desc = &adapter->tx_desc_base[last];
+ eop_desc = &adapter->tx_desc_base[last];
/* Get new done point */
- if (++last == adapter->num_tx_desc)
- last = 0;
+ if (++last == adapter->num_tx_desc) last = 0;
done = last;
} else
break;
- }
- bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- adapter->next_tx_to_clean = first;
-
- /*
- * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
- * that it is OK to send packets.
- * If there are no pending descriptors, clear the timeout. Otherwise,
- * if some descriptors have been freed, restart the timeout.
- */
- if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- if (num_avail == adapter->num_tx_desc)
+ }
+ bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ adapter->next_tx_to_clean = first;
+
+ /*
+ * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
+ * that it is OK to send packets.
+ * If there are no pending descriptors, clear the timeout. Otherwise,
+ * if some descriptors have been freed, restart the timeout.
+ */
+ if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ /* All clean, turn off the timer */
+ if (num_avail == adapter->num_tx_desc)
adapter->watchdog_timer = 0;
- else if (num_avail != adapter->num_tx_desc_avail)
+ /* Some cleaned, reset the timer */
+ else if (num_avail != adapter->num_tx_desc_avail)
adapter->watchdog_timer = EM_TX_TIMEOUT;
- }
- adapter->num_tx_desc_avail = num_avail;
+ }
+ adapter->num_tx_desc_avail = num_avail;
+ return;
}
/*********************************************************************
@@ -3195,19 +3808,21 @@ em_get_buf(struct adapter *adapter, int i)
return (ENOBUFS);
}
m->m_len = m->m_pkthdr.len = MCLBYTES;
- if (adapter->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+
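+ /* ETHER_ALIGN (2 bytes) shifts the payload so the IP header lands on a 32-bit boundary after the 14-byte Ethernet header */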
+ if (adapter->hw.mac.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
m_adj(m, ETHER_ALIGN);
/*
* Using memory from the mbuf cluster pool, invoke the
* bus_dma machinery to arrange the memory mapping.
*/
- error = bus_dmamap_load_mbuf_sg(adapter->rxtag, adapter->rx_sparemap,
- m, segs, &nsegs, BUS_DMA_NOWAIT);
+ error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
+ adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
m_free(m);
return (error);
}
+
/* If nsegs is wrong then the stack is corrupt. */
KASSERT(nsegs == 1, ("Too many segments returned!"));
@@ -3222,7 +3837,6 @@ em_get_buf(struct adapter *adapter, int i)
rx_buffer->m_head = m;
adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
-
return (0);
}
@@ -3241,23 +3855,24 @@ em_allocate_receive_structures(struct adapter *adapter)
struct em_buffer *rx_buffer;
int i, error;
- adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) * adapter->num_rx_desc,
- M_DEVBUF, M_NOWAIT);
+ adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
+ adapter->num_rx_desc, M_DEVBUF, M_NOWAIT);
if (adapter->rx_buffer_area == NULL) {
device_printf(dev, "Unable to allocate rx_buffer memory\n");
return (ENOMEM);
}
- bzero(adapter->rx_buffer_area, sizeof(struct em_buffer) * adapter->num_rx_desc);
+ bzero(adapter->rx_buffer_area,
+ sizeof(struct em_buffer) * adapter->num_rx_desc);
- error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- MCLBYTES, /* maxsize */
+ MCLBYTES, /* maxsize */
1, /* nsegments */
- MCLBYTES, /* maxsegsize */
+ MCLBYTES, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
@@ -3268,13 +3883,15 @@ em_allocate_receive_structures(struct adapter *adapter)
goto fail;
}
+ /* Create the spare map (used by getbuf) */
error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
- &adapter->rx_sparemap);
+ &adapter->rx_sparemap);
if (error) {
device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
__func__, error);
goto fail;
}
+
rx_buffer = adapter->rx_buffer_area;
for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
@@ -3286,6 +3903,15 @@ em_allocate_receive_structures(struct adapter *adapter)
}
}
+ /* Setup the initial buffers */
+ for (i = 0; i < adapter->num_rx_desc; i++) {
+ error = em_get_buf(adapter, i);
+ if (error)
+ goto fail;
+ }
+ bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
return (0);
fail:
@@ -3301,34 +3927,16 @@ fail:
static int
em_setup_receive_structures(struct adapter *adapter)
{
- struct em_buffer *rx_buffer;
- int i, error;
+ int error;
- bzero(adapter->rx_desc_base, (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
+ bzero(adapter->rx_desc_base,
+ (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
- /* Free current RX buffers. */
- rx_buffer = adapter->rx_buffer_area;
- for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
- if (rx_buffer->m_head != NULL) {
- bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
- m_freem(rx_buffer->m_head);
- rx_buffer->m_head = NULL;
- }
- }
-
- /* Allocate new ones. */
- for (i = 0; i < adapter->num_rx_desc; i++) {
- error = em_get_buf(adapter, i);
- if (error)
- return (error);
- }
+ if ((error = em_allocate_receive_structures(adapter)) !=0)
+ return (error);
/* Setup our descriptor pointers */
adapter->next_rx_desc_to_check = 0;
- bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
@@ -3352,72 +3960,92 @@ em_initialize_receive_unit(struct adapter *adapter)
* Make sure receives are disabled while setting
* up the descriptor ring
*/
- E1000_WRITE_REG(&adapter->hw, RCTL, 0);
-
- /* Set the Receive Delay Timer Register */
- E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay.value | E1000_RDT_FPDB);
-
- if(adapter->hw.mac_type >= em_82540) {
- E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay.value);
+ reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+ E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl & ~E1000_RCTL_EN);
+ if(adapter->hw.mac.type >= e1000_82540) {
+ E1000_WRITE_REG(&adapter->hw, E1000_RADV,
+ adapter->rx_abs_int_delay.value);
/*
* Set the interrupt throttling rate. Value is calculated
* as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
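* e.g. MAX_INTS_PER_SEC = 8000 gives 1000000000 / (8000 * 256) ~= 488 (in 256ns units)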
*/
#define MAX_INTS_PER_SEC 8000
#define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256)
- E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR);
+ E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
}
/* Setup the Base and Length of the Rx Descriptor Ring */
bus_addr = adapter->rxdma.dma_paddr;
- E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
- sizeof(struct em_rx_desc));
- E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
- E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
+ E1000_WRITE_REG(&adapter->hw, E1000_RDLEN, adapter->num_rx_desc *
+ sizeof(struct e1000_rx_desc));
+ E1000_WRITE_REG(&adapter->hw, E1000_RDBAH, (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(&adapter->hw, E1000_RDBAL, (uint32_t)bus_addr);
/* Setup the Receive Control Register */
- reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
+ reg_rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ reg_rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
- if (adapter->hw.tbi_compatibility_on == TRUE)
+ if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
reg_rctl |= E1000_RCTL_SBP;
-
+ else
+ reg_rctl &= ~E1000_RCTL_SBP;
switch (adapter->rx_buffer_len) {
default:
- case EM_RXBUFFER_2048:
+ case 2048:
reg_rctl |= E1000_RCTL_SZ_2048;
break;
- case EM_RXBUFFER_4096:
- reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ case 4096:
+ reg_rctl |= E1000_RCTL_SZ_4096 |
+ E1000_RCTL_BSEX | E1000_RCTL_LPE;
break;
- case EM_RXBUFFER_8192:
- reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ case 8192:
+ reg_rctl |= E1000_RCTL_SZ_8192 |
+ E1000_RCTL_BSEX | E1000_RCTL_LPE;
break;
- case EM_RXBUFFER_16384:
- reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ case 16384:
+ reg_rctl |= E1000_RCTL_SZ_16384 |
+ E1000_RCTL_BSEX | E1000_RCTL_LPE;
break;
}
if (ifp->if_mtu > ETHERMTU)
reg_rctl |= E1000_RCTL_LPE;
+ else
+ reg_rctl &= ~E1000_RCTL_LPE;
/* Enable 82543 Receive Checksum Offload for TCP and UDP */
- if ((adapter->hw.mac_type >= em_82543) &&
+ if ((adapter->hw.mac.type >= e1000_82543) &&
(ifp->if_capenable & IFCAP_RXCSUM)) {
- reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
+ reg_rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
- E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
+ E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, reg_rxcsum);
}
+ /*
+ ** XXX TEMPORARY WORKAROUND: on some systems with 82573
+ ** long latencies are observed, like Lenovo X60. This
+ ** change eliminates the problem, but since having positive
+ ** values in RDTR is a known source of problems on other
+ ** platforms another solution is being sought.
+ */
+ if (adapter->hw.mac.type == e1000_82573)
+ E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);
+
/* Enable Receives */
- E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
+ E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
- /* Setup the HW Rx Head and Tail Descriptor Pointers */
- E1000_WRITE_REG(&adapter->hw, RDH, 0);
- E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
+ /*
+ * Setup the HW Rx Head and
+ * Tail Descriptor Pointers
+ */
+ E1000_WRITE_REG(&adapter->hw, E1000_RDH, 0);
+ E1000_WRITE_REG(&adapter->hw, E1000_RDT, adapter->num_rx_desc - 1);
+
+ return;
}
/*********************************************************************
@@ -3437,6 +4065,8 @@ em_free_receive_structures(struct adapter *adapter)
bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
adapter->rx_sparemap = NULL;
}
+
+ /* Cleanup any existing buffers */
if (adapter->rx_buffer_area != NULL) {
rx_buffer = adapter->rx_buffer_area;
for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
@@ -3457,10 +4087,12 @@ em_free_receive_structures(struct adapter *adapter)
}
}
}
+
if (adapter->rx_buffer_area != NULL) {
free(adapter->rx_buffer_area, M_DEVBUF);
adapter->rx_buffer_area = NULL;
}
+
if (adapter->rxtag != NULL) {
bus_dma_tag_destroy(adapter->rxtag);
adapter->rxtag = NULL;
@@ -3488,7 +4120,7 @@ em_rxeof(struct adapter *adapter, int count)
int i;
/* Pointer to the receive descriptor being examined. */
- struct em_rx_desc *current_desc;
+ struct e1000_rx_desc *current_desc;
uint8_t status;
ifp = adapter->ifp;
@@ -3540,9 +4172,9 @@ em_rxeof(struct adapter *adapter, int count)
last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
if (TBI_ACCEPT(&adapter->hw, status,
current_desc->errors, pkt_len, last_byte)) {
- em_tbi_adjust_stats(&adapter->hw,
+ e1000_tbi_adjust_stats_82543(&adapter->hw,
&adapter->stats, pkt_len,
- adapter->hw.mac_addr);
+ adapter->hw.mac.addr);
if (len > 0)
len--;
} else
@@ -3586,7 +4218,7 @@ em_rxeof(struct adapter *adapter, int count)
em_receive_checksum(adapter, current_desc,
adapter->fmp);
#ifndef __NO_STRICT_ALIGNMENT
- if (adapter->hw.max_frame_size >
+ if (adapter->hw.mac.max_frame_size >
(MCLBYTES - ETHER_ALIGN) &&
em_fixup_rx(adapter) != 0)
goto skip;
@@ -3612,7 +4244,8 @@ discard:
mp->m_len = mp->m_pkthdr.len = MCLBYTES;
mp->m_data = mp->m_ext.ext_buf;
mp->m_next = NULL;
- if (adapter->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+ if (adapter->hw.mac.max_frame_size <=
+ (MCLBYTES - ETHER_ALIGN))
m_adj(mp, ETHER_ALIGN);
if (adapter->fmp != NULL) {
m_freem(adapter->fmp);
@@ -3637,6 +4270,7 @@ discard:
(*ifp->if_input)(ifp, m);
EM_LOCK(adapter);
#else
+ /* Already running unlocked */
(*ifp->if_input)(ifp, m);
#endif
i = adapter->next_rx_desc_to_check;
@@ -3648,7 +4282,7 @@ discard:
/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
if (--i < 0)
i = adapter->num_rx_desc - 1;
- E1000_WRITE_REG(&adapter->hw, RDT, i);
+ E1000_WRITE_REG(&adapter->hw, E1000_RDT, i);
if (!((current_desc->status) & E1000_RXD_STAT_DD))
return (0);
@@ -3692,12 +4326,10 @@ em_fixup_rx(struct adapter *adapter)
n->m_next = m;
adapter->fmp = n;
} else {
- adapter->ifp->if_iqdrops++;
- adapter->mbuf_alloc_failed++;
+ adapter->dropped_pkts++;
m_freem(adapter->fmp);
adapter->fmp = NULL;
- adapter->lmp = NULL;
- error = ENOBUFS;
+ error = ENOMEM;
}
}
@@ -3713,11 +4345,11 @@ em_fixup_rx(struct adapter *adapter)
*
*********************************************************************/
static void
-em_receive_checksum(struct adapter *adapter, struct em_rx_desc *rx_desc,
- struct mbuf *mp)
+em_receive_checksum(struct adapter *adapter,
+ struct e1000_rx_desc *rx_desc, struct mbuf *mp)
{
/* 82543 or newer only */
- if ((adapter->hw.mac_type < em_82543) ||
+ if ((adapter->hw.mac.type < e1000_82543) ||
/* Ignore Checksum bit is set */
(rx_desc->status & E1000_RXD_STAT_IXSM)) {
mp->m_pkthdr.csum_flags = 0;
@@ -3752,37 +4384,142 @@ em_enable_vlans(struct adapter *adapter)
{
uint32_t ctrl;
- E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
+ E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
- ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+ ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
ctrl |= E1000_CTRL_VME;
- E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+ E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
}
static void
em_enable_intr(struct adapter *adapter)
{
- E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
+ E1000_WRITE_REG(&adapter->hw, E1000_IMS,
+ (IMS_ENABLE_MASK));
}
static void
em_disable_intr(struct adapter *adapter)
{
- /*
- * The first version of 82542 had an errata where when link was forced
- * it would stay up even up even if the cable was disconnected.
- * Sequence errors were used to detect the disconnect and then the
- * driver would unforce the link. This code in the in the ISR. For this
- * to work correctly the Sequence error interrupt had to be enabled
- * all the time.
- */
+ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
+}
- if (adapter->hw.mac_type == em_82542_rev2_0)
- E1000_WRITE_REG(&adapter->hw, IMC,
- (0xffffffff & ~E1000_IMC_RXSEQ));
- else
- E1000_WRITE_REG(&adapter->hw, IMC,
- 0xffffffff);
+/*
+ * Bit of a misnomer: what this really means is
+ * to enable OS management of the system, i.e.
+ * to disable special hardware management features.
+ */
+static void
+em_init_manageability(struct adapter *adapter)
+{
+ /* A shared code workaround */
+#define E1000_82542_MANC2H E1000_MANC2H
+ if (adapter->has_manage) {
+ int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
+ int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
+
+ /* disable hardware interception of ARP */
+ manc &= ~(E1000_MANC_ARP_EN);
+
+ /* enable receiving management packets to the host */
+ if (adapter->hw.mac.type >= e1000_82571) {
+ manc |= E1000_MANC_EN_MNG2HOST;
+#define E1000_MNG2HOST_PORT_623 (1 << 5)
+#define E1000_MNG2HOST_PORT_664 (1 << 6)
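+ /* 623 (RMCP) and 664 (secure RMCP) are the standard ASF management ports */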
+ manc2h |= E1000_MNG2HOST_PORT_623;
+ manc2h |= E1000_MNG2HOST_PORT_664;
+ E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
+ }
+
+ E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
+ }
+}
+
+/*
+ * Give control back to hardware management
+ * controller if there is one.
+ */
+static void
+em_release_manageability(struct adapter *adapter)
+{
+ if (adapter->has_manage) {
+ int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
+
+ /* re-enable hardware interception of ARP */
+ manc |= E1000_MANC_ARP_EN;
+
+ if (adapter->hw.mac.type >= e1000_82571)
+ manc &= ~E1000_MANC_EN_MNG2HOST;
+
+ E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
+ }
+}
+
+/*
+ * em_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ */
+static void
+em_get_hw_control(struct adapter *adapter)
+{
+ u32 ctrl_ext, swsm;
+
+ /* Let firmware know the driver has taken over */
+ switch (adapter->hw.mac.type) {
+ case e1000_82573:
+ swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
+ E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
+ swsm | E1000_SWSM_DRV_LOAD);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ */
+static void
+em_release_hw_control(struct adapter *adapter)
+{
+ u32 ctrl_ext, swsm;
+
+ /* Let firmware take over control of h/w */
+ switch (adapter->hw.mac.type) {
+ case e1000_82573:
+ swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
+ E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
+ swsm & ~E1000_SWSM_DRV_LOAD);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ break;
+ default:
+ break;
+
+ }
}
static int
@@ -3797,55 +4534,110 @@ em_is_valid_ether_addr(uint8_t *addr)
return (TRUE);
}
+/*
+ * NOTE: the following routines using the e1000
+ * naming style are provided to the shared
+ * code which expects that rather than 'em'
+ */
+
void
-em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
+e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
- pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
+ pci_write_config(((struct e1000_osdep *)hw->back)->dev, reg, *value, 2);
}
void
-em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
+e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
- *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
+ *value = pci_read_config(((struct e1000_osdep *)hw->back)->dev, reg, 2);
}
void
-em_pci_set_mwi(struct em_hw *hw)
+e1000_pci_set_mwi(struct e1000_hw *hw)
{
- pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
- (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
+ pci_write_config(((struct e1000_osdep *)hw->back)->dev, PCIR_COMMAND,
+ (hw->bus.pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
}
void
-em_pci_clear_mwi(struct em_hw *hw)
+e1000_pci_clear_mwi(struct e1000_hw *hw)
{
- pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
- (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
+ pci_write_config(((struct e1000_osdep *)hw->back)->dev, PCIR_COMMAND,
+ (hw->bus.pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
}
+/*
+ * Read the PCI Express capabilities
+ */
int32_t
-em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
+e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
- int32_t rc;
- uint16_t pectl;
- device_t dev;
+ int32_t error = E1000_SUCCESS;
+ uint16_t cap_off;
+
+ switch (hw->mac.type) {
+
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ cap_off = 0xE0;
+ e1000_read_pci_cfg(hw, cap_off + reg, value);
+ break;
+ default:
+ error = ~E1000_NOT_IMPLEMENTED;
+ break;
+ }
- dev = ((struct em_osdep *)hw->back)->dev;
+ return (error);
+}
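For comparison, a minimal sketch of how the capability offset could be discovered at run time rather than hard-coded at 0xE0, using the same pci(9) helper the replaced code below relies on (the example_ name is illustrative and not part of this commit; headers are assumed to be those already included by if_em.c, and the error value simply mirrors the default case above):

	int32_t
	example_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
	{
		device_t dev;
		int cap_off;

		dev = ((struct e1000_osdep *)hw->back)->dev;
		/* Locate the PCI Express capability instead of assuming 0xE0. */
		if (pci_find_extcap(dev, PCIY_EXPRESS, &cap_off) != 0)
			return (~E1000_NOT_IMPLEMENTED);	/* mirrors the default case above */
		e1000_read_pci_cfg(hw, cap_off + reg, value);
		return (E1000_SUCCESS);
	}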
- /* find the PCIe link width and set max read request to 4KB*/
- if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
- em_read_pci_cfg(hw, reg + 0x12, value);
+int32_t
+e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, uint32_t size)
+{
+ int32_t error = 0;
- em_read_pci_cfg(hw, reg + 0x8, &pectl);
- pectl = (pectl & ~0x7000) | (5 << 12);
- em_write_pci_cfg(hw, reg + 0x8, &pectl);
- rc = 0;
- } else
- rc = -1;
+ hw->dev_spec = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (hw->dev_spec == NULL)
+ error = ENOMEM;
+ bzero(hw->dev_spec, size);
- return (rc);
+ return (error);
+}
+
+void
+e1000_free_dev_spec_struct(struct e1000_hw *hw)
+{
+ if (hw->dev_spec != NULL)
+ free(hw->dev_spec, M_DEVBUF);
+ return;
}
+/*
+ * Enable PCI Wake On Lan capability
+ */
+void
+em_enable_wakeup(device_t dev)
+{
+ u16 cap, status;
+ u8 id;
+
+ /* First find the capabilities pointer */
+ cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
+ /* Read the PM Capabilities */
+ id = pci_read_config(dev, cap, 1);
+ if (id != PCIY_PMG) /* Something wrong */
+ return;
+ /* OK, we have the power capabilities, so
+ now get the status register */
+ cap += PCIR_POWER_STATUS;
+ status = pci_read_config(dev, cap, 2);
+ status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
+ pci_write_config(dev, cap, status, 2);
+ return;
+}
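Note that this routine assumes the power-management capability is the first entry in the device's capability list, which is evidently the case for these adapters; hardware where that does not hold would need the list walked instead, e.g. with pci_find_extcap(dev, PCIY_PMG, &cap), the same helper the replaced PCIe code elsewhere in this diff uses for PCIY_EXPRESS.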
+
+
/*********************************************************************
* 82544 Coexistence issue workaround.
* There are 2 issues.
@@ -3861,9 +4653,10 @@ em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
*
*
* WORKAROUND:
-* Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
+* Make sure we do not have ending address
+* as 1,2,3,4(Hang) or 9,a,b,c (DAC)
*
-*** *********************************************************************/
+*************************************************************************/
static uint32_t
em_fill_descriptors (bus_addr_t address, uint32_t length,
PDESC_ARRAY desc_array)
@@ -3877,7 +4670,8 @@ em_fill_descriptors (bus_addr_t address, uint32_t length,
desc_array->elements = 1;
return (desc_array->elements);
}
- safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
+ safe_terminator = (uint32_t)((((uint32_t)address & 0x7) +
+ (length & 0xF)) & 0xF);
/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
if (safe_terminator == 0 ||
(safe_terminator > 4 &&
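As a worked example of the check above: a segment whose DMA address ends in 0x6 with length 0x3E yields ((0x6 & 0x7) + (0x3E & 0xF)) & 0xF = (6 + 14) & 0xF = 4, which falls in the 1-4 hang range, so the segment must be split across two descriptors; with length 0x40 the terminator comes out to 6, which is safe and a single descriptor suffices.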
@@ -3908,87 +4702,94 @@ em_update_stats_counters(struct adapter *adapter)
{
struct ifnet *ifp;
- if(adapter->hw.media_type == em_media_type_copper ||
- (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
- adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
- adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
+ if(adapter->hw.media_type == e1000_media_type_copper ||
+ (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
+ adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
}
- adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
- adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
- adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
- adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
-
- adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
- adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
- adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
- adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
- adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
- adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
- adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
- adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
- adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
- adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
- adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
- adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
- adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
- adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
- adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
- adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
- adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
- adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
- adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
- adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
+ adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
+ adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
+ adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
+ adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
+
+ adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
+ adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
+ adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
+ adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
+ adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
+ adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
+ adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
+ adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
+ adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
+ adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
+ adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
+ adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
+ adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
+ adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
+ adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
+ adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
+ adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
+ adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
+ adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
+ adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
/* For the 64-bit byte counters the low dword must be read first. */
/* Both registers clear on the read of the high dword */
- adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
- adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
- adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
- adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
-
- adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
- adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
- adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
- adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
- adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
-
- adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
- adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
- adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
- adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
-
- adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
- adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
- adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
- adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
- adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
- adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
- adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
- adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
- adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
- adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
-
- if (adapter->hw.mac_type >= em_82543) {
- adapter->stats.algnerrc += E1000_READ_REG(&adapter->hw, ALGNERRC);
- adapter->stats.rxerrc += E1000_READ_REG(&adapter->hw, RXERRC);
- adapter->stats.tncrs += E1000_READ_REG(&adapter->hw, TNCRS);
- adapter->stats.cexterr += E1000_READ_REG(&adapter->hw, CEXTERR);
- adapter->stats.tsctc += E1000_READ_REG(&adapter->hw, TSCTC);
- adapter->stats.tsctfc += E1000_READ_REG(&adapter->hw, TSCTFC);
+ adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, E1000_GORCL);
+ adapter->stats.gorch += E1000_READ_REG(&adapter->hw, E1000_GORCH);
+ adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, E1000_GOTCL);
+ adapter->stats.gotch += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
+
+ adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
+ adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
+ adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
+ adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
+ adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
+
+ adapter->stats.torl += E1000_READ_REG(&adapter->hw, E1000_TORL);
+ adapter->stats.torh += E1000_READ_REG(&adapter->hw, E1000_TORH);
+ adapter->stats.totl += E1000_READ_REG(&adapter->hw, E1000_TOTL);
+ adapter->stats.toth += E1000_READ_REG(&adapter->hw, E1000_TOTH);
+
+ adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
+ adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
+ adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
+ adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
+ adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
+ adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
+ adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
+ adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
+ adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
+ adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
+
+ if (adapter->hw.mac.type >= e1000_82543) {
+ adapter->stats.algnerrc +=
+ E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
+ adapter->stats.rxerrc +=
+ E1000_READ_REG(&adapter->hw, E1000_RXERRC);
+ adapter->stats.tncrs +=
+ E1000_READ_REG(&adapter->hw, E1000_TNCRS);
+ adapter->stats.cexterr +=
+ E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
+ adapter->stats.tsctc +=
+ E1000_READ_REG(&adapter->hw, E1000_TSCTC);
+ adapter->stats.tsctfc +=
+ E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
}
ifp = adapter->ifp;
ifp->if_collisions = adapter->stats.colc;
/* Rx Errors */
- ifp->if_ierrors = adapter->stats.rxerrc + adapter->stats.crcerrs +
- adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc +
+ ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
adapter->stats.mpc + adapter->stats.cexterr;
/* Tx Errors */
- ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
- adapter->watchdog_events;
+ ifp->if_oerrors = adapter->stats.ecol +
+ adapter->stats.latecol + adapter->watchdog_events;
}
@@ -4007,26 +4808,29 @@ em_print_debug_info(struct adapter *adapter)
device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
- E1000_READ_REG(&adapter->hw, CTRL),
- E1000_READ_REG(&adapter->hw, RCTL));
+ E1000_READ_REG(&adapter->hw, E1000_CTRL),
+ E1000_READ_REG(&adapter->hw, E1000_RCTL));
device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
- ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),\
- (E1000_READ_REG(&adapter->hw, PBA) & 0xffff) );
+ ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
+ (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
device_printf(dev, "Flow control watermarks high = %d low = %d\n",
- adapter->hw.fc_high_water,
- adapter->hw.fc_low_water);
+ adapter->hw.mac.fc_high_water,
+ adapter->hw.mac.fc_low_water);
device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
- E1000_READ_REG(&adapter->hw, TIDV),
- E1000_READ_REG(&adapter->hw, TADV));
+ E1000_READ_REG(&adapter->hw, E1000_TIDV),
+ E1000_READ_REG(&adapter->hw, E1000_TADV));
device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
- E1000_READ_REG(&adapter->hw, RDTR),
- E1000_READ_REG(&adapter->hw, RADV));
+ E1000_READ_REG(&adapter->hw, E1000_RDTR),
+ E1000_READ_REG(&adapter->hw, E1000_RADV));
device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
(long long)adapter->tx_fifo_wrk_cnt,
(long long)adapter->tx_fifo_reset_cnt);
device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
- E1000_READ_REG(&adapter->hw, TDH),
- E1000_READ_REG(&adapter->hw, TDT));
+ E1000_READ_REG(&adapter->hw, E1000_TDH),
+ E1000_READ_REG(&adapter->hw, E1000_TDT));
+ device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
+ E1000_READ_REG(&adapter->hw, E1000_RDH),
+ E1000_READ_REG(&adapter->hw, E1000_RDT));
device_printf(dev, "Num Tx descriptors avail = %d\n",
adapter->num_tx_desc_avail);
device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
@@ -4037,6 +4841,10 @@ em_print_debug_info(struct adapter *adapter)
adapter->mbuf_alloc_failed);
device_printf(dev, "Std mbuf cluster failed = %ld\n",
adapter->mbuf_cluster_failed);
+ device_printf(dev, "Driver dropped packets = %ld\n",
+ adapter->dropped_pkts);
+ device_printf(dev, "Driver tx dma failure in encap = %ld\n",
+ adapter->no_tx_dma_setup);
}
static void
@@ -4046,13 +4854,16 @@ em_print_hw_stats(struct adapter *adapter)
device_printf(dev, "Excessive collisions = %lld\n",
(long long)adapter->stats.ecol);
+#if (DEBUG_HW > 0) /* Don't output these errors normally */
device_printf(dev, "Symbol errors = %lld\n",
(long long)adapter->stats.symerrs);
+#endif
device_printf(dev, "Sequence errors = %lld\n",
(long long)adapter->stats.sec);
- device_printf(dev, "Defer count = %lld\n", (long long)adapter->stats.dc);
-
- device_printf(dev, "Missed Packets = %lld\n", (long long)adapter->stats.mpc);
+ device_printf(dev, "Defer count = %lld\n",
+ (long long)adapter->stats.dc);
+ device_printf(dev, "Missed Packets = %lld\n",
+ (long long)adapter->stats.mpc);
device_printf(dev, "Receive No Buffers = %lld\n",
(long long)adapter->stats.rnbc);
/* RLEC is inaccurate on some hardware, calculate our own. */
@@ -4060,19 +4871,23 @@ em_print_hw_stats(struct adapter *adapter)
((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
device_printf(dev, "Receive errors = %lld\n",
(long long)adapter->stats.rxerrc);
- device_printf(dev, "Crc errors = %lld\n", (long long)adapter->stats.crcerrs);
+ device_printf(dev, "Crc errors = %lld\n",
+ (long long)adapter->stats.crcerrs);
device_printf(dev, "Alignment errors = %lld\n",
(long long)adapter->stats.algnerrc);
device_printf(dev, "Carrier extension errors = %lld\n",
(long long)adapter->stats.cexterr);
device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
- device_printf(dev, "watchdog timeouts = %ld\n", adapter->watchdog_events);
-
- device_printf(dev, "XON Rcvd = %lld\n", (long long)adapter->stats.xonrxc);
- device_printf(dev, "XON Xmtd = %lld\n", (long long)adapter->stats.xontxc);
- device_printf(dev, "XOFF Rcvd = %lld\n", (long long)adapter->stats.xoffrxc);
- device_printf(dev, "XOFF Xmtd = %lld\n", (long long)adapter->stats.xofftxc);
-
+ device_printf(dev, "watchdog timeouts = %ld\n",
+ adapter->watchdog_events);
+ device_printf(dev, "XON Rcvd = %lld\n",
+ (long long)adapter->stats.xonrxc);
+ device_printf(dev, "XON Xmtd = %lld\n",
+ (long long)adapter->stats.xontxc);
+ device_printf(dev, "XOFF Rcvd = %lld\n",
+ (long long)adapter->stats.xoffrxc);
+ device_printf(dev, "XOFF Xmtd = %lld\n",
+ (long long)adapter->stats.xofftxc);
device_printf(dev, "Good Packets Rcvd = %lld\n",
(long long)adapter->stats.gprc);
device_printf(dev, "Good Packets Xmtd = %lld\n",
@@ -4141,10 +4956,10 @@ em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, &usecs, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
- if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
+ if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
return (EINVAL);
info->value = usecs;
- ticks = E1000_USECS_TO_TICKS(usecs);
+ ticks = EM_USECS_TO_TICKS(usecs);
adapter = info->adapter;
@@ -4154,17 +4969,15 @@ em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
/* Handle a few special cases. */
switch (info->offset) {
case E1000_RDTR:
- case E1000_82542_RDTR:
- regval |= E1000_RDT_FPDB;
break;
case E1000_TIDV:
- case E1000_82542_TIDV:
if (ticks == 0) {
adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
/* Don't write 0 into the TIDV register. */
regval++;
} else
- adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+ if (adapter->hw.mac.type != e1000_82575)
+ adapter->txd_cmd |= E1000_TXD_CMD_IDE;
break;
}
E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
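One assumption worth spelling out: the delay registers touched here (RDTR/RADV/TIDV/TADV) count in 1.024 microsecond units, which is what the EM_TICKS_TO_USECS/EM_USECS_TO_TICKS bounds above express. An illustrative sketch of that conversion (the EX_ names are made up for this note, not the driver's actual macros):

	#define EX_TICKS_TO_USECS(ticks)	((ticks) * 1024 / 1000)
	#define EX_USECS_TO_TICKS(usecs)	((usecs) * 1000 / 1024)
	/* e.g. EX_TICKS_TO_USECS(65535) == 67107, the upper bound the
	 * sysctl handler above enforces on a requested delay. */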
@@ -4188,7 +5001,7 @@ em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
#ifndef DEVICE_POLLING
static void
-em_add_int_process_limit(struct adapter *adapter, const char *name,
+em_add_rx_process_limit(struct adapter *adapter, const char *name,
const char *description, int *limit, int value)
{
*limit = value;