path: root/sys/dev/em/if_em.c
author    jfv <jfv@FreeBSD.org>    2006-10-28 00:47:55 +0000
committer jfv <jfv@FreeBSD.org>    2006-10-28 00:47:55 +0000
commit    8ca206e26c2343bd1ddbcb0313abb6e7eadf1682 (patch)
tree      7ecb50e57013161b3b97e18931d51a083ef45636 /sys/dev/em/if_em.c
parent    f3dd13700bb000a0c99cb0996d1f6c9df117b656 (diff)
This is the merge of the Intel 6.2.9 driver. It provides all new shared code,
new device support, and, it is hoped, a more stable driver for 6.2-RELEASE.
This checkin was discussed and approved today by RE, scottl, jhb, and pdeuskar.
Diffstat (limited to 'sys/dev/em/if_em.c')
-rw-r--r--    sys/dev/em/if_em.c    1531
1 file changed, 661 insertions(+), 870 deletions(-)
diff --git a/sys/dev/em/if_em.c b/sys/dev/em/if_em.c
index bfabd62..030d9a5 100644
--- a/sys/dev/em/if_em.c
+++ b/sys/dev/em/if_em.c
@@ -69,11 +69,11 @@ POSSIBILITY OF SUCH DAMAGE.
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
-#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
+
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/em/if_em_hw.h>
@@ -87,8 +87,7 @@ int em_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
-
-char em_driver_version[] = "Version - 6.1.4 - TSO";
+char em_driver_version[] = "Version - 6.2.9";
/*********************************************************************
@@ -118,8 +117,6 @@ static em_vendor_info_t em_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
- { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
-
{ 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
@@ -154,6 +151,8 @@ static em_vendor_info_t em_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE,
+ PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
@@ -171,9 +170,13 @@ static em_vendor_info_t em_vendor_info_array[] =
PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
/* required last entry */
{ 0, 0, 0, 0, 0}
@@ -207,13 +210,11 @@ static void em_media_status(struct ifnet *, struct ifmediareq *);
static int em_media_change(struct ifnet *);
static void em_identify_hardware(struct adapter *);
static int em_allocate_pci_resources(struct adapter *);
-static int em_allocate_intr(struct adapter *);
-static void em_free_intr(struct adapter *);
static void em_free_pci_resources(struct adapter *);
static void em_local_timer(void *);
static int em_hardware_init(struct adapter *);
static void em_setup_interface(device_t, struct adapter *);
-static int em_setup_transmit_structures(struct adapter *);
+static void em_setup_transmit_structures(struct adapter *);
static void em_initialize_transmit_unit(struct adapter *);
static int em_setup_receive_structures(struct adapter *);
static void em_initialize_receive_unit(struct adapter *);
@@ -233,14 +234,16 @@ static void em_receive_checksum(struct adapter *, struct em_rx_desc *,
struct mbuf *);
static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
uint32_t *, uint32_t *);
-static boolean_t em_tso_setup(struct adapter *, struct mbuf *,
- uint32_t *, uint32_t *);
+#ifdef EM_TSO
+static boolean_t em_tso_setup(struct adapter *, struct mbuf *, u_int32_t *,
+ uint32_t *);
+#endif
static void em_set_promisc(struct adapter *);
static void em_disable_promisc(struct adapter *);
static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_update_link_status(struct adapter *);
-static int em_get_buf(struct adapter *, int);
+static int em_get_buf(int i, struct adapter *, struct mbuf *);
static void em_enable_vlans(struct adapter *);
static void em_disable_vlans(struct adapter *);
static int em_encap(struct adapter *, struct mbuf **);
@@ -262,20 +265,10 @@ static uint32_t em_fill_descriptors (bus_addr_t address, uint32_t length,
static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
const char *, struct em_int_delay_info *, int, int);
+static void em_intr(void *);
-/*
- * Fast interrupt handler and legacy ithread/polling modes are
- * mutually exclusive.
- */
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
-static void em_intr(void *);
-#else
-static void em_intr_fast(void *);
-static void em_add_int_process_limit(struct adapter *, const char *,
- const char *, int *, int);
-static void em_handle_rxtx(void *context, int pending);
-static void em_handle_link(void *context, int pending);
#endif
/*********************************************************************
@@ -308,7 +301,7 @@ MODULE_DEPEND(em, ether, 1, 1, 1);
#define E1000_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
-#define M_TSO_LEN 66
+#define M_TSO_LEN 66 /* mbuf with just hdr and TSO pkthdr */
static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
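The two macros in the hunk above convert between microseconds and the 1.024-microsecond
ticks used by the interrupt-delay registers (TIDV, TADV, RDTR, RADV), rounding in each
direction. A minimal, standalone sketch of the conversion follows; the sample value of
64 ticks is illustrative only and not taken from the driver:

/*
 * Standalone illustration of the interrupt-delay conversion macros.
 * One hardware tick is 1.024 us, so the macros scale by 1024/1000
 * (and back) with rounding.  The value 64 is a made-up example.
 */
#include <stdio.h>

#define E1000_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)

int
main(void)
{
	int ticks = 64;
	int usecs = E1000_TICKS_TO_USECS(ticks);	/* 64 ticks -> 66 us */

	printf("%d ticks = %d us = %d ticks\n",
	    ticks, usecs, E1000_USECS_TO_TICKS(usecs));	/* back to 64 */
	return (0);
}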
@@ -325,10 +318,6 @@ TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
-#ifndef DEVICE_POLLING
-static int em_rx_process_limit = 100;
-TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
-#endif
/*********************************************************************
* Device identification routine
@@ -441,13 +430,6 @@ em_attach(device_t dev)
em_tx_abs_int_delay_dflt);
}
-#ifndef DEVICE_POLLING
- /* Sysctls for limiting the amount of work done in the taskqueue */
- em_add_int_process_limit(adapter, "rx_processing_limit",
- "max number of rx packets to process", &adapter->rx_process_limit,
- em_rx_process_limit);
-#endif
-
/*
* Validate number of transmit and receive descriptors. It
* must not exceed hardware maximum, and must be multiple
@@ -490,9 +472,11 @@ em_attach(device_t dev)
* Set the max frame size assuming standard ethernet
* sized frames.
*/
- adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ adapter->hw.max_frame_size =
+ ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
- adapter->hw.min_frame_size = MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
+ adapter->hw.min_frame_size =
+ MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
/*
* This controls when hardware reports transmit completion
@@ -551,11 +535,23 @@ em_attach(device_t dev)
goto err_hw_init;
}
+ /* Allocate transmit descriptors and buffers */
+ if (em_allocate_transmit_structures(adapter)) {
+ device_printf(dev, "Could not setup transmit structures\n");
+ error = ENOMEM;
+ goto err_tx_struct;
+ }
+
+ /* Allocate receive descriptors and buffers */
+ if (em_allocate_receive_structures(adapter)) {
+ device_printf(dev, "Could not setup receive structures\n");
+ error = ENOMEM;
+ goto err_rx_struct;
+ }
+
/* Setup OS specific network interface */
em_setup_interface(dev, adapter);
- em_allocate_intr(adapter);
-
/* Initialize statistics */
em_clear_hw_cntrs(&adapter->hw);
em_update_stats_counters(adapter);
@@ -569,7 +565,8 @@ em_attach(device_t dev)
/* Identify 82544 on PCIX */
em_get_bus_info(&adapter->hw);
- if(adapter->hw.bus_type == em_bus_type_pcix && adapter->hw.mac_type == em_82544)
+ if (adapter->hw.bus_type == em_bus_type_pcix &&
+ adapter->hw.mac_type == em_82544)
adapter->pcix_82544 = TRUE;
else
adapter->pcix_82544 = FALSE;
@@ -578,13 +575,15 @@ em_attach(device_t dev)
return (0);
+err_rx_struct:
+ em_free_transmit_structures(adapter);
err_hw_init:
+err_tx_struct:
em_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
em_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
- em_free_intr(adapter);
em_free_pci_resources(adapter);
EM_LOCK_DESTROY(adapter);
@@ -614,7 +613,6 @@ em_detach(device_t dev)
ether_poll_deregister(ifp);
#endif
- em_free_intr(adapter);
EM_LOCK(adapter);
adapter->in_detach = 1;
em_stop(adapter);
@@ -626,6 +624,9 @@ em_detach(device_t dev)
bus_generic_detach(dev);
if_free(ifp);
+ em_free_transmit_structures(adapter);
+ em_free_receive_structures(adapter);
+
/* Free Transmit Descriptor ring */
if (adapter->tx_desc_base) {
em_dma_free(adapter, &adapter->txdma);
@@ -806,8 +807,8 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
* 82573 only supports jumbo frames
* if ASPM is disabled.
*/
- em_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1,
- &eeprom_data);
+ em_read_eeprom(&adapter->hw,
+ EEPROM_INIT_3GIO_3, 1, &eeprom_data);
if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
max_frame_size = ETHER_MAX_LEN;
break;
@@ -834,13 +835,14 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_mtu = ifr->ifr_mtu;
adapter->hw.max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
em_init_locked(adapter);
EM_UNLOCK(adapter);
break;
}
case SIOCSIFFLAGS:
- IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
+ IOCTL_DEBUGOUT("ioctl rcv'd:\
+ SIOCSIFFLAGS (Set Interface Flags)");
EM_LOCK(adapter);
if (ifp->if_flags & IFF_UP) {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
@@ -877,8 +879,15 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
}
break;
case SIOCSIFMEDIA:
+ /* Check SOL/IDER usage */
+ if (em_check_phy_reset_block(&adapter->hw)) {
+ device_printf(adapter->dev, "Media change is"
+ "blocked due to SOL/IDER session.\n");
+ break;
+ }
case SIOCGIFMEDIA:
- IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
+ IOCTL_DEBUGOUT("ioctl rcv'd: \
+ SIOCxIFMEDIA (Get/Set Interface Media)");
error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
break;
case SIOCSIFCAP:
@@ -912,17 +921,12 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_HWCSUM;
reinit = 1;
}
- if (mask & IFCAP_TSO4) {
- ifp->if_capenable ^= IFCAP_TSO4;
- reinit = 1;
- }
if (mask & IFCAP_VLAN_HWTAGGING) {
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
reinit = 1;
}
if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
em_init(adapter);
- VLAN_CAPABILITIES(ifp);
break;
}
default:
@@ -955,18 +959,6 @@ em_watchdog(struct ifnet *ifp)
return;
}
- /*
- * Reclaim first as there is a possibility of losing Tx completion
- * interrupts. Possible cause of missing Tx completion interrupts
- * comes from Tx interrupt moderation mechanism(delayed interrupts)
- * or chipset bug.
- */
- em_txeof(adapter);
- if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
- EM_UNLOCK(adapter);
- return;
- }
-
if (em_check_for_link(&adapter->hw) == 0)
device_printf(adapter->dev, "watchdog timeout -- resetting\n");
@@ -1021,15 +1013,16 @@ em_init_locked(struct adapter *adapter)
pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
adapter->tx_fifo_head = 0;
adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
- adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
+ adapter->tx_fifo_size =
+ (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
break;
- case em_80003es2lan: /* 80003es2lan: Total Packet Buffer is 48K */
- case em_82571: /* 82571: Total Packet Buffer is 48K */
- case em_82572: /* 82572: Total Packet Buffer is 48K */
+ /* Total Packet Buffer on these is 48K */
+ case em_82571:
+ case em_82572:
+ case em_80003es2lan:
pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
break;
case em_82573: /* 82573: Total Packet Buffer is 32K */
- /* Jumbo frames not supported */
pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
break;
case em_ich8lan:
@@ -1047,7 +1040,8 @@ em_init_locked(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, PBA, pba);
/* Get the latest mac address, User can use a LAA */
- bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr, ETHER_ADDR_LEN);
+ bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac_addr,
+ ETHER_ADDR_LEN);
/* Initialize the hardware */
if (em_hardware_init(adapter)) {
@@ -1059,24 +1053,19 @@ em_init_locked(struct adapter *adapter)
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
em_enable_vlans(adapter);
+ /* Set hardware offload abilities */
ifp->if_hwassist = 0;
if (adapter->hw.mac_type >= em_82543) {
if (ifp->if_capenable & IFCAP_TXCSUM)
- ifp->if_hwassist = EM_CHECKSUM_FEATURES;
- /*
- * em_setup_transmit_structures() will behave differently
- * based on the state of TSO.
- */
+ ifp->if_hwassist |= EM_CHECKSUM_FEATURES;
+#ifdef EM_TSO
if (ifp->if_capenable & IFCAP_TSO)
ifp->if_hwassist |= EM_TCPSEG_FEATURES;
+#endif
}
/* Prepare transmit descriptors and buffers */
- if (em_setup_transmit_structures(adapter)) {
- device_printf(dev, "Could not setup transmit structures\n");
- em_stop(adapter);
- return;
- }
+ em_setup_transmit_structures(adapter);
em_initialize_transmit_unit(adapter);
/* Setup Multicast table */
@@ -1098,6 +1087,7 @@ em_init_locked(struct adapter *adapter)
callout_reset(&adapter->timer, hz, em_local_timer, adapter);
em_clear_hw_cntrs(&adapter->hw);
+
#ifdef DEVICE_POLLING
/*
* Only enable interrupts if we are not polling, make sure
@@ -1125,11 +1115,6 @@ em_init(void *arg)
#ifdef DEVICE_POLLING
-/*********************************************************************
- *
- * Legacy polling routine
- *
- *********************************************************************/
static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
@@ -1149,7 +1134,8 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
adapter->hw.get_link_status = 1;
em_check_for_link(&adapter->hw);
em_update_link_status(adapter);
- callout_reset(&adapter->timer, hz, em_local_timer, adapter);
+ callout_reset(&adapter->timer, hz,
+ em_local_timer, adapter);
}
}
em_rxeof(adapter, count);
@@ -1159,10 +1145,11 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
em_start_locked(ifp);
EM_UNLOCK(adapter);
}
+#endif /* DEVICE_POLLING */
/*********************************************************************
*
- * Legacy Interrupt Service routine
+ * Interrupt Service routine
*
*********************************************************************/
static void
@@ -1176,10 +1163,12 @@ em_intr(void *arg)
ifp = adapter->ifp;
+#ifdef DEVICE_POLLING
if (ifp->if_capenable & IFCAP_POLLING) {
EM_UNLOCK(adapter);
return;
}
+#endif /* DEVICE_POLLING */
for (;;) {
reg_icr = E1000_READ_REG(&adapter->hw, ICR);
@@ -1209,7 +1198,8 @@ em_intr(void *arg)
adapter->hw.get_link_status = 1;
em_check_for_link(&adapter->hw);
em_update_link_status(adapter);
- callout_reset(&adapter->timer, hz, em_local_timer, adapter);
+ callout_reset(&adapter->timer, hz,
+ em_local_timer, adapter);
}
if (reg_icr & E1000_ICR_RXO)
@@ -1223,103 +1213,6 @@ em_intr(void *arg)
EM_UNLOCK(adapter);
}
-#else /* if not DEVICE_POLLING, then fast interrupt routines only */
-
-static void
-em_handle_link(void *context, int pending)
-{
- struct adapter *adapter = context;
- struct ifnet *ifp;
-
- ifp = adapter->ifp;
-
- EM_LOCK(adapter);
-
- callout_stop(&adapter->timer);
- adapter->hw.get_link_status = 1;
- em_check_for_link(&adapter->hw);
- em_update_link_status(adapter);
- callout_reset(&adapter->timer, hz, em_local_timer, adapter);
- EM_UNLOCK(adapter);
-}
-
-static void
-em_handle_rxtx(void *context, int pending)
-{
- struct adapter *adapter = context;
- struct ifnet *ifp;
-
- NET_LOCK_GIANT();
- ifp = adapter->ifp;
-
- /*
- * TODO:
- * It should be possible to run the tx clean loop without the lock.
- */
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
- taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
- EM_LOCK(adapter);
- em_txeof(adapter);
-
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- em_start_locked(ifp);
- EM_UNLOCK(adapter);
- }
-
- em_enable_intr(adapter);
- NET_UNLOCK_GIANT();
-}
-
-/*********************************************************************
- *
- * Fast Interrupt Service routine
- *
- *********************************************************************/
-static void
-em_intr_fast(void *arg)
-{
- struct adapter *adapter = arg;
- struct ifnet *ifp;
- uint32_t reg_icr;
-
- ifp = adapter->ifp;
-
- reg_icr = E1000_READ_REG(&adapter->hw, ICR);
-
- /* Hot eject? */
- if (reg_icr == 0xffffffff)
- return;
-
- /* Definitely not our interrupt. */
- if (reg_icr == 0x0)
- return;
-
- /*
- * Starting with the 82571 chip, bit 31 should be used to
- * determine whether the interrupt belongs to us.
- */
- if (adapter->hw.mac_type >= em_82571 &&
- (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
- return;
-
- /*
- * Mask interrupts until the taskqueue is finished running. This is
- * cheap, just assume that it is needed. This also works around the
- * MSI message reordering errata on certain systems.
- */
- em_disable_intr(adapter);
- taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
-
- /* Link status change */
- if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
- taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
-
- if (reg_icr & E1000_ICR_RXO)
- adapter->rx_overruns++;
-}
-#endif /* ! DEVICE_POLLING */
-
/*********************************************************************
*
* Media Ioctl callback
@@ -1332,6 +1225,7 @@ static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct adapter *adapter = ifp->if_softc;
+ u_char fiber_type = IFM_1000_SX;
INIT_DEBUGOUT("em_media_status: begin");
@@ -1352,9 +1246,8 @@ em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
if ((adapter->hw.media_type == em_media_type_fiber) ||
(adapter->hw.media_type == em_media_type_internal_serdes)) {
if (adapter->hw.mac_type == em_82545)
- ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
- else
- ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
+ fiber_type = IFM_1000_LX;
+ ifmr->ifm_active |= fiber_type | IFM_FDX;
} else {
switch (adapter->link_speed) {
case 10:
@@ -1449,43 +1342,54 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
struct ifnet *ifp = adapter->ifp;
bus_dma_segment_t segs[EM_MAX_SCATTER];
bus_dmamap_t map;
- struct em_buffer *tx_buffer, *tx_buffer_last;
+ struct em_buffer *tx_buffer, *tx_buffer_mapped;
struct em_tx_desc *current_tx_desc;
struct mbuf *m_head;
+ struct m_tag *mtag;
uint32_t txd_upper, txd_lower, txd_used, txd_saved;
- int nsegs, i, j;
+ int nsegs, i, j, first, last = 0;
int error, do_tso, tso_desc = 0;
m_head = *m_headp;
current_tx_desc = NULL;
txd_upper = txd_lower = txd_used = txd_saved = 0;
+#ifdef EM_TSO
do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
+#else
+ do_tso = 0;
+#endif
- /*
- * Force a cleanup if number of TX descriptors
- * available hits the threshold.
- */
+ /*
+ * Force a cleanup if number of TX descriptors
+ * available hits the threshold
+ */
if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
em_txeof(adapter);
if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
adapter->no_tx_desc_avail1++;
- return (ENOBUFS);
+ return(ENOBUFS);
}
- }
+ }
/*
- * When operating in promiscuous mode, hardware stripping of the
- * VLAN tag on receive is disabled. This should not prevent us
- * from doing hardware insertion of the VLAN tag here as that
- * is controlled by the dma descriptor flags and not the receive
- * tag strip setting. Unfortunatly this hardware switches the
- * VLAN encapsulation type from 802.1q to ISL when stripping om
- * receive is disabled. This means we have to add the vlan
- * encapsulation here in the driver, since it will have come down
- * from the VLAN layer with a tag instead of a VLAN header.
+ * Capture the first descriptor index,
+ * this descriptor will have the index
+ * of the EOP which is the only one that
+ * now gets a DONE bit writeback.
*/
- if ((m_head->m_flags & M_VLANTAG) && adapter->em_insert_vlan_header) {
+ first = adapter->next_avail_tx_desc;
+
+ /* Find out if we are in vlan mode */
+ mtag = VLAN_OUTPUT_TAG(ifp, m_head);
+
+ /*
+ * When operating in promiscuous mode, hardware encapsulation for
+ * packets is disabled. This means we have to add the vlan
+ * encapsulation in the driver, since it will have come down from the
+ * VLAN layer with a tag instead of a VLAN header.
+ */
+ if (mtag != NULL && adapter->em_insert_vlan_header) {
struct ether_vlan_header *evl;
struct ether_header eh;
@@ -1509,57 +1413,46 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
bcopy(&eh, evl, sizeof(*evl));
evl->evl_proto = evl->evl_encap_proto;
evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
- evl->evl_tag = htons(m_head->m_pkthdr.ether_vtag);
+ evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
+ m_tag_delete(m_head, mtag);
+ mtag = NULL;
*m_headp = m_head;
}
/*
- * TSO workaround:
- * If an mbuf contains only the IP and TCP header we have
- * to pull 4 bytes of data into it.
+ * TSO workaround:
+ * If an mbuf is only header we need
+ * to pull 4 bytes of data into it.
*/
if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
m_head = m_pullup(m_head, M_TSO_LEN + 4);
*m_headp = m_head;
- if (m_head == NULL) {
+ if (m_head == NULL)
return (ENOBUFS);
- }
}
- /*
- * Map the packet for DMA.
- */
- tx_buffer = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
- tx_buffer_last = tx_buffer;
+ /*
+ * Map the packet for DMA.
+ */
+ tx_buffer = &adapter->tx_buffer_area[first];
+ tx_buffer_mapped = tx_buffer;
map = tx_buffer->map;
-
- error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, *m_headp, segs,
- &nsegs, BUS_DMA_NOWAIT);
-
- /*
- * There are two types of errors we can (try) to handle:
- * - EFBIG means the mbuf chain was too long and bus_dma ran
- * out of segments. Defragment the mbuf chain and try again.
- * - ENOMEM means bus_dma could not obtain enough bounce buffers
- * at this point in time. Defer sending and try again later.
- * All other errors, in particular EINVAL, are fatal and prevent the
- * mbuf chain from ever going through. Drop it and report error.
- */
+ error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map,
+ *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
struct mbuf *m;
m = m_defrag(*m_headp, M_DONTWAIT);
if (m == NULL) {
- /* Assume m_defrag(9) used only m_get(9). */
adapter->mbuf_alloc_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (ENOBUFS);
}
*m_headp = m;
-
- error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, *m_headp,
- segs, &nsegs, BUS_DMA_NOWAIT);
+ /* Try it again */
+ error = bus_dmamap_load_mbuf_sg(adapter->txtag, tx_buffer->map,
+ *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
if (error == ENOMEM) {
adapter->no_tx_dma_setup++;
@@ -1592,36 +1485,36 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
adapter->tx_tso = FALSE;
}
- if (nsegs > adapter->num_tx_desc_avail - 2) {
- adapter->no_tx_desc_avail2++;
- bus_dmamap_unload(adapter->txtag, map);
- return (ENOBUFS);
- }
- m_head = *m_headp;
+ if (nsegs > (adapter->num_tx_desc_avail - 2)) {
+ adapter->no_tx_desc_avail2++;
+ error = ENOBUFS;
+ goto encap_fail;
+ }
/* Do hardware assists */
- if (ifp->if_hwassist) {
- if (do_tso &&
- em_tso_setup(adapter, m_head, &txd_upper, &txd_lower)) {
+ if (ifp->if_hwassist > 0) {
+#ifdef EM_TSO
+ if (em_tso_setup(adapter, m_head, &txd_upper, &txd_lower)) {
/* we need to make a final sentinel transmit desc */
tso_desc = TRUE;
} else
+#endif
em_transmit_checksum_setup(adapter, m_head,
&txd_upper, &txd_lower);
}
i = adapter->next_avail_tx_desc;
- if (adapter->pcix_82544)
+ if (adapter->pcix_82544)
txd_saved = i;
+ /* Set up our transmit descriptors */
for (j = 0; j < nsegs; j++) {
bus_size_t seg_len;
bus_addr_t seg_addr;
- /* If adapter is 82544 and on PCIX bus. */
+ /* If adapter is 82544 and on PCIX bus */
if(adapter->pcix_82544) {
DESC_ARRAY desc_array;
uint32_t array_elements, counter;
-
/*
* Check the Address and Length combination and
* split the data accordingly
@@ -1632,23 +1525,25 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
if (txd_used == adapter->num_tx_desc_avail) {
adapter->next_avail_tx_desc = txd_saved;
adapter->no_tx_desc_avail2++;
- bus_dmamap_unload(adapter->txtag, map);
- return (ENOBUFS);
+ error = ENOBUFS;
+ goto encap_fail;
}
tx_buffer = &adapter->tx_buffer_area[i];
current_tx_desc = &adapter->tx_desc_base[i];
current_tx_desc->buffer_addr = htole64(
- desc_array.descriptor[counter].address);
+ desc_array.descriptor[counter].address);
current_tx_desc->lower.data = htole32(
- (adapter->txd_cmd | txd_lower |
- (uint16_t)desc_array.descriptor[counter].length));
- current_tx_desc->upper.data = htole32((txd_upper));
+ (adapter->txd_cmd | txd_lower | (uint16_t)
+ desc_array.descriptor[counter].length));
+ current_tx_desc->upper.data =
+ htole32((txd_upper));
+ last = i;
if (++i == adapter->num_tx_desc)
- i = 0;
-
+ i = 0;
tx_buffer->m_head = NULL;
+ tx_buffer->next_eop = -1;
txd_used++;
- }
+ }
} else {
tx_buffer = &adapter->tx_buffer_area[i];
current_tx_desc = &adapter->tx_desc_base[i];
@@ -1668,7 +1563,7 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
htole32(txd_upper);
if (++i == adapter->num_tx_desc)
i = 0;
- /* Now make the sentinel */
+ /* Now make the sentinel */
++txd_used; /* using an extra txd */
current_tx_desc = &adapter->tx_desc_base[i];
tx_buffer = &adapter->tx_buffer_area[i];
@@ -1678,6 +1573,7 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
adapter->txd_cmd | txd_lower | 4);
current_tx_desc->upper.data =
htole32(txd_upper);
+ last = i;
if (++i == adapter->num_tx_desc)
i = 0;
} else {
@@ -1686,10 +1582,12 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
adapter->txd_cmd | txd_lower | seg_len);
current_tx_desc->upper.data =
htole32(txd_upper);
+ last = i;
if (++i == adapter->num_tx_desc)
i = 0;
}
tx_buffer->m_head = NULL;
+ tx_buffer->next_eop = -1;
}
}
@@ -1702,24 +1600,33 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
adapter->num_tx_desc_avail -= txd_used;
}
- if (m_head->m_flags & M_VLANTAG) {
+ if (mtag != NULL) {
/* Set the vlan id. */
current_tx_desc->upper.fields.special =
- htole16(m_head->m_pkthdr.ether_vtag);
-
- /* Tell hardware to add tag. */
- current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
- }
-
- tx_buffer->m_head = m_head;
- tx_buffer_last->map = tx_buffer->map;
+ htole16(VLAN_TAG_VALUE(mtag));
+ /* Tell hardware to add tag */
+ current_tx_desc->lower.data |=
+ htole32(E1000_TXD_CMD_VLE);
+ }
+
+ tx_buffer->m_head = m_head;
+ tx_buffer_mapped->map = tx_buffer->map;
tx_buffer->map = map;
- bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
-
+ bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
+
+ /*
+ * Last Descriptor of Packet
+ * needs End Of Packet (EOP)
+ * and Report Status (RS)
+ */
+ current_tx_desc->lower.data |=
+ htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
/*
- * Last Descriptor of Packet needs End Of Packet (EOP).
+ * Keep track in the first buffer which
+ * descriptor will be written back
*/
- current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
+ tx_buffer = &adapter->tx_buffer_area[first];
+ tx_buffer->next_eop = last;
/*
* Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
@@ -1727,17 +1634,21 @@ em_encap(struct adapter *adapter, struct mbuf **m_headp)
*/
bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
if (adapter->hw.mac_type == em_82547 &&
adapter->link_duplex == HALF_DUPLEX)
em_82547_move_tail_locked(adapter);
else {
E1000_WRITE_REG(&adapter->hw, TDT, i);
if (adapter->hw.mac_type == em_82547)
- em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
+ em_82547_update_fifo_head(adapter,
+ m_head->m_pkthdr.len);
}
return (0);
+
+encap_fail:
+ bus_dmamap_unload(adapter->txtag, tx_buffer->map);
+ return (error);
}
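A key change in em_encap() above is that only the last (EOP) descriptor of a packet now
requests Report Status, and the index of that descriptor is recorded in the packet's first
buffer as next_eop, so the transmit-cleanup path can test the DONE bit of a single
descriptor per packet. The self-contained sketch below uses made-up stand-in types to show
the idea; it is not the driver's em_txeof():

/*
 * Sketch only: minimal stand-in types showing how the next_eop index
 * recorded in a packet's first buffer lets a cleanup loop check one
 * DONE bit per packet.  Names are illustrative, not driver code.
 */
#include <stdio.h>

#define NUM_TX_DESC	8
#define TXD_STAT_DD	0x01		/* "descriptor done", written back by hardware */

struct ex_tx_desc { unsigned char status; };
struct ex_buffer  { int next_eop; };	/* -1 unless this is a packet's first buffer */

struct ex_ring {
	struct ex_tx_desc desc[NUM_TX_DESC];
	struct ex_buffer  buf[NUM_TX_DESC];
	int next_to_clean;
};

/* Reclaim completed packets, stopping at the first packet not yet done. */
static void
ex_txeof(struct ex_ring *r)
{
	int first = r->next_to_clean;
	int eop = r->buf[first].next_eop;

	while (eop != -1 && (r->desc[eop].status & TXD_STAT_DD)) {
		printf("descriptors %d..%d reclaimed\n", first, eop);
		/* a real driver would unload DMA maps and free mbufs here */
		r->buf[first].next_eop = -1;
		first = (eop + 1) % NUM_TX_DESC;
		eop = r->buf[first].next_eop;
	}
	r->next_to_clean = first;
}

int
main(void)
{
	struct ex_ring r = { .next_to_clean = 0 };
	int i;

	for (i = 0; i < NUM_TX_DESC; i++)
		r.buf[i].next_eop = -1;

	/* Pretend a 3-segment packet sits in descriptors 0..2 and has completed. */
	r.buf[0].next_eop = 2;
	r.desc[2].status = TXD_STAT_DD;

	ex_txeof(&r);		/* prints "descriptors 0..2 reclaimed" */
	return (0);
}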
/*********************************************************************
@@ -1766,7 +1677,7 @@ em_82547_move_tail_locked(struct adapter *adapter)
tx_desc = &adapter->tx_desc_base[hw_tdt];
length += tx_desc->lower.flags.length;
eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
- if(++hw_tdt == adapter->num_tx_desc)
+ if (++hw_tdt == adapter->num_tx_desc)
hw_tdt = 0;
if (eop) {
@@ -1829,14 +1740,16 @@ em_82547_update_fifo_head(struct adapter *adapter, int len)
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
-{
+{
uint32_t tctl;
- if ((E1000_READ_REG(&adapter->hw, TDT) == E1000_READ_REG(&adapter->hw, TDH)) &&
- (E1000_READ_REG(&adapter->hw, TDFT) == E1000_READ_REG(&adapter->hw, TDFH)) &&
- (E1000_READ_REG(&adapter->hw, TDFTS) == E1000_READ_REG(&adapter->hw, TDFHS))&&
+ if ((E1000_READ_REG(&adapter->hw, TDT) ==
+ E1000_READ_REG(&adapter->hw, TDH)) &&
+ (E1000_READ_REG(&adapter->hw, TDFT) ==
+ E1000_READ_REG(&adapter->hw, TDFH)) &&
+ (E1000_READ_REG(&adapter->hw, TDFTS) ==
+ E1000_READ_REG(&adapter->hw, TDFHS)) &&
(E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
-
/* Disable TX unit */
tctl = E1000_READ_REG(&adapter->hw, TCTL);
E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
@@ -1872,12 +1785,9 @@ em_set_promisc(struct adapter *adapter)
if (ifp->if_flags & IFF_PROMISC) {
reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
- /*
- * Disable VLAN stripping in promiscous mode.
+ /* Disable VLAN stripping in promiscous mode
* This enables bridging of vlan tagged frames to occur
- * and also allows vlan tags to be seen in tcpdump.
- * XXX: This is a bit bogus as tcpdump may be used
- * w/o promisc mode as well.
+ * and also allows vlan tags to be seen in tcpdump
*/
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
em_disable_vlans(adapter);
@@ -2003,8 +1913,8 @@ em_update_link_status(struct adapter *adapter)
if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
if (adapter->link_active == 0) {
- em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
- &adapter->link_duplex);
+ em_get_speed_and_duplex(&adapter->hw,
+ &adapter->link_speed, &adapter->link_duplex);
/* Check if we may set SPEED_MODE bit on PCI-E */
if ((adapter->link_speed == SPEED_1000) &&
((adapter->hw.mac_type == em_82571) ||
@@ -2055,18 +1965,16 @@ em_stop(void *arg)
INIT_DEBUGOUT("em_stop: begin");
em_disable_intr(adapter);
- em_reset_hw(&adapter->hw);
callout_stop(&adapter->timer);
callout_stop(&adapter->tx_fifo_timer);
- em_free_transmit_structures(adapter);
- em_free_receive_structures(adapter);
/* Tell the stack that the interface is no longer active */
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ em_reset_hw(&adapter->hw);
}
-/********************************************************************
+/*********************************************************************
*
* Determine hardware revision.
*
@@ -2077,23 +1985,33 @@ em_identify_hardware(struct adapter *adapter)
device_t dev = adapter->dev;
/* Make sure our PCI config space has the necessary stuff set */
- pci_enable_busmaster(dev);
- pci_enable_io(dev, SYS_RES_MEMORY);
adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ if ((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) == 0 &&
+ (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN)) {
+ device_printf(dev, "Memory Access and/or Bus Master bits "
+ "were not set!\n");
+ adapter->hw.pci_cmd_word |=
+ (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
+ pci_write_config(dev, PCIR_COMMAND,
+ adapter->hw.pci_cmd_word, 2);
+ }
/* Save off the information about this board */
adapter->hw.vendor_id = pci_get_vendor(dev);
adapter->hw.device_id = pci_get_device(dev);
- adapter->hw.revision_id = pci_get_revid(dev);
- adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
- adapter->hw.subsystem_id = pci_get_subdevice(dev);
+ adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ adapter->hw.subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
/* Identify the MAC */
if (em_set_mac_type(&adapter->hw))
device_printf(dev, "Unknown MAC Type\n");
- if(adapter->hw.mac_type == em_82541 || adapter->hw.mac_type == em_82541_rev_2 ||
- adapter->hw.mac_type == em_82547 || adapter->hw.mac_type == em_82547_rev_2)
+ if (adapter->hw.mac_type == em_82541 ||
+ adapter->hw.mac_type == em_82541_rev_2 ||
+ adapter->hw.mac_type == em_82547 ||
+ adapter->hw.mac_type == em_82547_rev_2)
adapter->hw.phy_init_script = TRUE;
}
@@ -2111,36 +2029,38 @@ em_allocate_pci_resources(struct adapter *adapter)
return (ENXIO);
}
adapter->osdep.mem_bus_space_tag =
- rman_get_bustag(adapter->res_memory);
- adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->res_memory);
+ rman_get_bustag(adapter->res_memory);
+ adapter->osdep.mem_bus_space_handle =
+ rman_get_bushandle(adapter->res_memory);
adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
if (adapter->hw.mac_type > em_82543) {
/* Figure our where our IO BAR is ? */
for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
val = pci_read_config(dev, rid, 4);
- if (E1000_BAR_TYPE(val) == E1000_BAR_TYPE_IO) {
+ if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
adapter->io_rid = rid;
break;
}
rid += 4;
/* check for 64bit BAR */
- if (E1000_BAR_MEM_TYPE(val) == E1000_BAR_MEM_TYPE_64BIT)
+ if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
rid += 4;
}
if (rid >= PCIR_CIS) {
device_printf(dev, "Unable to locate IO BAR\n");
return (ENXIO);
}
- adapter->res_ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
- &adapter->io_rid, RF_ACTIVE);
+ adapter->res_ioport = bus_alloc_resource_any(dev,
+ SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
if (adapter->res_ioport == NULL) {
device_printf(dev, "Unable to allocate bus resource: "
"ioport\n");
return (ENXIO);
}
adapter->hw.io_base = 0;
- adapter->osdep.io_bus_space_tag = rman_get_bustag(adapter->res_ioport);
+ adapter->osdep.io_bus_space_tag =
+ rman_get_bustag(adapter->res_ioport);
adapter->osdep.io_bus_space_handle =
rman_get_bushandle(adapter->res_ioport);
}
@@ -2148,106 +2068,58 @@ em_allocate_pci_resources(struct adapter *adapter)
/* For ICH8 we need to find the flash memory. */
if (adapter->hw.mac_type == em_ich8lan) {
rid = EM_FLASH;
-
- adapter->flash_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &rid, RF_ACTIVE);
- adapter->osdep.flash_bus_space_tag = rman_get_bustag(adapter->flash_mem);
+ adapter->flash_mem = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ adapter->osdep.flash_bus_space_tag =
+ rman_get_bustag(adapter->flash_mem);
adapter->osdep.flash_bus_space_handle =
rman_get_bushandle(adapter->flash_mem);
}
rid = 0x0;
- adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
+ adapter->res_interrupt = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
if (adapter->res_interrupt == NULL) {
device_printf(dev, "Unable to allocate bus resource: "
"interrupt\n");
return (ENXIO);
}
+ if (bus_setup_intr(dev, adapter->res_interrupt,
+ INTR_TYPE_NET | INTR_MPSAFE,
+ (void (*)(void *)) em_intr, adapter,
+ &adapter->int_handler_tag)) {
+ device_printf(dev, "Error registering interrupt handler!\n");
+ return(ENXIO);
+ }
adapter->hw.back = &adapter->osdep;
return (0);
}
-int
-em_allocate_intr(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- int error;
-
- /* Manually turn off all interrupts */
- E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
-
-#ifdef DEVICE_POLLING
- if (adapter->int_handler_tag == NULL && (error = bus_setup_intr(dev,
- adapter->res_interrupt, INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
- &adapter->int_handler_tag)) != 0) {
- device_printf(dev, "Failed to register interrupt handler");
- return (error);
- }
-#else
- /*
- * Try allocating a fast interrupt and the associated deferred
- * processing contexts.
- */
- TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
- TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
- adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
- taskqueue_thread_enqueue, &adapter->tq);
- taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
- device_get_nameunit(adapter->dev));
- if ((error = bus_setup_intr(dev, adapter->res_interrupt,
- INTR_TYPE_NET | INTR_FAST, em_intr_fast, adapter,
- &adapter->int_handler_tag)) != 0) {
- device_printf(dev, "Failed to register fast interrupt "
- "handler: %d\n", error);
- taskqueue_free(adapter->tq);
- adapter->tq = NULL;
- return (error);
- }
-#endif
-
- em_enable_intr(adapter);
- return (0);
-}
-
-static void
-em_free_intr(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
-
- if (adapter->int_handler_tag != NULL) {
- bus_teardown_intr(dev, adapter->res_interrupt, adapter->int_handler_tag);
- adapter->int_handler_tag = NULL;
- }
- if (adapter->tq != NULL) {
- taskqueue_drain(adapter->tq, &adapter->rxtx_task);
- taskqueue_drain(taskqueue_fast, &adapter->link_task);
- taskqueue_free(adapter->tq);
- adapter->tq = NULL;
- }
-}
-
static void
em_free_pci_resources(struct adapter *adapter)
{
device_t dev = adapter->dev;
- if (adapter->res_interrupt != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->res_interrupt);
+ if (adapter->res_interrupt != NULL) {
+ bus_teardown_intr(dev, adapter->res_interrupt,
+ adapter->int_handler_tag);
+ bus_release_resource(dev, SYS_RES_IRQ,
+ 0, adapter->res_interrupt);
+ }
if (adapter->res_memory != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
- adapter->res_memory);
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(0), adapter->res_memory);
if (adapter->flash_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH,
- adapter->flash_mem);
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ EM_FLASH, adapter->flash_mem);
if (adapter->res_ioport != NULL)
- bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
- adapter->res_ioport);
+ bus_release_resource(dev, SYS_RES_IOPORT,
+ adapter->io_rid, adapter->res_ioport);
}
/*********************************************************************
@@ -2278,20 +2150,22 @@ em_hardware_init(struct adapter *adapter)
}
if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
- device_printf(dev, "EEPROM read error while reading part "
- "number\n");
+ device_printf(dev, "EEPROM read error "
+ "reading part number\n");
return (EIO);
}
/* Set up smart power down as default off on newer adapters. */
- if (!em_smart_pwr_down &&
- (adapter->hw.mac_type == em_82571 || adapter->hw.mac_type == em_82572)) {
+ if (!em_smart_pwr_down && (adapter->hw.mac_type == em_82571 ||
+ adapter->hw.mac_type == em_82572)) {
uint16_t phy_tmp = 0;
/* Speed up time to link by disabling smart power down. */
- em_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
+ em_read_phy_reg(&adapter->hw,
+ IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
phy_tmp &= ~IGP02E1000_PM_SPD;
- em_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
+ em_write_phy_reg(&adapter->hw,
+ IGP02E1000_PHY_POWER_MGMT, phy_tmp);
}
/*
@@ -2318,7 +2192,7 @@ em_hardware_init(struct adapter *adapter)
else
adapter->hw.fc_pause_time = 0x1000;
adapter->hw.fc_send_xon = TRUE;
- adapter->hw.fc = em_fc_full;
+ adapter->hw.fc = E1000_FC_FULL;
if (em_init_hw(&adapter->hw) < 0) {
device_printf(dev, "Hardware Initialization Failed");
@@ -2339,6 +2213,8 @@ static void
em_setup_interface(device_t dev, struct adapter *adapter)
{
struct ifnet *ifp;
+ u_char fiber_type = IFM_1000_SX; /* default type */
+
INIT_DEBUGOUT("em_setup_interface: begin");
ifp = adapter->ifp = if_alloc(IFT_ETHER);
@@ -2361,16 +2237,18 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities = ifp->if_capenable = 0;
if (adapter->hw.mac_type >= em_82543) {
- ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
- ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
+ ifp->if_capabilities |= IFCAP_HWCSUM;
+ ifp->if_capenable |= IFCAP_HWCSUM;
}
+#ifdef EM_TSO
/* Enable TSO if available */
if ((adapter->hw.mac_type > em_82544) &&
- (adapter->hw.mac_type != em_82547)) {
- ifp->if_capabilities |= IFCAP_TSO4;
- ifp->if_capenable |= IFCAP_TSO4;
+ (adapter->hw.mac_type != em_82547)) {
+ ifp->if_capabilities |= IFCAP_TSO;
+ ifp->if_capenable |= IFCAP_TSO;
}
+#endif
/*
* Tell the upper layer(s) we support long frames.
@@ -2387,16 +2265,14 @@ em_setup_interface(device_t dev, struct adapter *adapter)
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
*/
- ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
- em_media_status);
+ ifmedia_init(&adapter->media, IFM_IMASK,
+ em_media_change, em_media_status);
if ((adapter->hw.media_type == em_media_type_fiber) ||
(adapter->hw.media_type == em_media_type_internal_serdes)) {
- u_char fiber_type = IFM_1000_SX; /* default type; */
-
if (adapter->hw.mac_type == em_82545)
fiber_type = IFM_1000_LX;
- ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
- 0, NULL);
+ ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
+ 0, NULL);
ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
} else {
ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
@@ -2490,12 +2366,12 @@ em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
}
static int
-em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma,
- int mapflags)
+em_dma_malloc(struct adapter *adapter, bus_size_t size,
+ struct em_dma_alloc *dma, int mapflags)
{
int error;
- error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
+ error = bus_dma_tag_create(NULL, /* parent */
EM_DBA_ALIGN, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
@@ -2508,7 +2384,8 @@ em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma
NULL, /* lockarg */
&dma->dma_tag);
if (error) {
- device_printf(adapter->dev, "%s: bus_dma_tag_create failed: %d\n",
+ device_printf(adapter->dev,
+ "%s: bus_dma_tag_create failed: %d\n",
__func__, error);
goto fail_0;
}
@@ -2516,7 +2393,8 @@ em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma
error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
BUS_DMA_NOWAIT, &dma->dma_map);
if (error) {
- device_printf(adapter->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n",
+ device_printf(adapter->dev,
+ "%s: bus_dmamem_alloc(%ju) failed: %d\n",
__func__, (uintmax_t)size, error);
goto fail_2;
}
@@ -2525,7 +2403,8 @@ em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma
error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
if (error || dma->dma_paddr == 0) {
- device_printf(adapter->dev, "%s: bus_dmamap_load failed: %d\n",
+ device_printf(adapter->dev,
+ "%s: bus_dmamap_load failed: %d\n",
__func__, error);
goto fail_3;
}
@@ -2570,44 +2449,21 @@ em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
static int
em_allocate_transmit_structures(struct adapter *adapter)
{
- adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
- adapter->num_tx_desc, M_DEVBUF, M_NOWAIT);
- if (adapter->tx_buffer_area == NULL) {
- device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
- return (ENOMEM);
- }
-
- bzero(adapter->tx_buffer_area, sizeof(struct em_buffer) * adapter->num_tx_desc);
-
- return (0);
-}
-
-/*********************************************************************
- *
- * Allocate and initialize transmit structures.
- *
- **********************************************************************/
-static int
-em_setup_transmit_structures(struct adapter *adapter)
-{
- struct ifnet *ifp = adapter->ifp;
device_t dev = adapter->dev;
struct em_buffer *tx_buffer;
bus_size_t size, segsize;
int error, i;
+#ifdef EM_TSO
+ size = EM_TSO_SIZE;
+ segsize = PAGE_SIZE;
+#else
+ segsize = size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
+#endif
/*
* Setup DMA descriptor areas.
*/
- segsize = size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
-
- /* Overrides for TSO - want large sizes */
- if (ifp->if_hwassist & EM_TCPSEG_FEATURES) {
- size = EM_TSO_SIZE;
- segsize = PAGE_SIZE;
- }
-
- if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ if ((error = bus_dma_tag_create(NULL, /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
@@ -2623,10 +2479,14 @@ em_setup_transmit_structures(struct adapter *adapter)
goto fail;
}
- if ((error = em_allocate_transmit_structures(adapter)) != 0)
+ adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
+ adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (adapter->tx_buffer_area == NULL) {
+ device_printf(dev, "Unable to allocate tx_buffer memory\n");
+ error = ENOMEM;
goto fail;
+ }
- bzero(adapter->tx_desc_base, (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
tx_buffer = adapter->tx_buffer_area;
for (i = 0; i < adapter->num_tx_desc; i++) {
error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
@@ -2637,8 +2497,41 @@ em_setup_transmit_structures(struct adapter *adapter)
tx_buffer++;
}
+ return (0);
+
+fail:
+ em_free_transmit_structures(adapter);
+ return (error);
+}
+
+/*********************************************************************
+ *
+ * Initialize transmit structures.
+ *
+ **********************************************************************/
+static void
+em_setup_transmit_structures(struct adapter *adapter)
+{
+ struct em_buffer *tx_buffer;
+ int i;
+
+ bzero(adapter->tx_desc_base,
+ (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
+
adapter->next_avail_tx_desc = 0;
- adapter->oldest_used_tx_desc = 0;
+ adapter->next_tx_to_clean = 0;
+
+ /* Free any existing tx buffers. */
+ tx_buffer = adapter->tx_buffer_area;
+ for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
+ if (tx_buffer->m_head != NULL) {
+ bus_dmamap_sync(adapter->txtag, tx_buffer->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(adapter->txtag, tx_buffer->map);
+ m_freem(tx_buffer->m_head);
+ tx_buffer->m_head = NULL;
+ }
+ }
/* Set number of descriptors available */
adapter->num_tx_desc_avail = adapter->num_tx_desc;
@@ -2647,12 +2540,6 @@ em_setup_transmit_structures(struct adapter *adapter)
adapter->active_checksum_context = OFFLOAD_NONE;
bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- return (0);
-
-fail:
- em_free_transmit_structures(adapter);
- return (error);
}
/*********************************************************************
@@ -2663,8 +2550,7 @@ fail:
static void
em_initialize_transmit_unit(struct adapter *adapter)
{
- uint32_t reg_tctl, reg_tarc;
- uint32_t reg_tipg = 0;
+ uint32_t reg_tctl, reg_tipg = 0;
uint64_t bus_addr;
INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
@@ -2679,8 +2565,8 @@ em_initialize_transmit_unit(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, TDT, 0);
E1000_WRITE_REG(&adapter->hw, TDH, 0);
-
- HW_DEBUGOUT2("Base = %x, Length = %x\n", E1000_READ_REG(&adapter->hw, TDBAL),
+ HW_DEBUGOUT2("Base = %x, Length = %x\n",
+ E1000_READ_REG(&adapter->hw, TDBAL),
E1000_READ_REG(&adapter->hw, TDLEN));
/* Set the default values for the Tx Inter Packet Gap timer */
@@ -2709,25 +2595,8 @@ em_initialize_transmit_unit(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
if(adapter->hw.mac_type >= em_82540)
- E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay.value);
-
- /* Do adapter specific tweaks before we enable the transmitter. */
- if (adapter->hw.mac_type == em_82571 || adapter->hw.mac_type == em_82572) {
- reg_tarc = E1000_READ_REG(&adapter->hw, TARC0);
- reg_tarc |= (1 << 25);
- E1000_WRITE_REG(&adapter->hw, TARC0, reg_tarc);
- reg_tarc = E1000_READ_REG(&adapter->hw, TARC1);
- reg_tarc |= (1 << 25);
- reg_tarc &= ~(1 << 28);
- E1000_WRITE_REG(&adapter->hw, TARC1, reg_tarc);
- } else if (adapter->hw.mac_type == em_80003es2lan) {
- reg_tarc = E1000_READ_REG(&adapter->hw, TARC0);
- reg_tarc |= 1;
- E1000_WRITE_REG(&adapter->hw, TARC0, reg_tarc);
- reg_tarc = E1000_READ_REG(&adapter->hw, TARC1);
- reg_tarc |= 1;
- E1000_WRITE_REG(&adapter->hw, TARC1, reg_tarc);
- }
+ E1000_WRITE_REG(&adapter->hw, TADV,
+ adapter->tx_abs_int_delay.value);
/* Program the Transmit Control Register */
reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
@@ -2742,8 +2611,8 @@ em_initialize_transmit_unit(struct adapter *adapter)
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
- /* Setup Transmit Descriptor Settings for this adapter */
- adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
+ /* Setup Transmit Descriptor Base Settings */
+ adapter->txd_cmd = E1000_TXD_CMD_IFCS;
if (adapter->tx_int_delay.value > 0)
adapter->txd_cmd |= E1000_TXD_CMD_IDE;
@@ -2805,127 +2674,69 @@ em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
{
struct em_context_desc *TXD;
struct em_buffer *tx_buffer;
- struct ether_vlan_header *eh;
- struct ip *ip;
- struct ip6_hdr *ip6;
- struct tcp_hdr *th;
- int curr_txd, ehdrlen, hdr_len, ip_hlen;
- uint32_t cmd = 0;
- uint16_t etype;
- uint8_t ipproto;
-
- /* Setup checksum offload context. */
- curr_txd = adapter->next_avail_tx_desc;
- tx_buffer = &adapter->tx_buffer_area[curr_txd];
- TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
+ int curr_txd;
- *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
- E1000_TXD_DTYP_D; /* Data descr */
+ if (mp->m_pkthdr.csum_flags) {
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present,
- * helpful for QinQ too.
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- etype = ntohs(eh->evl_proto);
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
+ *txd_upper = E1000_TXD_POPTS_TXSM << 8;
+ *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
+ return;
+ else
+ adapter->active_checksum_context =
+ OFFLOAD_TCP_IP;
+ } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
+ *txd_upper = E1000_TXD_POPTS_TXSM << 8;
+ *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
+ return;
+ else
+ adapter->active_checksum_context =
+ OFFLOAD_UDP_IP;
+ } else {
+ *txd_upper = 0;
+ *txd_lower = 0;
+ return;
+ }
} else {
- etype = ntohs(eh->evl_encap_proto);
- ehdrlen = ETHER_HDR_LEN;
+ *txd_upper = 0;
+ *txd_lower = 0;
+ return;
}
- /*
- * We only support TCP/UDP for IPv4 and IPv6 for the moment.
- * TODO: Support SCTP too when it hits the tree.
+ /* If we reach this point, the checksum offload context
+ * needs to be reset.
*/
- switch (etype) {
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + ehdrlen);
- ip_hlen = ip->ip_hl << 2;
-
- /* Setup of IP header checksum. */
- if (mp->m_pkthdr.csum_flags & CSUM_IP) {
- /*
- * Start offset for header checksum calculation.
- * End offset for header checksum calculation.
- * Offset of place to put the checksum.
- */
- TXD->lower_setup.ip_fields.ipcss = ehdrlen;
- TXD->lower_setup.ip_fields.ipcse =
- htole16(ehdrlen + ip_hlen);
- TXD->lower_setup.ip_fields.ipcso =
- ehdrlen + offsetof(struct ip, ip_sum);
- cmd |= E1000_TXD_CMD_IP;
- *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
- }
-
- if (mp->m_len < ehdrlen + ip_hlen)
- return; /* failure */
-
- hdr_len = ehdrlen + ip_hlen;
- ipproto = ip->ip_p;
-
- break;
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
-
- if (mp->m_len < ehdrlen + ip_hlen)
- return; /* failure */
-
- /* IPv6 doesn't have a header checksum. */
+ curr_txd = adapter->next_avail_tx_desc;
+ tx_buffer = &adapter->tx_buffer_area[curr_txd];
+ TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
- hdr_len = ehdrlen + ip_hlen;
- ipproto = ip6->ip6_nxt;
+ TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
+ TXD->lower_setup.ip_fields.ipcso =
+ ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
+ TXD->lower_setup.ip_fields.ipcse =
+ htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
- break;
- default:
- *txd_upper = 0;
- *txd_lower = 0;
- return;
- }
+ TXD->upper_setup.tcp_fields.tucss =
+ ETHER_HDR_LEN + sizeof(struct ip);
+ TXD->upper_setup.tcp_fields.tucse = htole16(0);
- switch (ipproto) {
- case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
- /*
- * Start offset for payload checksum calculation.
- * End offset for payload checksum calculation.
- * Offset of place to put the checksum.
- */
- th = (struct tcp_hdr *)(mp->m_data + hdr_len);
- TXD->upper_setup.tcp_fields.tucss = hdr_len;
- TXD->upper_setup.tcp_fields.tucse = htole16(0);
- TXD->upper_setup.tcp_fields.tucso =
- hdr_len + offsetof(struct tcphdr, th_sum);
- cmd |= E1000_TXD_CMD_TCP;
- *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
- }
- break;
- case IPPROTO_UDP:
- if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
- /*
- * Start offset for header checksum calculation.
- * End offset for header checksum calculation.
- * Offset of place to put the checksum.
- */
- TXD->upper_setup.tcp_fields.tucss = hdr_len;
- TXD->upper_setup.tcp_fields.tucse = htole16(0);
- TXD->upper_setup.tcp_fields.tucso =
- hdr_len + offsetof(struct udphdr, uh_sum);
- *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
- }
- break;
- default:
- break;
+ if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
+ TXD->upper_setup.tcp_fields.tucso =
+ ETHER_HDR_LEN + sizeof(struct ip) +
+ offsetof(struct tcphdr, th_sum);
+ } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
+ TXD->upper_setup.tcp_fields.tucso =
+ ETHER_HDR_LEN + sizeof(struct ip) +
+ offsetof(struct udphdr, uh_sum);
}
TXD->tcp_seg_setup.data = htole32(0);
- TXD->cmd_and_length =
- htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
+ TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
+
tx_buffer->m_head = NULL;
+ tx_buffer->next_eop = -1;
if (++curr_txd == adapter->num_tx_desc)
curr_txd = 0;
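The rewritten em_transmit_checksum_setup() in the hunk above caches the currently
programmed offload type in adapter->active_checksum_context, so a context descriptor is
consumed only when the requested offload (TCP vs. UDP) actually changes. A self-contained
sketch of that caching idea, using made-up names rather than the driver's structures:

/*
 * Sketch of the context-descriptor caching idea (made-up names): a new
 * context is "spent" only when the requested offload type differs from
 * the one already programmed into the hardware.
 */
#include <stdio.h>

enum ex_ctx { OFFLOAD_NONE, OFFLOAD_TCP_IP, OFFLOAD_UDP_IP };

static enum ex_ctx active_ctx = OFFLOAD_NONE;
static int ctx_descs_used;

static void
ex_checksum_setup(enum ex_ctx wanted)
{
	if (wanted == active_ctx)
		return;			/* context already programmed, reuse it */
	ctx_descs_used++;		/* would queue a new context descriptor here */
	active_ctx = wanted;
}

int
main(void)
{
	ex_checksum_setup(OFFLOAD_TCP_IP);	/* new context     */
	ex_checksum_setup(OFFLOAD_TCP_IP);	/* cached, no cost */
	ex_checksum_setup(OFFLOAD_UDP_IP);	/* new context     */
	printf("context descriptors used: %d\n", ctx_descs_used);	/* 2 */
	return (0);
}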
@@ -2934,153 +2745,76 @@ em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
adapter->next_avail_tx_desc = curr_txd;
}
+#ifdef EM_TSO
/**********************************************************************
*
* Setup work for hardware segmentation offload (TSO)
*
**********************************************************************/
static boolean_t
-em_tso_setup(struct adapter *adapter, struct mbuf *mp, uint32_t *txd_upper,
- uint32_t *txd_lower)
+em_tso_setup(struct adapter *adapter,
+ struct mbuf *mp,
+ u_int32_t *txd_upper,
+ u_int32_t *txd_lower)
{
struct em_context_desc *TXD;
struct em_buffer *tx_buffer;
- struct ether_vlan_header *eh;
struct ip *ip;
- struct ip6_hdr *ip6;
struct tcphdr *th;
- int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
- uint16_t etype;
-
- /*
- * XXX: This is not really correct as the stack would not have
- * set up all checksums.
- * XXX: Return FALSE is not sufficient as we may have to return
- * in true failure cases as well. Should do -1 (failure), 0 (no)
- * and 1 (success).
- */
- if (mp->m_pkthdr.len <= E1000_TX_BUFFER_SIZE)
- return FALSE; /* 0 */
+ int curr_txd, hdr_len, ip_hlen, tcp_hlen;
- /*
- * This function could/should be extended to support IP/IPv6
- * fragmentation as well. But as they say, one step at a time.
- */
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present,
- * helpful for QinQ too.
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- etype = ntohs(eh->evl_proto);
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- } else {
- etype = ntohs(eh->evl_encap_proto);
- ehdrlen = ETHER_HDR_LEN;
- }
-
- /* Ensure we have at least the IP+TCP header in the first mbuf. */
- if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
- return FALSE; /* -1 */
-
- /*
- * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
- * TODO: Support SCTP too when it hits the tree.
- */
- switch (etype) {
- case ETHERTYPE_IP:
- isip6 = 0;
- ip = (struct ip *)(mp->m_data + ehdrlen);
- if (ip->ip_p != IPPROTO_TCP)
- return FALSE; /* 0 */
- ip->ip_len = 0;
- ip->ip_sum = 0;
- ip_hlen = ip->ip_hl << 2;
- if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
- return FALSE; /* -1 */
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
-#if 1
- th->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP));
-#else
- th->th_sum = mp->m_pkthdr.csum_data;
-#endif
- break;
- case ETHERTYPE_IPV6:
- isip6 = 1;
- return FALSE; /* Not supported yet. */
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- if (ip6->ip6_nxt != IPPROTO_TCP)
- return FALSE; /* 0 */
- ip6->ip6_plen = 0;
- ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
- if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
- return FALSE; /* -1 */
- th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
-#if 0
- th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
- htons(IPPROTO_TCP)); /* XXX: function notyet. */
-#else
- th->th_sum = mp->m_pkthdr.csum_data;
-#endif
- break;
- default:
+ if (((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
+ (mp->m_pkthdr.len <= E1000_TX_BUFFER_SIZE)) {
return FALSE;
}
- hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);
- *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
- E1000_TXD_DTYP_D | /* Data descr type */
- E1000_TXD_CMD_TSE); /* Do TSE on this packet */
+ *txd_lower = (E1000_TXD_CMD_DEXT |
+ E1000_TXD_DTYP_D |
+ E1000_TXD_CMD_TSE);
- /* IP and/or TCP header checksum calculation and insertion. */
- *txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
+ *txd_upper = (E1000_TXD_POPTS_IXSM |
E1000_TXD_POPTS_TXSM) << 8;
curr_txd = adapter->next_avail_tx_desc;
tx_buffer = &adapter->tx_buffer_area[curr_txd];
TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
- /* IPv6 doesn't have a header checksum. */
- if (!isip6) {
- /*
- * Start offset for header checksum calculation.
- * End offset for header checksum calculation.
- * Offset of place put the checksum.
- */
- TXD->lower_setup.ip_fields.ipcss = ehdrlen;
- TXD->lower_setup.ip_fields.ipcse =
- htole16(ehdrlen + ip_hlen - 1);
- TXD->lower_setup.ip_fields.ipcso =
- ehdrlen + offsetof(struct ip, ip_sum);
- }
- /*
- * Start offset for payload checksum calculation.
- * End offset for payload checksum calculation.
- * Offset of place to put the checksum.
- */
- TXD->upper_setup.tcp_fields.tucss =
- ehdrlen + ip_hlen;
+ mp->m_data += sizeof(struct ether_header);
+ ip = mtod(mp, struct ip *);
+ ip->ip_len = 0;
+ ip->ip_sum = 0;
+	ip_hlen = ip->ip_hl << 2;
+ th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+ tcp_hlen = th->th_off << 2;
+
+ hdr_len = ETHER_HDR_LEN + ip_hlen + tcp_hlen;
+ th->th_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr,
+ htons(IPPROTO_TCP));
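+	/*
+	 * Editorial note (not part of the original patch): the TCP checksum
+	 * field is seeded here with only the pseudo-header sum (addresses and
+	 * protocol, no length), so the hardware can add the per-segment
+	 * length and payload checksum for each frame it generates.
+	 */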
+
+ mp->m_data -= sizeof(struct ether_header);
+ TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
+ TXD->lower_setup.ip_fields.ipcso =
+ ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
+ TXD->lower_setup.ip_fields.ipcse =
+ htole16(ETHER_HDR_LEN + ip_hlen - 1);
+
+ TXD->upper_setup.tcp_fields.tucss =
+ ETHER_HDR_LEN + ip_hlen;
TXD->upper_setup.tcp_fields.tucse = 0;
- TXD->upper_setup.tcp_fields.tucso =
- ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
- /*
- * Payload size per packet w/o any headers.
- * Length of all headers up to payload.
- */
+ TXD->upper_setup.tcp_fields.tucso =
+ ETHER_HDR_LEN + ip_hlen +
+ offsetof(struct tcphdr, th_sum);
TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
TXD->tcp_seg_setup.fields.hdr_len = hdr_len;
-
TXD->cmd_and_length = htole32(adapter->txd_cmd |
- E1000_TXD_CMD_DEXT | /* Extended descr */
- E1000_TXD_CMD_TSE | /* TSE context */
- (isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
- E1000_TXD_CMD_TCP | /* Do TCP checksum */
- (mp->m_pkthdr.len - (hdr_len))); /* Total len */
+ E1000_TXD_CMD_DEXT |
+ E1000_TXD_CMD_TSE |
+ E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
+ (mp->m_pkthdr.len - (hdr_len)));
tx_buffer->m_head = NULL;
+ tx_buffer->next_eop = -1;
if (++curr_txd == adapter->num_tx_desc)
curr_txd = 0;
@@ -3091,6 +2825,7 @@ em_tso_setup(struct adapter *adapter, struct mbuf *mp, uint32_t *txd_upper,
return TRUE;
}
+#endif /* EM_TSO */
/**********************************************************************
*
@@ -3102,64 +2837,89 @@ em_tso_setup(struct adapter *adapter, struct mbuf *mp, uint32_t *txd_upper,
static void
em_txeof(struct adapter *adapter)
{
- int i, num_avail;
- struct em_buffer *tx_buffer;
- struct em_tx_desc *tx_desc;
+ int first, last, done, num_avail;
+ struct em_buffer *tx_buffer;
+ struct em_tx_desc *tx_desc, *eop_desc;
struct ifnet *ifp = adapter->ifp;
EM_LOCK_ASSERT(adapter);
- if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
- return;
-
- num_avail = adapter->num_tx_desc_avail;
- i = adapter->oldest_used_tx_desc;
+ if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
+ return;
- tx_buffer = &adapter->tx_buffer_area[i];
- tx_desc = &adapter->tx_desc_base[i];
+ num_avail = adapter->num_tx_desc_avail;
+ first = adapter->next_tx_to_clean;
+ tx_desc = &adapter->tx_desc_base[first];
+ tx_buffer = &adapter->tx_buffer_area[first];
+ last = tx_buffer->next_eop;
+ eop_desc = &adapter->tx_desc_base[last];
- bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
- BUS_DMASYNC_POSTREAD);
- while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
+ /*
+	 * Now calculate the terminating index
+ * for the cleanup loop below
+ */
+ if (++last == adapter->num_tx_desc) last = 0;
+ done = last;
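+	/*
+	 * Editorial illustration (not from the original patch): assuming a
+	 * hypothetical ring with num_tx_desc = 256, if the packet starts at
+	 * first = 250 and its next_eop = 255, then ++last wraps to 0 and
+	 * done = 0, so the loop below frees descriptors 250 through 255 and
+	 * stops when first catches up with done.
+	 */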
+
+ bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+ while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
+ /* We clean the range of the packet */
+ while (first != done) {
+ tx_desc->upper.data = 0;
+ tx_desc->lower.data = 0;
+ num_avail++;
+
+ if (tx_buffer->m_head) {
+ ifp->if_opackets++;
+ bus_dmamap_sync(adapter->txtag,
+ tx_buffer->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(adapter->txtag,
+ tx_buffer->map);
- tx_desc->upper.data = 0;
- num_avail++;
+ m_freem(tx_buffer->m_head);
+ tx_buffer->m_head = NULL;
+ }
+ tx_buffer->next_eop = -1;
- if (tx_buffer->m_head) {
- ifp->if_opackets++;
- bus_dmamap_sync(adapter->txtag, tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(adapter->txtag, tx_buffer->map);
+ if (++first == adapter->num_tx_desc)
+ first = 0;
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
+ tx_buffer = &adapter->tx_buffer_area[first];
+ tx_desc = &adapter->tx_desc_base[first];
}
-
- if (++i == adapter->num_tx_desc)
- i = 0;
-
- tx_buffer = &adapter->tx_buffer_area[i];
- tx_desc = &adapter->tx_desc_base[i];
- }
- bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- adapter->oldest_used_tx_desc = i;
-
- /*
- * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
- * that it is OK to send packets.
- * If there are no pending descriptors, clear the timeout. Otherwise,
- * if some descriptors have been freed, restart the timeout.
- */
- if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- if (num_avail == adapter->num_tx_desc)
- ifp->if_timer = 0;
- else if (num_avail != adapter->num_tx_desc_avail)
- ifp->if_timer = EM_TX_TIMEOUT;
- }
- adapter->num_tx_desc_avail = num_avail;
+ /* See if we can continue to the next packet */
+ last = tx_buffer->next_eop;
+ if (last != -1) {
+ eop_desc = &adapter->tx_desc_base[last];
+ /* Get new done point */
+ if (++last == adapter->num_tx_desc) last = 0;
+ done = last;
+ } else
+ break;
+ }
+ bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ adapter->next_tx_to_clean = first;
+
+ /*
+ * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack
+ * that it is OK to send packets.
+ * If there are no pending descriptors, clear the timeout. Otherwise,
+ * if some descriptors have been freed, restart the timeout.
+ */
+ if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ if (num_avail == adapter->num_tx_desc)
+ ifp->if_timer = 0;
+ else if (num_avail != adapter->num_tx_desc_avail)
+ ifp->if_timer = EM_TX_TIMEOUT;
+ }
+ adapter->num_tx_desc_avail = num_avail;
+ return;
}
/*********************************************************************
@@ -3168,47 +2928,46 @@ em_txeof(struct adapter *adapter)
*
**********************************************************************/
static int
-em_get_buf(struct adapter *adapter, int i)
+em_get_buf(int i, struct adapter *adapter, struct mbuf *mp)
{
- struct mbuf *m;
+ struct ifnet *ifp = adapter->ifp;
bus_dma_segment_t segs[1];
- bus_dmamap_t map;
struct em_buffer *rx_buffer;
int error, nsegs;
- m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
- if (m == NULL) {
- adapter->mbuf_cluster_failed++;
- return (ENOBUFS);
+ if (mp == NULL) {
+ mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (mp == NULL) {
+ adapter->mbuf_cluster_failed++;
+ return (ENOBUFS);
+ }
+ mp->m_len = mp->m_pkthdr.len = MCLBYTES;
+ } else {
+ mp->m_len = mp->m_pkthdr.len = MCLBYTES;
+ mp->m_data = mp->m_ext.ext_buf;
+ mp->m_next = NULL;
}
- m->m_len = m->m_pkthdr.len = MCLBYTES;
- if (adapter->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
- m_adj(m, ETHER_ALIGN);
+
+ if (ifp->if_mtu <= ETHERMTU)
+ m_adj(mp, ETHER_ALIGN);
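+	/*
+	 * Editorial note: ETHER_ALIGN is 2, so trimming the cluster here
+	 * offsets the 14-byte Ethernet header and leaves the IP header
+	 * 32-bit aligned for standard-MTU frames.
+	 */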
+
+ rx_buffer = &adapter->rx_buffer_area[i];
/*
* Using memory from the mbuf cluster pool, invoke the
* bus_dma machinery to arrange the memory mapping.
*/
- error = bus_dmamap_load_mbuf_sg(adapter->rxtag, adapter->rx_sparemap,
- m, segs, &nsegs, BUS_DMA_NOWAIT);
+ error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
+ mp, segs, &nsegs, 0);
if (error != 0) {
- m_free(m);
+ m_free(mp);
return (error);
}
/* If nsegs is wrong then the stack is corrupt. */
KASSERT(nsegs == 1, ("Too many segments returned!"));
-
- rx_buffer = &adapter->rx_buffer_area[i];
- if (rx_buffer->m_head != NULL)
- bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
-
- map = rx_buffer->map;
- rx_buffer->map = adapter->rx_sparemap;
- adapter->rx_sparemap = map;
- bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
- rx_buffer->m_head = m;
-
+ rx_buffer->m_head = mp;
adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
+ bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
return (0);
}
@@ -3228,16 +2987,17 @@ em_allocate_receive_structures(struct adapter *adapter)
struct em_buffer *rx_buffer;
int i, error;
- adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) * adapter->num_rx_desc,
- M_DEVBUF, M_NOWAIT);
+ adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
+ adapter->num_rx_desc, M_DEVBUF, M_NOWAIT);
if (adapter->rx_buffer_area == NULL) {
device_printf(dev, "Unable to allocate rx_buffer memory\n");
return (ENOMEM);
}
- bzero(adapter->rx_buffer_area, sizeof(struct em_buffer) * adapter->num_rx_desc);
+ bzero(adapter->rx_buffer_area,
+ sizeof(struct em_buffer) * adapter->num_rx_desc);
- error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ error = bus_dma_tag_create(NULL, /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
@@ -3255,13 +3015,6 @@ em_allocate_receive_structures(struct adapter *adapter)
goto fail;
}
- error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
- &adapter->rx_sparemap);
- if (error) {
- device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
- __func__, error);
- goto fail;
- }
rx_buffer = adapter->rx_buffer_area;
for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
@@ -3273,14 +3026,6 @@ em_allocate_receive_structures(struct adapter *adapter)
}
}
- for (i = 0; i < adapter->num_rx_desc; i++) {
- error = em_get_buf(adapter, i);
- if (error)
- goto fail;
- }
- bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
return (0);
fail:
@@ -3296,15 +3041,35 @@ fail:
static int
em_setup_receive_structures(struct adapter *adapter)
{
- int error;
+ struct em_buffer *rx_buffer;
+ int i, error;
- bzero(adapter->rx_desc_base, (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
+ bzero(adapter->rx_desc_base,
+ (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
- if ((error = em_allocate_receive_structures(adapter)) != 0)
- return (error);
+ /* Free current RX buffers. */
+ rx_buffer = adapter->rx_buffer_area;
+ for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
+ if (rx_buffer->m_head != NULL) {
+ bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
+ m_freem(rx_buffer->m_head);
+ rx_buffer->m_head = NULL;
+ }
+ }
+
+ /* Allocate new ones. */
+ for (i = 0; i < adapter->num_rx_desc; i++) {
+ error = em_get_buf(i, adapter, NULL);
+ if (error)
+ return (error);
+ }
/* Setup our descriptor pointers */
adapter->next_rx_desc_to_check = 0;
+ bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
@@ -3330,12 +3095,9 @@ em_initialize_receive_unit(struct adapter *adapter)
*/
E1000_WRITE_REG(&adapter->hw, RCTL, 0);
- /* Set the Receive Delay Timer Register */
- E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay.value | E1000_RDT_FPDB);
-
if(adapter->hw.mac_type >= em_82540) {
- E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay.value);
-
+ E1000_WRITE_REG(&adapter->hw, RADV,
+ adapter->rx_abs_int_delay.value);
/*
* Set the interrupt throttling rate. Value is calculated
* as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
@@ -3352,10 +3114,6 @@ em_initialize_receive_unit(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
- /* Setup the HW Rx Head and Tail Descriptor Pointers */
- E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
- E1000_WRITE_REG(&adapter->hw, RDH, 0);
-
/* Setup the Receive Control Register */
reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
E1000_RCTL_RDMTS_HALF |
@@ -3371,13 +3129,16 @@ em_initialize_receive_unit(struct adapter *adapter)
reg_rctl |= E1000_RCTL_SZ_2048;
break;
case EM_RXBUFFER_4096:
- reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ reg_rctl |= E1000_RCTL_SZ_4096 |
+ E1000_RCTL_BSEX | E1000_RCTL_LPE;
break;
case EM_RXBUFFER_8192:
- reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ reg_rctl |= E1000_RCTL_SZ_8192 |
+ E1000_RCTL_BSEX | E1000_RCTL_LPE;
break;
case EM_RXBUFFER_16384:
- reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
+ reg_rctl |= E1000_RCTL_SZ_16384 |
+ E1000_RCTL_BSEX | E1000_RCTL_LPE;
break;
}
@@ -3394,6 +3155,14 @@ em_initialize_receive_unit(struct adapter *adapter)
/* Enable Receives */
E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
+ /*
+ * Setup the HW Rx Head and
+ * Tail Descriptor Pointers
+ */
+ E1000_WRITE_REG(&adapter->hw, RDH, 0);
+ E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
+
+ return;
}
/*********************************************************************
@@ -3409,10 +3178,6 @@ em_free_receive_structures(struct adapter *adapter)
INIT_DEBUGOUT("free_receive_structures: begin");
- if (adapter->rx_sparemap) {
- bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
- adapter->rx_sparemap = NULL;
- }
if (adapter->rx_buffer_area != NULL) {
rx_buffer = adapter->rx_buffer_area;
for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
@@ -3526,9 +3291,14 @@ em_rxeof(struct adapter *adapter, int count)
}
if (accept_frame) {
- if (em_get_buf(adapter, i) != 0) {
- ifp->if_iqdrops++;
- goto discard;
+ if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
+ adapter->dropped_pkts++;
+ em_get_buf(i, adapter, mp);
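+				/*
+				 * Editorial note: reloading the old mbuf keeps
+				 * this descriptor slot populated when a
+				 * replacement cluster cannot be allocated.
+				 */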
+ if (adapter->fmp != NULL)
+ m_freem(adapter->fmp);
+ adapter->fmp = NULL;
+ adapter->lmp = NULL;
+ break;
}
/* Assign correct length to the current fragment */
@@ -3567,12 +3337,10 @@ em_rxeof(struct adapter *adapter, int count)
em_fixup_rx(adapter) != 0)
goto skip;
#endif
- if (status & E1000_RXD_STAT_VP) {
- adapter->fmp->m_pkthdr.ether_vtag =
+ if (status & E1000_RXD_STAT_VP)
+ VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
(le16toh(current_desc->special) &
- E1000_RXD_SPC_VLAN_MASK);
- adapter->fmp->m_flags |= M_VLANTAG;
- }
+ E1000_RXD_SPC_VLAN_MASK));
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
@@ -3581,21 +3349,12 @@ skip:
adapter->lmp = NULL;
}
} else {
- ifp->if_ierrors++;
-discard:
- /* Reuse loaded DMA map and just update mbuf chain */
- mp = adapter->rx_buffer_area[i].m_head;
- mp->m_len = mp->m_pkthdr.len = MCLBYTES;
- mp->m_data = mp->m_ext.ext_buf;
- mp->m_next = NULL;
- if (adapter->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
- m_adj(mp, ETHER_ALIGN);
- if (adapter->fmp != NULL) {
+ adapter->dropped_pkts++;
+ em_get_buf(i, adapter, mp);
+ if (adapter->fmp != NULL)
m_freem(adapter->fmp);
- adapter->fmp = NULL;
- adapter->lmp = NULL;
- }
- m = NULL;
+ adapter->fmp = NULL;
+ adapter->lmp = NULL;
}
/* Zero out the receive descriptors status. */
@@ -3608,13 +3367,9 @@ discard:
i = 0;
if (m != NULL) {
adapter->next_rx_desc_to_check = i;
-#ifdef DEVICE_POLLING
EM_UNLOCK(adapter);
(*ifp->if_input)(ifp, m);
EM_LOCK(adapter);
-#else
- (*ifp->if_input)(ifp, m);
-#endif
i = adapter->next_rx_desc_to_check;
}
current_desc = &adapter->rx_desc_base[i];
@@ -3668,12 +3423,10 @@ em_fixup_rx(struct adapter *adapter)
n->m_next = m;
adapter->fmp = n;
} else {
- adapter->ifp->if_iqdrops++;
- adapter->mbuf_alloc_failed++;
+ adapter->dropped_pkts++;
m_freem(adapter->fmp);
adapter->fmp = NULL;
- adapter->lmp = NULL;
- error = ENOBUFS;
+ error = ENOMEM;
}
}
@@ -3689,8 +3442,8 @@ em_fixup_rx(struct adapter *adapter)
*
*********************************************************************/
static void
-em_receive_checksum(struct adapter *adapter, struct em_rx_desc *rx_desc,
- struct mbuf *mp)
+em_receive_checksum(struct adapter *adapter,
+ struct em_rx_desc *rx_desc, struct mbuf *mp)
{
/* 82543 or newer only */
if ((adapter->hw.mac_type < em_82543) ||
@@ -3767,8 +3520,7 @@ em_disable_intr(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, IMC,
(0xffffffff & ~E1000_IMC_RXSEQ));
else
- E1000_WRITE_REG(&adapter->hw, IMC,
- 0xffffffff);
+ E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
}
static int
@@ -3809,6 +3561,37 @@ em_pci_clear_mwi(struct em_hw *hw)
(hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
}
+uint32_t
+em_io_read(struct em_hw *hw, unsigned long port)
+{
+ struct em_osdep *io = hw->back;
+ uint32_t ret;
+
+ ret = bus_space_read_4(io->io_bus_space_tag,
+ io->io_bus_space_handle, port);
+ return (ret);
+}
+
+void
+em_io_write(struct em_hw *hw, unsigned long port, uint32_t value)
+{
+ struct em_osdep *io = hw->back;
+
+ bus_space_write_4(io->io_bus_space_tag,
+ io->io_bus_space_handle, port, value);
+ return;
+}
+
+/*
+ * We may eventually really do this, but it's unnecessary
+ * for now, so we just return unsupported.
+ */
+int32_t
+em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
+{
+ return (0);
+}
+
/*********************************************************************
* 82544 Coexistence issue workaround.
* There are 2 issues.
@@ -3824,9 +3607,10 @@ em_pci_clear_mwi(struct em_hw *hw)
*
*
* WORKAROUND:
-* Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
+* Make sure we do not have an ending address
+* of 1,2,3,4 (Hang) or 9,a,b,c (DAC)
*
-*** *********************************************************************/
+*************************************************************************/
static uint32_t
em_fill_descriptors (bus_addr_t address, uint32_t length,
PDESC_ARRAY desc_array)
@@ -3840,7 +3624,8 @@ em_fill_descriptors (bus_addr_t address, uint32_t length,
desc_array->elements = 1;
return (desc_array->elements);
}
- safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
+ safe_terminator = (uint32_t)((((uint32_t)address & 0x7) +
+ (length & 0xF)) & 0xF);
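+	/*
+	 * Editorial illustration (not from the original patch): for a
+	 * hypothetical buffer at address 0x1002 with length 0x43, the line
+	 * above yields ((0x2 + 0x3) & 0xF) = 0x5, a safe terminator, so no
+	 * split is needed; a length of 0x42 would yield 0x4 and trigger the
+	 * workaround split.
+	 */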
/* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
if (safe_terminator == 0 ||
(safe_terminator > 4 &&
@@ -3933,25 +3718,32 @@ em_update_stats_counters(struct adapter *adapter)
adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
if (adapter->hw.mac_type >= em_82543) {
- adapter->stats.algnerrc += E1000_READ_REG(&adapter->hw, ALGNERRC);
- adapter->stats.rxerrc += E1000_READ_REG(&adapter->hw, RXERRC);
- adapter->stats.tncrs += E1000_READ_REG(&adapter->hw, TNCRS);
- adapter->stats.cexterr += E1000_READ_REG(&adapter->hw, CEXTERR);
- adapter->stats.tsctc += E1000_READ_REG(&adapter->hw, TSCTC);
- adapter->stats.tsctfc += E1000_READ_REG(&adapter->hw, TSCTFC);
+ adapter->stats.algnerrc +=
+ E1000_READ_REG(&adapter->hw, ALGNERRC);
+ adapter->stats.rxerrc +=
+ E1000_READ_REG(&adapter->hw, RXERRC);
+ adapter->stats.tncrs +=
+ E1000_READ_REG(&adapter->hw, TNCRS);
+ adapter->stats.cexterr +=
+ E1000_READ_REG(&adapter->hw, CEXTERR);
+ adapter->stats.tsctc +=
+ E1000_READ_REG(&adapter->hw, TSCTC);
+ adapter->stats.tsctfc +=
+ E1000_READ_REG(&adapter->hw, TSCTFC);
}
ifp = adapter->ifp;
ifp->if_collisions = adapter->stats.colc;
/* Rx Errors */
- ifp->if_ierrors = adapter->stats.rxerrc + adapter->stats.crcerrs +
- adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc +
+ ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
adapter->stats.mpc + adapter->stats.cexterr;
/* Tx Errors */
- ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
- adapter->watchdog_events;
+ ifp->if_oerrors = adapter->stats.ecol +
+ adapter->stats.latecol + adapter->watchdog_events;
}
@@ -4000,6 +3792,8 @@ em_print_debug_info(struct adapter *adapter)
adapter->mbuf_alloc_failed);
device_printf(dev, "Std mbuf cluster failed = %ld\n",
adapter->mbuf_cluster_failed);
+ device_printf(dev, "Driver dropped packets = %ld\n",
+ adapter->dropped_pkts);
}
static void
@@ -4009,13 +3803,16 @@ em_print_hw_stats(struct adapter *adapter)
device_printf(dev, "Excessive collisions = %lld\n",
(long long)adapter->stats.ecol);
+#if (DEBUG_HW > 0)  /* Don't output these errors normally */
device_printf(dev, "Symbol errors = %lld\n",
(long long)adapter->stats.symerrs);
+#endif
device_printf(dev, "Sequence errors = %lld\n",
(long long)adapter->stats.sec);
- device_printf(dev, "Defer count = %lld\n", (long long)adapter->stats.dc);
-
- device_printf(dev, "Missed Packets = %lld\n", (long long)adapter->stats.mpc);
+ device_printf(dev, "Defer count = %lld\n",
+ (long long)adapter->stats.dc);
+ device_printf(dev, "Missed Packets = %lld\n",
+ (long long)adapter->stats.mpc);
device_printf(dev, "Receive No Buffers = %lld\n",
(long long)adapter->stats.rnbc);
/* RLEC is inaccurate on some hardware, calculate our own. */
@@ -4023,27 +3820,33 @@ em_print_hw_stats(struct adapter *adapter)
((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
device_printf(dev, "Receive errors = %lld\n",
(long long)adapter->stats.rxerrc);
- device_printf(dev, "Crc errors = %lld\n", (long long)adapter->stats.crcerrs);
+ device_printf(dev, "Crc errors = %lld\n",
+ (long long)adapter->stats.crcerrs);
device_printf(dev, "Alignment errors = %lld\n",
(long long)adapter->stats.algnerrc);
device_printf(dev, "Carrier extension errors = %lld\n",
(long long)adapter->stats.cexterr);
device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
- device_printf(dev, "watchdog timeouts = %ld\n", adapter->watchdog_events);
-
- device_printf(dev, "XON Rcvd = %lld\n", (long long)adapter->stats.xonrxc);
- device_printf(dev, "XON Xmtd = %lld\n", (long long)adapter->stats.xontxc);
- device_printf(dev, "XOFF Rcvd = %lld\n", (long long)adapter->stats.xoffrxc);
- device_printf(dev, "XOFF Xmtd = %lld\n", (long long)adapter->stats.xofftxc);
-
+ device_printf(dev, "watchdog timeouts = %ld\n",
+ adapter->watchdog_events);
+ device_printf(dev, "XON Rcvd = %lld\n",
+ (long long)adapter->stats.xonrxc);
+ device_printf(dev, "XON Xmtd = %lld\n",
+ (long long)adapter->stats.xontxc);
+ device_printf(dev, "XOFF Rcvd = %lld\n",
+ (long long)adapter->stats.xoffrxc);
+ device_printf(dev, "XOFF Xmtd = %lld\n",
+ (long long)adapter->stats.xofftxc);
device_printf(dev, "Good Packets Rcvd = %lld\n",
(long long)adapter->stats.gprc);
device_printf(dev, "Good Packets Xmtd = %lld\n",
(long long)adapter->stats.gptc);
+#ifdef EM_TSO
device_printf(dev, "TSO Contexts Xmtd = %lld\n",
(long long)adapter->stats.tsctc);
device_printf(dev, "TSO Contexts Failed = %lld\n",
(long long)adapter->stats.tsctfc);
+#endif
}
static int
@@ -4148,15 +3951,3 @@ em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
info, 0, em_sysctl_int_delay, "I", description);
}
-
-#ifndef DEVICE_POLLING
-static void
-em_add_int_process_limit(struct adapter *adapter, const char *name,
- const char *description, int *limit, int value)
-{
- *limit = value;
- SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
- OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
-}
-#endif