summaryrefslogtreecommitdiffstats
path: root/sys/dev
diff options
context:
space:
mode:
authorjfv <jfv@FreeBSD.org>2008-11-26 23:41:18 +0000
committerjfv <jfv@FreeBSD.org>2008-11-26 23:41:18 +0000
commit62188214bdac09b8b3fe223bd994d8aef28db6db (patch)
treeda2cf29f3a19749ccacb115e679f319ba8aeae8f /sys/dev
parent8c32a341579fa9f728f1a37365754b5507e3c538 (diff)
downloadFreeBSD-src-62188214bdac09b8b3fe223bd994d8aef28db6db.zip
FreeBSD-src-62188214bdac09b8b3fe223bd994d8aef28db6db.tar.gz
Updated ixgbe driver - version 1.6.2
This version has header split, and as a result a number of aspects of the code have been improved/simplified:
- Interrupt handling refined for performance
- Many small bugs fixed along the way
MFC after: ASAP - in time for 7.1
Diffstat (limited to 'sys/dev')
-rw-r--r--sys/dev/ixgbe/ixgbe.c1253
-rw-r--r--sys/dev/ixgbe/ixgbe.h76
-rw-r--r--sys/dev/ixgbe/ixgbe_82598.c777
-rw-r--r--sys/dev/ixgbe/ixgbe_api.c43
-rw-r--r--sys/dev/ixgbe/ixgbe_api.h2
-rw-r--r--sys/dev/ixgbe/ixgbe_common.c235
-rw-r--r--sys/dev/ixgbe/ixgbe_common.h5
-rw-r--r--sys/dev/ixgbe/ixgbe_osdep.h17
-rw-r--r--sys/dev/ixgbe/ixgbe_phy.c275
-rw-r--r--sys/dev/ixgbe/ixgbe_phy.h51
-rw-r--r--sys/dev/ixgbe/ixgbe_type.h291
11 files changed, 1999 insertions, 1026 deletions
diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/ixgbe.c
index 26f4066..81100fa 100644
--- a/sys/dev/ixgbe/ixgbe.c
+++ b/sys/dev/ixgbe/ixgbe.c
@@ -36,9 +36,6 @@
#include "opt_device_polling.h"
#endif
-/* Undefine this if not using CURRENT */
-#define IXGBE_VLAN_EVENTS
-
#include "ixgbe.h"
/*********************************************************************
@@ -49,7 +46,7 @@ int ixgbe_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
-char ixgbe_driver_version[] = "1.4.7";
+char ixgbe_driver_version[] = "1.6.2";
/*********************************************************************
* PCI Device ID Table
@@ -65,11 +62,15 @@ static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT_DUAL_PORT, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -128,14 +129,14 @@ static void ixgbe_disable_intr(struct adapter *);
static void ixgbe_update_stats_counters(struct adapter *);
static bool ixgbe_txeof(struct tx_ring *);
static bool ixgbe_rxeof(struct rx_ring *, int);
-static void ixgbe_rx_checksum(struct adapter *, u32, struct mbuf *);
+static void ixgbe_rx_checksum(u32, struct mbuf *);
static void ixgbe_set_promisc(struct adapter *);
static void ixgbe_disable_promisc(struct adapter *);
static void ixgbe_set_multi(struct adapter *);
static void ixgbe_print_hw_stats(struct adapter *);
static void ixgbe_print_debug_info(struct adapter *);
static void ixgbe_update_link_status(struct adapter *);
-static int ixgbe_get_buf(struct rx_ring *, int);
+static int ixgbe_get_buf(struct rx_ring *, int, u8);
static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
static int ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
@@ -147,15 +148,20 @@ static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
const char *, int *, int);
static boolean_t ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static boolean_t ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
-static void ixgbe_set_ivar(struct adapter *, u16, u8);
+static void ixgbe_set_ivar(struct adapter *, u16, u8, s8);
static void ixgbe_configure_ivars(struct adapter *);
static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
-#ifdef IXGBE_VLAN_EVENTS
+#ifdef IXGBE_HW_VLAN_SUPPORT
static void ixgbe_register_vlan(void *, struct ifnet *, u16);
static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
#endif
+static void ixgbe_update_aim(struct rx_ring *);
+
+/* Support for pluggable optic modules */
+static bool ixgbe_sfp_probe(struct adapter *);
+
/* Legacy (single vector interrupt handler */
static void ixgbe_legacy_irq(void *);
@@ -168,9 +174,6 @@ static void ixgbe_msix_link(void *);
static void ixgbe_handle_tx(void *context, int pending);
static void ixgbe_handle_rx(void *context, int pending);
-#ifndef NO_82598_A0_SUPPORT
-static void desc_flip(void *);
-#endif
/*********************************************************************
* FreeBSD Device Interface Entry Points
@@ -199,12 +202,28 @@ MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
** TUNEABLE PARAMETERS:
*/
+/*
+** These parameters are used in Adaptive
+** Interrupt Moderation. The value is set
+** into EITR and controls the interrupt
+** frequency. They can be modified but
+** be careful in tuning them.
+*/
+static int ixgbe_enable_aim = TRUE;
+TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
+static int ixgbe_low_latency = IXGBE_LOW_LATENCY;
+TUNABLE_INT("hw.ixgbe.low_latency", &ixgbe_low_latency);
+static int ixgbe_ave_latency = IXGBE_LOW_LATENCY;
+TUNABLE_INT("hw.ixgbe.ave_latency", &ixgbe_low_latency);
+static int ixgbe_bulk_latency = IXGBE_BULK_LATENCY;
+TUNABLE_INT("hw.ixgbe.bulk_latency", &ixgbe_bulk_latency);
+
/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 100;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
/* Flow control setting, default to full */
-static int ixgbe_flow_control = 3;
+static int ixgbe_flow_control = ixgbe_fc_none;
TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
/*
@@ -213,7 +232,7 @@ TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
* interface must be reset (down/up) for it
* to take effect.
*/
-static int ixgbe_enable_lro = 0;
+static int ixgbe_enable_lro = 1;
TUNABLE_INT("hw.ixgbe.enable_lro", &ixgbe_enable_lro);
/*
@@ -224,12 +243,18 @@ static int ixgbe_enable_msix = 1;
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
/*
+ * Enable RX Header Split
+ */
+static int ixgbe_rx_hdr_split = 1;
+TUNABLE_INT("hw.ixgbe.rx_hdr_split", &ixgbe_rx_hdr_split);
+
+/*
* Number of TX/RX Queues, with 0 setting
* it autoconfigures to the number of cpus.
*/
static int ixgbe_tx_queues = 1;
TUNABLE_INT("hw.ixgbe.tx_queues", &ixgbe_tx_queues);
-static int ixgbe_rx_queues = 4;
+static int ixgbe_rx_queues = 1;
TUNABLE_INT("hw.ixgbe.rx_queues", &ixgbe_rx_queues);
/* Number of TX descriptors per ring */
@@ -243,9 +268,6 @@ TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
/* Total number of Interfaces - need for config sanity check */
static int ixgbe_total_ports;
-/* Optics type of this interface */
-static int ixgbe_optics;
-
/*********************************************************************
* Device identification routine
*
@@ -260,11 +282,11 @@ ixgbe_probe(device_t dev)
{
ixgbe_vendor_info_t *ent;
- u_int16_t pci_vendor_id = 0;
- u_int16_t pci_device_id = 0;
- u_int16_t pci_subvendor_id = 0;
- u_int16_t pci_subdevice_id = 0;
- char adapter_name[128];
+ u16 pci_vendor_id = 0;
+ u16 pci_device_id = 0;
+ u16 pci_subvendor_id = 0;
+ u16 pci_subdevice_id = 0;
+ char adapter_name[256];
INIT_DEBUGOUT("ixgbe_probe: begin");
@@ -289,41 +311,11 @@ ixgbe_probe(device_t dev)
sprintf(adapter_name, "%s, Version - %s",
ixgbe_strings[ent->index],
ixgbe_driver_version);
- switch (pci_device_id) {
- case IXGBE_DEV_ID_82598AT_DUAL_PORT :
- ixgbe_total_ports += 2;
- break;
- case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
- ixgbe_optics = IFM_10G_CX4;
- ixgbe_total_ports += 2;
- break;
- case IXGBE_DEV_ID_82598AF_DUAL_PORT :
- ixgbe_optics = IFM_10G_SR;
- ixgbe_total_ports += 2;
- break;
- case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
- ixgbe_optics = IFM_10G_SR;
- ixgbe_total_ports += 1;
- break;
- case IXGBE_DEV_ID_82598EB_XF_LR :
- ixgbe_optics = IFM_10G_LR;
- ixgbe_total_ports += 1;
- break;
- case IXGBE_DEV_ID_82598EB_CX4 :
- ixgbe_optics = IFM_10G_CX4;
- ixgbe_total_ports += 1;
- break;
- case IXGBE_DEV_ID_82598AT :
- ixgbe_total_ports += 1;
- default:
- break;
- }
device_set_desc_copy(dev, adapter_name);
return (0);
}
ent++;
}
-
return (ENXIO);
}
@@ -342,7 +334,8 @@ ixgbe_attach(device_t dev)
{
struct adapter *adapter;
int error = 0;
- u32 ctrl_ext;
+ u16 pci_device_id;
+ u32 ctrl_ext;
INIT_DEBUGOUT("ixgbe_attach: begin");
@@ -353,6 +346,37 @@ ixgbe_attach(device_t dev)
/* Core Lock Init*/
IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
+ /* Keep track of number of ports and optics */
+ pci_device_id = pci_get_device(dev);
+ switch (pci_device_id) {
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
+ adapter->optics = IFM_10G_CX4;
+ ixgbe_total_ports += 2;
+ break;
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT :
+ adapter->optics = IFM_10G_SR;
+ ixgbe_total_ports += 2;
+ break;
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
+ adapter->optics = IFM_10G_SR;
+ ixgbe_total_ports += 1;
+ break;
+ case IXGBE_DEV_ID_82598EB_XF_LR :
+ adapter->optics = IFM_10G_LR;
+ ixgbe_total_ports += 1;
+ break;
+ case IXGBE_DEV_ID_82598EB_CX4 :
+ adapter->optics = IFM_10G_CX4;
+ ixgbe_total_ports += 1;
+ break;
+ case IXGBE_DEV_ID_82598AT :
+ ixgbe_total_ports += 1;
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT :
+ ixgbe_total_ports += 2;
+ default:
+ break;
+ }
+
/* SYSCTL APIs */
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
@@ -374,15 +398,37 @@ ixgbe_attach(device_t dev)
OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
&ixgbe_enable_lro, 1, "Large Receive Offload");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
+ &ixgbe_enable_aim, 1, "Interrupt Moderation");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "low_latency", CTLTYPE_INT|CTLFLAG_RW,
+ &ixgbe_low_latency, 1, "Low Latency");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "ave_latency", CTLTYPE_INT|CTLFLAG_RW,
+ &ixgbe_ave_latency, 1, "Average Latency");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "bulk_latency", CTLTYPE_INT|CTLFLAG_RW,
+ &ixgbe_bulk_latency, 1, "Bulk Latency");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "hdr_split", CTLTYPE_INT|CTLFLAG_RW,
+ &ixgbe_rx_hdr_split, 1, "RX Header Split");
+
/* Set up the timer callout */
callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
/* Determine hardware revision */
ixgbe_identify_hardware(adapter);
- /* Indicate to RX setup to use Jumbo Clusters */
- adapter->bigbufs = TRUE;
-
/* Do base PCI setup - map BAR0 */
if (ixgbe_allocate_pci_resources(adapter)) {
device_printf(dev, "Allocation of PCI resources failed\n");
@@ -428,7 +474,20 @@ ixgbe_attach(device_t dev)
}
/* Initialize the shared code */
- if (ixgbe_init_shared_code(&adapter->hw)) {
+ error = ixgbe_init_shared_code(&adapter->hw);
+ if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
+ /*
+ ** No optics in this port, set up
+ ** so the timer routine will probe
+ ** for later insertion.
+ */
+ adapter->sfp_probe = TRUE;
+ error = 0;
+ } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ device_printf(dev,"Unsupported SFP+ module detected!\n");
+ error = EIO;
+ goto err_late;
+ } else if (error) {
device_printf(dev,"Unable to initialize the shared code\n");
error = EIO;
goto err_late;
@@ -459,14 +518,14 @@ ixgbe_attach(device_t dev)
/* Initialize statistics */
ixgbe_update_stats_counters(adapter);
-#ifdef IXGBE_VLAN_EVENTS
+#ifdef IXGBE_HW_VLAN_SUPPORT
/* Register for VLAN events */
adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
ixgbe_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
ixgbe_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
#endif
-
+
/* let hardware know driver is loaded */
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
@@ -521,7 +580,6 @@ ixgbe_detach(device_t dev)
if (txr->tq) {
taskqueue_drain(txr->tq, &txr->tx_task);
taskqueue_free(txr->tq);
- txr->tq = NULL;
}
}
@@ -529,22 +587,21 @@ ixgbe_detach(device_t dev)
if (rxr->tq) {
taskqueue_drain(rxr->tq, &rxr->rx_task);
taskqueue_free(rxr->tq);
- rxr->tq = NULL;
}
}
-#ifdef IXGBE_VLAN_EVENTS
+ /* let hardware know driver is unloading */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+#ifdef IXGBE_HW_VLAN_SUPPORT
/* Unregister VLAN events */
if (adapter->vlan_attach != NULL)
EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
if (adapter->vlan_detach != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
-#endif
-
- /* let hardware know driver is unloading */
- ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
- ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
+#endif
ether_ifdetach(adapter->ifp);
callout_drain(&adapter->timer);
@@ -848,10 +905,13 @@ ixgbe_watchdog(struct adapter *adapter)
static void
ixgbe_init_locked(struct adapter *adapter)
{
+ struct rx_ring *rxr = adapter->rx_rings;
+ struct tx_ring *txr = adapter->tx_rings;
struct ifnet *ifp = adapter->ifp;
device_t dev = adapter->dev;
struct ixgbe_hw *hw;
- u32 txdctl, rxdctl, mhadd, gpie;
+ u32 k, txdctl, mhadd, gpie;
+ u32 rxdctl, rxctrl;
INIT_DEBUGOUT("ixgbe_init: begin");
@@ -872,17 +932,16 @@ ixgbe_init_locked(struct adapter *adapter)
return;
}
-#ifndef IXGBE_VLAN_EVENTS
- /* With events this is done when a vlan registers */
+#ifndef IXGBE_HW_VLAN_SUPPORT
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
- u32 ctrl;
+ u32 ctrl;
+
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME;
ctrl &= ~IXGBE_VLNCTRL_CFIEN;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
}
#endif
-
/* Prepare transmit descriptors and buffers */
if (ixgbe_setup_transmit_structures(adapter)) {
device_printf(dev,"Could not setup transmit structures\n");
@@ -892,15 +951,24 @@ ixgbe_init_locked(struct adapter *adapter)
ixgbe_initialize_transmit_units(adapter);
+ /* TX irq moderation rate is fixed */
+ for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ IXGBE_WRITE_REG(&adapter->hw,
+ IXGBE_EITR(txr->msix), ixgbe_ave_latency);
+ txr->watchdog_timer = FALSE;
+ }
+
/* Setup Multicast table */
ixgbe_set_multi(adapter);
/*
- ** If we are resetting MTU smaller than 2K
- ** drop to small RX buffers
+ ** Determine the correct mbuf pool
+ ** for doing jumbo/headersplit
*/
- if (adapter->max_frame_size <= MCLBYTES)
- adapter->bigbufs = FALSE;
+ if (ifp->if_mtu > ETHERMTU)
+ adapter->rx_mbuf_sz = MJUMPAGESIZE;
+ else
+ adapter->rx_mbuf_sz = MCLBYTES;
/* Prepare receive descriptors and buffers */
if (ixgbe_setup_receive_structures(adapter)) {
@@ -912,10 +980,22 @@ ixgbe_init_locked(struct adapter *adapter)
/* Configure RX settings */
ixgbe_initialize_receive_units(adapter);
+ /* RX moderation will be adapted over time, set default */
+ for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
+ IXGBE_WRITE_REG(&adapter->hw,
+ IXGBE_EITR(rxr->msix), ixgbe_low_latency);
+ }
+
+ /* Set Link moderation */
+ IXGBE_WRITE_REG(&adapter->hw,
+ IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
+
gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
+
/* Enable Fan Failure Interrupt */
if (adapter->hw.phy.media_type == ixgbe_media_type_copper)
gpie |= IXGBE_SDP1_GPIEN;
+
if (adapter->msix) {
/* Enable Enhanced MSIX mode */
gpie |= IXGBE_GPIE_MSIX_MODE;
@@ -955,12 +1035,29 @@ ixgbe_init_locked(struct adapter *adapter)
rxdctl |= 0x0020;
rxdctl |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
+ for (k = 0; k < 10; k++) {
+ if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
+ IXGBE_RXDCTL_ENABLE)
+ break;
+ else
+ msec_delay(1);
+ }
+ wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
}
+ /* Enable Receive engine */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ rxctrl |= IXGBE_RXCTRL_DMBYPS;
+ rxctrl |= IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+
callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
/* Set up MSI/X routing */
- ixgbe_configure_ivars(adapter);
+ if (ixgbe_enable_msix)
+ ixgbe_configure_ivars(adapter);
ixgbe_enable_intr(adapter);
@@ -984,7 +1081,7 @@ ixgbe_init(void *arg)
/*
-** Legacy Deferred Interrupt Handlers
+** MSIX Interrupt Handlers
*/
static void
@@ -992,11 +1089,14 @@ ixgbe_handle_rx(void *context, int pending)
{
struct rx_ring *rxr = context;
struct adapter *adapter = rxr->adapter;
- u32 loop = 0;
+ u32 loop = MAX_LOOP;
+ bool more;
- while (loop++ < MAX_INTR)
- if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
- break;
+ do {
+ more = ixgbe_rxeof(rxr, -1);
+ } while (loop-- && more);
+ /* Reenable this interrupt */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
}
static void
@@ -1005,15 +1105,21 @@ ixgbe_handle_tx(void *context, int pending)
struct tx_ring *txr = context;
struct adapter *adapter = txr->adapter;
struct ifnet *ifp = adapter->ifp;
- u32 loop = 0;
+ u32 loop = MAX_LOOP;
+ bool more;
- IXGBE_TX_LOCK(txr);
- while (loop++ < MAX_INTR)
- if (ixgbe_txeof(txr) == 0)
- break;
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- ixgbe_start_locked(txr, ifp);
- IXGBE_TX_UNLOCK(txr);
+ IXGBE_TX_LOCK(txr);
+ do {
+ more = ixgbe_txeof(txr);
+ } while (loop-- && more);
+
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ ixgbe_start_locked(txr, ifp);
+
+ IXGBE_TX_UNLOCK(txr);
+
+ /* Reenable this interrupt */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
}
@@ -1026,34 +1132,38 @@ ixgbe_handle_tx(void *context, int pending)
static void
ixgbe_legacy_irq(void *arg)
{
- u32 reg_eicr;
struct adapter *adapter = arg;
+ struct ixgbe_hw *hw = &adapter->hw;
struct tx_ring *txr = adapter->tx_rings;
struct rx_ring *rxr = adapter->rx_rings;
- struct ixgbe_hw *hw;
+ u32 reg_eicr;
- hw = &adapter->hw;
- reg_eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
- if (reg_eicr == 0)
+
+ reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+
+ if (reg_eicr == 0) {
+ ixgbe_enable_intr(adapter);
return;
+ }
- if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0)
+ if (ixgbe_rxeof(rxr, adapter->rx_process_limit))
taskqueue_enqueue(rxr->tq, &rxr->rx_task);
- if (ixgbe_txeof(txr) != 0)
- taskqueue_enqueue(txr->tq, &txr->tx_task);
+ if (ixgbe_txeof(txr))
+ taskqueue_enqueue(txr->tq, &txr->tx_task);
/* Check for fan failure */
if ((hw->phy.media_type == ixgbe_media_type_copper) &&
(reg_eicr & IXGBE_EICR_GPI_SDP1)) {
device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
"REPLACE IMMEDIATELY!!\n");
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
- IXGBE_EICR_GPI_SDP1);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
}
+
/* Link status change */
if (reg_eicr & IXGBE_EICR_LSC)
ixgbe_update_link_status(adapter);
+ ixgbe_enable_intr(adapter);
return;
}
@@ -1067,25 +1177,25 @@ ixgbe_legacy_irq(void *arg)
void
ixgbe_msix_tx(void *arg)
{
- struct tx_ring *txr = arg;
- struct adapter *adapter = txr->adapter;
- u32 loop = 0;
+ struct tx_ring *txr = arg;
+ struct adapter *adapter = txr->adapter;
+ bool more;
- ++txr->tx_irq;
IXGBE_TX_LOCK(txr);
- while (loop++ < MAX_INTR)
- if (ixgbe_txeof(txr) == 0)
- break;
+ ++txr->tx_irq;
+ more = ixgbe_txeof(txr);
IXGBE_TX_UNLOCK(txr);
- /* Reenable this interrupt */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
-
+ if (more)
+ taskqueue_enqueue(txr->tq, &txr->tx_task);
+ else /* Reenable this interrupt */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
return;
}
+
/*********************************************************************
*
- * MSI RX Interrupt Service routine
+ * MSIX RX Interrupt Service routine
*
**********************************************************************/
@@ -1093,18 +1203,71 @@ static void
ixgbe_msix_rx(void *arg)
{
struct rx_ring *rxr = arg;
- struct adapter *adapter = rxr->adapter;
- u32 loop = 0;
+ struct adapter *adapter = rxr->adapter;
+ bool more;
++rxr->rx_irq;
- while (loop++ < MAX_INTR)
- if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
- break;
- /* Reenable this interrupt */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
+ more = ixgbe_rxeof(rxr, -1);
+ if (more)
+ taskqueue_enqueue(rxr->tq, &rxr->rx_task);
+ else
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
+ /* Update interrupt rate */
+ if (ixgbe_enable_aim == TRUE)
+ ixgbe_update_aim(rxr);
return;
}
+/*
+** Routine to do adjust the RX EITR value based on traffic,
+** its a simple three state model, but seems to help.
+**
+** Note that the three EITR values are tuneable using
+** sysctl in real time. The feature can be effectively
+** nullified by setting them equal.
+*/
+#define BULK_THRESHOLD 10000
+#define AVE_THRESHOLD 1600
+
+static void
+ixgbe_update_aim(struct rx_ring *rxr)
+{
+ struct adapter *adapter = rxr->adapter;
+ u32 olditr, newitr;
+
+ /* Update interrupt moderation based on traffic */
+ olditr = rxr->eitr_setting;
+ newitr = olditr;
+
+ /* Idle, don't change setting */
+ if (rxr->bytes == 0)
+ return;
+
+ if (olditr == ixgbe_low_latency) {
+ if (rxr->bytes > AVE_THRESHOLD)
+ newitr = ixgbe_ave_latency;
+ } else if (olditr == ixgbe_ave_latency) {
+ if (rxr->bytes < AVE_THRESHOLD)
+ newitr = ixgbe_low_latency;
+ else if (rxr->bytes > BULK_THRESHOLD)
+ newitr = ixgbe_bulk_latency;
+ } else if (olditr == ixgbe_bulk_latency) {
+ if (rxr->bytes < BULK_THRESHOLD)
+ newitr = ixgbe_ave_latency;
+ }
+
+ if (olditr != newitr) {
+ /* Change interrupt rate */
+ rxr->eitr_setting = newitr;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rxr->me),
+ newitr | (newitr << 16));
+ }
+
+ rxr->bytes = 0;
+ return;
+}
+
+
static void
ixgbe_msix_link(void *arg)
{
@@ -1164,7 +1327,7 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
break;
case IXGBE_LINK_SPEED_10GB_FULL:
- ifmr->ifm_active |= ixgbe_optics | IFM_FDX;
+ ifmr->ifm_active |= adapter->optics | IFM_FDX;
break;
}
@@ -1220,7 +1383,7 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
struct adapter *adapter = txr->adapter;
u32 olinfo_status = 0, cmd_type_len = 0;
- u32 paylen;
+ u32 paylen = 0;
int i, j, error, nsegs;
int first, last = 0;
struct mbuf *m_head;
@@ -1230,7 +1393,6 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
union ixgbe_adv_tx_desc *txd = NULL;
m_head = *m_headp;
- paylen = 0;
/* Basic descriptor defines */
cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
@@ -1274,7 +1436,7 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
m = m_defrag(*m_headp, M_DONTWAIT);
if (m == NULL) {
- adapter->mbuf_alloc_failed++;
+ adapter->mbuf_defrag_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (ENOBUFS);
@@ -1326,6 +1488,11 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
} else if (ixgbe_tx_ctx_setup(txr, m_head))
olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+ /* Record payload length */
+ if (paylen == 0)
+ olinfo_status |= m_head->m_pkthdr.len <<
+ IXGBE_ADVTXD_PAYLEN_SHIFT;
+
i = txr->next_avail_tx_desc;
for (j = 0; j < nsegs; j++) {
bus_size_t seglen;
@@ -1346,19 +1513,10 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
i = 0;
txbuf->m_head = NULL;
- /*
- ** we have to do this inside the loop right now
- ** because of the hardware workaround.
- */
- if (j == (nsegs -1)) /* Last descriptor gets EOP and RS */
- txd->read.cmd_type_len |=
- htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
-#ifndef NO_82598_A0_SUPPORT
- if (adapter->hw.revision_id == 0)
- desc_flip(txd);
-#endif
}
+ txd->read.cmd_type_len |=
+ htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
txr->tx_avail -= nsegs;
txr->next_avail_tx_desc = i;
@@ -1375,8 +1533,8 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
* Advance the Transmit Descriptor Tail (Tdt), this tells the
* hardware that this frame is available to transmit.
*/
+ ++txr->total_packets;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
- ++txr->tx_packets;
return (0);
xmit_fail:
@@ -1504,17 +1662,26 @@ ixgbe_local_timer(void *arg)
mtx_assert(&adapter->core_mtx, MA_OWNED);
+ /* Check for pluggable optics */
+ if (adapter->sfp_probe)
+ if (!ixgbe_sfp_probe(adapter))
+ goto out; /* Nothing to do */
+
ixgbe_update_link_status(adapter);
ixgbe_update_stats_counters(adapter);
if (ixgbe_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
ixgbe_print_hw_stats(adapter);
}
/*
- * Each second we check the watchdog
+ * Each tick we check the watchdog
* to protect against hardware hangs.
*/
ixgbe_watchdog(adapter);
+out:
+ /* Trigger an RX interrupt on all queues */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, adapter->rx_mask);
+
callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
}
@@ -1701,6 +1868,11 @@ ixgbe_allocate_msix(struct adapter *adapter)
}
txr->msix = vector;
txr->eims = IXGBE_IVAR_TX_QUEUE(vector);
+ TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
+ txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
+ taskqueue_thread_enqueue, &txr->tq);
+ taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
+ device_get_nameunit(adapter->dev));
}
/* RX setup */
@@ -1725,6 +1897,13 @@ ixgbe_allocate_msix(struct adapter *adapter)
}
rxr->msix = vector;
rxr->eims = IXGBE_IVAR_RX_QUEUE(vector);
+ /* used in local timer */
+ adapter->rx_mask |= rxr->eims;
+ TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
+ rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
+ taskqueue_thread_enqueue, &rxr->tq);
+ taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
+ device_get_nameunit(adapter->dev));
}
/* Now for Link changes */
@@ -1759,11 +1938,20 @@ ixgbe_setup_msix(struct adapter *adapter)
device_t dev = adapter->dev;
int rid, want, queues, msgs;
+ /* Override by tuneable */
+ if (ixgbe_enable_msix == 0)
+ goto msi;
+
/* First try MSI/X */
- rid = PCIR_BAR(IXGBE_MSIX_BAR);
+ rid = PCIR_BAR(MSIX_82598_BAR);
adapter->msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (!adapter->msix_mem) {
+ rid += 4; /* 82599 maps in higher BAR */
+ adapter->msix_mem = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ }
+ if (!adapter->msix_mem) {
/* May not be enabled */
device_printf(adapter->dev,
"Unable to map MSIX table \n");
@@ -1773,7 +1961,7 @@ ixgbe_setup_msix(struct adapter *adapter)
msgs = pci_msix_count(dev);
if (msgs == 0) { /* system has msix disabled */
bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(IXGBE_MSIX_BAR), adapter->msix_mem);
+ rid, adapter->msix_mem);
adapter->msix_mem = NULL;
goto msi;
}
@@ -1853,7 +2041,8 @@ ixgbe_allocate_pci_resources(struct adapter *adapter)
static void
ixgbe_free_pci_resources(struct adapter * adapter)
{
- device_t dev = adapter->dev;
+ device_t dev = adapter->dev;
+ int rid;
/*
* Legacy has this set to 0, but we need
@@ -1862,6 +2051,8 @@ ixgbe_free_pci_resources(struct adapter * adapter)
if (adapter->msix == 0)
adapter->msix = 1;
+ rid = PCIR_BAR(MSIX_82598_BAR);
+
/*
* First release all the interrupt resources:
* notice that since these are just kept
@@ -1885,7 +2076,7 @@ ixgbe_free_pci_resources(struct adapter * adapter)
if (adapter->msix_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(IXGBE_MSIX_BAR), adapter->msix_mem);
+ rid, adapter->msix_mem);
if (adapter->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -1920,7 +2111,7 @@ ixgbe_hardware_init(struct adapter *adapter)
}
/* Get Hardware Flow Control setting */
- adapter->hw.fc.type = ixgbe_fc_full;
+ adapter->hw.fc.requested_mode = ixgbe_fc_full;
adapter->hw.fc.pause_time = IXGBE_FC_PAUSE;
adapter->hw.fc.low_water = IXGBE_FC_LO;
adapter->hw.fc.high_water = IXGBE_FC_HI;
@@ -1977,8 +2168,7 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capenable = ifp->if_capabilities;
- if ((hw->device_id == IXGBE_DEV_ID_82598AT) ||
- (hw->device_id == IXGBE_DEV_ID_82598AT_DUAL_PORT))
+ if (hw->device_id == IXGBE_DEV_ID_82598AT)
ixgbe_setup_link_speed(hw, (IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL), TRUE, TRUE);
else
@@ -1991,10 +2181,9 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
*/
ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
ixgbe_media_status);
- ifmedia_add(&adapter->media, IFM_ETHER | ixgbe_optics |
+ ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics |
IFM_FDX, 0, NULL);
- if ((hw->device_id == IXGBE_DEV_ID_82598AT) ||
- (hw->device_id == IXGBE_DEV_ID_82598AT_DUAL_PORT)) {
+ if (hw->device_id == IXGBE_DEV_ID_82598AT) {
ifmedia_add(&adapter->media,
IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
ifmedia_add(&adapter->media,
@@ -2095,7 +2284,6 @@ ixgbe_allocate_queues(struct adapter *adapter)
struct tx_ring *txr;
struct rx_ring *rxr;
int rsize, tsize, error = IXGBE_SUCCESS;
- char name_string[16];
int txconf = 0, rxconf = 0;
/* First allocate the TX ring struct memory */
@@ -2134,9 +2322,9 @@ ixgbe_allocate_queues(struct adapter *adapter)
txr->me = i;
/* Initialize the TX side lock */
- snprintf(name_string, sizeof(name_string), "%s:tx(%d)",
+ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
device_get_nameunit(dev), txr->me);
- mtx_init(&txr->tx_mtx, name_string, NULL, MTX_DEF);
+ mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
if (ixgbe_dma_malloc(adapter, tsize,
&txr->txdma, BUS_DMA_NOWAIT)) {
@@ -2169,10 +2357,10 @@ ixgbe_allocate_queues(struct adapter *adapter)
rxr->adapter = adapter;
rxr->me = i;
- /* Initialize the TX side lock */
- snprintf(name_string, sizeof(name_string), "%s:rx(%d)",
+ /* Initialize the RX side lock */
+ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
device_get_nameunit(dev), rxr->me);
- mtx_init(&rxr->rx_mtx, name_string, NULL, MTX_DEF);
+ mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
if (ixgbe_dma_malloc(adapter, rsize,
&rxr->rxdma, BUS_DMA_NOWAIT)) {
@@ -2554,11 +2742,6 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
TXD->seqnum_seed = htole32(0);
TXD->mss_l4len_idx = htole32(0);
-#ifndef NO_82598_A0_SUPPORT
- if (adapter->hw.revision_id == 0)
- desc_flip(TXD);
-#endif
-
tx_buffer->m_head = NULL;
/* We've consumed the first desc, adjust counters */
@@ -2652,11 +2835,6 @@ ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
TXD->seqnum_seed = htole32(0);
tx_buffer->m_head = NULL;
-#ifndef NO_82598_A0_SUPPORT
- if (adapter->hw.revision_id == 0)
- desc_flip(TXD);
-#endif
-
if (++ctxd == adapter->num_tx_desc)
ctxd = 0;
@@ -2778,76 +2956,110 @@ ixgbe_txeof(struct tx_ring *txr)
*
**********************************************************************/
static int
-ixgbe_get_buf(struct rx_ring *rxr, int i)
+ixgbe_get_buf(struct rx_ring *rxr, int i, u8 clean)
{
- struct adapter *adapter = rxr->adapter;
- struct mbuf *mp;
- bus_dmamap_t map;
- int nsegs, error, old, s = 0;
- int size = MCLBYTES;
-
-
- bus_dma_segment_t segs[1];
+ struct adapter *adapter = rxr->adapter;
+ bus_dma_segment_t seg[2];
struct ixgbe_rx_buf *rxbuf;
+ struct mbuf *mh, *mp;
+ bus_dmamap_t map;
+ int nsegs, error;
+ int merr = 0;
- /* Are we going to Jumbo clusters? */
- if (adapter->bigbufs) {
- size = MJUMPAGESIZE;
- s = 1;
- };
-
- mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
- if (mp == NULL) {
- adapter->mbuf_alloc_failed++;
- return (ENOBUFS);
- }
- mp->m_len = mp->m_pkthdr.len = size;
-
- if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
- m_adj(mp, ETHER_ALIGN);
+ rxbuf = &rxr->rx_buffers[i];
+ /* First get our header and payload mbuf */
+ if (clean & IXGBE_CLEAN_HDR) {
+ mh = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (mh == NULL)
+ goto remap;
+ } else /* reuse */
+ mh = rxr->rx_buffers[i].m_head;
+
+ mh->m_len = MHLEN;
+ mh->m_flags |= M_PKTHDR;
+
+ if (clean & IXGBE_CLEAN_PKT) {
+ mp = m_getjcl(M_DONTWAIT, MT_DATA,
+ M_PKTHDR, adapter->rx_mbuf_sz);
+ if (mp == NULL)
+ goto remap;
+ mp->m_len = adapter->rx_mbuf_sz;
+ mp->m_flags &= ~M_PKTHDR;
+ } else { /* reusing */
+ mp = rxr->rx_buffers[i].m_pack;
+ mp->m_len = adapter->rx_mbuf_sz;
+ mp->m_flags &= ~M_PKTHDR;
+ }
/*
- * Using memory from the mbuf cluster pool, invoke the bus_dma
- * machinery to arrange the memory mapping.
- */
- error = bus_dmamap_load_mbuf_sg(rxr->rxtag[s], rxr->spare_map[s],
- mp, segs, &nsegs, BUS_DMA_NOWAIT);
- if (error) {
- m_free(mp);
+ ** Need to create a chain for the following
+ ** dmamap call at this point.
+ */
+ mh->m_next = mp;
+ mh->m_pkthdr.len = mh->m_len + mp->m_len;
+
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
+ rxr->spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ printf("GET BUF: dmamap load failure - %d\n", error);
+ m_free(mh);
return (error);
}
- /* Now check our target buffer for existing mapping */
- rxbuf = &rxr->rx_buffers[i];
- old = rxbuf->bigbuf;
+ /* Unload old mapping and update buffer struct */
if (rxbuf->m_head != NULL)
- bus_dmamap_unload(rxr->rxtag[old], rxbuf->map[old]);
+ bus_dmamap_unload(rxr->rxtag, rxbuf->map);
+ map = rxbuf->map;
+ rxbuf->map = rxr->spare_map;
+ rxr->spare_map = map;
+ rxbuf->m_head = mh;
+ rxbuf->m_pack = mp;
+ bus_dmamap_sync(rxr->rxtag,
+ rxbuf->map, BUS_DMASYNC_PREREAD);
+
+ /* Update descriptor */
+ rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
+ rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
- map = rxbuf->map[old];
- rxbuf->map[s] = rxr->spare_map[s];
- rxr->spare_map[old] = map;
- bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s], BUS_DMASYNC_PREREAD);
- rxbuf->m_head = mp;
- rxbuf->bigbuf = s;
-
- rxr->rx_base[i].read.pkt_addr = htole64(segs[0].ds_addr);
-
-#ifndef NO_82598_A0_SUPPORT
- /* A0 needs to One's Compliment descriptors */
- if (adapter->hw.revision_id == 0) {
- struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
- struct dhack *d;
+ return (0);
- d = (struct dhack *)&rxr->rx_base[i];
- d->a1 = ~(d->a1);
- d->a2 = ~(d->a2);
+ /*
+ ** If we get here, we have an mbuf resource
+ ** issue, so we discard the incoming packet
+ ** and attempt to reuse existing mbufs next
+ ** pass thru the ring, but to do so we must
+ ** fix up the descriptor which had the address
+ ** clobbered with writeback info.
+ */
+remap:
+ adapter->mbuf_header_failed++;
+ merr = ENOBUFS;
+ /* Is there a reusable buffer? */
+ mh = rxr->rx_buffers[i].m_head;
+ if (mh == NULL) /* Nope, init error */
+ return (merr);
+ mp = rxr->rx_buffers[i].m_pack;
+ if (mp == NULL) /* Nope, init error */
+ return (merr);
+ /* Get our old mapping */
+ rxbuf = &rxr->rx_buffers[i];
+ error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
+ rxbuf->map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ /* We really have a problem */
+ m_free(mh);
+ return (error);
}
-#endif
+ /* Now fix the descriptor as needed */
+ rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
+ rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
- return (0);
+ return (merr);
}
+
/*********************************************************************
*
* Allocate memory for rx_buffer structures. Since we use one
@@ -2873,45 +3085,30 @@ ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
goto fail;
}
- /* First make the small (2K) tag/map */
- if ((error = bus_dma_tag_create(NULL, /* parent */
- PAGE_SIZE, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MCLBYTES, /* maxsize */
- 1, /* nsegments */
- MCLBYTES, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->rxtag[0]))) {
- device_printf(dev, "Unable to create RX Small DMA tag\n");
- goto fail;
- }
-
- /* Next make the large (4K) tag/map */
+ /*
+	** The tag is made to accommodate the largest buffer size
+	** with packet split (hence the two segments, even though
+	** it may not always use this).
+ */
if ((error = bus_dma_tag_create(NULL, /* parent */
PAGE_SIZE, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- MJUMPAGESIZE, /* maxsize */
- 1, /* nsegments */
+ MJUM16BYTES, /* maxsize */
+ 2, /* nsegments */
MJUMPAGESIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
- &rxr->rxtag[1]))) {
- device_printf(dev, "Unable to create RX Large DMA tag\n");
+ &rxr->rxtag))) {
+ device_printf(dev, "Unable to create RX DMA tag\n");
goto fail;
}
- /* Create the spare maps (used by getbuf) */
- error = bus_dmamap_create(rxr->rxtag[0], BUS_DMA_NOWAIT,
- &rxr->spare_map[0]);
- error = bus_dmamap_create(rxr->rxtag[1], BUS_DMA_NOWAIT,
- &rxr->spare_map[1]);
+ /* Create the spare map (used by getbuf) */
+ error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
+ &rxr->spare_map);
if (error) {
device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
__func__, error);
@@ -2920,16 +3117,10 @@ ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
rxbuf = &rxr->rx_buffers[i];
- error = bus_dmamap_create(rxr->rxtag[0],
- BUS_DMA_NOWAIT, &rxbuf->map[0]);
+ error = bus_dmamap_create(rxr->rxtag,
+ BUS_DMA_NOWAIT, &rxbuf->map);
if (error) {
- device_printf(dev, "Unable to create Small RX DMA map\n");
- goto fail;
- }
- error = bus_dmamap_create(rxr->rxtag[1],
- BUS_DMA_NOWAIT, &rxbuf->map[1]);
- if (error) {
- device_printf(dev, "Unable to create Large RX DMA map\n");
+ device_printf(dev, "Unable to create RX DMA map\n");
goto fail;
}
}
@@ -2954,38 +3145,41 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
device_t dev;
struct ixgbe_rx_buf *rxbuf;
struct lro_ctrl *lro = &rxr->lro;
- int j, rsize, s = 0;
+ int j, rsize;
adapter = rxr->adapter;
dev = adapter->dev;
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(union ixgbe_adv_rx_desc), 4096);
+
/* Clear the ring contents */
+ rsize = roundup2(adapter->num_rx_desc *
+ sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
bzero((void *)rxr->rx_base, rsize);
/*
- ** Free current RX buffers: the size buffer
- ** that is loaded is indicated by the buffer
- ** bigbuf value.
+ ** Free current RX buffer structs and their mbufs
*/
for (int i = 0; i < adapter->num_rx_desc; i++) {
rxbuf = &rxr->rx_buffers[i];
- s = rxbuf->bigbuf;
if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
+ bus_dmamap_sync(rxr->rxtag, rxbuf->map,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
- m_freem(rxbuf->m_head);
+ bus_dmamap_unload(rxr->rxtag, rxbuf->map);
+ if (rxbuf->m_head) {
+ rxbuf->m_head->m_next = rxbuf->m_pack;
+ m_freem(rxbuf->m_head);
+ }
rxbuf->m_head = NULL;
+ rxbuf->m_pack = NULL;
}
}
+ /* Now refresh the mbufs */
for (j = 0; j < adapter->num_rx_desc; j++) {
- if (ixgbe_get_buf(rxr, j) == ENOBUFS) {
+ if (ixgbe_get_buf(rxr, j, IXGBE_CLEAN_ALL) == ENOBUFS) {
rxr->rx_buffers[j].m_head = NULL;
+ rxr->rx_buffers[j].m_pack = NULL;
+ rxr->rx_base[j].read.hdr_addr = 0;
rxr->rx_base[j].read.pkt_addr = 0;
- /* If we fail some may have change size */
- s = adapter->bigbufs;
goto fail;
}
}
@@ -3001,27 +3195,26 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
if (ixgbe_enable_lro) {
int err = tcp_lro_init(lro);
if (err) {
- device_printf(dev,"LRO Initialization failed!\n");
+ INIT_DEBUGOUT("LRO Initialization failed!\n");
goto fail;
}
- device_printf(dev,"RX LRO Initialized\n");
+ INIT_DEBUGOUT("RX LRO Initialized\n");
lro->ifp = adapter->ifp;
}
-
return (0);
+
fail:
/*
- * We need to clean up any buffers allocated so far
- * 'j' is the failing index, decrement it to get the
- * last success.
+ * We need to clean up any buffers allocated
+ * so far, 'j' is the failing index.
*/
- for (--j; j < 0; j--) {
- rxbuf = &rxr->rx_buffers[j];
+ for (int i = 0; i < j; i++) {
+ rxbuf = &rxr->rx_buffers[i];
if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
+ bus_dmamap_sync(rxr->rxtag, rxbuf->map,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
+ bus_dmamap_unload(rxr->rxtag, rxbuf->map);
m_freem(rxbuf->m_head);
rxbuf->m_head = NULL;
}
@@ -3038,9 +3231,9 @@ static int
ixgbe_setup_receive_structures(struct adapter *adapter)
{
struct rx_ring *rxr = adapter->rx_rings;
- int i, j, s;
+ int j;
- for (i = 0; i < adapter->num_rx_queues; i++, rxr++)
+ for (j = 0; j < adapter->num_rx_queues; j++, rxr++)
if (ixgbe_setup_receive_ring(rxr))
goto fail;
@@ -3049,19 +3242,17 @@ fail:
/*
* Free RX buffers allocated so far, we will only handle
* the rings that completed, the failing case will have
- * cleaned up for itself. The value of 'i' will be the
- * failed ring so we must pre-decrement it.
+	 * cleaned up for itself. 'j' failed, so it's the terminus.
*/
- rxr = adapter->rx_rings;
- for (--i; i > 0; i--, rxr++) {
- for (j = 0; j < adapter->num_rx_desc; j++) {
+ for (int i = 0; i < j; ++i) {
+ rxr = &adapter->rx_rings[i];
+ for (int n = 0; n < adapter->num_rx_desc; n++) {
struct ixgbe_rx_buf *rxbuf;
- rxbuf = &rxr->rx_buffers[j];
- s = rxbuf->bigbuf;
+ rxbuf = &rxr->rx_buffers[n];
if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
+ bus_dmamap_sync(rxr->rxtag, rxbuf->map,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
+ bus_dmamap_unload(rxr->rxtag, rxbuf->map);
m_freem(rxbuf->m_head);
rxbuf->m_head = NULL;
}
@@ -3073,110 +3264,109 @@ fail:
/*********************************************************************
*
- * Enable receive unit.
+ * Setup receive registers and features.
*
**********************************************************************/
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
struct rx_ring *rxr = adapter->rx_rings;
+ struct ixgbe_hw *hw = &adapter->hw;
struct ifnet *ifp = adapter->ifp;
u32 rxctrl, fctrl, srrctl, rxcsum;
- u32 mrqc, hlreg, linkvec;
- u32 random[10];
- int i,j;
- union {
- u8 c[128];
- u32 i[32];
- } reta;
+ u32 reta, mrqc = 0, hlreg, random[10];
/*
* Make sure receives are disabled while
* setting up the descriptor ring
*/
- rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
rxctrl & ~IXGBE_RXCTRL_RXEN);
/* Enable broadcasts */
- fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl |= IXGBE_FCTRL_BAM;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
+ fctrl |= IXGBE_FCTRL_DPF;
+ fctrl |= IXGBE_FCTRL_PMCF;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- hlreg = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
- if (ifp->if_mtu > ETHERMTU)
- hlreg |= IXGBE_HLREG0_JUMBOEN;
- else
- hlreg &= ~IXGBE_HLREG0_JUMBOEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, hlreg);
-
- srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
+ srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(0));
srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
- if (adapter->bigbufs)
+
+ hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ /* Set for Jumbo Frames? */
+ if (ifp->if_mtu > ETHERMTU) {
+ hlreg |= IXGBE_HLREG0_JUMBOEN;
srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
+ } else {
+ hlreg &= ~IXGBE_HLREG0_JUMBOEN;
srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
- /* Set Queue moderation rate */
- for (i = 0; i < IXGBE_MSGS; i++)
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(i), DEFAULT_ITR);
+ if (ixgbe_rx_hdr_split) {
+ /* Use a standard mbuf for the header */
+ srrctl |= ((IXGBE_RX_HDR << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
+ & IXGBE_SRRCTL_BSIZEHDR_MASK);
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ } else
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- /* Set Link moderation lower */
- linkvec = adapter->num_tx_queues + adapter->num_rx_queues;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(linkvec), LINK_ITR);
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(0), srrctl);
for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
u64 rdba = rxr->rxdma.dma_paddr;
/* Setup the Base and Length of the Rx Descriptor Ring */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(i),
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
(rdba & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(i), (rdba >> 32));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(i),
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
/* Setup the HW Rx Head and Tail Descriptor Pointers */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(i), 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(i),
- adapter->num_rx_desc - 1);
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
}
- rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM);
+ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ /* Setup RSS */
if (adapter->num_rx_queues > 1) {
+ int i, j;
+ reta = 0;
+
/* set up random bits */
arc4rand(&random, sizeof(random), 0);
- /* Create reta data */
- for (i = 0; i < 128; )
- for (j = 0; j < adapter->num_rx_queues &&
- i < 128; j++, i++)
- reta.c[i] = j;
-
/* Set up the redirection table */
- for (i = 0; i < 32; i++)
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RETA(i), reta.i[i]);
+ for (i = 0, j = 0; i < 128; i++, j++) {
+ if (j == adapter->num_rx_queues) j = 0;
+ reta = (reta << 8) | (j * 0x11);
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ }
/* Now fill our hash function seeds */
for (int i = 0; i < 10; i++)
- IXGBE_WRITE_REG_ARRAY(&adapter->hw,
- IXGBE_RSSRK(0), i, random[i]);
-
- mrqc = IXGBE_MRQC_RSSEN
- /* Perform hash on these packet types */
- | IXGBE_MRQC_RSS_FIELD_IPV4
- | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
- | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV6_EX
- | IXGBE_MRQC_RSS_FIELD_IPV6
- | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
- | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_MRQC, mrqc);
+ IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
+
+ /* Perform hash on these packet types */
+ mrqc |= IXGBE_MRQC_RSSEN
+ | IXGBE_MRQC_RSS_FIELD_IPV4
+ | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+ | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
+ | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
+ | IXGBE_MRQC_RSS_FIELD_IPV6_EX
+ | IXGBE_MRQC_RSS_FIELD_IPV6
+ | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
+ | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
+ | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
/* RSS and RX IPP Checksum are mutually exclusive */
rxcsum |= IXGBE_RXCSUM_PCSD;
@@ -3188,11 +3378,7 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
if (!(rxcsum & IXGBE_RXCSUM_PCSD))
rxcsum |= IXGBE_RXCSUM_IPPCSE;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum);
-
- /* Enable Receive engine */
- rxctrl |= (IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rxctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
return;
}
@@ -3235,10 +3421,11 @@ ixgbe_free_receive_buffers(struct rx_ring *rxr)
if (rxr->rx_buffers != NULL) {
rxbuf = &rxr->rx_buffers[0];
for (int i = 0; i < adapter->num_rx_desc; i++) {
- int s = rxbuf->bigbuf;
if (rxbuf->map != NULL) {
- bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
- bus_dmamap_destroy(rxr->rxtag[s], rxbuf->map[s]);
+ bus_dmamap_sync(rxr->rxtag, rxbuf->map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rxtag, rxbuf->map);
+ bus_dmamap_destroy(rxr->rxtag, rxbuf->map);
}
if (rxbuf->m_head != NULL) {
m_freem(rxbuf->m_head);
@@ -3251,11 +3438,9 @@ ixgbe_free_receive_buffers(struct rx_ring *rxr)
free(rxr->rx_buffers, M_DEVBUF);
rxr->rx_buffers = NULL;
}
- for (int s = 0; s < 2; s++) {
- if (rxr->rxtag[s] != NULL) {
- bus_dma_tag_destroy(rxr->rxtag[s]);
- rxr->rxtag[s] = NULL;
- }
+ if (rxr->rxtag != NULL) {
+ bus_dma_tag_destroy(rxr->rxtag);
+ rxr->rxtag = NULL;
}
return;
}
@@ -3269,6 +3454,7 @@ ixgbe_free_receive_buffers(struct rx_ring *rxr)
* We loop at most count times if count is > 0, or until done if
* count < 0.
*
+ * Return TRUE for more work, FALSE for all clean.
*********************************************************************/
static bool
ixgbe_rxeof(struct rx_ring *rxr, int count)
@@ -3277,10 +3463,8 @@ ixgbe_rxeof(struct rx_ring *rxr, int count)
struct ifnet *ifp = adapter->ifp;
struct lro_ctrl *lro = &rxr->lro;
struct lro_entry *queued;
- struct mbuf *mp;
- int len, i, eop = 0;
- u8 accept_frame = 0;
- u32 staterr;
+ int i;
+ u32 staterr;
union ixgbe_adv_rx_desc *cur;
@@ -3294,70 +3478,127 @@ ixgbe_rxeof(struct rx_ring *rxr, int count)
return FALSE;
}
+ /* Sync the ring */
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
+
while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- struct mbuf *m = NULL;
- int s;
+ struct mbuf *sendmp, *mh, *mp;
+ u16 hlen, plen, hdr;
+ u8 dopayload, accept_frame, eop;
+
- mp = rxr->rx_buffers[i].m_head;
- s = rxr->rx_buffers[i].bigbuf;
- bus_dmamap_sync(rxr->rxtag[s], rxr->rx_buffers[i].map[s],
- BUS_DMASYNC_POSTREAD);
accept_frame = 1;
+ hlen = plen = 0;
+ sendmp = mh = mp = NULL;
+
+ /* Sync the buffers */
+ bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[i].map,
+ BUS_DMASYNC_POSTREAD);
+
+ /*
+ ** The way the hardware is configured to
+ ** split, it will ONLY use the header buffer
+ ** when header split is enabled, otherwise we
+ ** get normal behavior, ie, both header and
+ ** payload are DMA'd into the payload buffer.
+ **
+ ** The fmp test is to catch the case where a
+ ** packet spans multiple descriptors, in that
+ ** case only the first header is valid.
+ */
+ if ((ixgbe_rx_hdr_split) && (rxr->fmp == NULL)){
+ hdr = le16toh(cur->
+ wb.lower.lo_dword.hs_rss.hdr_info);
+ hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > IXGBE_RX_HDR)
+ hlen = IXGBE_RX_HDR;
+ plen = le16toh(cur->wb.upper.length);
+ /* Handle the header mbuf */
+ mh = rxr->rx_buffers[i].m_head;
+ mh->m_len = hlen;
+ dopayload = IXGBE_CLEAN_HDR;
+ /*
+ ** Get the payload length, this
+ ** could be zero if its a small
+ ** packet.
+ */
+ if (plen) {
+ mp = rxr->rx_buffers[i].m_pack;
+ mp->m_len = plen;
+ mp->m_next = NULL;
+ mp->m_flags &= ~M_PKTHDR;
+ mh->m_next = mp;
+ mh->m_flags |= M_PKTHDR;
+ dopayload = IXGBE_CLEAN_ALL;
+ rxr->rx_split_packets++;
+ } else { /* small packets */
+ mh->m_flags &= ~M_PKTHDR;
+ mh->m_next = NULL;
+ }
+ } else {
+ /*
+ ** Either no header split, or a
+ ** secondary piece of a fragmented
+ ** split packet.
+ */
+ mh = rxr->rx_buffers[i].m_pack;
+ mh->m_flags |= M_PKTHDR;
+ mh->m_len = le16toh(cur->wb.upper.length);
+ dopayload = IXGBE_CLEAN_PKT;
+ }
+
if (staterr & IXGBE_RXD_STAT_EOP) {
count--;
eop = 1;
- } else {
+ } else
eop = 0;
- }
- len = cur->wb.upper.length;
if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
accept_frame = 0;
if (accept_frame) {
- /* Get a fresh buffer first */
- if (ixgbe_get_buf(rxr, i) != 0) {
+ if (ixgbe_get_buf(rxr, i, dopayload) != 0) {
ifp->if_iqdrops++;
goto discard;
}
-
- /* Assign correct length to the current fragment */
- mp->m_len = len;
-
+ /* Initial frame - setup */
if (rxr->fmp == NULL) {
- mp->m_pkthdr.len = len;
- rxr->fmp = mp; /* Store the first mbuf */
- rxr->lmp = mp;
+ mh->m_flags |= M_PKTHDR;
+ mh->m_pkthdr.len = mh->m_len;
+ rxr->fmp = mh; /* Store the first mbuf */
+ rxr->lmp = mh;
+ if (mp) { /* Add payload if split */
+ mh->m_pkthdr.len += mp->m_len;
+ rxr->lmp = mh->m_next;
+ }
} else {
/* Chain mbuf's together */
- mp->m_flags &= ~M_PKTHDR;
- rxr->lmp->m_next = mp;
+ mh->m_flags &= ~M_PKTHDR;
+ rxr->lmp->m_next = mh;
rxr->lmp = rxr->lmp->m_next;
- rxr->fmp->m_pkthdr.len += len;
+ rxr->fmp->m_pkthdr.len += mh->m_len;
}
if (eop) {
rxr->fmp->m_pkthdr.rcvif = ifp;
ifp->if_ipackets++;
- rxr->packet_count++;
- rxr->byte_count += rxr->fmp->m_pkthdr.len;
-
- ixgbe_rx_checksum(adapter,
- staterr, rxr->fmp);
-
+ rxr->rx_packets++;
+ /* capture data for AIM */
+ rxr->bytes += rxr->fmp->m_pkthdr.len;
+ rxr->rx_bytes += rxr->bytes;
+ if (ifp->if_capenable & IFCAP_RXCSUM)
+ ixgbe_rx_checksum(staterr, rxr->fmp);
+ else
+ rxr->fmp->m_pkthdr.csum_flags = 0;
if (staterr & IXGBE_RXD_STAT_VP) {
-#if __FreeBSD_version < 700000
- VLAN_INPUT_TAG_NEW(ifp, rxr->fmp,
- (le16toh(cur->wb.upper.vlan) &
- IXGBE_RX_DESC_SPECIAL_VLAN_MASK));
-#else
rxr->fmp->m_pkthdr.ether_vtag =
- le16toh(cur->wb.upper.vlan);
- rxr->fmp->m_flags |= M_VLANTAG;
-#endif
+ le16toh(cur->wb.upper.vlan);
+ rxr->fmp->m_flags |= M_VLANTAG;
}
- m = rxr->fmp;
+ sendmp = rxr->fmp;
rxr->fmp = NULL;
rxr->lmp = NULL;
}
@@ -3365,23 +3606,26 @@ ixgbe_rxeof(struct rx_ring *rxr, int count)
ifp->if_ierrors++;
discard:
/* Reuse loaded DMA map and just update mbuf chain */
- mp = rxr->rx_buffers[i].m_head;
- mp->m_len = mp->m_pkthdr.len =
- (rxr->rx_buffers[i].bigbuf ? MJUMPAGESIZE:MCLBYTES);
+ if (hlen) {
+ mh = rxr->rx_buffers[i].m_head;
+ mh->m_len = MHLEN;
+ mh->m_next = NULL;
+ }
+ mp = rxr->rx_buffers[i].m_pack;
+ mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
mp->m_data = mp->m_ext.ext_buf;
mp->m_next = NULL;
- if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+ if (adapter->max_frame_size <=
+ (MCLBYTES - ETHER_ALIGN))
m_adj(mp, ETHER_ALIGN);
if (rxr->fmp != NULL) {
+ /* handles the whole chain */
m_freem(rxr->fmp);
rxr->fmp = NULL;
rxr->lmp = NULL;
}
- m = NULL;
+ sendmp = NULL;
}
-
- /* Zero out the receive descriptors status */
- cur->wb.upper.status_error = 0;
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -3390,17 +3634,18 @@ discard:
if (++i == adapter->num_rx_desc)
i = 0;
- /* Now send up to the stack */
- if (m != NULL) {
- rxr->next_to_check = i;
+ /*
+ ** Now send up to the stack,
+		** note that the value of next_to_check
+ ** is safe because we keep the RX lock
+ ** thru this call.
+ */
+ if (sendmp != NULL) {
/* Use LRO if possible */
- if ((!lro->lro_cnt) || (tcp_lro_rx(lro, m, 0))) {
- IXGBE_RX_UNLOCK(rxr);
- (*ifp->if_input)(ifp, m);
- IXGBE_RX_LOCK(rxr);
- i = rxr->next_to_check;
- }
+ if ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0)))
+ (*ifp->if_input)(ifp, sendmp);
}
+
/* Get next descriptor */
cur = &rxr->rx_base[i];
staterr = cur->wb.upper.status_error;
@@ -3409,23 +3654,28 @@ discard:
/* Advance the IXGB's Receive Queue "Tail Pointer" */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
- IXGBE_RX_UNLOCK(rxr);
/*
- ** Flush any outstanding LRO work
- ** this may call into the stack and
- ** must not hold a driver lock.
- */
- while(!SLIST_EMPTY(&lro->lro_active)) {
+ * Flush any outstanding LRO work
+ */
+ while (!SLIST_EMPTY(&lro->lro_active)) {
queued = SLIST_FIRST(&lro->lro_active);
SLIST_REMOVE_HEAD(&lro->lro_active, next);
tcp_lro_flush(lro, queued);
}
- if (!(staterr & IXGBE_RXD_STAT_DD))
- return FALSE;
+ IXGBE_RX_UNLOCK(rxr);
- return TRUE;
+ /*
+ ** Leaving with more to clean?
+ ** then schedule another interrupt.
+ */
+ if (staterr & IXGBE_RXD_STAT_DD) {
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, rxr->eims);
+ return TRUE;
+ }
+
+ return FALSE;
}
/*********************************************************************
@@ -3436,19 +3686,11 @@ discard:
*
*********************************************************************/
static void
-ixgbe_rx_checksum(struct adapter *adapter,
- u32 staterr, struct mbuf * mp)
+ixgbe_rx_checksum(u32 staterr, struct mbuf * mp)
{
- struct ifnet *ifp = adapter->ifp;
u16 status = (u16) staterr;
u8 errors = (u8) (staterr >> 24);
- /* Not offloading */
- if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
- mp->m_pkthdr.csum_flags = 0;
- return;
- }
-
if (status & IXGBE_RXD_STAT_IPCS) {
/* Did it pass? */
if (!(errors & IXGBE_RXD_ERR_IPE)) {
@@ -3470,7 +3712,8 @@ ixgbe_rx_checksum(struct adapter *adapter,
return;
}
-#ifdef IXGBE_VLAN_EVENTS
+
+#ifdef IXGBE_HW_VLAN_SUPPORT
/*
* This routine is run via an vlan
* config EVENT
@@ -3479,7 +3722,7 @@ static void
ixgbe_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
struct adapter *adapter = ifp->if_softc;
- u32 ctrl;
+ u32 ctrl, rctl, index, vfta;
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
@@ -3498,6 +3741,7 @@ static void
ixgbe_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
struct adapter *adapter = ifp->if_softc;
+ u32 index, vfta;
/* Remove entry in the hardware filter table */
ixgbe_set_vfta(&adapter->hw, vtag, 0, FALSE);
@@ -3513,7 +3757,7 @@ ixgbe_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
}
}
-#endif /* IXGBE_VLAN_EVENTS */
+#endif
static void
ixgbe_enable_intr(struct adapter *adapter)
@@ -3524,16 +3768,17 @@ ixgbe_enable_intr(struct adapter *adapter)
/* Enable Fan Failure detection */
if (hw->phy.media_type == ixgbe_media_type_copper)
mask |= IXGBE_EIMS_GPI_SDP1;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+
/* With RSS we use auto clear */
if (adapter->msix_mem) {
/* Dont autoclear Link */
mask &= ~IXGBE_EIMS_OTHER;
mask &= ~IXGBE_EIMS_LSC;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
- adapter->eims_mask | mask);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
}
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
IXGBE_WRITE_FLUSH(hw);
return;
@@ -3560,17 +3805,38 @@ ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
return (value);
}
+/*
+** Setup the correct IVAR register for a particular MSIX interrupt
+** (yes this is all very magic and confusing :)
+** - entry is the register array entry
+** - vector is the MSIX vector for this queue
+** - type is RX/TX/MISC
+*/
static void
-ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector)
+ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector, s8 type)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 ivar, index;
vector |= IXGBE_IVAR_ALLOC_VAL;
- index = (entry >> 2) & 0x1F;
- ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
- ivar &= ~(0xFF << (8 * (entry & 0x3)));
- ivar |= (vector << (8 * (entry & 0x3)));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
+
+ switch (hw->mac.type) {
+
+ case ixgbe_mac_82598EB:
+ if (type == -1)
+ entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
+ else
+ entry += (type * 64);
+ index = (entry >> 2) & 0x1F;
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+ ivar &= ~(0xFF << (8 * (entry & 0x3)));
+ ivar |= (vector << (8 * (entry & 0x3)));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
+ break;
+
+ default:
+ break;
+ }
}
static void
@@ -3579,22 +3845,48 @@ ixgbe_configure_ivars(struct adapter *adapter)
struct tx_ring *txr = adapter->tx_rings;
struct rx_ring *rxr = adapter->rx_rings;
- for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
- ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), rxr->msix);
- adapter->eims_mask |= rxr->eims;
- }
+ for (int i = 0; i < adapter->num_rx_queues; i++, rxr++)
+ ixgbe_set_ivar(adapter, i, rxr->msix, 0);
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
- ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), txr->msix);
- adapter->eims_mask |= txr->eims;
- }
+ for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
+ ixgbe_set_ivar(adapter, i, txr->msix, 1);
/* For the Link interrupt */
- ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
- adapter->linkvec);
- adapter->eims_mask |= IXGBE_IVAR_OTHER_CAUSES_INDEX;
+ ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
+}
+
+/*
+** ixgbe_sfp_probe - called in the local timer to
+** determine if a port had optics inserted.
+*/
+static bool ixgbe_sfp_probe(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ device_t dev = adapter->dev;
+ bool result = FALSE;
+
+ if ((hw->phy.type == ixgbe_phy_nl) &&
+ (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
+ s32 ret = hw->phy.ops.identify_sfp(hw);
+ if (ret)
+ goto out;
+ ret = hw->phy.ops.reset(hw);
+ if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ device_printf(dev,"Unsupported SFP+ module detected!");
+ printf(" Reload driver with supported module.\n");
+ adapter->sfp_probe = FALSE;
+ goto out;
+ } else
+ device_printf(dev,"SFP+ module detected!\n");
+ /* We now have supported optics */
+ adapter->sfp_probe = FALSE;
+ result = TRUE;
+ }
+out:
+ return (result);
}
+
/**********************************************************************
*
* Update the board statistics counters.
@@ -3643,9 +3935,6 @@ ixgbe_update_stats_counters(struct adapter *adapter)
adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
- adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-
lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
adapter->stats.lxontxc += lxon;
lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
@@ -3700,10 +3989,7 @@ ixgbe_print_hw_stats(struct adapter * adapter)
device_printf(dev,"Std Mbuf Failed = %lu\n",
- adapter->mbuf_alloc_failed);
- device_printf(dev,"Std Cluster Failed = %lu\n",
- adapter->mbuf_cluster_failed);
-
+ adapter->mbuf_defrag_failed);
device_printf(dev,"Missed Packets = %llu\n",
(long long)adapter->stats.mpc[0]);
device_printf(dev,"Receive length errors = %llu\n",
@@ -3760,10 +4046,12 @@ ixgbe_print_debug_info(struct adapter *adapter)
device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n",
i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
IXGBE_READ_REG(hw, IXGBE_RDT(i)));
- device_printf(dev,"RX(%d) Packets Received: %lu\n",
- rxr->me, (long)rxr->packet_count);
+ device_printf(dev,"RX(%d) Packets Received: %lld\n",
+ rxr->me, (long long)rxr->rx_packets);
+ device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
+ rxr->me, (long long)rxr->rx_split_packets);
device_printf(dev,"RX(%d) Bytes Received: %lu\n",
- rxr->me, (long)rxr->byte_count);
+ rxr->me, (long)rxr->rx_bytes);
device_printf(dev,"RX(%d) IRQ Handled: %lu\n",
rxr->me, (long)rxr->rx_irq);
device_printf(dev,"RX(%d) LRO Queued= %d\n",
@@ -3777,7 +4065,7 @@ ixgbe_print_debug_info(struct adapter *adapter)
IXGBE_READ_REG(hw, IXGBE_TDH(i)),
IXGBE_READ_REG(hw, IXGBE_TDT(i)));
device_printf(dev,"TX(%d) Packets Sent: %lu\n",
- txr->me, (long)txr->tx_packets);
+ txr->me, (long)txr->total_packets);
device_printf(dev,"TX(%d) IRQ Handled: %lu\n",
txr->me, (long)txr->tx_irq);
device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
@@ -3852,11 +4140,11 @@ ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
case ixgbe_fc_rx_pause:
case ixgbe_fc_tx_pause:
case ixgbe_fc_full:
- adapter->hw.fc.type = ixgbe_flow_control;
+ adapter->hw.fc.requested_mode = ixgbe_flow_control;
break;
case ixgbe_fc_none:
default:
- adapter->hw.fc.type = ixgbe_fc_none;
+ adapter->hw.fc.requested_mode = ixgbe_fc_none;
}
ixgbe_setup_fc(&adapter->hw, 0);
@@ -3872,26 +4160,3 @@ ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
-
-#ifndef NO_82598_A0_SUPPORT
-/*
- * A0 Workaround: invert descriptor for hardware
- */
-void
-desc_flip(void *desc)
-{
- struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
- struct dhack *d;
-
- d = (struct dhack *)desc;
- d->a1 = ~(d->a1);
- d->a2 = ~(d->a2);
- d->b1 = ~(d->b1);
- d->b2 = ~(d->b2);
- d->b2 &= 0xFFFFFFF0;
- d->b1 &= ~IXGBE_ADVTXD_DCMD_RS;
-}
-#endif
-
-
-
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index 32a8160..5a5e2ef 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -32,6 +32,7 @@
******************************************************************************/
/*$FreeBSD$*/
+
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -63,7 +64,6 @@
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
-#include <netinet/tcp_lro.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
@@ -84,6 +84,7 @@
#include <sys/pcpu.h>
#include "ixgbe_api.h"
+#include "tcp_lro.h"
/* Tunables */
@@ -121,7 +122,7 @@
* This parameter controls the maximum no of times the driver will loop in
* the isr. Minimum Value = 1
*/
-#define MAX_INTR 10
+#define MAX_LOOP 10
/*
* This parameter controls the duration of transmit watchdog timer.
@@ -159,10 +160,11 @@
#define MAX_NUM_MULTICAST_ADDRESSES 128
#define IXGBE_MAX_SCATTER 100
-#define IXGBE_MSIX_BAR 3
+#define MSIX_82598_BAR 3
+#define MSIX_82599_BAR 4
#define IXGBE_TSO_SIZE 65535
#define IXGBE_TX_BUFFER_SIZE ((u32) 1514)
-#define IXGBE_RX_HDR_SIZE ((u32) 256)
+#define IXGBE_RX_HDR 128
#define CSUM_OFFLOAD 7 /* Bits in csum flags */
/* The number of MSIX messages the 82598 supports */
@@ -179,24 +181,28 @@
/*
* Interrupt Moderation parameters
- * for now we hardcode, later
- * it would be nice to do dynamic
*/
-#define MAX_IRQ_SEC 8000
-#define DEFAULT_ITR 1000000000/(MAX_IRQ_SEC * 256)
-#define LINK_ITR 1000000000/(1950 * 256)
+#define IXGBE_LOW_LATENCY 128
+#define IXGBE_AVE_LATENCY 400
+#define IXGBE_BULK_LATENCY 1200
+#define IXGBE_LINK_ITR 2000
+
+/* Header split args for get_buf */
+#define IXGBE_CLEAN_HDR 1
+#define IXGBE_CLEAN_PKT 2
+#define IXGBE_CLEAN_ALL 3
/* Used for auto RX queue configuration */
extern int mp_ncpus;
/*
- * ******************************************************************************
+ *****************************************************************************
* vendor_info_array
*
* This array contains the list of Subvendor/Subdevice IDs on which the driver
* should load.
*
-*****************************************************************************
+ *****************************************************************************
*/
typedef struct _ixgbe_vendor_info_t {
unsigned int vendor_id;
@@ -204,7 +210,7 @@ typedef struct _ixgbe_vendor_info_t {
unsigned int subvendor_id;
unsigned int subdevice_id;
unsigned int index;
-} ixgbe_vendor_info_t;
+} ixgbe_vendor_info_t;
struct ixgbe_tx_buf {
@@ -214,9 +220,8 @@ struct ixgbe_tx_buf {
struct ixgbe_rx_buf {
struct mbuf *m_head;
- boolean_t bigbuf;
- /* one small and one large map */
- bus_dmamap_t map[2];
+ struct mbuf *m_pack;
+ bus_dmamap_t map;
};
/*
@@ -253,11 +258,12 @@ struct tx_ring {
volatile u16 tx_avail;
u32 txd_cmd;
bus_dma_tag_t txtag;
+ char mtx_name[16];
/* Soft Stats */
u32 no_tx_desc_avail;
u32 no_tx_desc_late;
u64 tx_irq;
- u64 tx_packets;
+ u64 total_packets;
};
@@ -279,14 +285,20 @@ struct rx_ring {
unsigned int last_cleaned;
unsigned int next_to_check;
struct ixgbe_rx_buf *rx_buffers;
- bus_dma_tag_t rxtag[2];
- bus_dmamap_t spare_map[2];
+ bus_dma_tag_t rxtag;
+ bus_dmamap_t spare_map;
struct mbuf *fmp;
struct mbuf *lmp;
+ char mtx_name[16];
+
+ u32 bytes; /* Used for AIM calc */
+ u32 eitr_setting;
+
/* Soft stats */
u64 rx_irq;
- u64 packet_count;
- u64 byte_count;
+ u64 rx_split_packets;
+ u64 rx_packets;
+ u64 rx_bytes;
};
/* Our adapter structure */
@@ -294,7 +306,6 @@ struct adapter {
struct ifnet *ifp;
struct ixgbe_hw hw;
- /* FreeBSD operating-system-specific structures */
struct ixgbe_osdep osdep;
struct device *dev;
@@ -309,7 +320,6 @@ struct adapter {
void *tag[IXGBE_MSGS];
struct resource *res[IXGBE_MSGS];
int rid[IXGBE_MSGS];
- u32 eims_mask;
struct ifmedia media;
struct callout timer;
@@ -318,21 +328,23 @@ struct adapter {
struct mtx core_mtx;
- /* Legacy Fast Intr handling */
- struct task link_task;
-
/* Info about the board itself */
u32 part_num;
+ u32 optics;
bool link_active;
u16 max_frame_size;
u32 link_speed;
+ u32 linkvec;
u32 tx_int_delay;
u32 tx_abs_int_delay;
u32 rx_int_delay;
u32 rx_abs_int_delay;
- /* Indicates the cluster size to use */
- bool bigbufs;
+ /* Mbuf cluster size */
+ u32 rx_mbuf_sz;
+
+ /* Check for missing optics */
+ bool sfp_probe;
/*
* Transmit rings:
@@ -349,20 +361,18 @@ struct adapter {
struct rx_ring *rx_rings;
int num_rx_desc;
int num_rx_queues;
+ u32 rx_mask;
u32 rx_process_limit;
- eventhandler_tag vlan_attach;
- eventhandler_tag vlan_detach;
-
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
- unsigned long mbuf_alloc_failed;
- unsigned long mbuf_cluster_failed;
+ unsigned long mbuf_defrag_failed;
+ unsigned long mbuf_header_failed;
+ unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long tso_tx;
- unsigned long linkvec;
unsigned long link_irq;
struct ixgbe_hw_stats stats;
diff --git a/sys/dev/ixgbe/ixgbe_82598.c b/sys/dev/ixgbe/ixgbe_82598.c
index b1307b3..c94b2f9 100644
--- a/sys/dev/ixgbe/ixgbe_82598.c
+++ b/sys/dev/ixgbe/ixgbe_82598.c
@@ -38,40 +38,44 @@
#include "ixgbe_phy.h"
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
-s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
-enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
-s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
-s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete);
-s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
-s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
-s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete);
+static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
#ifndef NO_82598_A0_SUPPORT
s32 ixgbe_reset_hw_rev_0_82598(struct ixgbe_hw *hw);
#endif
-s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
-s32 ixgbe_configure_fiber_serdes_fc_82598(struct ixgbe_hw *hw);
-s32 ixgbe_setup_fiber_serdes_link_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
-s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
-s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index);
+ u32 vind, bool vlan_on);
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index);
+static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
/**
* ixgbe_init_ops_82598 - Inits func ptrs and MAC type
@@ -85,6 +89,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
s32 ret_val;
+ u16 list_offset, data_offset;
ret_val = ixgbe_init_phy_ops_generic(hw);
ret_val = ixgbe_init_ops_generic(hw);
@@ -99,6 +104,10 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->ops.reset_hw = &ixgbe_reset_hw_82598;
#endif
mac->ops.get_media_type = &ixgbe_get_media_type_82598;
+ mac->ops.get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_82598;
+ mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
+ mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
/* LEDs */
mac->ops.blink_led_start = &ixgbe_blink_led_start_82598;
@@ -113,20 +122,6 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
/* Flow Control */
mac->ops.setup_fc = &ixgbe_setup_fc_82598;
- /* Call PHY identify routine to get the phy type */
- phy->ops.identify(hw);
-
- /* PHY Init */
- switch (hw->phy.type) {
- case ixgbe_phy_tn:
- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
- break;
- default:
- break;
- }
-
/* Link */
mac->ops.check_link = &ixgbe_check_mac_link_82598;
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
@@ -148,7 +143,46 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->max_tx_queues = 32;
mac->max_rx_queues = 64;
- return IXGBE_SUCCESS;
+ /* SFP+ Module */
+ phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
+
+ /* Call PHY identify routine to get the phy type */
+ phy->ops.identify(hw);
+
+ /* PHY Init */
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.get_firmware_version =
+ &ixgbe_get_phy_firmware_version_tnx;
+ break;
+ case ixgbe_phy_nl:
+ phy->ops.reset = &ixgbe_reset_phy_nl;
+
+ /* Call SFP+ identify routine to get the SFP+ module type */
+ ret_val = phy->ops.identify_sfp(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+ else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
+ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Check to see if SFP+ module is supported */
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
+ &list_offset,
+ &data_offset);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+out:
+ return ret_val;
}
/**
@@ -159,9 +193,9 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
*
* Determines the link capabilities by reading the AUTOC register.
**/
-s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
s32 status = IXGBE_SUCCESS;
s32 autoc_reg;
@@ -228,8 +262,8 @@ s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
*autoneg = TRUE;
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &speed_ability);
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &speed_ability);
if (status == IXGBE_SUCCESS) {
if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
@@ -247,25 +281,29 @@ s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
*
* Returns the media type (fiber, copper, backplane)
**/
-enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
enum ixgbe_media_type media_type;
/* Media type for I82598 is based on device ID */
switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = ixgbe_media_type_backplane;
+ break;
case IXGBE_DEV_ID_82598AF_DUAL_PORT:
case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
case IXGBE_DEV_ID_82598EB_CX4:
case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
case IXGBE_DEV_ID_82598EB_XF_LR:
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82598AT:
media_type = ixgbe_media_type_copper;
break;
- case IXGBE_DEV_ID_82598AT_DUAL_PORT:
- media_type = ixgbe_media_type_copper;
- break;
default:
media_type = ixgbe_media_type_unknown;
break;
@@ -275,106 +313,77 @@ enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_fc_82598 - Configure flow control settings
+ * ixgbe_fc_enable_82598 - Enable flow control
* @hw: pointer to hardware structure
* @packetbuf_num: packet buffer number (0-7)
*
- * Configures the flow control settings based on SW configuration. This
- * function is used for 802.3x flow control configuration only.
+ * Enable flow control according to the current settings.
**/
-s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
{
- u32 frctl_reg;
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 fctrl_reg;
u32 rmcs_reg;
+ u32 reg;
- if (packetbuf_num < 0 || packetbuf_num > 7) {
- DEBUGOUT1("Invalid packet buffer number [%d], expected range is"
- " 0-7\n", packetbuf_num);
- ASSERT(0);
- }
+ DEBUGFUNC("ixgbe_fc_enable_82598");
- frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
/*
- * 10 gig parts do not have a word in the EEPROM to determine the
- * default flow control setting, so we explicitly set it to full.
- */
- if (hw->fc.type == ixgbe_fc_default)
- hw->fc.type = ixgbe_fc_full;
-
- /*
- * We want to save off the original Flow Control configuration just in
- * case we get disconnected and then reconnected into a different hub
- * or switch with different Flow Control capabilities.
- */
- hw->fc.original_type = hw->fc.type;
-
- /*
- * The possible values of the "flow_control" parameter are:
+ * The possible values of fc.current_mode are:
* 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames but not
- * send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but we do not
- * support receiving pause frames)
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
* other: Invalid.
*/
- switch (hw->fc.type) {
+ switch (hw->fc.current_mode) {
case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
break;
case ixgbe_fc_rx_pause:
/*
- * Rx Flow control is enabled,
- * and Tx Flow control is disabled.
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
*/
- frctl_reg |= IXGBE_FCTRL_RFCE;
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
break;
case ixgbe_fc_tx_pause:
/*
- * Tx Flow control is enabled, and Rx Flow control is disabled,
- * by a software over-ride.
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
*/
rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
break;
case ixgbe_fc_full:
- /*
- * Flow control (both Rx and Tx) is enabled by a software
- * over-ride.
- */
- frctl_reg |= IXGBE_FCTRL_RFCE;
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
break;
default:
- /* We should never get here. The value should be 0-3. */
DEBUGOUT("Flow control param set incorrectly\n");
- ASSERT(0);
+ ret_val = -IXGBE_ERR_CONFIG;
+ goto out;
break;
}
/* Enable 802.3x based flow control settings. */
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
- /*
- * Check for invalid software configuration, zeros are completely
- * invalid for all parameters used past this point, and if we enable
- * flow control with zero water marks, we blast flow control packets.
- */
- if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
- DEBUGOUT("Flow control structure initialized incorrectly\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
- }
-
- /*
- * We need to set up the Receive Threshold high and low water
- * marks as well as (optionally) enabling the transmission of
- * XON frames.
- */
- if (hw->fc.type & ixgbe_fc_tx_pause) {
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
if (hw->fc.send_xon) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
(hw->fc.low_water | IXGBE_FCRTL_XONE));
@@ -382,14 +391,90 @@ s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
hw->fc.low_water);
}
+
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
- (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
+ (hw->fc.high_water | IXGBE_FCRTH_FCEN));
}
- IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
+ /* Configure pause time (2 TCs per register) */
+ reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num));
+ if ((packetbuf_num & 1) == 0)
+ reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
+ else
+ reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
- return IXGBE_SUCCESS;
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_setup_fc_82598 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Sets up flow control.
+ **/
+s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /* Validate the packetbuf configuration */
+ if (packetbuf_num < 0 || packetbuf_num > 7) {
+ DEBUGOUT1("Invalid packet buffer number [%d], expected range is"
+ " 0-7\n", packetbuf_num);
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * Validate the water mark configuration. Zero water marks are invalid
+ * because it causes the controller to just blast out fc packets.
+ */
+ if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
+ DEBUGOUT("Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * Validate the requested mode. Strict IEEE mode does not allow
+ * ixgbe_fc_rx_pause because it will cause testing anomalies.
+ */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /*
+ * Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ /* Decide whether to use autoneg or not. */
+ hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
+ if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL))
+ ret_val = ixgbe_fc_autoneg(hw);
+
+ if (ret_val)
+ goto out;
+
+ ret_val = ixgbe_fc_enable_82598(hw, packetbuf_num);
+
+out:
+ return ret_val;
}
/**
@@ -399,10 +484,8 @@ s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
{
- ixgbe_link_speed speed;
- bool link_up;
u32 autoc_reg;
u32 links_reg;
u32 i;
@@ -443,21 +526,8 @@ s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
}
}
- /*
- * We want to save off the original Flow Control configuration just in
- * case we get disconnected and then reconnected into a different hub
- * or switch with different Flow Control capabilities.
- */
- hw->fc.original_type = hw->fc.type;
- /*
- * Set up the SerDes link if in 1Gb mode, otherwise just set up
- * 10Gb flow control.
- */
- hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
- if (speed == IXGBE_LINK_SPEED_1GB_FULL)
- status = ixgbe_setup_fiber_serdes_link_82598(hw);
- else
- ixgbe_setup_fc_82598(hw, 0);
+ /* Set up flow control */
+ status = ixgbe_setup_fc_82598(hw, 0);
/* Add delay to filter out noises during initial link setup */
msec_delay(50);
@@ -474,11 +544,53 @@ s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
*
* Reads the links register to determine if link is up and the current speed
**/
-s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete)
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete)
{
u32 links_reg;
u32 i;
+ u16 link_reg, adapt_comp_reg;
+
+ /*
+ * SERDES PHY requires us to read link status from undocumented
+ * register 0xC79F. Bit 0 set indicates link is up/ready; clear
+ * indicates link down. 0xC00C is read to check that the XAUI lanes
+ * are active. Bit 0 clear indicates active; set indicates inactive.
+ */
+ if (hw->phy.type == ixgbe_phy_nl) {
+ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
+ &adapt_comp_reg);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if ((link_reg & 1) &&
+ ((adapt_comp_reg & 1) == 0)) {
+ *link_up = TRUE;
+ break;
+ } else {
+ *link_up = FALSE;
+ }
+ msec_delay(100);
+ hw->phy.ops.read_reg(hw, 0xC79F,
+ IXGBE_TWINAX_DEV,
+ &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC00C,
+ IXGBE_TWINAX_DEV,
+ &adapt_comp_reg);
+ }
+ } else {
+ if ((link_reg & 1) &&
+ ((adapt_comp_reg & 1) == 0))
+ *link_up = TRUE;
+ else
+ *link_up = FALSE;
+ }
+
+ if (*link_up == FALSE)
+ goto out;
+ }
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
if (link_up_wait_to_complete) {
@@ -504,195 +616,6 @@ s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_configure_fiber_serdes_fc_82598 - Configure fiber flow control
- * @hw: pointer to hardware structure
- *
- * Reads PCS registers and sets flow control settings, based on
- * link-partner's abilities.
- **/
-s32 ixgbe_configure_fiber_serdes_fc_82598(struct ixgbe_hw *hw)
-{
- s32 ret_val = IXGBE_SUCCESS;
- u32 pcs_anadv_reg, pcs_lpab_reg, pcs_lstat_reg, i;
- DEBUGFUNC("ixgbe_configure_fiber_serdes_fc_82598");
-
- /* Check that autonegotiation has completed */
- for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
- msec_delay(10);
- pcs_lstat_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
- if (pcs_lstat_reg & IXGBE_PCS1GLSTA_LINK_OK) {
- if (pcs_lstat_reg & IXGBE_PCS1GLSTA_AN_COMPLETE) {
- if (!(pcs_lstat_reg &
- (IXGBE_PCS1GLSTA_AN_TIMED_OUT)))
- hw->mac.autoneg_failed = 0;
- else
- hw->mac.autoneg_failed = 1;
- break;
- } else {
- hw->mac.autoneg_failed = 1;
- break;
- }
- }
- }
-
- if (hw->mac.autoneg_failed) {
- /*
- * AutoNeg failed to achieve a link, so we will turn
- * flow control off.
- */
- hw->fc.type = ixgbe_fc_none;
- DEBUGOUT("Flow Control = NONE.\n");
- ret_val = ixgbe_setup_fc_82598(hw, 0);
- goto out;
- }
-
- /*
- * Read the AN advertisement and LP ability registers and resolve
- * local flow control settings accordingly
- */
- pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
- if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
- /*
- * Now we need to check if the user selected Rx ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.original_type == ixgbe_fc_full) {
- hw->fc.type = ixgbe_fc_full;
- DEBUGOUT("Flow Control = FULL.\n");
- } else {
- hw->fc.type = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
- }
- } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.type = ixgbe_fc_tx_pause;
- DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
- } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.type = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
- } else {
- hw->fc.type = ixgbe_fc_none;
- DEBUGOUT("Flow Control = NONE.\n");
- }
-
- ret_val = ixgbe_setup_fc_82598(hw, 0);
- if (ret_val) {
- DEBUGOUT("Error forcing flow control settings\n");
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * ixgbe_setup_fiber_serdes_link_82598 - Configure fiber serdes link
- * @hw: pointer to hardware structure
- *
- * Sets up PCS registers and sets flow control settings, based on
- * link-partner's abilities.
- **/
-s32 ixgbe_setup_fiber_serdes_link_82598(struct ixgbe_hw *hw)
-{
- u32 reg;
- s32 ret_val;
-
- DEBUGFUNC("ixgbe_setup_fiber_serdes_link_82598");
-
- /*
- * 10 gig parts do not have a word in the EEPROM to determine the
- * default flow control setting, so we explicitly set it to full.
- */
- if (hw->fc.type == ixgbe_fc_default)
- hw->fc.type = ixgbe_fc_full;
-
- /*
- * 82598 fiber/serdes devices require that flow control be resolved in
- * software.
- */
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-
- /*
- * The possible values of the "fc" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but
- * we do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- */
- switch (hw->fc.type) {
- case ixgbe_fc_none:
- /*
- * Flow control completely disabled by a software
- * over-ride.
- */
- reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- break;
- case ixgbe_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is
- * disabled by a software over-ride. Since there really
- * isn't a way to advertise that we are capable of RX
- * Pause ONLY, we will advertise that we support both
- * symmetric and asymmetric Rx PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
- */
- reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- break;
- case ixgbe_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
- * disabled, by a software over-ride.
- */
- reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
- reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
- break;
- case ixgbe_fc_full:
- /*
- * Flow control (both Rx and Tx) is enabled by a
- * software over-ride.
- */
- reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- break;
- default:
- DEBUGOUT("Flow control param set incorrectly\n");
- ret_val = -IXGBE_ERR_CONFIG;
- goto out;
- break;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
- /* Set PCS register for autoneg */
- /* Enable and restart autoneg */
- reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
-
- reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; /* Disable AN timeout */
- DEBUGOUT1("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
-
- /*
- * Configure flow control. If we aren't auto-negotiating,
- * just setup the flow control and do not worry about PCS autoneg.
- */
- ixgbe_configure_fiber_serdes_fc_82598(hw);
-
out:
return IXGBE_SUCCESS;
}
@@ -701,14 +624,14 @@ out:
* ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: TRUE if auto-negotiation enabled
- * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
+ * @autoneg: TRUE if autonegotiation enabled
+ * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
-s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
{
s32 status = IXGBE_SUCCESS;
@@ -737,7 +660,7 @@ s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
* ixgbe_hw This will write the AUTOC register based on the new
* stored values
*/
- ixgbe_setup_mac_link_82598(hw);
+ status = ixgbe_setup_mac_link_82598(hw);
}
return status;
@@ -753,7 +676,7 @@ s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
* phy and wait for autonegotiate to finish. Then synchronize the
* MAC and PHY.
**/
-s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
{
s32 status;
@@ -779,16 +702,16 @@ s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
*
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
-s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete)
+static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
/* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
@@ -901,7 +824,7 @@ s32 ixgbe_reset_hw_rev_0_82598(struct ixgbe_hw *hw)
* clears all interrupts, performing a PHY reset, and performing a link (MAC)
* reset.
**/
-s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
u32 ctrl;
@@ -929,7 +852,7 @@ s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
&analog_val);
- analog_val &= ~ IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
analog_val);
@@ -1035,12 +958,12 @@ s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq clear index (not used in 82598, but elsewhere)
**/
-s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
- UNREFERENCED_PARAMETER(vmdq);
+ UNREFERENCED_PARAMETER(vmdq);
if (rar < rar_entries) {
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
@@ -1072,7 +995,7 @@ s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
u32 bits;
u32 vftabyte;
- if (vlan < 1 || vlan > 4095)
+ if (vlan > 4095)
return IXGBE_ERR_PARAM;
/* Determine 32-bit word position in array */
@@ -1109,7 +1032,7 @@ s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
*
* Clears the VLAN filer table, and the VMDq index associated with the filter
**/
-s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
u32 offset;
u32 vlanbyte;
@@ -1120,7 +1043,7 @@ s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
for (offset = 0; offset < hw->mac.vft_size; offset++)
IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
- 0);
+ 0);
return IXGBE_SUCCESS;
}
@@ -1130,7 +1053,7 @@ s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @index: led number to blink
**/
-s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
+static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
{
ixgbe_link_speed speed = 0;
bool link_up = 0;
@@ -1162,18 +1085,186 @@ s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
* @hw: pointer to hardware structure
* @index: led number to stop blinking
**/
-s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
+static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
{
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
autoc_reg &= ~IXGBE_AUTOC_FLU;
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
+ led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
return IXGBE_SUCCESS;
}
+
+/**
+ * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to Atlas analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ u32 atlas_ctl;
+
+ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(10);
+ atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+ *val = (u8)atlas_ctl;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: atlas register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ u32 atlas_ctl;
+
+ atlas_ctl = (reg << 8) | val;
+ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(10);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 sfp_addr = 0;
+ u16 sfp_data = 0;
+ u16 sfp_stat = 0;
+ u32 i;
+
+ if (hw->phy.type == ixgbe_phy_nl) {
+ /*
+ * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
+ * 0xC30D. These registers are used to talk to the SFP+
+ * module's EEPROM through the SDA/SCL (I2C) interface.
+ */
+ sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+ sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
+ hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ sfp_addr);
+
+ /* Poll status */
+ for (i = 0; i < 100; i++) {
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &sfp_stat);
+ sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
+ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
+ break;
+ msec_delay(10);
+ }
+
+ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
+ DEBUGOUT("EEPROM read did not pass.\n");
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ /* Read data */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+
+ *eeprom_data = (u8)(sfp_data >> 8);
+ } else {
+ status = IXGBE_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ /* Default device ID is mezzanine card KX/KX4 */
+ physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX);
+ break;
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ break;
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ break;
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ case IXGBE_DEV_ID_82598AT:
+ physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_T |
+ IXGBE_PHYSICAL_LAYER_1000BASE_T);
+ break;
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
+ hw->phy.ops.identify_sfp(hw);
+
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_da_cu:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case ixgbe_sfp_type_sr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ break;
+ case ixgbe_sfp_type_lr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ break;
+ }
+ break;
+
+ default:
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ break;
+ }
+
+ return physical_layer;
+}
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index 7c965b3..49dac7d 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -47,7 +47,7 @@ extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
* memset to 0 prior to calling this function. The following fields in
* hw structure should be filled in prior to calling this function:
* hw_addr, back, device_id, vendor_id, subsystem_device_id,
- * subsystem_vendor_id, and revision_id
+ * subsystem_vendor_id, and revision_id
**/
s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
{
@@ -85,13 +85,16 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
case IXGBE_DEV_ID_82598AF_DUAL_PORT:
case IXGBE_DEV_ID_82598AT:
- case IXGBE_DEV_ID_82598AT_DUAL_PORT:
case IXGBE_DEV_ID_82598EB_CX4:
case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
case IXGBE_DEV_ID_82598EB_XF_LR:
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
hw->mac.type = ixgbe_mac_82598EB;
break;
default:
@@ -279,9 +282,8 @@ s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
s32 status = IXGBE_SUCCESS;
if (hw->phy.type == ixgbe_phy_unknown) {
- if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) {
- status = IXGBE_ERR_PHY;
- }
+ if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS)
+ status = IXGBE_ERR_PHY;
}
if (status == IXGBE_SUCCESS) {
@@ -766,11 +768,38 @@ s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
* ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
* @hw: pointer to hardware structure
*
- * Initializes the Unicast Table Arrays to zero on device load. This
- * is part of the Rx init addr execution path.
+ * Initializes the Unicast Table Arrays to zero on device load. This
+ * is part of the Rx init addr execution path.
**/
s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
IXGBE_NOT_IMPLEMENTED);
}
+
+/**
+ * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
+ (hw, byte_offset, eeprom_data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_supported_physical_layer - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
+ (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
+}
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index f91f0e6..8022f90 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -110,5 +110,7 @@ s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
#endif /* _IXGBE_API_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index fd4351d..53e4aff 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -85,11 +85,11 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
mac->ops.start_hw = &ixgbe_start_hw_generic;
mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
mac->ops.get_media_type = NULL;
+ mac->ops.get_supported_physical_layer = NULL;
mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
- mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_generic;
- mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_generic;
+ mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
/* LEDs */
mac->ops.led_on = &ixgbe_led_on_generic;
@@ -111,8 +111,6 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
mac->ops.set_vfta = NULL;
mac->ops.init_uta_tables = NULL;
- /* Flow Control */
- mac->ops.setup_fc = NULL;
/* Link */
mac->ops.get_link_capabilities = NULL;
@@ -215,17 +213,16 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
IXGBE_READ_REG(hw, IXGBE_MRFC);
IXGBE_READ_REG(hw, IXGBE_RLEC);
IXGBE_READ_REG(hw, IXGBE_LXONTXC);
- IXGBE_READ_REG(hw, IXGBE_LXONRXC);
IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ IXGBE_READ_REG(hw, IXGBE_LXONRXC);
IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
for (i = 0; i < 8; i++) {
IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
}
-
IXGBE_READ_REG(hw, IXGBE_PRC64);
IXGBE_READ_REG(hw, IXGBE_PRC127);
IXGBE_READ_REG(hw, IXGBE_PRC255);
@@ -272,7 +269,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_read_pba_num - Reads part number from EEPROM
+ * ixgbe_read_pba_num_generic - Reads part number from EEPROM
* @hw: pointer to hardware structure
* @pba_num: stores the part number from the EEPROM
*
@@ -337,6 +334,7 @@ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
**/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
+ struct ixgbe_mac_info *mac = &hw->mac;
u16 link_status;
hw->bus.type = ixgbe_bus_type_pci_express;
@@ -374,10 +372,33 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
break;
}
+ mac->ops.set_lan_id(hw);
+
return IXGBE_SUCCESS;
}
/**
+ * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
+ bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
+
+ /* check for a port swap */
+ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ if (reg & IXGBE_FACTPS_LFS)
+ bus->func ^= 0x1;
+}
+
+/**
* ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
* @hw: pointer to hardware structure
*
@@ -425,9 +446,8 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
* Prevent the PCI-E bus from from hanging by disabling PCI-E master
* access and verify no pending requests
*/
- if (ixgbe_disable_pcie_master(hw) != IXGBE_SUCCESS) {
+ if (ixgbe_disable_pcie_master(hw) != IXGBE_SUCCESS)
DEBUGOUT("PCI-E Master disable polling has failed.\n");
- }
return IXGBE_SUCCESS;
}
@@ -500,9 +520,9 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
* change if a future EEPROM is not SPI.
*/
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
}
if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -510,8 +530,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
else
eeprom->address_bits = 8;
DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
- "%d\n", eeprom->type, eeprom->word_size,
- eeprom->address_bits);
+ "%d\n", eeprom->type, eeprom->word_size,
+ eeprom->address_bits);
}
return IXGBE_SUCCESS;
@@ -1156,7 +1176,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
if (status == IXGBE_SUCCESS) {
checksum = ixgbe_calc_eeprom_checksum(hw);
status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
- checksum);
+ checksum);
} else {
DEBUGOUT("EEPROM read failed\n");
}
@@ -1453,7 +1473,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
if (hw->addr_ctrl.overflow_promisc) {
/* enable promisc if not already in overflow or set by user */
if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
- DEBUGOUT( " Entering address overflow promisc mode\n");
+ DEBUGOUT(" Entering address overflow promisc mode\n");
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl |= IXGBE_FCTRL_UPE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
@@ -1684,6 +1704,148 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
return IXGBE_SUCCESS;
}
+
+/**
+ * ixgbe_fc_autoneg - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Negotiates flow control capabilities with link partner using autoneg and
+ * applies the results.
+ **/
+s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 i, reg, pcs_anadv_reg, pcs_lpab_reg;
+
+ DEBUGFUNC("ixgbe_fc_autoneg");
+
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
+ /*
+ * The possible values of fc.current_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.current_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
+ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ ret_val = -IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+ /* Set PCS register for autoneg */
+ /* Enable and restart autoneg */
+ reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
+
+ /* Disable AN timeout */
+ if (hw->fc.strict_ieee)
+ reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+ DEBUGOUT1("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+
+ /* See if autonegotiation has succeeded */
+ hw->mac.autoneg_succeeded = 0;
+ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+ msec_delay(10);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ if ((reg & (IXGBE_PCS1GLSTA_LINK_OK |
+ IXGBE_PCS1GLSTA_AN_COMPLETE)) ==
+ (IXGBE_PCS1GLSTA_LINK_OK |
+ IXGBE_PCS1GLSTA_AN_COMPLETE)) {
+ if (!(reg & IXGBE_PCS1GLSTA_AN_TIMED_OUT))
+ hw->mac.autoneg_succeeded = 1;
+ break;
+ }
+ }
+
+ if (!hw->mac.autoneg_succeeded) {
+ /* Autoneg failed to achieve a link, so we turn fc off */
+ hw->fc.current_mode = ixgbe_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ goto out;
+ }
+
+ /*
+ * Read the AN advertisement and LP ability registers and resolve
+ * local flow control settings accordingly
+ */
+ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+ if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
+ /*
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ }
+ } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+ } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+ !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+out:
+ return ret_val;
+}
+
+
/**
* ixgbe_disable_pcie_master - Disable PCI-express master access
* @hw: pointer to hardware structure
@@ -1792,44 +1954,3 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
ixgbe_release_eeprom_semaphore(hw);
}
-/**
- * ixgbe_read_analog_reg8_generic - Reads 8 bit Atlas analog register
- * @hw: pointer to hardware structure
- * @reg: analog register to read
- * @val: read value
- *
- * Performs read operation to Atlas analog register specified.
- **/
-s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val)
-{
- u32 atlas_ctl;
-
- IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
- IXGBE_WRITE_FLUSH(hw);
- usec_delay(10);
- atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
- *val = (u8)atlas_ctl;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_write_analog_reg8_generic - Writes 8 bit Atlas analog register
- * @hw: pointer to hardware structure
- * @reg: atlas register to write
- * @val: value to write
- *
- * Performs write operation to Atlas analog register specified.
- **/
-s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val)
-{
- u32 atlas_ctl;
-
- atlas_ctl = (reg << 8) | val;
- IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
- IXGBE_WRITE_FLUSH(hw);
- usec_delay(10);
-
- return IXGBE_SUCCESS;
-}
-
diff --git a/sys/dev/ixgbe/ixgbe_common.h b/sys/dev/ixgbe/ixgbe_common.h
index 0307624..d94f674 100644
--- a/sys/dev/ixgbe/ixgbe_common.h
+++ b/sys/dev/ixgbe/ixgbe_common.h
@@ -44,6 +44,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
@@ -70,6 +71,10 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packtetbuf_num);
+s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index ac7a39c..be6f577 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -78,16 +78,21 @@
#endif
#define FALSE 0
+#define false 0 /* shared code requires this */
#define TRUE 1
+#define true 1
#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
#define PCI_COMMAND_REGISTER PCIR_COMMAND
-typedef uint8_t u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef int32_t s32;
-typedef uint64_t u64;
-typedef boolean_t bool;
+#define IXGBE_HTONL htonl
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef boolean_t bool;
#define le16_to_cpu
diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c
index 37c78eb..291d454 100644
--- a/sys/dev/ixgbe/ixgbe_phy.c
+++ b/sys/dev/ixgbe/ixgbe_phy.c
@@ -55,6 +55,8 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
phy->ops.check_link = NULL;
phy->ops.get_firmware_version = NULL;
+ phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
+ phy->sfp_type = ixgbe_sfp_type_unknown;
return IXGBE_SUCCESS;
}
@@ -131,7 +133,6 @@ s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
}
-
return status;
}
@@ -151,6 +152,9 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case QT2022_PHY_ID:
phy_type = ixgbe_phy_qt;
break;
+ case ATH_PHY_ID:
+ phy_type = ixgbe_phy_nl;
+ break;
default:
phy_type = ixgbe_phy_unknown;
break;
@@ -217,9 +221,8 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
command = IXGBE_READ_REG(hw, IXGBE_MSCA);
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
break;
- }
}
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
@@ -445,12 +448,11 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
*/
hw->phy.autoneg_advertised = 0;
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
- }
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
- }
/* Setup link based on the new speed settings */
hw->phy.ops.setup_link(hw);
@@ -523,3 +525,262 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
return status;
}
+/**
+ * ixgbe_reset_phy_nl - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+{
+ u16 phy_offset, control, eword, edata, block_crc;
+ bool end_data = FALSE;
+ u16 list_offset, data_offset;
+ u16 phy_data = 0;
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 i;
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+
+ /* reset the PHY and poll for completion */
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ (phy_data | IXGBE_MDIO_PHY_XS_RESET));
+
+ for (i = 0; i < 100; i++) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
+ break;
+ msec_delay(10);
+ }
+
+ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
+ DEBUGOUT("PHY reset did not complete.\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+
+ /* Get init offsets */
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+ &data_offset);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
+ data_offset++;
+ while (!end_data) {
+ /*
+ * Read control word from PHY init contents offset
+ */
+ ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+ control = (eword & IXGBE_CONTROL_MASK_NL) >>
+ IXGBE_CONTROL_SHIFT_NL;
+ edata = eword & IXGBE_DATA_MASK_NL;
+ switch (control) {
+ case IXGBE_DELAY_NL:
+ data_offset++;
+ DEBUGOUT1("DELAY: %d MS\n", edata);
+ msec_delay(edata);
+ break;
+ case IXGBE_DATA_NL:
+ DEBUGOUT("DATA: \n");
+ data_offset++;
+ hw->eeprom.ops.read(hw, data_offset++,
+ &phy_offset);
+ for (i = 0; i < edata; i++) {
+ hw->eeprom.ops.read(hw, data_offset, &eword);
+ hw->phy.ops.write_reg(hw, phy_offset,
+ IXGBE_TWINAX_DEV, eword);
+ DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword,
+ phy_offset);
+ data_offset++;
+ phy_offset++;
+ }
+ break;
+ case IXGBE_CONTROL_NL:
+ data_offset++;
+ DEBUGOUT("CONTROL: \n");
+ if (edata == IXGBE_CONTROL_EOL_NL) {
+ DEBUGOUT("EOL\n");
+ end_data = TRUE;
+ } else if (edata == IXGBE_CONTROL_SOL_NL) {
+ DEBUGOUT("SOL\n");
+ } else {
+ DEBUGOUT("Bad control value\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+ break;
+ default:
+ DEBUGOUT("Bad control type\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the SFP module and assigns appropriate PHY type.
+ **/
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[4] = {0, 0, 0, 0};
+ u8 transmission_media = 0;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+ &identifier);
+
+ if (status == IXGBE_ERR_SFP_NOT_PRESENT) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ goto out;
+ }
+
+ if (identifier == IXGBE_SFF_IDENTIFIER_SFP) {
+ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
+ &comp_codes_1g);
+ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
+ &comp_codes_10g);
+ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_TRANSMISSION_MEDIA,
+ &transmission_media);
+
+ /* ID Module
+ * =========
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ */
+ if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Determine if the SFP+ PHY is dual speed or not. */
+ if ((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE))
+ hw->phy.multispeed_fiber = TRUE;
+ /* Determine PHY vendor */
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.id = identifier;
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ vendor_oui =
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+ switch (vendor_oui) {
+ case IXGBE_SFF_VENDOR_OUI_TYCO:
+ if (transmission_media &
+ IXGBE_SFF_TWIN_AX_CAPABLE)
+ hw->phy.type = ixgbe_phy_tw_tyco;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_FTL:
+ hw->phy.type = ixgbe_phy_sfp_ftl;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_AVAGO:
+ hw->phy.type = ixgbe_phy_sfp_avago;
+ break;
+ default:
+ if (transmission_media &
+ IXGBE_SFF_TWIN_AX_CAPABLE)
+ hw->phy.type = ixgbe_phy_tw_unknown;
+ else
+ hw->phy.type = ixgbe_phy_sfp_unknown;
+ break;
+ }
+ }
+ status = IXGBE_SUCCESS;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
+ * @hw: pointer to hardware structure
+ * @list_offset: offset to the SFP ID list
+ * @data_offset: offset to the SFP data block
+ *
+ * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
+ * so it returns the offsets to the phy init sequence block.
+ **/
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset)
+{
+ u16 sfp_id;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+
+ if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
+ (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ /* Read offset to PHY init contents */
+ hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
+
+ if ((!*list_offset) || (*list_offset == 0xFFFF))
+ return IXGBE_ERR_PHY;
+
+ /* Shift offset to first ID word */
+ (*list_offset)++;
+
+ /*
+ * Find the matching SFP ID in the EEPROM
+ * and program the init sequence
+ */
+ hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
+
+ while (sfp_id != IXGBE_PHY_INIT_END_NL) {
+ if (sfp_id == hw->phy.sfp_type) {
+ (*list_offset)++;
+ hw->eeprom.ops.read(hw, *list_offset, data_offset);
+ if ((!*data_offset) || (*data_offset == 0xFFFF)) {
+ DEBUGOUT("SFP+ module not supported\n");
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
+ break;
+ }
+ } else {
+ (*list_offset) += 2;
+ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+ return IXGBE_ERR_PHY;
+ }
+ }
+
+ if (sfp_id == IXGBE_PHY_INIT_END_NL) {
+ DEBUGOUT("No matching SFP+ module found\n");
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h
index 14becb0..7a1e4c7 100644
--- a/sys/dev/ixgbe/ixgbe_phy.h
+++ b/sys/dev/ixgbe/ixgbe_phy.h
@@ -36,6 +36,52 @@
#define _IXGBE_PHY_H_
#include "ixgbe_type.h"
+#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+
+/* EEPROM byte offsets */
+#define IXGBE_SFF_IDENTIFIER 0x0
+#define IXGBE_SFF_IDENTIFIER_SFP 0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
+#define IXGBE_SFF_1GBE_COMP_CODES 0x6
+#define IXGBE_SFF_10GBE_COMP_CODES 0x3
+#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
+
+/* Bitmasks */
+#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80
+#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_I2C_EEPROM_READ_MASK 0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+
+/* Bit-shift macros */
+#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 12
+#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 8
+#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 4
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
+#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
+#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define IXGBE_I2C_T_HD_STA 4
+#define IXGBE_I2C_T_LOW 5
+#define IXGBE_I2C_T_HIGH 4
+#define IXGBE_I2C_T_SU_STA 5
+#define IXGBE_I2C_T_HD_DATA 5
+#define IXGBE_I2C_T_SU_DATA 1
+#define IXGBE_I2C_T_RISE 1
+#define IXGBE_I2C_T_FALL 1
+#define IXGBE_I2C_T_SU_STO 4
+#define IXGBE_I2C_T_BUF 5
+
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
@@ -60,4 +106,9 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version);
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset);
#endif /* _IXGBE_PHY_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index ad45aa9..f265768 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -41,12 +41,15 @@
#define IXGBE_INTEL_VENDOR_ID 0x8086
/* Device IDs */
+#define IXGBE_DEV_ID_82598 0x10B6
#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
#define IXGBE_DEV_ID_82598AT 0x10C8
-#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10D7
+#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
/* General Registers */
@@ -78,7 +81,9 @@
#define IXGBE_EIMC 0x00888
#define IXGBE_EIAC 0x00810
#define IXGBE_EIAM 0x00890
-#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + ((_i) * 4)))
+#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
+ (0x012300 + ((_i) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK 0x00000FFF
#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
@@ -94,12 +99,18 @@
#define IXGBE_TFCS 0x0CE00
/* Receive DMA Registers */
-#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40)))
-#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40)))
-#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40)))
-#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40)))
-#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40)))
-#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40)))
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+ (0x0D000 + ((_i - 64) * 0x40)))
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+ (0x0D004 + ((_i - 64) * 0x40)))
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+ (0x0D008 + ((_i - 64) * 0x40)))
+#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+ (0x0D010 + ((_i - 64) * 0x40)))
+#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+ (0x0D018 + ((_i - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+ (0x0D028 + ((_i - 64) * 0x40)))
/*
* Split and Replication Receive Control Registers
* 00-15 : 0x02100 + n*4
@@ -118,8 +129,7 @@
#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
(((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
(0x0D00C + ((_i - 64) * 0x40))))
-#define IXGBE_RDRXCTL 0x02F00
-#define IXGBE_RDRXCTRL_RSC_PUSH 0x80
+#define IXGBE_RDRXCTL 0x02F00
#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
/* 8 of these 0x03C00 - 0x03C1C */
#define IXGBE_RXCTRL 0x03000
@@ -129,12 +139,17 @@
/* Receive Registers */
#define IXGBE_RXCSUM 0x05000
#define IXGBE_RFCTL 0x05008
+#define IXGBE_DRECCCTL 0x02F08
+#define IXGBE_DRECCCTL_DISABLE 0
/* Multicast Table Array - 128 entries */
#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
-#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8)))
-#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8)))
+#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x0A204 + ((_i) * 8)))
/* Packet split receive type */
-#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4)))
+#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
+ (0x0EA00 + ((_i) * 4)))
/* array of 4096 1-bit vlan filters */
#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
/*array of 4096 4-bit vlan vmdq indices */
@@ -164,7 +179,7 @@
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
#define IXGBE_TIPG 0x0CB00
-#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04)) /* 8 of these */
+#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
#define IXGBE_MNGTXMAP 0x0CD10
#define IXGBE_TIPG_FIBER_DEFAULT 3
#define IXGBE_TXPBSIZE_SHIFT 10
@@ -176,11 +191,72 @@
#define IXGBE_IPAV 0x05838
#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
+
#define IXGBE_WUPL 0x05900
#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
-#define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */
-
-/* Music registers */
+#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */
+#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host
+ * Filter Table */
+
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
+#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
+#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
+#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
+#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/
+
+/* Wake Up Filter Control */
+#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */
+#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all 6 wakeup filters*/
+#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+
+/* Wake Up Status */
+#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
+#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
+#define IXGBE_WUS_EX IXGBE_WUFC_EX
+#define IXGBE_WUS_MC IXGBE_WUFC_MC
+#define IXGBE_WUS_BC IXGBE_WUFC_BC
+#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
+#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
+#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
+#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
+#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
+#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
+#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
+#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
+#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
+#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
+#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
+
+/* Wake Up Packet Length */
+#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
+
+/* DCB registers */
#define IXGBE_RMCS 0x03D00
#define IXGBE_DPMCS 0x07F40
#define IXGBE_PDPMCS 0x0CD00
@@ -192,33 +268,6 @@
#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-/* LinkSec (MacSec) Registers */
-#define IXGBE_LSECTXCTRL 0x08A04
-#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */
-#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */
-#define IXGBE_LSECTXSA 0x08A10
-#define IXGBE_LSECTXPN0 0x08A14
-#define IXGBE_LSECTXPN1 0x08A18
-#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECRXCTRL 0x08F04
-#define IXGBE_LSECRXSCL 0x08F08
-#define IXGBE_LSECRXSCH 0x08F0C
-#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
-
-/* IpSec Registers */
-#define IXGBE_IPSTXIDX 0x08900
-#define IXGBE_IPSTXSALT 0x08904
-#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXIDX 0x08E00
-#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSPI 0x08E14
-#define IXGBE_IPSRXIPIDX 0x08E18
-#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSALT 0x08E2C
-#define IXGBE_IPSRXMOD 0x08E30
/* Stats registers */
@@ -274,8 +323,9 @@
#define IXGBE_BPTC 0x040F4
#define IXGBE_XEC 0x04120
-#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */
-#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4)))
+#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
+ (0x08600 + ((_i) * 4)))
#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
@@ -419,6 +469,7 @@
#define IXGBE_MHADD_MFS_SHIFT 16
/* Extended Device Control */
+#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
@@ -479,12 +530,14 @@
#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
+
/* Device Type definitions for new protocol MDIO commands */
#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
+#define IXGBE_TWINAX_DEV 1
#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
@@ -505,6 +558,10 @@
#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+
/* MII clause 22/28 definitions */
#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
@@ -520,10 +577,23 @@
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define QT2022_PHY_ID 0x0043A400
+#define ATH_PHY_ID 0x03429050
/* PHY Types */
#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+/* Special PHY Init Routine */
+#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
+#define IXGBE_PHY_INIT_END_NL 0xFFFF
+#define IXGBE_CONTROL_MASK_NL 0xF000
+#define IXGBE_DATA_MASK_NL 0x0FFF
+#define IXGBE_CONTROL_SHIFT_NL 12
+#define IXGBE_DELAY_NL 0
+#define IXGBE_DATA_NL 1
+#define IXGBE_CONTROL_NL 0x000F
+#define IXGBE_CONTROL_EOL_NL 0x0FFF
+#define IXGBE_CONTROL_SOL_NL 0x0000
+
/* General purpose Interrupt Enable */
#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
@@ -585,8 +655,8 @@
#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* FCRTL Bit Masks */
-#define IXGBE_FCRTL_XONE 0x80000000 /* bit 31, XON enable */
-#define IXGBE_FCRTH_FCEN 0x80000000 /* Rx Flow control enable */
+#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
+#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
/* PAP bit masks*/
#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
@@ -596,8 +666,8 @@
/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
#define IXGBE_RMCS_RAC 0x00000004
#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
-#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority flow control ena */
-#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */
+#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */
+#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
@@ -694,17 +764,20 @@
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
/* STATUS Bit Masks */
-#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
+#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
+#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
/* ESDP Bit Masks */
-#define IXGBE_ESDP_SDP4 0x00000001 /* SDP4 Data Value */
-#define IXGBE_ESDP_SDP5 0x00000002 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000001
+#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
-#define IXGBE_ESDP_SDP5_DIR 0x00000008 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
/* LEDCTL Bit Masks */
#define IXGBE_LED_IVRT_BASE 0x00000040
@@ -727,6 +800,7 @@
#define IXGBE_LED_OFF 0xF
/* AUTOC Bit Masks */
+#define IXGBE_AUTOC_KX4_KX_SUPP 0xC0000000
#define IXGBE_AUTOC_KX4_SUPP 0x80000000
#define IXGBE_AUTOC_KX_SUPP 0x40000000
#define IXGBE_AUTOC_PAUSE 0x30000000
@@ -738,16 +812,16 @@
#define IXGBE_AUTOC_AN_RESTART 0x00001000
#define IXGBE_AUTOC_FLU 0x00000001
#define IXGBE_AUTOC_LMS_SHIFT 13
-#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-
-#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
-#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
+#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
+#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
@@ -1019,6 +1093,7 @@
#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
#define IXGBE_RXD_CFI_SHIFT 12
+
/* SRRCTL bit definitions */
#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
@@ -1062,7 +1137,6 @@
#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
-
/* Masks to determine if packets should be dropped due to frame errors */
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
IXGBE_RXD_ERR_CE | \
@@ -1094,13 +1168,18 @@
#ifndef __le16
/* Little Endian defines */
-#define __le8 u8
#define __le16 u16
#define __le32 u32
#define __le64 u64
#endif
+#ifndef __be16
+/* Big Endian defines */
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+#endif
/* Transmit Descriptor - Legacy */
struct ixgbe_legacy_tx_desc {
@@ -1109,15 +1188,15 @@ struct ixgbe_legacy_tx_desc {
__le32 data;
struct {
__le16 length; /* Data buffer length */
- __le8 cso; /* Checksum offset */
- __le8 cmd; /* Descriptor control */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
} flags;
} lower;
union {
__le32 data;
struct {
- __le8 status; /* Descriptor status */
- __le8 css; /* Checksum start */
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
__le16 vlan;
} fields;
} upper;
@@ -1142,8 +1221,8 @@ struct ixgbe_legacy_rx_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
__le16 length; /* Length of data DMAed into data buffer */
__le16 csum; /* Packet checksum */
- __le8 status; /* Descriptor status */
- __le8 errors; /* Descriptor Errors */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
__le16 vlan;
};
@@ -1221,7 +1300,7 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
-#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/
#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
@@ -1236,6 +1315,22 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
+/* Physical layer type */
+typedef u32 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
+#define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
+
+
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
@@ -1253,9 +1348,32 @@ enum ixgbe_phy_type {
ixgbe_phy_tn,
ixgbe_phy_qt,
ixgbe_phy_xaui,
+ ixgbe_phy_nl,
+ ixgbe_phy_tw_tyco,
+ ixgbe_phy_tw_unknown,
+ ixgbe_phy_sfp_avago,
+ ixgbe_phy_sfp_ftl,
+ ixgbe_phy_sfp_unknown,
ixgbe_phy_generic
};
+/*
+ * SFP+ module type IDs:
+ *
+ * ID Module Type
+ * =============
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ */
+enum ixgbe_sfp_type {
+ ixgbe_sfp_type_da_cu = 0,
+ ixgbe_sfp_type_sr = 1,
+ ixgbe_sfp_type_lr = 2,
+ ixgbe_sfp_type_not_present = 0xFFFE,
+ ixgbe_sfp_type_unknown = 0xFFFF
+};
+
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
@@ -1265,7 +1383,7 @@ enum ixgbe_media_type {
};
/* Flow Control Settings */
-enum ixgbe_fc_type {
+enum ixgbe_fc_mode {
ixgbe_fc_none = 0,
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
@@ -1321,6 +1439,8 @@ struct ixgbe_bus_info {
enum ixgbe_bus_speed speed;
enum ixgbe_bus_width width;
enum ixgbe_bus_type type;
+
+ u16 func;
};
/* Flow control parameters */
@@ -1330,8 +1450,8 @@ struct ixgbe_fc_info {
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
- enum ixgbe_fc_type type; /* Type of flow control */
- enum ixgbe_fc_type original_type;
+ enum ixgbe_fc_mode current_mode; /* FC mode in effect */
+ enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
};
/* Statistics counters collected by the MAC */
@@ -1415,9 +1535,11 @@ struct ixgbe_mac_operations {
s32 (*start_hw)(struct ixgbe_hw *);
s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
+ void (*set_lan_id)(struct ixgbe_hw *);
s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
@@ -1457,6 +1579,7 @@ struct ixgbe_mac_operations {
struct ixgbe_phy_operations {
s32 (*identify)(struct ixgbe_hw *);
+ s32 (*identify_sfp)(struct ixgbe_hw *);
s32 (*reset)(struct ixgbe_hw *);
s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
@@ -1465,6 +1588,10 @@ struct ixgbe_phy_operations {
bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
+ s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
+ s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
};
struct ixgbe_eeprom_info {
@@ -1488,9 +1615,10 @@ struct ixgbe_mac_info {
u32 max_rx_queues;
u32 link_attach_type;
u32 link_mode_select;
+ u32 link_kx4_kx_supp;
bool link_settings_loaded;
bool autoneg;
- bool autoneg_failed;
+ bool autoneg_succeeded;
};
struct ixgbe_phy_info {
@@ -1498,11 +1626,13 @@ struct ixgbe_phy_info {
enum ixgbe_phy_type type;
u32 addr;
u32 id;
+ enum ixgbe_sfp_type sfp_type;
u32 revision;
enum ixgbe_media_type media_type;
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
bool autoneg_wait_to_complete;
+ bool multispeed_fiber;
};
struct ixgbe_hw {
@@ -1523,7 +1653,7 @@ struct ixgbe_hw {
};
#define ixgbe_call_func(hw, func, params, error) \
- (func != NULL) ? func params: error
+ (func != NULL) ? func params : error
/* Error Codes */
#define IXGBE_SUCCESS 0
@@ -1544,6 +1674,9 @@ struct ixgbe_hw {
#define IXGBE_ERR_RESET_FAILED -15
#define IXGBE_ERR_SWFW_SYNC -16
#define IXGBE_ERR_PHY_ADDR_INVALID -17
+#define IXGBE_ERR_I2C -18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
+#define IXGBE_ERR_SFP_NOT_PRESENT -20
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#define UNREFERENCED_PARAMETER(_p)
OpenPOWER on IntegriCloud