author     jfv <jfv@FreeBSD.org>    2008-07-30 18:15:18 +0000
committer  jfv <jfv@FreeBSD.org>    2008-07-30 18:15:18 +0000
commit     6cfc47d2bdeb65da7831a9d200a136973967c8f4 (patch)
tree       074d02c68ee1ec77108bc790582e1c4eb9b95287
parent     7fe3be07e011469d1b4c35c6bb275a7fcff37856 (diff)
This updates the ixgbe driver to Intel internal version 1.4.7.
Shared code changes, core driver fixes, and VLAN event/filter support. Also includes Kip Macy's fix to allow any number of queues, thanks Kip!
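
As background for the VLAN event/filter changes: the driver now registers with the kernel's vlan_config/vlan_unconfig event handlers instead of programming VLNCTRL unconditionally at init time. The sketch below condenses the relevant ixgbe.c hunks from the diff; it is illustrative only, not a drop-in copy of the driver code.

    #ifdef IXGBE_VLAN_EVENTS
    /* Called by the kernel whenever a vlan(4) interface is configured on us */
    static void
    ixgbe_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
    {
            struct adapter *adapter = ifp->if_softc;
            u32 ctrl;

            /* Enable tag stripping and the VLAN filter table */
            ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
            ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
            ctrl &= ~IXGBE_VLNCTRL_CFIEN;
            IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);

            /* Add this VLAN id to the hardware filter table */
            ixgbe_set_vfta(&adapter->hw, vtag, 0, TRUE);
    }

    /* In ixgbe_attach(): hook the handlers into the kernel VLAN events */
    adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
        ixgbe_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
    adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
        ixgbe_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
    #endif
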
Diffstat (limited to 'sys/dev/ixgbe')
-rw-r--r--  sys/dev/ixgbe/ixgbe.c         265
-rw-r--r--  sys/dev/ixgbe/ixgbe.h           3
-rw-r--r--  sys/dev/ixgbe/ixgbe_82598.c   143
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c      39
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h       3
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c  139
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.h    4
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.c     146
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.h       1
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h     76
10 files changed, 396 insertions, 423 deletions
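
The "any number of queues" fix shows up in ixgbe_initialize_receive_units(): the hard-coded RETA patterns for 2/4/8 RX queues are replaced by a loop that fills all 128 redirection-table entries round-robin over however many queues are configured. A condensed sketch, taken from the hunk further down and shown here only for orientation:

    union {
            u8  c[128];
            u32 i[32];
    } reta;
    int i, j;

    /* Assign each of the 128 hash buckets to an RX queue, round-robin */
    for (i = 0; i < 128; )
            for (j = 0; j < adapter->num_rx_queues && i < 128; j++, i++)
                    reta.c[i] = j;

    /* Program the redirection table 32 bits at a time */
    for (i = 0; i < 32; i++)
            IXGBE_WRITE_REG(&adapter->hw, IXGBE_RETA(i), reta.i[i]);
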
diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/ixgbe.c
index 9055fc2..26f4066 100644
--- a/sys/dev/ixgbe/ixgbe.c
+++ b/sys/dev/ixgbe/ixgbe.c
@@ -36,6 +36,9 @@
#include "opt_device_polling.h"
#endif
+/* Undefine this if not using CURRENT */
+#define IXGBE_VLAN_EVENTS
+
#include "ixgbe.h"
/*********************************************************************
@@ -46,7 +49,7 @@ int ixgbe_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
-char ixgbe_driver_version[] = "1.4.4";
+char ixgbe_driver_version[] = "1.4.7";
/*********************************************************************
* PCI Device ID Table
@@ -133,7 +136,6 @@ static void ixgbe_print_hw_stats(struct adapter *);
static void ixgbe_print_debug_info(struct adapter *);
static void ixgbe_update_link_status(struct adapter *);
static int ixgbe_get_buf(struct rx_ring *, int);
-static void ixgbe_enable_hw_vlans(struct adapter * adapter);
static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
static int ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
@@ -149,6 +151,11 @@ static void ixgbe_set_ivar(struct adapter *, u16, u8);
static void ixgbe_configure_ivars(struct adapter *);
static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
+#ifdef IXGBE_VLAN_EVENTS
+static void ixgbe_register_vlan(void *, struct ifnet *, u16);
+static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
+#endif
+
/* Legacy (single vector interrupt handler */
static void ixgbe_legacy_irq(void *);
@@ -157,9 +164,9 @@ static void ixgbe_msix_tx(void *);
static void ixgbe_msix_rx(void *);
static void ixgbe_msix_link(void *);
+/* Legacy interrupts use deferred handlers */
static void ixgbe_handle_tx(void *context, int pending);
static void ixgbe_handle_rx(void *context, int pending);
-static void ixgbe_handle_link(void *context, int pending);
#ifndef NO_82598_A0_SUPPORT
static void desc_flip(void *);
@@ -206,7 +213,7 @@ TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
* interface must be reset (down/up) for it
* to take effect.
*/
-static int ixgbe_enable_lro = 1;
+static int ixgbe_enable_lro = 0;
TUNABLE_INT("hw.ixgbe.enable_lro", &ixgbe_enable_lro);
/*
@@ -222,7 +229,7 @@ TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
*/
static int ixgbe_tx_queues = 1;
TUNABLE_INT("hw.ixgbe.tx_queues", &ixgbe_tx_queues);
-static int ixgbe_rx_queues = 1;
+static int ixgbe_rx_queues = 4;
TUNABLE_INT("hw.ixgbe.rx_queues", &ixgbe_rx_queues);
/* Number of TX descriptors per ring */
@@ -257,7 +264,7 @@ ixgbe_probe(device_t dev)
u_int16_t pci_device_id = 0;
u_int16_t pci_subvendor_id = 0;
u_int16_t pci_subdevice_id = 0;
- char adapter_name[60];
+ char adapter_name[128];
INIT_DEBUGOUT("ixgbe_probe: begin");
@@ -452,6 +459,14 @@ ixgbe_attach(device_t dev)
/* Initialize statistics */
ixgbe_update_stats_counters(adapter);
+#ifdef IXGBE_VLAN_EVENTS
+ /* Register for VLAN events */
+ adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+ ixgbe_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
+ adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+ ixgbe_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
+#endif
+
/* let hardware know driver is loaded */
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
@@ -503,17 +518,28 @@ ixgbe_detach(device_t dev)
IXGBE_CORE_UNLOCK(adapter);
for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
- taskqueue_drain(txr->tq, &txr->tx_task);
- taskqueue_free(txr->tq);
- txr->tq = NULL;
+ if (txr->tq) {
+ taskqueue_drain(txr->tq, &txr->tx_task);
+ taskqueue_free(txr->tq);
+ txr->tq = NULL;
+ }
}
for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
- taskqueue_drain(rxr->tq, &rxr->rx_task);
- taskqueue_free(rxr->tq);
- rxr->tq = NULL;
+ if (rxr->tq) {
+ taskqueue_drain(rxr->tq, &rxr->rx_task);
+ taskqueue_free(rxr->tq);
+ rxr->tq = NULL;
+ }
}
- taskqueue_drain(taskqueue_fast, &adapter->link_task);
+
+#ifdef IXGBE_VLAN_EVENTS
+ /* Unregister VLAN events */
+ if (adapter->vlan_attach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
+ if (adapter->vlan_detach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
+#endif
/* let hardware know driver is unloading */
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
@@ -745,7 +771,7 @@ ixgbe_watchdog(struct adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
bool tx_hang = FALSE;
- mtx_assert(&adapter->core_mtx, MA_OWNED);
+ IXGBE_CORE_LOCK_ASSERT(adapter);
/*
* The timer is set to 5 every time ixgbe_start() queues a packet.
@@ -755,10 +781,22 @@ ixgbe_watchdog(struct adapter *adapter)
* set to 0.
*/
for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
- if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
+ u32 head, tail;
+
+ IXGBE_TX_LOCK(txr);
+ if (txr->watchdog_timer == 0 || --txr->watchdog_timer) {
+ IXGBE_TX_UNLOCK(txr);
continue;
- else {
+ } else {
+ head = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+ tail = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+ if (head == tail) { /* last minute check */
+ IXGBE_TX_UNLOCK(txr);
+ continue;
+ }
+ /* Well, seems something is really hung */
tx_hang = TRUE;
+ IXGBE_TX_UNLOCK(txr);
break;
}
}
@@ -770,8 +808,12 @@ ixgbe_watchdog(struct adapter *adapter)
* reset the hardware.
*/
if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
+ txr = adapter->tx_rings; /* reset pointer */
+ for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ IXGBE_TX_LOCK(txr);
txr->watchdog_timer = IXGBE_TX_TIMEOUT;
+ IXGBE_TX_UNLOCK(txr);
+ }
return;
}
@@ -781,14 +823,14 @@ ixgbe_watchdog(struct adapter *adapter)
device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
IXGBE_READ_REG(hw, IXGBE_TDH(i)),
IXGBE_READ_REG(hw, IXGBE_TDT(i)));
- device_printf(dev,"TX(%d) desc avail = %d, Next TX to Clean = %d\n",
+ device_printf(dev,"TX(%d) desc avail = %d,"
+ "Next TX to Clean = %d\n",
i, txr->tx_avail, txr->next_tx_to_clean);
}
adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
adapter->watchdog_events++;
ixgbe_init_locked(adapter);
-
}
/*********************************************************************
@@ -830,8 +872,16 @@ ixgbe_init_locked(struct adapter *adapter)
return;
}
- if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
- ixgbe_enable_hw_vlans(adapter);
+#ifndef IXGBE_VLAN_EVENTS
+ /* With events this is done when a vlan registers */
+ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
+ u32 ctrl;
+ ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+ ctrl |= IXGBE_VLNCTRL_VME;
+ ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ }
+#endif
/* Prepare transmit descriptors and buffers */
if (ixgbe_setup_transmit_structures(adapter)) {
@@ -934,39 +984,19 @@ ixgbe_init(void *arg)
/*
-** Deferred Interrupt Handlers
+** Legacy Deferred Interrupt Handlers
*/
static void
-ixgbe_handle_link(void *context, int pending)
-{
- struct adapter *adapter = context;
- struct ifnet *ifp = adapter->ifp;
-
- IXGBE_CORE_LOCK(adapter);
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- goto out;
-
- callout_stop(&adapter->timer);
- ixgbe_update_link_status(adapter);
- callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
-out:
- IXGBE_CORE_UNLOCK(adapter);
- return;
-}
-
-static void
ixgbe_handle_rx(void *context, int pending)
{
struct rx_ring *rxr = context;
struct adapter *adapter = rxr->adapter;
- struct ifnet *ifp = adapter->ifp;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0)
- /* More to clean, schedule another task */
- taskqueue_enqueue(rxr->tq, &rxr->rx_task);
+ u32 loop = 0;
+ while (loop++ < MAX_INTR)
+ if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
+ break;
}
static void
@@ -975,15 +1005,15 @@ ixgbe_handle_tx(void *context, int pending)
struct tx_ring *txr = context;
struct adapter *adapter = txr->adapter;
struct ifnet *ifp = adapter->ifp;
+ u32 loop = 0;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXGBE_TX_LOCK(txr);
- if (ixgbe_txeof(txr) != 0)
- taskqueue_enqueue(txr->tq, &txr->tx_task);
+ while (loop++ < MAX_INTR)
+ if (ixgbe_txeof(txr) == 0)
+ break;
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
ixgbe_start_locked(txr, ifp);
IXGBE_TX_UNLOCK(txr);
- }
}
@@ -1022,7 +1052,7 @@ ixgbe_legacy_irq(void *arg)
}
/* Link status change */
if (reg_eicr & IXGBE_EICR_LSC)
- taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
+ ixgbe_update_link_status(adapter);
return;
}
@@ -1039,15 +1069,14 @@ ixgbe_msix_tx(void *arg)
{
struct tx_ring *txr = arg;
struct adapter *adapter = txr->adapter;
- struct ifnet *ifp = adapter->ifp;
+ u32 loop = 0;
++txr->tx_irq;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXGBE_TX_LOCK(txr);
- if (ixgbe_txeof(txr) != 0)
- taskqueue_enqueue(txr->tq, &txr->tx_task);
- IXGBE_TX_UNLOCK(txr);
- }
+ IXGBE_TX_LOCK(txr);
+ while (loop++ < MAX_INTR)
+ if (ixgbe_txeof(txr) == 0)
+ break;
+ IXGBE_TX_UNLOCK(txr);
/* Reenable this interrupt */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
@@ -1065,12 +1094,12 @@ ixgbe_msix_rx(void *arg)
{
struct rx_ring *rxr = arg;
struct adapter *adapter = rxr->adapter;
- struct ifnet *ifp = adapter->ifp;
+ u32 loop = 0;
++rxr->rx_irq;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0)
- taskqueue_enqueue(rxr->tq, &rxr->rx_task);
+ while (loop++ < MAX_INTR)
+ if (ixgbe_rxeof(rxr, adapter->rx_process_limit) == 0)
+ break;
/* Reenable this interrupt */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
return;
@@ -1088,7 +1117,7 @@ ixgbe_msix_link(void *arg)
reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (reg_eicr & IXGBE_EICR_LSC)
- taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
+ ixgbe_update_link_status(adapter);
/* Check for fan failure */
if ((hw->phy.media_type == ixgbe_media_type_copper) &&
@@ -1097,6 +1126,7 @@ ixgbe_msix_link(void *arg)
"REPLACE IMMEDIATELY!!\n");
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
}
+
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
return;
}
@@ -1612,7 +1642,6 @@ ixgbe_allocate_legacy(struct adapter *adapter)
*/
TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
- TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
taskqueue_thread_enqueue, &txr->tq);
rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
@@ -1672,12 +1701,6 @@ ixgbe_allocate_msix(struct adapter *adapter)
}
txr->msix = vector;
txr->eims = IXGBE_IVAR_TX_QUEUE(vector);
- /* Make tasklet for deferred handling - one per queue */
- TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
- txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
- taskqueue_thread_enqueue, &txr->tq);
- taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s rxq",
- device_get_nameunit(adapter->dev));
}
/* RX setup */
@@ -1702,11 +1725,6 @@ ixgbe_allocate_msix(struct adapter *adapter)
}
rxr->msix = vector;
rxr->eims = IXGBE_IVAR_RX_QUEUE(vector);
- TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
- rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
- taskqueue_thread_enqueue, &rxr->tq);
- taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
- device_get_nameunit(adapter->dev));
}
/* Now for Link changes */
@@ -1727,7 +1745,6 @@ ixgbe_allocate_msix(struct adapter *adapter)
return (error);
}
adapter->linkvec = vector;
- TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
return (0);
}
@@ -2319,9 +2336,8 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
/* Setup the Base and Length of the Tx Descriptor Ring */
for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
- u64 tdba = txr->txdma.dma_paddr;
+ u64 txhwb = 0, tdba = txr->txdma.dma_paddr;
u32 txctrl;
- vm_paddr_t txhwb = 0;
IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
(tdba & 0x00000000ffffffffULL));
@@ -2330,7 +2346,7 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
/* Setup for Head WriteBack */
- txhwb = vtophys(&txr->tx_hwb);
+ txhwb = (u64)vtophys(&txr->tx_hwb);
txhwb |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(i),
(txhwb & 0x00000000ffffffffULL));
@@ -2670,7 +2686,8 @@ ixgbe_txeof(struct tx_ring *txr)
{
struct adapter * adapter = txr->adapter;
struct ifnet *ifp = adapter->ifp;
- int first, last, done, num_avail;
+ u32 first, last, done, num_avail;
+ u32 cleaned = 0;
struct ixgbe_tx_buf *tx_buffer;
struct ixgbe_legacy_tx_desc *tx_desc;
@@ -2699,7 +2716,7 @@ ixgbe_txeof(struct tx_ring *txr)
tx_desc->upper.data = 0;
tx_desc->lower.data = 0;
tx_desc->buffer_addr = 0;
- num_avail++;
+ num_avail++; cleaned++;
if (tx_buffer->m_head) {
ifp->if_opackets++;
@@ -2746,11 +2763,11 @@ ixgbe_txeof(struct tx_ring *txr)
txr->tx_avail = num_avail;
return FALSE;
}
- /* Some were cleaned, so reset timer */
- else if (num_avail != txr->tx_avail)
- txr->watchdog_timer = IXGBE_TX_TIMEOUT;
}
+ /* Some were cleaned, so reset timer */
+ if (cleaned)
+ txr->watchdog_timer = IXGBE_TX_TIMEOUT;
txr->tx_avail = num_avail;
return TRUE;
}
@@ -3065,8 +3082,13 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
struct rx_ring *rxr = adapter->rx_rings;
struct ifnet *ifp = adapter->ifp;
u32 rxctrl, fctrl, srrctl, rxcsum;
- u32 reta, mrqc, hlreg, linkvec;
+ u32 mrqc, hlreg, linkvec;
u32 random[10];
+ int i,j;
+ union {
+ u8 c[128];
+ u32 i[32];
+ } reta;
/*
@@ -3100,7 +3122,7 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
/* Set Queue moderation rate */
- for (int i = 0; i < IXGBE_MSGS; i++)
+ for (i = 0; i < IXGBE_MSGS; i++)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(i), DEFAULT_ITR);
/* Set Link moderation lower */
@@ -3127,27 +3149,16 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
if (adapter->num_rx_queues > 1) {
/* set up random bits */
arc4rand(&random, sizeof(random), 0);
- switch (adapter->num_rx_queues) {
- case 8:
- case 4:
- reta = 0x00010203;
- break;
- case 2:
- reta = 0x00010001;
- break;
- default:
- reta = 0x00000000;
- }
+
+ /* Create reta data */
+ for (i = 0; i < 128; )
+ for (j = 0; j < adapter->num_rx_queues &&
+ i < 128; j++, i++)
+ reta.c[i] = j;
/* Set up the redirection table */
- for (int i = 0; i < 32; i++) {
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RETA(i), reta);
- if (adapter->num_rx_queues > 4) {
- ++i;
- IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_RETA(i), 0x04050607);
- }
- }
+ for (i = 0; i < 32; i++)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RETA(i), reta.i[i]);
/* Now fill our hash function seeds */
for (int i = 0; i < 10; i++)
@@ -3459,22 +3470,50 @@ ixgbe_rx_checksum(struct adapter *adapter,
return;
}
-
+#ifdef IXGBE_VLAN_EVENTS
+/*
+ * This routine is run via an vlan
+ * config EVENT
+ */
static void
-ixgbe_enable_hw_vlans(struct adapter *adapter)
+ixgbe_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
- u32 ctrl;
+ struct adapter *adapter = ifp->if_softc;
+ u32 ctrl;
- ixgbe_disable_intr(adapter);
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
- ctrl |= IXGBE_VLNCTRL_VME;
+ ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
ctrl &= ~IXGBE_VLNCTRL_CFIEN;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
- ixgbe_enable_intr(adapter);
- return;
+ /* Make entry in the hardware filter table */
+ ixgbe_set_vfta(&adapter->hw, vtag, 0, TRUE);
}
+/*
+ * This routine is run via an vlan
+ * unconfig EVENT
+ */
+static void
+ixgbe_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
+{
+ struct adapter *adapter = ifp->if_softc;
+
+ /* Remove entry in the hardware filter table */
+ ixgbe_set_vfta(&adapter->hw, vtag, 0, FALSE);
+
+ /* Have all vlans unregistered? */
+ if (adapter->ifp->if_vlantrunk == NULL) {
+ u32 ctrl;
+ /* Turn off the filter table */
+ ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+ ctrl &= ~IXGBE_VLNCTRL_VME;
+ ctrl &= ~IXGBE_VLNCTRL_VFE;
+ ctrl |= IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ }
+}
+#endif /* IXGBE_VLAN_EVENTS */
static void
ixgbe_enable_intr(struct adapter *adapter)
@@ -3738,7 +3777,7 @@ ixgbe_print_debug_info(struct adapter *adapter)
IXGBE_READ_REG(hw, IXGBE_TDH(i)),
IXGBE_READ_REG(hw, IXGBE_TDT(i)));
device_printf(dev,"TX(%d) Packets Sent: %lu\n",
- txr->me, (long)txr->tx_irq);
+ txr->me, (long)txr->tx_packets);
device_printf(dev,"TX(%d) IRQ Handled: %lu\n",
txr->me, (long)txr->tx_irq);
device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index c8a9197..32a8160 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -351,6 +351,9 @@ struct adapter {
int num_rx_queues;
u32 rx_process_limit;
+ eventhandler_tag vlan_attach;
+ eventhandler_tag vlan_detach;
+
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_alloc_failed;
diff --git a/sys/dev/ixgbe/ixgbe_82598.c b/sys/dev/ixgbe/ixgbe_82598.c
index b7f8224..b1307b3 100644
--- a/sys/dev/ixgbe/ixgbe_82598.c
+++ b/sys/dev/ixgbe/ixgbe_82598.c
@@ -66,6 +66,10 @@ s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
s32 ixgbe_configure_fiber_serdes_fc_82598(struct ixgbe_hw *hw);
s32 ixgbe_setup_fiber_serdes_link_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index);
@@ -102,6 +106,9 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
+ mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
+ mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+ mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
/* Flow Control */
mac->ops.setup_fc = &ixgbe_setup_fc_82598;
@@ -116,9 +123,6 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;
break;
- case ixgbe_phy_nl:
- phy->ops.reset = &ixgbe_reset_phy_nl;
- break;
default:
break;
}
@@ -253,8 +257,6 @@ enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
case IXGBE_DEV_ID_82598EB_CX4:
case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
- case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
- case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
case IXGBE_DEV_ID_82598EB_XF_LR:
media_type = ixgbe_media_type_fiber;
break;
@@ -477,40 +479,6 @@ s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
{
u32 links_reg;
u32 i;
- u16 link_reg, adapt_comp_reg;
-
- if (hw->phy.type == ixgbe_phy_nl) {
- hw->phy.ops.read_reg(hw, 1, IXGBE_TWINAX_DEV, &link_reg);
- hw->phy.ops.read_reg(hw, 1, IXGBE_TWINAX_DEV, &link_reg);
- hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
- &adapt_comp_reg);
- if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
- if ((link_reg & (1 << 2)) &&
- ((adapt_comp_reg & 1) == 0)) {
- *link_up = TRUE;
- break;
- } else {
- *link_up = FALSE;
- }
- msec_delay(100);
- hw->phy.ops.read_reg(hw, 1, IXGBE_TWINAX_DEV,
- &link_reg);
- hw->phy.ops.read_reg(hw, 0xC00C,
- IXGBE_TWINAX_DEV,
- &adapt_comp_reg);
- }
- } else {
- if ((link_reg & (1 << 2)) &&
- ((adapt_comp_reg & 1) == 0))
- *link_up = TRUE;
- else
- *link_up = FALSE;
- }
-
- if (*link_up == FALSE)
- goto out;
- }
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
if (link_up_wait_to_complete) {
@@ -536,7 +504,6 @@ s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
-out:
return IXGBE_SUCCESS;
}
@@ -1063,6 +1030,102 @@ s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/**
+ * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
+ **/
+s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ UNREFERENCED_PARAMETER(vmdq);
+
+ if (rar < rar_entries) {
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ if (rar_high & IXGBE_RAH_VIND_MASK) {
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+ }
+ } else {
+ DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_vfta_82598 - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFTA
+ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ u32 regindex;
+ u32 bitindex;
+ u32 bits;
+ u32 vftabyte;
+
+ if (vlan < 1 || vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /* Determine 32-bit word position in array */
+ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
+
+ /* Determine the location of the (VMD) queue index */
+ vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+ bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
+
+ /* Set the nibble for VMD queue index */
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
+ bits &= (~(0x0F << bitindex));
+ bits |= (vind << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
+
+ /* Determine the location of the bit for this VLAN id */
+ bitindex = vlan & 0x1F; /* lower five bits */
+
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+ if (vlan_on)
+ /* Turn on this VLAN id */
+ bits |= (1 << bitindex);
+ else
+ /* Turn off this VLAN id */
+ bits &= ~(1 << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_vfta_82598 - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+{
+ u32 offset;
+ u32 vlanbyte;
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+ 0);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_blink_led_start_82598 - Blink LED based on index.
* @hw: pointer to hardware structure
* @index: led number to blink
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index 570d480..7c965b3 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -91,8 +91,6 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82598AT_DUAL_PORT:
case IXGBE_DEV_ID_82598EB_CX4:
case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
- case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
- case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
case IXGBE_DEV_ID_82598EB_XF_LR:
hw->mac.type = ixgbe_mac_82598EB;
break;
@@ -574,6 +572,19 @@ s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
}
/**
+ * ixgbe_clear_rar - Clear Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_set_vmdq - Associate a VMDq index with a receive address
* @hw: pointer to hardware structure
* @rar: receive address register index to associate with VMDq index
@@ -586,6 +597,18 @@ s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/**
+ * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
+ * @hw: pointer to hardware structure
+ * @rar: receive address register index to disassociate with VMDq index
+ * @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_init_rx_addrs - Initializes receive address filters.
* @hw: pointer to hardware structure
*
@@ -739,3 +762,15 @@ s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
val), IXGBE_NOT_IMPLEMENTED);
}
+/**
+ * ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the Unicast Table Arrays to zero on device load. This
+ * is part of the Rx init addr execution path.
+ **/
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index 79a8779..f91f0e6 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -87,7 +87,9 @@ s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
@@ -107,5 +109,6 @@ s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
#endif /* _IXGBE_API_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index 191cefd..fd4351d 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -99,14 +99,17 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
/* RAR, Multicast, VLAN */
mac->ops.set_rar = &ixgbe_set_rar_generic;
+ mac->ops.clear_rar = &ixgbe_clear_rar_generic;
mac->ops.set_vmdq = NULL;
+ mac->ops.clear_vmdq = NULL;
mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
mac->ops.enable_mc = &ixgbe_enable_mc_generic;
mac->ops.disable_mc = &ixgbe_disable_mc_generic;
- mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
- mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+ mac->ops.clear_vfta = NULL;
+ mac->ops.set_vfta = NULL;
+ mac->ops.init_uta_tables = NULL;
/* Flow Control */
mac->ops.setup_fc = NULL;
@@ -480,6 +483,9 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
if (eeprom->type == ixgbe_eeprom_uninitialized) {
eeprom->type = ixgbe_eeprom_none;
+ /* Set default semaphore delay to 10ms which is a well
+ * tested value */
+ eeprom->semaphore_delay = 10;
/*
* Check for EEPROM present first.
@@ -569,8 +575,7 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
ixgbe_shift_out_eeprom_bits(hw, data, 16);
ixgbe_standby_eeprom(hw);
- msec_delay(10);
-
+ msec_delay(hw->eeprom.semaphore_delay);
/* Done with writing - release the EEPROM */
ixgbe_release_eeprom(hw);
}
@@ -1230,13 +1235,47 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
} else {
- DEBUGOUT("Current RAR index is out of range.");
+ DEBUGOUT1("RAR index %d is out of range.\n", index);
}
return IXGBE_SUCCESS;
}
/**
+ * ixgbe_clear_rar_generic - Remove Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ *
+ * Clears an ethernet address from a receive address register.
+ **/
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (index < rar_entries) {
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+ } else {
+ DEBUGOUT1("RAR index %d is out of range.\n", index);
+ }
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_enable_rar - Enable Rx address register
* @hw: pointer to hardware structure
* @index: index into the RAR table
@@ -1327,6 +1366,8 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
for (i = 0; i < hw->mac.mcft_size; i++)
IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+ ixgbe_init_uta_tables(hw);
+
return IXGBE_SUCCESS;
}
@@ -1644,73 +1685,6 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_clear_vfta_generic - Clear VLAN filter table
- * @hw: pointer to hardware structure
- *
- * Clears the VLAN filer table, and the VMDq index associated with the filter
- **/
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
-{
- u32 offset;
- u32 vlanbyte;
-
- for (offset = 0; offset < hw->mac.vft_size; offset++)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
-
- for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
- for (offset = 0; offset < hw->mac.vft_size; offset++)
- IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
- 0);
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_set_vfta_generic - Set VLAN filter table
- * @hw: pointer to hardware structure
- * @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFTA
- * @vlan_on: boolean flag to turn on/off VLAN in VFTA
- *
- * Turn on/off specified VLAN in the VLAN filter table.
- **/
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
-{
- u32 VftaIndex;
- u32 BitOffset;
- u32 VftaReg;
- u32 VftaByte;
-
- /* Determine 32-bit word position in array */
- VftaIndex = (vlan >> 5) & 0x7F; /* upper seven bits */
-
- /* Determine the location of the (VMD) queue index */
- VftaByte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
- BitOffset = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
-
- /* Set the nibble for VMD queue index */
- VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex));
- VftaReg &= (~(0x0F << BitOffset));
- VftaReg |= (vind << BitOffset);
- IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg);
-
- /* Determine the location of the bit for this VLAN id */
- BitOffset = vlan & 0x1F; /* lower five bits */
-
- VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex));
- if (vlan_on)
- /* Turn on this VLAN id */
- VftaReg |= (1 << BitOffset);
- else
- /* Turn off this VLAN id */
- VftaReg &= ~(1 << BitOffset);
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg);
-
- return IXGBE_SUCCESS;
-}
-
-/**
* ixgbe_disable_pcie_master - Disable PCI-express master access
* @hw: pointer to hardware structure
*
@@ -1721,13 +1695,24 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
**/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
- u32 ctrl;
- s32 i;
+ u32 i;
+ u32 reg_val;
+ u32 number_of_queues;
s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- ctrl |= IXGBE_CTRL_GIO_DIS;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ /* Disable the receive unit by stopping each queue */
+ number_of_queues = hw->mac.max_rx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ if (reg_val & IXGBE_RXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
+ }
+ }
+
+ reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ reg_val |= IXGBE_CTRL_GIO_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
diff --git a/sys/dev/ixgbe/ixgbe_common.h b/sys/dev/ixgbe/ixgbe_common.h
index c916d93..0307624 100644
--- a/sys/dev/ixgbe/ixgbe_common.h
+++ b/sys/dev/ixgbe/ixgbe_common.h
@@ -60,6 +60,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count,
@@ -68,9 +69,6 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
u32 addr_count, ixgbe_mc_addr_itr func);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c
index 134e310..37c78eb 100644
--- a/sys/dev/ixgbe/ixgbe_phy.c
+++ b/sys/dev/ixgbe/ixgbe_phy.c
@@ -151,9 +151,6 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case QT2022_PHY_ID:
phy_type = ixgbe_phy_qt;
break;
- case ATH_PHY_ID:
- phy_type = ixgbe_phy_nl;
- break;
default:
phy_type = ixgbe_phy_unknown;
break;
@@ -526,146 +523,3 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
return status;
}
-/**
- * ixgbe_reset_phy_nl - Performs a PHY reset
- * @hw: pointer to hardware structure
- **/
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
-{
- u16 phy_offset, control, eword, edata, list_crc, block_crc, id, sfp_id;
- bool end_data = FALSE;
- u16 list_offset, data_offset;
- u16 phy_data = 0;
- s32 ret_val = IXGBE_SUCCESS;
- u32 i;
-
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
-
- /* reset the PHY and poll for completion */
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE,
- (phy_data | IXGBE_MDIO_PHY_XS_RESET));
-
- for (i = 0; i < 100; i++) {
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
- if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0 )
- break;
- msec_delay(10);
- }
-
- if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
- DEBUGOUT("PHY reset did not complete.\n");
- ret_val = IXGBE_ERR_PHY;
- goto out;
- }
-
- /* read offset to PHY init contents */
- hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, &list_offset);
-
- if ((!list_offset) || (list_offset == 0xFFFF)) {
- ret_val = IXGBE_ERR_PHY;
- goto out;
- }
-
- /* Acquire the CRC */
- hw->eeprom.ops.read(hw, list_offset, &list_crc);
-
- /* Shift offset to first ID word */
- list_offset++;
-
- /* determine the sfp sequence based on device ID */
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
- sfp_id = 0;
- break;
- case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
- sfp_id = 1;
- break;
- default:
- ret_val = IXGBE_ERR_PHY;
- goto out;
- }
-
- /*
- * Find the matching sfp ID in the EEPROM
- * and program the init sequence
- */
- hw->eeprom.ops.read(hw, list_offset, &id);
-
- while (!((id == IXGBE_CONTROL_EOL_NL) || (end_data == TRUE))) {
- if (id == sfp_id) {
- list_offset++;
- hw->eeprom.ops.read(hw, list_offset, &data_offset);
- if ((!data_offset) || (data_offset == 0xFFFF))
- break;
- ret_val = hw->eeprom.ops.read(hw, data_offset,
- &block_crc);
- data_offset++;
- while (!end_data) {
- /*
- * Read control word from PHY init contents
- * offset
- */
- ret_val = hw->eeprom.ops.read(hw, data_offset,
- &eword);
- control = (eword & IXGBE_CONTROL_MASK_NL) >>
- IXGBE_CONTROL_SHIFT_NL;
- edata = eword & IXGBE_DATA_MASK_NL;
- switch (control) {
- case IXGBE_DELAY_NL:
- data_offset++;
- DEBUGOUT1("DELAY: %d MS\n", edata);
- msec_delay(edata);
- break;
- case IXGBE_DATA_NL:
- DEBUGOUT("DATA: \n");
- data_offset++;
- hw->eeprom.ops.read(hw, data_offset++,
- &phy_offset);
- for (i = 0; i < edata; i++) {
- hw->eeprom.ops.read(hw,
- data_offset,
- &eword);
- hw->phy.ops.write_reg(hw,
- phy_offset,
- IXGBE_TWINAX_DEV,
- eword);
- DEBUGOUT2("Wrote %4.4x to %4.4x\n",
- eword, phy_offset);
- data_offset++;
- phy_offset++;
- }
- break;
- case IXGBE_CONTROL_NL:
- data_offset++;
- DEBUGOUT("CONTROL: \n");
- if (edata == IXGBE_CONTROL_EOL_NL) {
- DEBUGOUT("EOL\n");
- end_data = TRUE;
- } else if (edata == IXGBE_CONTROL_SOL_NL) {
- DEBUGOUT("SOL\n");
- } else {
- DEBUGOUT("Bad control value\n");
- ret_val = IXGBE_ERR_PHY;
- goto out;
- }
- break;
- default:
- DEBUGOUT("Bad control type\n");
- ret_val = IXGBE_ERR_PHY;
- goto out;
- }
- }
- } else {
- list_offset += 2;
- ret_val = hw->eeprom.ops.read(hw, list_offset, &id);
- if (ret_val)
- goto out;
- }
- }
-
-out:
- return ret_val;
-}
diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h
index 0f31dd5..14becb0 100644
--- a/sys/dev/ixgbe/ixgbe_phy.h
+++ b/sys/dev/ixgbe/ixgbe_phy.h
@@ -60,5 +60,4 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version);
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
#endif /* _IXGBE_PHY_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index 8df3f78..ad45aa9 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -47,8 +47,6 @@
#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10D7
#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
-#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
-#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
/* General Registers */
@@ -120,7 +118,8 @@
#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
(((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
(0x0D00C + ((_i - 64) * 0x40))))
-#define IXGBE_RDRXCTL 0x02F00
+#define IXGBE_RDRXCTL 0x02F00
+#define IXGBE_RDRXCTRL_RSC_PUSH 0x80
#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
/* 8 of these 0x03C00 - 0x03C1C */
#define IXGBE_RXCTRL 0x03000
@@ -402,6 +401,10 @@
#define IXGBE_ANLP2 0x042B4
#define IXGBE_ATLASCTL 0x04800
+/* RDRXCTL Bit Masks */
+#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
+#define IXGBE_RDRXCTL_MVMEN 0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
/* CTRL Bit Masks */
#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
@@ -482,7 +485,6 @@
#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
-#define IXGBE_TWINAX_DEV 1
#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
@@ -518,22 +520,10 @@
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define QT2022_PHY_ID 0x0043A400
-#define ATH_PHY_ID 0x03429050
/* PHY Types */
#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
-/* Special PHY Init Routine */
-#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
-#define IXGBE_CONTROL_MASK_NL 0xF000
-#define IXGBE_DATA_MASK_NL 0x0FFF
-#define IXGBE_CONTROL_SHIFT_NL 12
-#define IXGBE_DELAY_NL 0
-#define IXGBE_DATA_NL 1
-#define IXGBE_CONTROL_NL 0x000F
-#define IXGBE_CONTROL_EOL_NL 0x0FFF
-#define IXGBE_CONTROL_SOL_NL 0x0000
-
/* General purpose Interrupt Enable */
#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
@@ -615,10 +605,10 @@
/* Extended Interrupt Cause Read */
#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
-#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
@@ -626,34 +616,34 @@
/* Extended Interrupt Cause Set */
#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* Gen Purpose Interrupt on SDP1 */
-#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
-#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
/* Extended Interrupt Mask Set */
#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* Gen Purpose Interrupt on SDP1 */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
+#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
/* Extended Interrupt Mask Clear */
#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* Gen Purpose Interrupt on SDP1 */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
-#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
@@ -801,7 +791,7 @@
#define IXGBE_PCS1GANA_ASM_PAUSE 0x100
/* PCS1GLCTL Bit Masks */
-#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg timeout enable (bit 18) */
+#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */
#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1
#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20
#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
@@ -926,6 +916,7 @@
#define IXGBE_RAH_VIND_MASK 0x003C0000
#define IXGBE_RAH_VIND_SHIFT 18
#define IXGBE_RAH_AV 0x80000000
+#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
/* Header split receive */
#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
@@ -954,7 +945,7 @@
#define IXGBE_MAX_FRAME_SZ 0x40040000
#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
-#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. # write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
/* Receive Config masks */
#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
@@ -1014,7 +1005,7 @@
#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
-#define IXGBE_RXDADV_HBO 0x00800000
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
@@ -1135,7 +1126,7 @@ struct ixgbe_legacy_tx_desc {
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
- __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 buffer_addr; /* Address of descriptor's data buf */
__le32 cmd_type_len;
__le32 olinfo_status;
} read;
@@ -1167,8 +1158,8 @@ union ixgbe_adv_rx_desc {
union {
__le32 data;
struct {
- __le16 pkt_info; /* RSS type, Packet type */
- __le16 hdr_info; /* Split Header, header len */
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
} hs_rss;
} lo_dword;
union {
@@ -1196,19 +1187,19 @@ struct ixgbe_adv_tx_context_desc {
};
/* Adv Transmit Descriptor Config Masks */
-#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */
+#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
-#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
-#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */
+#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
@@ -1262,7 +1253,6 @@ enum ixgbe_phy_type {
ixgbe_phy_tn,
ixgbe_phy_qt,
ixgbe_phy_xaui,
- ixgbe_phy_nl,
ixgbe_phy_generic
};
@@ -1447,7 +1437,9 @@ struct ixgbe_mac_operations {
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+ s32 (*clear_rar)(struct ixgbe_hw *, u32);
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
ixgbe_mc_addr_itr);
@@ -1457,6 +1449,7 @@ struct ixgbe_mac_operations {
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+ s32 (*init_uta_tables)(struct ixgbe_hw *);
/* Flow Control */
s32 (*setup_fc)(struct ixgbe_hw *, s32);
@@ -1477,6 +1470,7 @@ struct ixgbe_phy_operations {
struct ixgbe_eeprom_info {
struct ixgbe_eeprom_operations ops;
enum ixgbe_eeprom_type type;
+ u32 semaphore_delay;
u16 word_size;
u16 address_bits;
};