author    jfv <jfv@FreeBSD.org>  2010-11-26 22:46:32 +0000
committer jfv <jfv@FreeBSD.org>  2010-11-26 22:46:32 +0000
commit    48bbd73f9a053707dcc36c64c6f44045e8178dc3 (patch)
tree      e2b66ea0565164dc8a27eec1d115e996508a5c83 /sys/dev/ixgbe
parent    dfd9c1b97641876cc379f1deb217b1d89114b6ff (diff)
Update ixgbe driver to version 2.3.6
- This adds a VM SRIOV interface, ixv. It is, however, transparent to the user: it links with ixgbe.ko, but when ixgbe is loaded in a virtualized guest with SRIOV configured, this will be detected.
- Sync shared code to latest.
- Many bug fixes and improvements; thanks to everyone who has been using the driver and reporting issues.
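For context on the detection the log entry describes: an SRIOV virtual function appears to the guest as its own PCI function with a VF-specific device ID, so detecting it reduces to an ordinary vendor/device table match at probe time. A minimal standalone sketch, assuming 0x10ed as the 82599 VF device ID; the helper name and structure are illustrative, not the actual ixv probe code:

    #include <stdbool.h>

    #define INTEL_VENDOR_ID  0x8086
    #define DEV_ID_82599_VF  0x10ed   /* assumed 82599 VF device ID */

    /* True when the PCI IDs identify an 82599 virtual function. */
    static bool
    is_82599_vf_sketch(unsigned short vendor, unsigned short device)
    {
            return (vendor == INTEL_VENDOR_ID && device == DEV_ID_82599_VF);
    }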
Diffstat (limited to 'sys/dev/ixgbe')
-rw-r--r--  sys/dev/ixgbe/ixgbe.c          347
-rw-r--r--  sys/dev/ixgbe/ixgbe.h           22
-rw-r--r--  sys/dev/ixgbe/ixgbe_82598.c    157
-rw-r--r--  sys/dev/ixgbe/ixgbe_82599.c   1105
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c       91
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h       70
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c  1061
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.h    10
-rw-r--r--  sys/dev/ixgbe/ixgbe_mbx.c      743
-rw-r--r--  sys/dev/ixgbe/ixgbe_mbx.h      113
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.h      5
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.c      175
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.h        9
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h     240
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.c       495
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.h       113
-rw-r--r--  sys/dev/ixgbe/ixv.c           3950
-rw-r--r--  sys/dev/ixgbe/ixv.h            414
18 files changed, 7881 insertions, 1239 deletions
diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/ixgbe.c
index f9605e5..b096007 100644
--- a/sys/dev/ixgbe/ixgbe.c
+++ b/sys/dev/ixgbe/ixgbe.c
@@ -46,7 +46,7 @@ int ixgbe_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
-char ixgbe_driver_version[] = "2.2.3";
+char ixgbe_driver_version[] = "2.3.6";
/*********************************************************************
* PCI Device ID Table
@@ -78,6 +78,8 @@ static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -119,7 +121,7 @@ static int ixgbe_allocate_queues(struct adapter *);
static int ixgbe_setup_msix(struct adapter *);
static void ixgbe_free_pci_resources(struct adapter *);
static void ixgbe_local_timer(void *);
-static int ixgbe_setup_interface(device_t, struct adapter *);
+static void ixgbe_setup_interface(device_t, struct adapter *);
static void ixgbe_config_link(struct adapter *);
static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
@@ -144,7 +146,6 @@ static bool ixgbe_txeof(struct tx_ring *);
static bool ixgbe_rxeof(struct ix_queue *, int);
static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
static void ixgbe_set_promisc(struct adapter *);
-static void ixgbe_disable_promisc(struct adapter *);
static void ixgbe_set_multi(struct adapter *);
static void ixgbe_print_hw_stats(struct adapter *);
static void ixgbe_print_debug_info(struct adapter *);
@@ -212,7 +213,7 @@ static driver_t ixgbe_driver = {
"ix", ixgbe_methods, sizeof(struct adapter),
};
-static devclass_t ixgbe_devclass;
+devclass_t ixgbe_devclass;
DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
@@ -257,7 +258,7 @@ TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
/*
* Header split: this causes the hardware to DMA
- * the header into a separate mbuf from the payload,
+ * the header into a seperate mbuf from the payload,
* it can be a performance win in some workloads, but
* in others it actually hurts, its off by default.
*/
@@ -289,13 +290,6 @@ TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
static int ixgbe_total_ports;
/*
-** Shadow VFTA table, this is needed because
-** the real filter table gets cleared during
-** a soft reset and we need to repopulate it.
-*/
-static u32 ixgbe_shadow_vfta[IXGBE_VFTA_SIZE];
-
-/*
** The number of scatter-gather segments
** differs for 82598 and 82599, default to
** the former.
@@ -446,6 +440,7 @@ ixgbe_attach(device_t dev)
ixgbe_num_segs = IXGBE_82599_SCATTER;
adapter->optics = IFM_10G_T;
default:
+ ixgbe_num_segs = IXGBE_82599_SCATTER;
break;
}
@@ -524,15 +519,6 @@ ixgbe_attach(device_t dev)
goto err_out;
}
- /* Allocate multicast array memory. */
- adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
- MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
- if (adapter->mta == NULL) {
- device_printf(dev, "Can not allocate multicast setup array\n");
- error = ENOMEM;
- goto err_late;
- }
-
/* Initialize the shared code */
error = ixgbe_init_shared_code(hw);
if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
@@ -595,8 +581,7 @@ ixgbe_attach(device_t dev)
goto err_late;
/* Setup OS specific network interface */
- if (ixgbe_setup_interface(dev, adapter) != 0)
- goto err_late;
+ ixgbe_setup_interface(dev, adapter);
/* Sysctl for limiting the amount of work done in the taskqueue */
ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
@@ -642,10 +627,7 @@ err_late:
ixgbe_free_transmit_structures(adapter);
ixgbe_free_receive_structures(adapter);
err_out:
- if (adapter->ifp != NULL)
- if_free(adapter->ifp);
ixgbe_free_pci_resources(adapter);
- free(adapter->mta, M_DEVBUF);
return (error);
}
@@ -716,7 +698,6 @@ ixgbe_detach(device_t dev)
ixgbe_free_transmit_structures(adapter);
ixgbe_free_receive_structures(adapter);
- free(adapter->mta, M_DEVBUF);
IXGBE_CORE_LOCK_DESTROY(adapter);
return (0);
@@ -780,8 +761,8 @@ ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
ETHER_BPF_MTAP(ifp, m_head);
/* Set watchdog on */
- txr->watchdog_check = TRUE;
txr->watchdog_time = ticks;
+ txr->queue_status = IXGBE_QUEUE_WORKING;
}
return;
@@ -851,6 +832,10 @@ ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
return (err);
}
+ /* Call cleanup if number of TX descriptors low */
+ if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD)
+ ixgbe_txeof(txr);
+
enqueued = 0;
if (m == NULL) {
next = drbr_dequeue(ifp, txr->br);
@@ -883,7 +868,7 @@ ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
if (enqueued > 0) {
/* Set watchdog on */
- txr->watchdog_check = TRUE;
+ txr->queue_status = IXGBE_QUEUE_WORKING;
txr->watchdog_time = ticks;
}
@@ -948,7 +933,6 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
if ((ifp->if_flags ^ adapter->if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
- ixgbe_disable_promisc(adapter);
ixgbe_set_promisc(adapter);
}
} else
@@ -987,6 +971,8 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_LRO;
if (mask & IFCAP_VLAN_HWTAGGING)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if (mask & IFCAP_VLAN_HWFILTER)
+ ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXGBE_CORE_LOCK(adapter);
ixgbe_init_locked(adapter);
@@ -1041,6 +1027,18 @@ ixgbe_init_locked(struct adapter *adapter)
ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
hw->addr_ctrl.rar_used_count = 1;
+ /* Set the various hardware offload abilities */
+ ifp->if_hwassist = 0;
+ if (ifp->if_capenable & IFCAP_TSO4)
+ ifp->if_hwassist |= CSUM_TSO;
+ if (ifp->if_capenable & IFCAP_TXCSUM) {
+ ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+#if __FreeBSD_version >= 800000
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ifp->if_hwassist |= CSUM_SCTP;
+#endif
+ }
+
/* Prepare transmit descriptors and buffers */
if (ixgbe_setup_transmit_structures(adapter)) {
device_printf(dev,"Could not setup transmit structures\n");
@@ -1058,10 +1056,12 @@ ixgbe_init_locked(struct adapter *adapter)
** Determine the correct mbuf pool
** for doing jumbo/headersplit
*/
- if (ifp->if_mtu > ETHERMTU)
+ if (adapter->max_frame_size <= 2048)
+ adapter->rx_mbuf_sz = MCLBYTES;
+ else if (adapter->max_frame_size <= 4096)
adapter->rx_mbuf_sz = MJUMPAGESIZE;
else
- adapter->rx_mbuf_sz = MCLBYTES;
+ adapter->rx_mbuf_sz = MJUM9BYTES;
/* Prepare receive descriptors and buffers */
if (ixgbe_setup_receive_structures(adapter)) {
@@ -1092,18 +1092,6 @@ ixgbe_init_locked(struct adapter *adapter)
}
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- /* Set the various hardware offload abilities */
- ifp->if_hwassist = 0;
- if (ifp->if_capenable & IFCAP_TSO4)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM) {
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
-#if __FreeBSD_version >= 800000
- if (hw->mac.type == ixgbe_mac_82599EB)
- ifp->if_hwassist |= CSUM_SCTP;
-#endif
- }
-
/* Set MTU size */
if (ifp->if_mtu > ETHERMTU) {
mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
@@ -1146,7 +1134,7 @@ ixgbe_init_locked(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
}
- /* Set up VLAN offloads and filter */
+ /* Set up VLAN support and filter */
ixgbe_setup_vlan_hw_support(adapter);
/* Enable Receive engine */
@@ -1760,10 +1748,6 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
++txr->total_packets;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
- /* Do a clean if descriptors are low */
- if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD)
- ixgbe_txeof(txr);
-
return (0);
xmit_fail:
@@ -1775,11 +1759,13 @@ xmit_fail:
static void
ixgbe_set_promisc(struct adapter *adapter)
{
-
u_int32_t reg_rctl;
struct ifnet *ifp = adapter->ifp;
reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+ reg_rctl &= (~IXGBE_FCTRL_UPE);
+ reg_rctl &= (~IXGBE_FCTRL_MPE);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
if (ifp->if_flags & IFF_PROMISC) {
reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
@@ -1792,20 +1778,6 @@ ixgbe_set_promisc(struct adapter *adapter)
return;
}
-static void
-ixgbe_disable_promisc(struct adapter * adapter)
-{
- u_int32_t reg_rctl;
-
- reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
-
- reg_rctl &= (~IXGBE_FCTRL_UPE);
- reg_rctl &= (~IXGBE_FCTRL_MPE);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
-
- return;
-}
-
/*********************************************************************
* Multicast Update
@@ -1819,7 +1791,7 @@ static void
ixgbe_set_multi(struct adapter *adapter)
{
u32 fctrl;
- u8 *mta;
+ u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
u8 *update_ptr;
struct ifmultiaddr *ifma;
int mcnt = 0;
@@ -1827,10 +1799,6 @@ ixgbe_set_multi(struct adapter *adapter)
IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
- mta = adapter->mta;
- bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
- MAX_NUM_MULTICAST_ADDRESSES);
-
fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
if (ifp->if_flags & IFF_PROMISC)
@@ -1923,19 +1891,14 @@ ixgbe_local_timer(void *arg)
*/
if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
goto out;
+
/*
- ** Check for time since any descriptor was cleaned
+ ** Check status on the TX queues for a hang
*/
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IXGBE_TX_LOCK(txr);
- if (txr->watchdog_check == FALSE) {
- IXGBE_TX_UNLOCK(txr);
- continue;
- }
- if ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG)
+ for (int i = 0; i < adapter->num_queues; i++, txr++)
+ if (txr->queue_status == IXGBE_QUEUE_HUNG)
goto hung;
- IXGBE_TX_UNLOCK(txr);
- }
+
out:
ixgbe_rearm_queues(adapter, adapter->que_mask);
callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
@@ -1985,7 +1948,7 @@ ixgbe_update_link_status(struct adapter *adapter)
adapter->link_active = FALSE;
for (int i = 0; i < adapter->num_queues;
i++, txr++)
- txr->watchdog_check = FALSE;
+ txr->queue_status = IXGBE_QUEUE_IDLE;
}
}
@@ -2005,6 +1968,7 @@ ixgbe_stop(void *arg)
{
struct ifnet *ifp;
struct adapter *adapter = arg;
+ struct ixgbe_hw *hw = &adapter->hw;
ifp = adapter->ifp;
mtx_assert(&adapter->core_mtx, MA_OWNED);
@@ -2015,9 +1979,12 @@ ixgbe_stop(void *arg)
/* Tell the stack that the interface is no longer active */
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
- ixgbe_reset_hw(&adapter->hw);
- adapter->hw.adapter_stopped = FALSE;
- ixgbe_stop_adapter(&adapter->hw);
+ ixgbe_reset_hw(hw);
+ hw->adapter_stopped = FALSE;
+ ixgbe_stop_adapter(hw);
+ /* Turn off the laser */
+ if (hw->phy.multispeed_fiber)
+ ixgbe_disable_tx_laser(hw);
callout_stop(&adapter->timer);
/* reprogram the RAR[0] in case user changed it. */
@@ -2242,6 +2209,9 @@ ixgbe_setup_msix(struct adapter *adapter)
if (ixgbe_num_queues != 0)
queues = ixgbe_num_queues;
+ /* Set max queues to 8 */
+ else if (queues > 8)
+ queues = 8;
/*
** Want one vector (RX/TX pair) per queue
@@ -2375,7 +2345,7 @@ mem:
* Setup networking device structure and register an interface.
*
**********************************************************************/
-static int
+static void
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -2384,10 +2354,8 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
INIT_DEBUGOUT("ixgbe_setup_interface: begin");
ifp = adapter->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not allocate ifnet structure\n");
- return (-1);
- }
+ if (ifp == NULL)
+ panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_mtu = ETHERMTU;
ifp->if_baudrate = 1000000000;
@@ -2414,10 +2382,22 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
-
+ ifp->if_capabilities |= IFCAP_JUMBO_MTU;
ifp->if_capenable = ifp->if_capabilities;
+ /* Don't enable LRO by default */
+ ifp->if_capabilities |= IFCAP_LRO;
+
+ /*
+ ** Dont turn this on by default, if vlans are
+ ** created on another pseudo device (eg. lagg)
+ ** then vlan events are not passed thru, breaking
+ ** operation, but with HW FILTER off it works. If
+ ** using vlans directly on the em driver you can
+ ** enable this and get full hardware tag filtering.
+ */
+ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
/*
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
@@ -2435,7 +2415,7 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
- return (0);
+ return;
}
static void
@@ -2450,6 +2430,7 @@ ixgbe_config_link(struct adapter *adapter)
if (sfp) {
if (hw->phy.multispeed_fiber) {
hw->mac.ops.setup_sfp(hw);
+ ixgbe_enable_tx_laser(hw);
taskqueue_enqueue(adapter->tq, &adapter->msf_task);
} else
taskqueue_enqueue(adapter->tq, &adapter->mod_task);
@@ -2856,7 +2837,7 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
/* Setup Transmit Descriptor Cmd Settings */
txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
- txr->watchdog_check = FALSE;
+ txr->queue_status = IXGBE_QUEUE_IDLE;
/* Disable Head Writeback */
switch (hw->mac.type) {
@@ -3195,7 +3176,7 @@ ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
{
struct adapter *adapter = txr->adapter;
struct ix_queue *que;
- struct ixgbe_atr_input atr_input;
+ union ixgbe_atr_input atr_input;
struct ip *ip;
struct tcphdr *th;
struct udphdr *uh;
@@ -3239,7 +3220,7 @@ ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
return;
}
- memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+ memset(&atr_input, 0, sizeof(union ixgbe_atr_input));
vlan_id = htole16(mp->m_pkthdr.ether_vtag);
src_ipv4_addr = ip->ip_src.s_addr;
@@ -3274,15 +3255,18 @@ ixgbe_txeof(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
struct ifnet *ifp = adapter->ifp;
- u32 first, last, done;
+ u32 first, last, done, processed;
struct ixgbe_tx_buf *tx_buffer;
struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
mtx_assert(&txr->tx_mtx, MA_OWNED);
- if (txr->tx_avail == adapter->num_tx_desc)
+ if (txr->tx_avail == adapter->num_tx_desc) {
+ txr->queue_status = IXGBE_QUEUE_IDLE;
return FALSE;
+ }
+ processed = 0;
first = txr->next_to_clean;
tx_buffer = &txr->tx_buffers[first];
/* For cleanup we just use legacy struct */
@@ -3314,6 +3298,7 @@ ixgbe_txeof(struct tx_ring *txr)
tx_desc->lower.data = 0;
tx_desc->buffer_addr = 0;
++txr->tx_avail;
+ ++processed;
if (tx_buffer->m_head) {
txr->bytes +=
@@ -3356,6 +3341,15 @@ ixgbe_txeof(struct tx_ring *txr)
txr->next_to_clean = first;
/*
+ ** Watchdog calculation, we know there's
+ ** work outstanding or the first return
+ ** would have been taken, so none processed
+ ** for too long indicates a hang.
+ */
+ if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
+ txr->queue_status = IXGBE_QUEUE_HUNG;
+
+ /*
* If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
* it is OK to send packets. If there are no pending descriptors,
* clear the timeout. Otherwise, if some descriptors have been freed,
@@ -3364,7 +3358,7 @@ ixgbe_txeof(struct tx_ring *txr)
if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
if (txr->tx_avail == adapter->num_tx_desc) {
- txr->watchdog_check = FALSE;
+ txr->queue_status = IXGBE_QUEUE_IDLE;
return FALSE;
}
}
@@ -3395,51 +3389,59 @@ ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
cleaned = -1; /* Signify no completions */
while (i != limit) {
rxbuf = &rxr->rx_buffers[i];
- if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
+ if (rxr->hdr_split == FALSE)
+ goto no_split;
+
+ if (rxbuf->m_head == NULL) {
mh = m_gethdr(M_DONTWAIT, MT_DATA);
if (mh == NULL)
goto update;
- mh->m_pkthdr.len = mh->m_len = MHLEN;
- mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- m_adj(mh, ETHER_ALIGN);
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->htag,
- rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("GET BUF: dmamap load"
- " failure - %d\n", error);
- m_free(mh);
- goto update;
- }
- rxbuf->m_head = mh;
- bus_dmamap_sync(rxr->htag, rxbuf->hmap,
- BUS_DMASYNC_PREREAD);
- rxr->rx_base[i].read.hdr_addr =
- htole64(hseg[0].ds_addr);
+ } else
+ mh = rxbuf->m_head;
+
+ mh->m_pkthdr.len = mh->m_len = MHLEN;
+ mh->m_len = MHLEN;
+ mh->m_flags |= M_PKTHDR;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->htag,
+ rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ printf("Refresh mbufs: hdr dmamap load"
+ " failure - %d\n", error);
+ m_free(mh);
+ rxbuf->m_head = NULL;
+ goto update;
}
+ rxbuf->m_head = mh;
+ bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+ BUS_DMASYNC_PREREAD);
+ rxr->rx_base[i].read.hdr_addr = htole64(hseg[0].ds_addr);
+no_split:
if (rxbuf->m_pack == NULL) {
mp = m_getjcl(M_DONTWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
if (mp == NULL)
goto update;
- mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("GET BUF: dmamap load"
- " failure - %d\n", error);
- m_free(mp);
- goto update;
- }
- rxbuf->m_pack = mp;
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_PREREAD);
- rxr->rx_base[i].read.pkt_addr =
- htole64(pseg[0].ds_addr);
+ } else
+ mp = rxbuf->m_pack;
+
+ mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+ rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ printf("Refresh mbufs: payload dmamap load"
+ " failure - %d\n", error);
+ m_free(mp);
+ rxbuf->m_pack = NULL;
+ goto update;
}
+ rxbuf->m_pack = mp;
+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+ BUS_DMASYNC_PREREAD);
+ rxr->rx_base[i].read.pkt_addr =
+ htole64(pseg[0].ds_addr);
cleaned = i;
/* Calculate next index */
@@ -3501,9 +3503,9 @@ ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- MJUMPAGESIZE, /* maxsize */
+ MJUM9BYTES, /* maxsize */
1, /* nsegments */
- MJUMPAGESIZE, /* maxsegsize */
+ MJUM9BYTES, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
@@ -3661,7 +3663,7 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
rxbuf = &rxr->rx_buffers[j];
/*
- ** Don't allocate mbufs if not
+ ** Dont allocate mbufs if not
** doing header split, its wasteful
*/
if (rxr->hdr_split == FALSE)
@@ -4027,25 +4029,33 @@ ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype
static __inline void
ixgbe_rx_discard(struct rx_ring *rxr, int i)
{
- struct adapter *adapter = rxr->adapter;
struct ixgbe_rx_buf *rbuf;
- struct mbuf *mh, *mp;
rbuf = &rxr->rx_buffers[i];
- if (rbuf->fmp != NULL) /* Partial chain ? */
- m_freem(rbuf->fmp);
- mh = rbuf->m_head;
- mp = rbuf->m_pack;
+ if (rbuf->fmp != NULL) {/* Partial chain ? */
+ rbuf->fmp->m_flags |= M_PKTHDR;
+ m_freem(rbuf->fmp);
+ rbuf->fmp = NULL;
+ }
- /* Reuse loaded DMA map and just update mbuf chain */
- mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- mh->m_next = NULL;
+ /*
+ ** With advanced descriptors the writeback
+ ** clobbers the buffer addrs, so its easier
+ ** to just free the existing mbufs and take
+ ** the normal refresh path to get new buffers
+ ** and mapping.
+ */
+ if (rbuf->m_head) {
+ m_free(rbuf->m_head);
+ rbuf->m_head = NULL;
+ }
+
+ if (rbuf->m_pack) {
+ m_free(rbuf->m_pack);
+ rbuf->m_pack = NULL;
+ }
- mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
- mp->m_data = mp->m_ext.ext_buf;
- mp->m_next = NULL;
return;
}
@@ -4110,15 +4120,15 @@ ixgbe_rxeof(struct ix_queue *que, int count)
vtag = le16toh(cur->wb.upper.vlan);
eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
- /* Make sure all parts of a bad packet are discarded */
+ /* Make sure bad packets are discarded */
if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
(rxr->discard)) {
ifp->if_ierrors++;
rxr->rx_discarded++;
- if (!eop)
- rxr->discard = TRUE;
- else
+ if (eop)
rxr->discard = FALSE;
+ else
+ rxr->discard = TRUE;
ixgbe_rx_discard(rxr, i);
goto next_desc;
}
@@ -4129,7 +4139,7 @@ ixgbe_rxeof(struct ix_queue *que, int count)
** not be fragmented across sequential
** descriptors, rather the next descriptor
** is indicated in bits of the descriptor.
- ** This also means that we might process
+ ** This also means that we might proceses
** more than one packet at a time, something
** that has never been true before, it
** required eliminating global chain pointers
@@ -4210,7 +4220,8 @@ ixgbe_rxeof(struct ix_queue *que, int count)
} else {
/* Singlet, prepare to send */
sendmp = mh;
- if (staterr & IXGBE_RXD_STAT_VP) {
+ if ((adapter->num_vlans) &&
+ (staterr & IXGBE_RXD_STAT_VP)) {
sendmp->m_pkthdr.ether_vtag = vtag;
sendmp->m_flags |= M_VLANTAG;
}
@@ -4376,12 +4387,13 @@ ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
+ IXGBE_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
- ixgbe_shadow_vfta[index] |= (1 << bit);
+ adapter->shadow_vfta[index] |= (1 << bit);
++adapter->num_vlans;
- /* Re-init to load the changes */
- ixgbe_init(adapter);
+ ixgbe_init_locked(adapter);
+ IXGBE_CORE_UNLOCK(adapter);
}
/*
@@ -4401,17 +4413,20 @@ ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
+ IXGBE_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
- ixgbe_shadow_vfta[index] &= ~(1 << bit);
+ adapter->shadow_vfta[index] &= ~(1 << bit);
--adapter->num_vlans;
/* Re-init to load the changes */
- ixgbe_init(adapter);
+ ixgbe_init_locked(adapter);
+ IXGBE_CORE_UNLOCK(adapter);
}
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
+ struct ifnet *ifp = adapter->ifp;
struct ixgbe_hw *hw = &adapter->hw;
u32 ctrl;
@@ -4430,14 +4445,16 @@ ixgbe_setup_vlan_hw_support(struct adapter *adapter)
** we need to repopulate it now.
*/
for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
- if (ixgbe_shadow_vfta[i] != 0)
+ if (adapter->shadow_vfta[i] != 0)
IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
- ixgbe_shadow_vfta[i]);
+ adapter->shadow_vfta[i]);
- /* Enable the Filter Table */
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- ctrl |= IXGBE_VLNCTRL_VFE;
+ /* Enable the Filter Table if enabled */
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
+ ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ ctrl |= IXGBE_VLNCTRL_VFE;
+ }
if (hw->mac.type == ixgbe_mac_82598EB)
ctrl |= IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
@@ -4478,14 +4495,14 @@ ixgbe_enable_intr(struct adapter *adapter)
/* With RSS we use auto clear */
if (adapter->msix_mem) {
mask = IXGBE_EIMS_ENABLE_MASK;
- /* Don't autoclear Link */
+ /* Dont autoclear Link */
mask &= ~IXGBE_EIMS_OTHER;
mask &= ~IXGBE_EIMS_LSC;
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
}
/*
- ** Now enable all queues, this is done separately to
+ ** Now enable all queues, this is done seperately to
** allow for handling the extended (beyond 32) MSIX
** vectors that can be used by 82599
*/
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index abfdfa1..60851d2 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -179,6 +179,9 @@
#define IXGBE_RX_HDR 128
#define IXGBE_VFTA_SIZE 128
#define IXGBE_BR_SIZE 4096
+#define IXGBE_QUEUE_IDLE 0
+#define IXGBE_QUEUE_WORKING 1
+#define IXGBE_QUEUE_HUNG 2
/* Offload bits in mbuf flag */
#if __FreeBSD_version >= 800000
@@ -205,11 +208,6 @@
#define IXGBE_BULK_LATENCY 1200
#define IXGBE_LINK_ITR 2000
-/* Header split args for get_bug */
-#define IXGBE_CLEAN_HDR 1
-#define IXGBE_CLEAN_PKT 2
-#define IXGBE_CLEAN_ALL 3
-
/*
*****************************************************************************
* vendor_info_array
@@ -280,7 +278,7 @@ struct tx_ring {
struct adapter *adapter;
struct mtx tx_mtx;
u32 me;
- bool watchdog_check;
+ int queue_status;
int watchdog_time;
union ixgbe_adv_tx_desc *tx_base;
struct ixgbe_dma_alloc txdma;
@@ -374,7 +372,15 @@ struct adapter {
u16 num_vlans;
u16 num_queues;
- /* Info about the board itself */
+ /*
+ ** Shadow VFTA table, this is needed because
+ ** the real vlan filter table gets cleared during
+ ** a soft reset and the driver needs to be able
+ ** to repopulate it.
+ */
+ u32 shadow_vfta[IXGBE_VFTA_SIZE];
+
+ /* Info about the interface */
u32 optics;
int advertise; /* link speeds */
bool link_active;
@@ -421,8 +427,6 @@ struct adapter {
u64 que_mask;
u32 rx_process_limit;
- /* Multicast array memory */
- u8 *mta;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_defrag_failed;
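The shadow_vfta array added above exists because a soft reset clears the hardware VLAN filter table; the register/unregister handlers in ixgbe.c keep the shadow copy current with simple shift/mask indexing over 128 32-bit words, the low five bits of the VLAN tag selecting the bit and the next seven selecting the word. A worked, runnable example of that indexing (standalone, for illustration):

    #include <assert.h>

    int
    main(void)
    {
            unsigned int vtag = 100;
            unsigned int index = (vtag >> 5) & 0x7F;  /* word: 100 / 32 = 3 */
            unsigned int bit = vtag & 0x1F;           /* bit:  100 % 32 = 4 */

            assert(index == 3 && bit == 4);
            return (0);
    }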
diff --git a/sys/dev/ixgbe/ixgbe_82598.c b/sys/dev/ixgbe/ixgbe_82598.c
index 0570aa5..607a8e7 100644
--- a/sys/dev/ixgbe/ixgbe_82598.c
+++ b/sys/dev/ixgbe/ixgbe_82598.c
@@ -73,7 +73,6 @@ u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
-static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw);
/**
* ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -186,6 +185,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->mcft_size = 128;
mac->vft_size = 128;
mac->num_rar_entries = 16;
+ mac->rx_pb_size = 512;
mac->max_tx_queues = 32;
mac->max_rx_queues = 64;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
@@ -196,6 +196,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
/* Link */
mac->ops.check_link = &ixgbe_check_mac_link_82598;
mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+ mac->ops.flap_tx_laser = NULL;
mac->ops.get_link_capabilities =
&ixgbe_get_link_capabilities_82598;
@@ -385,11 +386,14 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_media_type_82598");
/* Detect if there is a copper PHY attached. */
- if (hw->phy.type == ixgbe_phy_cu_unknown ||
- hw->phy.type == ixgbe_phy_tn ||
- hw->phy.type == ixgbe_phy_aq) {
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
media_type = ixgbe_media_type_copper;
goto out;
+ default:
+ break;
}
/* Media type for I82598 is based on device ID */
@@ -436,6 +440,7 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
+ u32 rx_pba_size;
u32 link_speed = 0;
bool link_up;
@@ -463,7 +468,7 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
/* Negotiate the fc mode to use */
ret_val = ixgbe_fc_autoneg(hw);
- if (ret_val)
+ if (ret_val == IXGBE_ERR_FLOW_CONTROL)
goto out;
/* Disable any previous flow control settings */
@@ -485,7 +490,8 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
*/
switch (hw->fc.current_mode) {
case ixgbe_fc_none:
- /* Flow control is disabled by software override or autoneg.
+ /*
+ * Flow control is disabled by software override or autoneg.
* The code below will actually disable it in the HW.
*/
break;
@@ -526,16 +532,19 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon) {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
- (hw->fc.low_water | IXGBE_FCRTL_XONE));
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
- hw->fc.low_water);
- }
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+ reg = (rx_pba_size - hw->fc.low_water) << 6;
+ if (hw->fc.send_xon)
+ reg |= IXGBE_FCRTL_XONE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+ reg = (rx_pba_size - hw->fc.high_water) << 6;
+ reg |= IXGBE_FCRTH_FCEN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
}
/* Configure pause time (2 TCs per register) */
@@ -560,7 +569,7 @@ out:
* Restarts the link. Performs autonegotiation if needed.
**/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait_to_complete)
{
u32 autoc_reg;
u32 links_reg;
@@ -601,6 +610,41 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_validate_link_ready - Function looks for phy link
+ * @hw: pointer to hardware structure
+ *
+ * Function indicates success when phy link is available. If phy is not ready
+ * within 5 seconds of MAC indicating link, the function returns error.
+ **/
+static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+ u32 timeout;
+ u16 an_reg;
+
+ if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+ return IXGBE_SUCCESS;
+
+ for (timeout = 0;
+ timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
+
+ if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
+ (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
+ break;
+
+ msec_delay(100);
+ }
+
+ if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+ DEBUGOUT("Link was indicated but link is down\n");
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_check_mac_link_82598 - Get link/speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
@@ -648,8 +692,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
&adapt_comp_reg);
}
} else {
- if ((link_reg & 1) &&
- ((adapt_comp_reg & 1) == 0))
+ if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
*link_up = TRUE;
else
*link_up = FALSE;
@@ -684,7 +727,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
*speed = IXGBE_LINK_SPEED_1GB_FULL;
if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
- (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
+ (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
*link_up = FALSE;
/* if link is down, zero out the current_mode */
@@ -692,7 +735,6 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
hw->fc.current_mode = ixgbe_fc_none;
hw->fc.fc_was_autonegged = FALSE;
}
-
out:
return IXGBE_SUCCESS;
}
@@ -904,8 +946,9 @@ mac_reset_top:
if (hw->mac.orig_link_settings_stored == FALSE) {
hw->mac.orig_autoc = autoc;
hw->mac.orig_link_settings_stored = TRUE;
- } else if (autoc != hw->mac.orig_autoc)
+ } else if (autoc != hw->mac.orig_autoc) {
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ }
/* Store the permanent mac address */
hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
@@ -916,11 +959,10 @@ mac_reset_top:
*/
hw->mac.ops.init_rx_addrs(hw);
-
-
reset_hw_out:
if (phy_status != IXGBE_SUCCESS)
status = phy_status;
+
return status;
}
@@ -933,9 +975,16 @@ reset_hw_out:
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
DEBUGFUNC("ixgbe_set_vmdq_82598");
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
rar_high &= ~IXGBE_RAH_VIND_MASK;
rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
@@ -956,14 +1005,16 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
UNREFERENCED_PARAMETER(vmdq);
- if (rar < rar_entries) {
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
- if (rar_high & IXGBE_RAH_VIND_MASK) {
- rar_high &= ~IXGBE_RAH_VIND_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
- }
- } else {
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ if (rar_high & IXGBE_RAH_VIND_MASK) {
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
}
return IXGBE_SUCCESS;
@@ -1173,8 +1224,10 @@ u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
/* Copper PHY must be checked before AUTOC LMS to determine correct
* physical layer because 10GBase-T PHYs use LMS = KX4/KX */
- if (hw->phy.type == ixgbe_phy_tn ||
- hw->phy.type == ixgbe_phy_cu_unknown) {
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
+ case ixgbe_phy_cu_unknown:
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
@@ -1184,6 +1237,8 @@ u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
goto out;
+ default:
+ break;
}
switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1263,7 +1318,8 @@ out:
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
struct ixgbe_bus_info *bus = &hw->bus;
- u16 pci_gen, pci_ctrl2;
+ u16 pci_gen = 0;
+ u16 pci_ctrl2 = 0;
DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
@@ -1286,41 +1342,6 @@ void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
}
/**
- * ixgbe_validate_link_ready - Function looks for phy link
- * @hw: pointer to hardware structure
- *
- * Function indicates success when phy link is available. If phy is not ready
- * within 5 seconds of MAC indicating link, the function returns error.
- **/
-static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
-{
- u32 timeout;
- u16 an_reg;
-
- if (hw->device_id != IXGBE_DEV_ID_82598AT2)
- return IXGBE_SUCCESS;
-
- for (timeout = 0;
- timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
-
- if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
- (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
- break;
-
- msec_delay(100);
- }
-
- if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
- DEBUGOUT("Link was indicated but link is down\n");
- return IXGBE_ERR_LINK_SETUP;
- }
-
- return IXGBE_SUCCESS;
-}
-
-/**
* ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
* @hw: pointer to hardware structure
*
diff --git a/sys/dev/ixgbe/ixgbe_82599.c b/sys/dev/ixgbe/ixgbe_82599.c
index 8c5ff21..8660bfb 100644
--- a/sys/dev/ixgbe/ixgbe_82599.c
+++ b/sys/dev/ixgbe/ixgbe_82599.c
@@ -42,6 +42,9 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete);
@@ -78,16 +81,31 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
+ /* enable the laser control functions for SFP+ fiber */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+ mac->ops.disable_tx_laser =
+ &ixgbe_disable_tx_laser_multispeed_fiber;
+ mac->ops.enable_tx_laser =
+ &ixgbe_enable_tx_laser_multispeed_fiber;
+ mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
+
+ } else {
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ }
+
if (hw->phy.multispeed_fiber) {
/* Set up dual speed SFP+ support */
mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
} else {
if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
(hw->phy.smart_speed == ixgbe_smart_speed_auto ||
- hw->phy.smart_speed == ixgbe_smart_speed_on))
+ hw->phy.smart_speed == ixgbe_smart_speed_on)) {
mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
- else
+ } else {
mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+ }
}
}
@@ -147,6 +165,8 @@ init_phy_ops_out:
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
+ u32 reg_anlp1 = 0;
+ u32 i = 0;
u16 list_offset, data_offset, data_value;
DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
@@ -174,14 +194,34 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
hw->eeprom.ops.read(hw, ++data_offset, &data_value);
}
- /* Now restart DSP by setting Restart_AN */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
- (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
/* Release the semaphore */
ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Delay obtaining semaphore again to allow FW access */
msec_delay(hw->eeprom.semaphore_delay);
+
+ /* Now restart DSP by setting Restart_AN and clearing LMS */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
+ IXGBE_AUTOC_AN_RESTART));
+
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ msec_delay(4);
+ reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+ if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+ DEBUGOUT("sfp module setup not complete\n");
+ ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ goto setup_sfp_out;
+ }
+
+ /* Restart DSP by setting Restart_AN and return to SFI mode */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
+ IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
@@ -213,6 +253,7 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
/* MAC */
mac->ops.reset_hw = &ixgbe_reset_hw_82599;
+ mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82599;
mac->ops.get_media_type = &ixgbe_get_media_type_82599;
mac->ops.get_supported_physical_layer =
&ixgbe_get_supported_physical_layer_82599;
@@ -224,6 +265,7 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;
mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
@@ -234,6 +276,8 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
+ mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
/* Link */
mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
@@ -243,10 +287,12 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->mcft_size = 128;
mac->vft_size = 128;
mac->num_rar_entries = 128;
+ mac->rx_pb_size = 512;
mac->max_tx_queues = 128;
mac->max_rx_queues = 128;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+ hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
return ret_val;
}
@@ -269,6 +315,13 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_get_link_capabilities_82599");
+ /* Check if 1G SFP module. */
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = TRUE;
+ goto out;
+ }
/*
* Determine link capabilities based on the stored value of AUTOC,
@@ -358,27 +411,35 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_media_type_82599");
/* Detect if there is a copper PHY attached. */
- if (hw->phy.type == ixgbe_phy_cu_unknown ||
- hw->phy.type == ixgbe_phy_tn ||
- hw->phy.type == ixgbe_phy_aq) {
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
media_type = ixgbe_media_type_copper;
goto out;
+ default:
+ break;
}
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_KX4:
case IXGBE_DEV_ID_82599_KX4_MEZZ:
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
case IXGBE_DEV_ID_82599_XAUI_LOM:
/* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82599_CX4:
media_type = ixgbe_media_type_cx4;
break;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ media_type = ixgbe_media_type_copper;
+ break;
default:
media_type = ixgbe_media_type_unknown;
break;
@@ -390,6 +451,7 @@ out:
/**
* ixgbe_start_mac_link_82599 - Setup MAC link settings
* @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
*
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
@@ -415,8 +477,8 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
IXGBE_AUTOC_LMS_KX4_KX_KR ||
(autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
- IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN
- || (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
links_reg = 0; /* Just in case Autoneg time = 0 */
for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
@@ -439,6 +501,67 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively shutting down the Tx
+ * laser on the PHY, effectively halting physical link.
+ **/
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Disable tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(100);
+}
+
+/**
+ * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively turning on the Tx
+ * laser on the PHY, effectively starting physical link.
+ **/
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Enable tx laser; allow 100ms to light up */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(100);
+}
+
+/**
+ * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support,
+ * it sets autotry_restart to TRUE to indicate that we need to
+ * initiate a new autotry session with the link partner. To do
+ * so, we set the speed then disable and re-enable the tx laser, to
+ * alert the link partner that it also needs to restart autotry on its
+ * end. This is consistent with TRUE clause 37 autoneg, which also
+ * involves a loss of signal.
+ **/
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
+
+ if (hw->mac.autotry_restart) {
+ ixgbe_disable_tx_laser_multispeed_fiber(hw);
+ ixgbe_enable_tx_laser_multispeed_fiber(hw);
+ hw->mac.autotry_restart = FALSE;
+ }
+}
+
+/**
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -452,7 +575,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
s32 status = IXGBE_SUCCESS;
- ixgbe_link_speed link_speed;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
u32 speedcnt = 0;
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -470,16 +593,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
speed &= link_speed;
/*
- * When the driver changes the link speeds that it can support,
- * it sets autotry_restart to TRUE to indicate that we need to
- * initiate a new autotry session with the link partner. To do
- * so, we set the speed then disable and re-enable the tx laser, to
- * alert the link partner that it also needs to restart autotry on its
- * end. This is consistent with TRUE clause 37 autoneg, which also
- * involves a loss of signal.
- */
-
- /*
* Try each speed one by one, highest priority first. We do this in
* software because 10gb fiber doesn't support speed autonegotiation.
*/
@@ -498,30 +611,20 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
/* Allow module to change analog characteristics (1G->10G) */
msec_delay(40);
- status = ixgbe_setup_mac_link_82599(
- hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
- autoneg_wait_to_complete);
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
return status;
/* Flap the tx laser if it has not already been done */
- if (hw->mac.autotry_restart) {
- /* Disable tx laser; allow 100us to go dark per spec */
- esdp_reg |= IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- usec_delay(100);
-
- /* Enable tx laser; allow 2ms to light up per spec */
- esdp_reg &= ~IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- msec_delay(2);
-
- hw->mac.autotry_restart = FALSE;
- }
+ ixgbe_flap_tx_laser(hw);
/*
* Wait for the controller to acquire link. Per IEEE 802.3ap,
@@ -560,30 +663,20 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
esdp_reg &= ~IXGBE_ESDP_SDP5;
esdp_reg |= IXGBE_ESDP_SDP5_DIR;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
/* Allow module to change analog characteristics (10G->1G) */
msec_delay(40);
- status = ixgbe_setup_mac_link_82599(
- hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
- autoneg_wait_to_complete);
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
return status;
/* Flap the tx laser if it has not already been done */
- if (hw->mac.autotry_restart) {
- /* Disable tx laser; allow 100us to go dark per spec */
- esdp_reg |= IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- usec_delay(100);
-
- /* Enable tx laser; allow 2ms to light up per spec */
- esdp_reg &= ~IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- msec_delay(2);
-
- hw->mac.autotry_restart = FALSE;
- }
+ ixgbe_flap_tx_laser(hw);
/* Wait for the link partner to also set speed */
msec_delay(100);
@@ -633,7 +726,7 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
s32 status = IXGBE_SUCCESS;
- ixgbe_link_speed link_speed;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
s32 i, j;
bool link_up = FALSE;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -726,6 +819,9 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
autoneg_wait_to_complete);
out:
+ if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+ DEBUGOUT("Smartspeed has downgraded the link speed "
+ "from the maximum advertised\n");
return status;
}
@@ -775,8 +871,8 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
orig_autoc = autoc;
if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
- link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
- link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
/* Set KX4/KX/KR support according to speed requested */
autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
@@ -788,8 +884,8 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
- link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+ (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+ link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
/* Switch from 1G SFI to 10G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
(pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
@@ -797,7 +893,7 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
}
} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
/* Switch from 10G SFI to 1G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
(pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -810,7 +906,6 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (autoc != start_autoc) {
-
/* Restart link */
autoc |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
@@ -870,6 +965,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
return status;
}
+
/**
* ixgbe_reset_hw_82599 - Perform hardware reset
* @hw: pointer to hardware structure
@@ -979,7 +1075,7 @@ mac_reset_top:
}
}
- /* Store the permanent mac address */
+ /* Store the permanent mac address */
hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
/*
@@ -1000,7 +1096,7 @@ mac_reset_top:
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
- }
+ }
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
@@ -1147,10 +1243,8 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
/* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
- IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
- IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
/*
* Poll init-done after we write the register. Estimated times:
@@ -1197,7 +1291,6 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
* must be reduced. The new value is the current size minus
* flow director memory usage size.
*/
-
pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
(IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
@@ -1243,10 +1336,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
/* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
- IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
- IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,IXGBE_ATR_SIGNATURE_HASH_KEY);
/*
* Poll init-done after we write the register. Estimated times:
@@ -1279,13 +1370,13 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
return IXGBE_SUCCESS;
}
-
/**
* ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
* @stream: input bitstream to compute the hash on
* @key: 32-bit hash key
**/
-u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
+u16 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+ u32 key)
{
/*
* The algorithm is as follows:
@@ -1305,102 +1396,188 @@ u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
* To simplify for programming, the algorithm is implemented
* in software this way:
*
- * Key[31:0], Stream[335:0]
+ * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+ *
+ * for (i = 0; i < 352; i+=32)
+ * hi_hash_dword[31:0] ^= Stream[(i+31):i];
*
- * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
- * int_key[350:0] = tmp_key[351:1]
- * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+ * lo_hash_dword[15:0] ^= Stream[15:0];
+ * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
+ * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
*
- * hash[15:0] = 0;
- * for (i = 0; i < 351; i++) {
- * if (int_key[i])
- * hash ^= int_stream[(i + 15):i];
+ * hi_hash_dword[31:0] ^= Stream[351:320];
+ *
+ * if(key[0])
+ * hash[15:0] ^= Stream[15:0];
+ *
+ * for (i = 0; i < 16; i++) {
+ * if (key[i])
+ * hash[15:0] ^= lo_hash_dword[(i+15):i];
+ * if (key[i + 16])
+ * hash[15:0] ^= hi_hash_dword[(i+15):i];
* }
+ *
+ */
+ __be32 common_hash_dword = 0;
+ u32 hi_hash_dword, lo_hash_dword;
+ u16 hash_result = 0;
+ u8 i;
+
+ /*
+ * the hi_hash_dword starts with vlan_id, the lo_hash_dword starts
+ * and ends with it, the vlan at the end is added via the word swapped
+ * xor with the hi_hash_dword a few lines down.
*/
+ hi_hash_dword = IXGBE_NTOHL(atr_input->dword_stream[0]) & 0x0000FFFF;
+ lo_hash_dword = hi_hash_dword;
- union {
- u64 fill[6];
- u32 key[11];
- u8 key_stream[44];
- } tmp_key;
+ /* generate common hash dword */
+ for (i = 1; i < 11; i++)
+ common_hash_dword ^= (u32)atr_input->dword_stream[i];
+ hi_hash_dword ^= IXGBE_NTOHL(common_hash_dword);
- u8 *stream = (u8 *)atr_input;
- u8 int_key[44]; /* upper-most bit unused */
- u8 hash_str[46]; /* upper-most 2 bits unused */
- u16 hash_result = 0;
- int i, j, k, h;
+ /* low dword is word swapped version of common with vlan added */
+ lo_hash_dword ^= (hi_hash_dword >> 16) | (hi_hash_dword << 16);
- DEBUGFUNC("ixgbe_atr_compute_hash_82599");
+ /* hi dword is common dword with l4type and vm_pool shifted */
+ hi_hash_dword ^= IXGBE_NTOHL(atr_input->dword_stream[10]) << 16;
/*
- * Initialize the fill member to prevent warnings
- * on some compilers
+ * Process all 32 bits of the 2 keys 2 bits at a time
+ *
+ * Bit flip vlan from hash result if hash key has bit 0 set, the
+ * reason for doing this is because the hash generation shouldn't
+ * start until bit 1 in the stream so we need to cancel out a vlan
+ * if it was added starting at bit 0.
*/
- tmp_key.fill[0] = 0;
-
- /* First load the temporary key stream */
- for (i = 0; i < 6; i++) {
- u64 fillkey = ((u64)key << 32) | key;
- tmp_key.fill[i] = fillkey;
+ if (key & 0x0001) {
+ hash_result ^= IXGBE_NTOHL(atr_input->dword_stream[0]) &
+ 0x0FFFF;
+ hash_result ^= lo_hash_dword;
+ }
+ if (key & 0x00010000)
+ hash_result ^= hi_hash_dword;
+
+ /* process the remaining bits in the key */
+ for (i = 1; i < 16; i++) {
+ if (key & (0x0001 << i))
+ hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i))
+ hash_result ^= hi_hash_dword >> i;
}
- /*
- * Set the interim key for the hashing. Bit 352 is unused, so we must
- * shift and compensate when building the key.
- */
+ return hash_result;
+}
- int_key[0] = tmp_key.key_stream[0] >> 1;
- for (i = 1, j = 0; i < 44; i++) {
- unsigned int this_key = tmp_key.key_stream[j] << 7;
- j++;
- int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
- }
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+ (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) { \
+ if (n == 0) \
+ common_hash ^= \
+ IXGBE_NTOHL(atr_input->dword_stream[0]) & \
+ 0x0000FFFF; \
+ common_hash ^= lo_hash_dword >> n; \
+ } else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) { \
+ if (n == 0) \
+ bucket_hash ^= \
+ IXGBE_NTOHL(atr_input->dword_stream[0]) & \
+ 0x0000FFFF; \
+ bucket_hash ^= lo_hash_dword >> n; \
+ } else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) { \
+ if (n == 0) \
+ sig_hash ^= IXGBE_NTOHL(atr_input->dword_stream[0]) & \
+ 0x0000FFFF; \
+ sig_hash ^= lo_hash_dword >> n; \
+ } \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x010000 << n)) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x010000 << n)) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x010000 << n)) \
+ sig_hash ^= hi_hash_dword >> n; \
+} while (0);
+
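+/*
+ * For reference, iteration 3 of the macro expands to roughly the following
+ * (the n == 0 arm is dead code for n != 0 and folds away):
+ *
+ *	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << 3))
+ *		common_hash ^= lo_hash_dword >> 3;
+ *	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << 3))
+ *		bucket_hash ^= lo_hash_dword >> 3;
+ *	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << 3))
+ *		sig_hash ^= lo_hash_dword >> 3;
+ *	... and the same again for key bit (3 + 16) against hi_hash_dword.
+ *
+ * A bit set in both keys lands only in common_hash, which is folded into
+ * both results at the end of the function below.
+ */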
+/**
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @stream: input bitstream to compute the hash on
+ *
+ * This function is almost identical to the function above but contains
+ * several optimizations, such as unrolling all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing the hashes for both keys at once since the
+ * hashed dword stream will be the same for both.
+ **/
+static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_input *atr_input)
+{
+ u32 hi_hash_dword, lo_hash_dword;
+ u16 sig_hash = 0, bucket_hash = 0, common_hash = 0;
/*
- * Set the interim bit string for the hashing. Bits 368 and 367 are
- * unused, so shift and compensate when building the string.
+	 * The hi_hash_dword starts with vlan_id; the lo_hash_dword both
+	 * starts and ends with it.  The vlan at the end is added via the
+	 * word-swapped XOR with the hi_hash_dword a few lines down.  The
+	 * part masked off is the part of the hash reserved as 0.
*/
- hash_str[0] = (stream[40] & 0x7f) >> 1;
- for (i = 1, j = 40; i < 46; i++) {
- unsigned int this_str = stream[j] << 7;
- j++;
- if (j > 41)
- j = 0;
- hash_str[i] = (u8)(this_str | (stream[j] >> 1));
- }
+ hi_hash_dword = IXGBE_NTOHL(atr_input->dword_stream[0]) & 0x0000FFFF;
+ lo_hash_dword = hi_hash_dword;
+
+ /* generate common hash dword */
+ hi_hash_dword ^= IXGBE_NTOHL(atr_input->dword_stream[1] ^
+ atr_input->dword_stream[2] ^
+ atr_input->dword_stream[3] ^
+ atr_input->dword_stream[4] ^
+ atr_input->dword_stream[5] ^
+ atr_input->dword_stream[6] ^
+ atr_input->dword_stream[7] ^
+ atr_input->dword_stream[8] ^
+ atr_input->dword_stream[9] ^
+ atr_input->dword_stream[10]);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword ^= (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* hi dword is common dword with l4type and vm_pool added */
+ hi_hash_dword ^= IXGBE_NTOHL(atr_input->dword_stream[10]) << 16;
/*
- * Now compute the hash. i is the index into hash_str, j is into our
- * key stream, k is counting the number of bits, and h interates within
- * each byte.
+ * Process all 32 bits of the 2 keys 2 bits at a time
+ *
+	 * Bit flip the vlan from the hash result if the hash key has bit 0
+	 * set; hash generation shouldn't start until bit 1 of the stream,
+	 * so we need to cancel out a vlan that was added starting at bit 0.
*/
- for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
- for (h = 0; h < 8 && k < 351; h++, k++) {
- if (int_key[j] & (1 << h)) {
- /*
- * Key bit is set, XOR in the current 16-bit
- * string. Example of processing:
- * h = 0,
- * tmp = (hash_str[i - 2] & 0 << 16) |
- * (hash_str[i - 1] & 0xff << 8) |
- * (hash_str[i] & 0xff >> 0)
- * So tmp = hash_str[15 + k:k], since the
- * i + 2 clause rolls off the 16-bit value
- * h = 7,
- * tmp = (hash_str[i - 2] & 0x7f << 9) |
- * (hash_str[i - 1] & 0xff << 1) |
- * (hash_str[i] & 0x80 >> 7)
- */
- int tmp = (hash_str[i] >> h);
- tmp |= (hash_str[i - 1] << (8 - h));
- tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
- << (16 - h);
- hash_result ^= (u16)tmp;
- }
- }
- }
-
- return hash_result;
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+ /* combine common_hash result with signature and bucket hashes */
+ sig_hash ^= common_hash;
+ bucket_hash ^= common_hash;
+
+ /* return completed signature hash */
+ return ((u32)sig_hash << 16) | (bucket_hash & IXGBE_ATR_HASH_MASK);
}
/**
@@ -1408,12 +1585,11 @@ u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
* @input: input stream to modify
* @vlan: the VLAN id to load
**/
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
+s32 ixgbe_atr_set_vlan_id_82599(union ixgbe_atr_input *input, __be16 vlan)
{
DEBUGFUNC("ixgbe_atr_set_vlan_id_82599");
- input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
- input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
+ input->formatted.vlan_id = vlan;
return IXGBE_SUCCESS;
}
@@ -1423,16 +1599,11 @@ s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
* @input: input stream to modify
* @src_addr: the IP address to load
**/
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
+s32 ixgbe_atr_set_src_ipv4_82599(union ixgbe_atr_input *input, __be32 src_addr)
{
DEBUGFUNC("ixgbe_atr_set_src_ipv4_82599");
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
- (src_addr >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
- (src_addr >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
+ input->formatted.src_ip[0] = src_addr;
return IXGBE_SUCCESS;
}
@@ -1442,16 +1613,11 @@ s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
* @input: input stream to modify
* @dst_addr: the IP address to load
**/
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
+s32 ixgbe_atr_set_dst_ipv4_82599(union ixgbe_atr_input *input, __be32 dst_addr)
{
DEBUGFUNC("ixgbe_atr_set_dst_ipv4_82599");
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
- (dst_addr >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
- (dst_addr >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
+ input->formatted.dst_ip[0] = dst_addr;
return IXGBE_SUCCESS;
}
@@ -1459,44 +1625,21 @@ s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
/**
* ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
* @input: input stream to modify
- * @src_addr_1: the first 4 bytes of the IP address to load
- * @src_addr_2: the second 4 bytes of the IP address to load
- * @src_addr_3: the third 4 bytes of the IP address to load
- * @src_addr_4: the fourth 4 bytes of the IP address to load
+ * @src_addr_0: the first 4 bytes of the IP address to load
+ * @src_addr_1: the second 4 bytes of the IP address to load
+ * @src_addr_2: the third 4 bytes of the IP address to load
+ * @src_addr_3: the fourth 4 bytes of the IP address to load
**/
-s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
- u32 src_addr_1, u32 src_addr_2,
- u32 src_addr_3, u32 src_addr_4)
+s32 ixgbe_atr_set_src_ipv6_82599(union ixgbe_atr_input *input,
+ __be32 src_addr_0, __be32 src_addr_1,
+ __be32 src_addr_2, __be32 src_addr_3)
{
DEBUGFUNC("ixgbe_atr_set_src_ipv6_82599");
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
- (src_addr_4 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
- (src_addr_4 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
-
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
- (src_addr_3 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
- (src_addr_3 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
-
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
- (src_addr_2 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
- (src_addr_2 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
-
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
- (src_addr_1 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
- (src_addr_1 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
+ input->formatted.src_ip[0] = src_addr_0;
+ input->formatted.src_ip[1] = src_addr_1;
+ input->formatted.src_ip[2] = src_addr_2;
+ input->formatted.src_ip[3] = src_addr_3;
return IXGBE_SUCCESS;
}
@@ -1504,44 +1647,21 @@ s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
/**
* ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
* @input: input stream to modify
- * @dst_addr_1: the first 4 bytes of the IP address to load
- * @dst_addr_2: the second 4 bytes of the IP address to load
- * @dst_addr_3: the third 4 bytes of the IP address to load
- * @dst_addr_4: the fourth 4 bytes of the IP address to load
+ * @dst_addr_0: the first 4 bytes of the IP address to load
+ * @dst_addr_1: the second 4 bytes of the IP address to load
+ * @dst_addr_2: the third 4 bytes of the IP address to load
+ * @dst_addr_3: the fourth 4 bytes of the IP address to load
**/
-s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
- u32 dst_addr_1, u32 dst_addr_2,
- u32 dst_addr_3, u32 dst_addr_4)
+s32 ixgbe_atr_set_dst_ipv6_82599(union ixgbe_atr_input *input,
+ __be32 dst_addr_0, __be32 dst_addr_1,
+ __be32 dst_addr_2, __be32 dst_addr_3)
{
DEBUGFUNC("ixgbe_atr_set_dst_ipv6_82599");
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
- (dst_addr_4 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
- (dst_addr_4 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
-
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
- (dst_addr_3 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
- (dst_addr_3 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
-
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
- (dst_addr_2 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
- (dst_addr_2 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
-
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
- (dst_addr_1 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
- (dst_addr_1 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
+ input->formatted.dst_ip[0] = dst_addr_0;
+ input->formatted.dst_ip[1] = dst_addr_1;
+ input->formatted.dst_ip[2] = dst_addr_2;
+ input->formatted.dst_ip[3] = dst_addr_3;
return IXGBE_SUCCESS;
}
@@ -1551,12 +1671,11 @@ s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
* @input: input stream to modify
* @src_port: the source port to load
**/
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
+s32 ixgbe_atr_set_src_port_82599(union ixgbe_atr_input *input, __be16 src_port)
{
DEBUGFUNC("ixgbe_atr_set_src_port_82599");
- input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
- input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
+ input->formatted.src_port = src_port;
return IXGBE_SUCCESS;
}
@@ -1566,12 +1685,11 @@ s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
* @input: input stream to modify
* @dst_port: the destination port to load
**/
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
+s32 ixgbe_atr_set_dst_port_82599(union ixgbe_atr_input *input, __be16 dst_port)
{
DEBUGFUNC("ixgbe_atr_set_dst_port_82599");
- input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
- input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
+ input->formatted.dst_port = dst_port;
return IXGBE_SUCCESS;
}
@@ -1581,12 +1699,11 @@ s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
* @input: input stream to modify
* @flex_bytes: the flexible bytes to load
**/
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
+s32 ixgbe_atr_set_flex_byte_82599(union ixgbe_atr_input *input, __be16 flex_bytes)
{
DEBUGFUNC("ixgbe_atr_set_flex_byte_82599");
- input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
- input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
+ input->formatted.flex_bytes = flex_bytes;
return IXGBE_SUCCESS;
}
@@ -1596,11 +1713,11 @@ s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
* @input: input stream to modify
* @vm_pool: the Virtual Machine pool to load
**/
-s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
+s32 ixgbe_atr_set_vm_pool_82599(union ixgbe_atr_input *input, u8 vm_pool)
{
DEBUGFUNC("ixgbe_atr_set_vm_pool_82599");
- input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
+ input->formatted.vm_pool = vm_pool;
return IXGBE_SUCCESS;
}
@@ -1609,12 +1726,15 @@ s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
* ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
* @input: input stream to modify
* @l4type: the layer 4 type value to load
+ *
+ * This call is deprecated and should be replaced with a direct access to
+ * input->formatted.flow_type.
**/
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
+s32 ixgbe_atr_set_l4type_82599(union ixgbe_atr_input *input, u8 l4type)
{
DEBUGFUNC("ixgbe_atr_set_l4type_82599");
- input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
+ input->formatted.flow_type = l4type;
return IXGBE_SUCCESS;
}
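+/*
+ * Sketch of the preferred direct access noted above (the flow type value
+ * is illustrative):
+ *
+ *	input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ */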
@@ -1624,12 +1744,11 @@ s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
* @input: input stream to search
* @vlan: the VLAN id to load
**/
-s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
+s32 ixgbe_atr_get_vlan_id_82599(union ixgbe_atr_input *input, __be16 *vlan)
{
DEBUGFUNC("ixgbe_atr_get_vlan_id_82599");
- *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
- *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
+ *vlan = input->formatted.vlan_id;
return IXGBE_SUCCESS;
}
@@ -1639,14 +1758,11 @@ s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
* @input: input stream to search
* @src_addr: the IP address to load
**/
-s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
+s32 ixgbe_atr_get_src_ipv4_82599(union ixgbe_atr_input *input, __be32 *src_addr)
{
DEBUGFUNC("ixgbe_atr_get_src_ipv4_82599");
- *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
+ *src_addr = input->formatted.src_ip[0];
return IXGBE_SUCCESS;
}
@@ -1656,14 +1772,11 @@ s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
* @input: input stream to search
* @dst_addr: the IP address to load
**/
-s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
+s32 ixgbe_atr_get_dst_ipv4_82599(union ixgbe_atr_input *input, __be32 *dst_addr)
{
DEBUGFUNC("ixgbe_atr_get_dst_ipv4_82599");
- *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
+ *dst_addr = input->formatted.dst_ip[0];
return IXGBE_SUCCESS;
}
@@ -1671,36 +1784,21 @@ s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr)
/**
* ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
* @input: input stream to search
- * @src_addr_1: the first 4 bytes of the IP address to load
- * @src_addr_2: the second 4 bytes of the IP address to load
- * @src_addr_3: the third 4 bytes of the IP address to load
- * @src_addr_4: the fourth 4 bytes of the IP address to load
+ * @src_addr_0: the first 4 bytes of the IP address to load
+ * @src_addr_1: the second 4 bytes of the IP address to load
+ * @src_addr_2: the third 4 bytes of the IP address to load
+ * @src_addr_3: the fourth 4 bytes of the IP address to load
**/
-s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
- u32 *src_addr_1, u32 *src_addr_2,
- u32 *src_addr_3, u32 *src_addr_4)
+s32 ixgbe_atr_get_src_ipv6_82599(union ixgbe_atr_input *input,
+ __be32 *src_addr_0, __be32 *src_addr_1,
+ __be32 *src_addr_2, __be32 *src_addr_3)
{
DEBUGFUNC("ixgbe_atr_get_src_ipv6_82599");
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
-
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
-
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
-
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
+ *src_addr_0 = input->formatted.src_ip[0];
+ *src_addr_1 = input->formatted.src_ip[1];
+ *src_addr_2 = input->formatted.src_ip[2];
+ *src_addr_3 = input->formatted.src_ip[3];
return IXGBE_SUCCESS;
}
@@ -1708,36 +1806,21 @@ s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
/**
* ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
* @input: input stream to search
- * @dst_addr_1: the first 4 bytes of the IP address to load
- * @dst_addr_2: the second 4 bytes of the IP address to load
- * @dst_addr_3: the third 4 bytes of the IP address to load
- * @dst_addr_4: the fourth 4 bytes of the IP address to load
+ * @dst_addr_0: the first 4 bytes of the IP address to load
+ * @dst_addr_1: the second 4 bytes of the IP address to load
+ * @dst_addr_2: the third 4 bytes of the IP address to load
+ * @dst_addr_3: the fourth 4 bytes of the IP address to load
**/
-s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
- u32 *dst_addr_1, u32 *dst_addr_2,
- u32 *dst_addr_3, u32 *dst_addr_4)
+s32 ixgbe_atr_get_dst_ipv6_82599(union ixgbe_atr_input *input,
+ __be32 *dst_addr_0, __be32 *dst_addr_1,
+ __be32 *dst_addr_2, __be32 *dst_addr_3)
{
DEBUGFUNC("ixgbe_atr_get_dst_ipv6_82599");
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
-
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
-
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
-
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
+ *dst_addr_0 = input->formatted.dst_ip[0];
+ *dst_addr_1 = input->formatted.dst_ip[1];
+ *dst_addr_2 = input->formatted.dst_ip[2];
+ *dst_addr_3 = input->formatted.dst_ip[3];
return IXGBE_SUCCESS;
}
@@ -1752,12 +1835,11 @@ s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
* endianness when retrieving the data. This can be confusing since the
* internal hash engine expects it to be big-endian.
**/
-s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
+s32 ixgbe_atr_get_src_port_82599(union ixgbe_atr_input *input, __be16 *src_port)
{
DEBUGFUNC("ixgbe_atr_get_src_port_82599");
- *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
- *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
+ *src_port = input->formatted.src_port;
return IXGBE_SUCCESS;
}
@@ -1772,12 +1854,11 @@ s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port)
* endianness when retrieving the data. This can be confusing since the
* internal hash engine expects it to be big-endian.
**/
-s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
+s32 ixgbe_atr_get_dst_port_82599(union ixgbe_atr_input *input, __be16 *dst_port)
{
DEBUGFUNC("ixgbe_atr_get_dst_port_82599");
- *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
- *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
+ *dst_port = input->formatted.dst_port;
return IXGBE_SUCCESS;
}
@@ -1787,12 +1868,11 @@ s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port)
* @input: input stream to modify
* @flex_bytes: the flexible bytes to load
**/
-s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
+s32 ixgbe_atr_get_flex_byte_82599(union ixgbe_atr_input *input, __be16 *flex_bytes)
{
DEBUGFUNC("ixgbe_atr_get_flex_byte_82599");
- *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
- *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
+ *flex_bytes = input->formatted.flex_bytes;
return IXGBE_SUCCESS;
}
@@ -1802,11 +1882,11 @@ s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte)
* @input: input stream to modify
* @vm_pool: the Virtual Machine pool to load
**/
-s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
+s32 ixgbe_atr_get_vm_pool_82599(union ixgbe_atr_input *input, u8 *vm_pool)
{
DEBUGFUNC("ixgbe_atr_get_vm_pool_82599");
- *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
+ *vm_pool = input->formatted.vm_pool;
return IXGBE_SUCCESS;
}
@@ -1815,12 +1895,15 @@ s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool)
* ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
* @input: input stream to modify
* @l4type: the layer 4 type value to load
+ *
+ * This call is deprecated and should be replaced with a direct access to
+ * input->formatted.flow_type.
**/
-s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
+s32 ixgbe_atr_get_l4type_82599(union ixgbe_atr_input *input, u8 *l4type)
{
DEBUGFUNC("ixgbe_atr_get_l4type__82599");
- *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
+ *l4type = input->formatted.flow_type;
return IXGBE_SUCCESS;
}
@@ -1832,67 +1915,89 @@ s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
* @queue: queue index to direct traffic to
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_input *input,
u8 queue)
{
u64 fdirhashcmd;
- u64 fdircmd;
- u32 fdirhash;
- u16 bucket_hash, sig_hash;
- u8 l4type;
+ u32 fdircmd;
DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
-
- /* bucket_hash is only 15 bits */
- bucket_hash &= IXGBE_ATR_HASH_MASK;
-
- sig_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_SIGNATURE_HASH_KEY);
-
- /* Get the l4type in order to program FDIRCMD properly */
- /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
- ixgbe_atr_get_l4type_82599(input, &l4type);
-
/*
- * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
- * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ * Get the flow_type in order to program FDIRCMD properly
+ * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
*/
- fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
- fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
-
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
- break;
- case IXGBE_ATR_L4TYPE_UDP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
- break;
- case IXGBE_ATR_L4TYPE_SCTP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ switch (input->formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TCPV6:
+ case IXGBE_ATR_FLOW_TYPE_UDPV6:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV6:
break;
default:
- DEBUGOUT(" Error on l4type input\n");
+ DEBUGOUT(" Error on flow type input\n");
return IXGBE_ERR_CONFIG;
}
- if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
- fdircmd |= IXGBE_FDIRCMD_IPV6;
-
- fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
- fdirhashcmd = ((fdircmd << 32) | fdirhash);
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= ((u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
- DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF);
+ /*
+ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+ * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ */
+ fdirhashcmd = ((u64)fdircmd << 32) |
+ ixgbe_atr_compute_sig_hash_82599(input);
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+ DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
return IXGBE_SUCCESS;
}
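+/*
+ * A minimal usage sketch (hypothetical caller; hw and rx_queue are
+ * assumed to exist in the caller, and htonl/htons stand in for the
+ * platform byte-order helpers):
+ *
+ *	union ixgbe_atr_input input;
+ *
+ *	memset(&input, 0, sizeof(input));
+ *	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ *	input.formatted.src_ip[0] = htonl(0xC0A80101);	// 192.168.1.1
+ *	input.formatted.dst_ip[0] = htonl(0xC0A80102);	// 192.168.1.2
+ *	input.formatted.src_port = htons(49152);
+ *	input.formatted.dst_port = htons(80);
+ *	ixgbe_fdir_add_signature_filter_82599(hw, &input, rx_queue);
+ */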
/**
+ * ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks
+ * @input_masks: masks to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 affects bit 0, bit 14 affects bit 1, and so on.  To
+ * generate a correctly swapped value we need to bit swap the mask, which
+ * is what this function accomplishes.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+{
+ u32 mask = IXGBE_NTOHS(input_masks->dst_port_mask);
+ mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+ mask |= IXGBE_NTOHS(input_masks->src_port_mask);
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
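+/*
+ * Worked example: with IXGBE_NTOHS(src_port_mask) == 0x0001 and
+ * dst_port_mask == 0, the combined dword is 0x00000001 and the swap
+ * stages above reverse the bits within each 16-bit half, returning
+ * 0x00008000; the perfect-filter code below then writes its complement
+ * to FDIRTCPM/FDIRUDPM.
+ */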
+
+/*
+ * These macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian.  As a result, on big-endian
+ * systems we end up byte swapping the value to little-endian before it
+ * is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+ (((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
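+/*
+ * Worked example, assuming a little-endian host: a __be32 whose wire bytes
+ * are 11 22 33 44 reads as the u32 0x44332211; IXGBE_NTOHL() yields
+ * 0x11223344 and IXGBE_STORE_AS_BE32() reverses the bytes again, so the
+ * value written back preserves the original big-endian byte layout.
+ */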
+
+/**
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
* @input: input bitstream
@@ -1904,84 +2009,41 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* hardware writes must be protected from one another.
**/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *input_masks,
u16 soft_id, u8 queue)
{
u32 fdircmd = 0;
u32 fdirhash;
- u32 src_ipv4 = 0, dst_ipv4 = 0;
- u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
- u16 src_port, dst_port, vlan_id, flex_bytes;
- u16 bucket_hash;
- u8 l4type;
- u8 fdirm = 0;
+ u32 fdirport, fdirtcpm;
+ u32 fdirvlan;
+ /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
+ u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
+ IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
- /* Get our input values */
- ixgbe_atr_get_l4type_82599(input, &l4type);
-
/*
- * Check l4type formatting, and bail out before we touch the hardware
+ * Check flow_type formatting, and bail out before we touch the hardware
* if there's a configuration issue
*/
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
- break;
- case IXGBE_ATR_L4TYPE_UDP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
- break;
- case IXGBE_ATR_L4TYPE_SCTP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ switch (input->formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+ fdirm |= IXGBE_FDIRM_L4P;
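+		/* fall through */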
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ if (input_masks->dst_port_mask || input_masks->src_port_mask) {
+ DEBUGOUT(" Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
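+		/* fall through */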
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
break;
default:
- DEBUGOUT(" Error on l4type input\n");
+ DEBUGOUT(" Error on flow type input\n");
return IXGBE_ERR_CONFIG;
}
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
-
- /* bucket_hash is only 15 bits */
- bucket_hash &= IXGBE_ATR_HASH_MASK;
-
- ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
- ixgbe_atr_get_src_port_82599(input, &src_port);
- ixgbe_atr_get_dst_port_82599(input, &dst_port);
- ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
-
- fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
- /* Now figure out if we're IPv4 or IPv6 */
- if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
- /* IPv6 */
- ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
- &src_ipv6_3, &src_ipv6_4);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
- /* The last 4 bytes is the same register as IPv4 */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
-
- fdircmd |= IXGBE_FDIRCMD_IPV6;
- fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
- } else {
- /* IPv4 */
- ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
- }
-
- ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
- (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
- (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
-
/*
* Program the relevant mask registers. If src/dst_port or src/dst_addr
* are zero, then assume a full mask for that field. Also assume that
@@ -1991,75 +2053,95 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
* This also assumes IPv4 only. IPv6 masking isn't supported at this
* point in time.
*/
- if (src_ipv4 == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
- if (dst_ipv4 == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
-
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- if (src_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- input_masks->src_port_mask);
-
- if (dst_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
- (0xffff << 16)));
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
- (input_masks->dst_port_mask << 16)));
+ /* Program FDIRM */
+ switch (IXGBE_NTOHS(input_masks->vlan_id_mask) & 0xEFFF) {
+ case 0xEFFF:
+ /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
+ fdirm &= ~IXGBE_FDIRM_VLANID;
+ case 0xE000:
+ /* Unmask VLAN prio - bit 1 */
+ fdirm &= ~IXGBE_FDIRM_VLANP;
break;
- case IXGBE_ATR_L4TYPE_UDP:
- if (src_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- input_masks->src_port_mask);
-
- if (dst_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
- (0xffff << 16)));
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
- (input_masks->src_port_mask << 16)));
+ case 0x0FFF:
+ /* Unmask VLAN ID - bit 0 */
+ fdirm &= ~IXGBE_FDIRM_VLANID;
break;
- default:
- /* this already would have failed above */
+ case 0x0000:
+ /* do nothing, vlans already masked */
break;
+ default:
+ DEBUGOUT(" Error on VLAN mask\n");
+ return IXGBE_ERR_CONFIG;
}
- /* Program the last mask register, FDIRM */
- if (input_masks->vlan_id_mask || !vlan_id)
- /* Mask both VLAN and VLANP - bits 0 and 1 */
- fdirm |= (IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP);
-
- if (input_masks->data_mask || !flex_bytes)
- /* Flex bytes need masking, so mask the whole thing - bit 4 */
- fdirm |= IXGBE_FDIRM_FLEX;
+ if (input_masks->flex_mask & 0xFFFF) {
+ if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
+ DEBUGOUT(" Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ /* Unmask Flex Bytes - bit 4 */
+ fdirm &= ~IXGBE_FDIRM_FLEX;
+ }
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
- fdirm |= (IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6);
-
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+	/* store source and destination IP masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ ~input_masks->src_ip_mask[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ ~input_masks->dst_ip_mask[0]);
+
+ /* Apply masks to input data */
+ input->formatted.vlan_id &= input_masks->vlan_id_mask;
+ input->formatted.flex_bytes &= input_masks->flex_mask;
+ input->formatted.src_port &= input_masks->src_port_mask;
+ input->formatted.dst_port &= input_masks->dst_port_mask;
+ input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
+ input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+
+	/* record vlan (little-endian) and flex_bytes (big-endian) */
+ fdirvlan =
+ IXGBE_STORE_AS_BE16(IXGBE_NTOHS(input->formatted.flex_bytes));
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+	/* record source and destination port (little-endian) */
+ fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+ /* configure FDIRHASH register */
+ fdirhash = ixgbe_atr_compute_sig_hash_82599(input);
+
+	/* we only want the 15-bit bucket hash, so drop the upper bits */
+ fdirhash &= IXGBE_ATR_HASH_MASK;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
fdircmd |= IXGBE_FDIRCMD_LAST;
fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
return IXGBE_SUCCESS;
@@ -2115,46 +2197,30 @@ s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
* ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
- * Starts the hardware using the generic start_hw function.
- * Then performs revision-specific operations:
- * Clears the rate limiter registers.
+ * Starts the hardware using the generic start_hw function
+ * and the generation-specific start_hw function.
+ * Then performs revision-specific operations, if any.
**/
s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
{
- u32 i;
- u32 regval;
s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_start_hw_rev_1__82599");
ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
- /* Clear the rate limiters */
- for (i = 0; i < hw->mac.max_tx_queues; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
- }
- IXGBE_WRITE_FLUSH(hw);
-
- /* Disable relaxed ordering */
- for (i = 0; i < hw->mac.max_tx_queues; i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
- }
-
- for (i = 0; i < hw->mac.max_rx_queues; i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
+ ret_val = ixgbe_start_hw_gen2(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
/* We need to run link autotry after the driver loads */
hw->mac.autotry_restart = TRUE;
if (ret_val == IXGBE_SUCCESS)
ret_val = ixgbe_verify_fw_version_82599(hw);
+out:
return ret_val;
}
@@ -2174,8 +2240,14 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
/* Detect PHY if not unknown - returns success if already detected. */
status = ixgbe_identify_phy_generic(hw);
- if (status != IXGBE_SUCCESS)
- status = ixgbe_identify_sfp_module_generic(hw);
+ if (status != IXGBE_SUCCESS) {
+ /* 82599 10GBASE-T requires an external PHY */
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+ goto out;
+ else
+ status = ixgbe_identify_sfp_module_generic(hw);
+ }
+
/* Set PHY type none if no PHY detected */
if (hw->phy.type == ixgbe_phy_unknown) {
hw->phy.type = ixgbe_phy_none;
@@ -2186,6 +2258,7 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
if (hw->phy.type == ixgbe_phy_sfp_unsupported)
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+out:
return status;
}
@@ -2205,14 +2278,16 @@ u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
u16 ext_ability = 0;
u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
hw->phy.ops.identify(hw);
- if (hw->phy.type == ixgbe_phy_tn ||
- hw->phy.type == ixgbe_phy_aq ||
- hw->phy.type == ixgbe_phy_cu_unknown) {
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
+ case ixgbe_phy_cu_unknown:
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
@@ -2222,6 +2297,8 @@ u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
goto out;
+ default:
+ break;
}
switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -2288,11 +2365,15 @@ sfp_check:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+ hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
break;
default:
break;
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index a65686e..4b632d2 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -37,6 +37,7 @@
extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
/**
* ixgbe_init_shared_code - Initialize the shared code
@@ -68,6 +69,9 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
case ixgbe_mac_82599EB:
status = ixgbe_init_ops_82599(hw);
break;
+ case ixgbe_mac_82599_vf:
+ status = ixgbe_init_ops_vf(hw);
+ break;
default:
status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
@@ -110,10 +114,15 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_XAUI_LOM:
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
+ case IXGBE_DEV_ID_82599_VF:
+ hw->mac.type = ixgbe_mac_82599_vf;
+ break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
@@ -281,6 +290,20 @@ s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
}
/**
+ * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM
+ * @hw: pointer to hardware structure
+ * @bs: the fcoe boot status
+ *
+ * This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status,
+ (hw, bs),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_get_bus_info - Set PCI bus info
* @hw: pointer to hardware structure
*
@@ -330,6 +353,32 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ * ixgbe_read_pba_length - Reads part number string length from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number length from the EEPROM.
+ * Returns expected buffer size in pba_num_size.
+ **/
+s32 ixgbe_read_pba_length(struct ixgbe_hw *hw, u32 *pba_num_size)
+{
+ return ixgbe_read_pba_length_generic(hw, pba_num_size);
+}
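+/*
+ * A minimal usage sketch (hypothetical caller; dev is assumed): size the
+ * buffer with ixgbe_read_pba_length() before fetching the string.
+ *
+ *	u32 pba_size;
+ *	u8 pba[32];
+ *
+ *	if (ixgbe_read_pba_length(hw, &pba_size) == IXGBE_SUCCESS &&
+ *	    pba_size <= sizeof(pba) &&
+ *	    ixgbe_read_pba_string(hw, pba, pba_size) == IXGBE_SUCCESS)
+ *		device_printf(dev, "Part number: %s\n", pba);
+ */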
+
+/**
* ixgbe_read_pba_num - Reads part number from EEPROM
* @hw: pointer to hardware structure
* @pba_num: stores the part number from the EEPROM
@@ -352,9 +401,7 @@ s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
s32 status = IXGBE_SUCCESS;
if (hw->phy.type == ixgbe_phy_unknown) {
- status = ixgbe_call_func(hw,
- hw->phy.ops.identify,
- (hw),
+ status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw),
IXGBE_NOT_IMPLEMENTED);
}
@@ -490,6 +537,44 @@ s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
}
/**
+ * ixgbe_disable_tx_laser - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * Disables the Tx laser on SFI optics, if the MAC provides the hook.
+ **/
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.disable_tx_laser)
+ hw->mac.ops.disable_tx_laser(hw);
+}
+
+/**
+ * ixgbe_enable_tx_laser - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * Enables the Tx laser on SFI optics, if the MAC provides the hook.
+ **/
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_tx_laser)
+ hw->mac.ops.enable_tx_laser(hw);
+}
+
+/**
+ * ixgbe_flap_tx_laser - flap Tx laser to start autotry process
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support, it flaps
+ * the Tx laser to alert the link partner to restart the autotry process
+ * on its end.
+ **/
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.flap_tx_laser)
+ hw->mac.ops.flap_tx_laser(hw);
+}
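+/*
+ * Sketch of the intended sequence (hypothetical caller; autoneg is
+ * assumed): after changing the advertised speeds, flap the laser so an
+ * SFI link partner restarts its autotry process.
+ *
+ *	ixgbe_setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg, FALSE);
+ *	ixgbe_flap_tx_laser(hw);
+ */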
+
+/**
* ixgbe_setup_link - Set link speed
* @hw: pointer to hardware structure
* @speed: new link speed
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index 48c523c..a27dfe6 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -52,6 +52,8 @@ u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+s32 ixgbe_read_pba_length(struct ixgbe_hw *hw, u32 *pba_num_size);
s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
@@ -68,6 +70,9 @@ s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg, bool autoneg_wait_to_complete);
s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
@@ -119,43 +124,43 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_input *input,
u8 queue);
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *masks,
u16 soft_id,
u8 queue);
-u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *input, u32 key);
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan_id);
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr);
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr);
-s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, u32 src_addr_1,
- u32 src_addr_2, u32 src_addr_3,
- u32 src_addr_4);
-s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, u32 dst_addr_1,
- u32 dst_addr_2, u32 dst_addr_3,
- u32 dst_addr_4);
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port);
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port);
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte);
-s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool);
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type);
-s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan_id);
-s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr);
-s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr);
-s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, u32 *src_addr_1,
- u32 *src_addr_2, u32 *src_addr_3,
- u32 *src_addr_4);
-s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, u32 *dst_addr_1,
- u32 *dst_addr_2, u32 *dst_addr_3,
- u32 *dst_addr_4);
-s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port);
-s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port);
-s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
- u16 *flex_byte);
-s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool);
-s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type);
+u16 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *input, u32 key);
+s32 ixgbe_atr_set_vlan_id_82599(union ixgbe_atr_input *input, __be16 vlan_id);
+s32 ixgbe_atr_set_src_ipv4_82599(union ixgbe_atr_input *input, __be32 src_addr);
+s32 ixgbe_atr_set_dst_ipv4_82599(union ixgbe_atr_input *input, __be32 dst_addr);
+s32 ixgbe_atr_set_src_ipv6_82599(union ixgbe_atr_input *input, __be32 src_addr_0,
+ __be32 src_addr_1, __be32 src_addr_2,
+ __be32 src_addr_3);
+s32 ixgbe_atr_set_dst_ipv6_82599(union ixgbe_atr_input *input, __be32 dst_addr_0,
+ __be32 dst_addr_1, __be32 dst_addr_2,
+ __be32 dst_addr_3);
+s32 ixgbe_atr_set_src_port_82599(union ixgbe_atr_input *input, __be16 src_port);
+s32 ixgbe_atr_set_dst_port_82599(union ixgbe_atr_input *input, __be16 dst_port);
+s32 ixgbe_atr_set_flex_byte_82599(union ixgbe_atr_input *input, __be16 flex_byte);
+s32 ixgbe_atr_set_vm_pool_82599(union ixgbe_atr_input *input, u8 vm_pool);
+s32 ixgbe_atr_set_l4type_82599(union ixgbe_atr_input *input, u8 l4type);
+s32 ixgbe_atr_get_vlan_id_82599(union ixgbe_atr_input *input, __be16 *vlan_id);
+s32 ixgbe_atr_get_src_ipv4_82599(union ixgbe_atr_input *input, __be32 *src_addr);
+s32 ixgbe_atr_get_dst_ipv4_82599(union ixgbe_atr_input *input, __be32 *dst_addr);
+s32 ixgbe_atr_get_src_ipv6_82599(union ixgbe_atr_input *input, __be32 *src_addr_0,
+ __be32 *src_addr_1, __be32 *src_addr_2,
+ __be32 *src_addr_3);
+s32 ixgbe_atr_get_dst_ipv6_82599(union ixgbe_atr_input *input, __be32 *dst_addr_0,
+ __be32 *dst_addr_1, __be32 *dst_addr_2,
+ __be32 *dst_addr_3);
+s32 ixgbe_atr_get_src_port_82599(union ixgbe_atr_input *input, __be16 *src_port);
+s32 ixgbe_atr_get_dst_port_82599(union ixgbe_atr_input *input, __be16 *dst_port);
+s32 ixgbe_atr_get_flex_byte_82599(union ixgbe_atr_input *input,
+ __be16 *flex_byte);
+s32 ixgbe_atr_get_vm_pool_82599(union ixgbe_atr_input *input, u8 *vm_pool);
+s32 ixgbe_atr_get_l4type_82599(union ixgbe_atr_input *input, u8 *l4type);
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 *data);
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
@@ -168,6 +173,7 @@ s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
#endif /* _IXGBE_API_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index 217c477..a5b972c 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -33,6 +33,7 @@
/*$FreeBSD$*/
#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
#include "ixgbe_api.h"
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
@@ -50,6 +51,13 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
u16 *san_mac_offset);
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
+
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
/**
@@ -138,7 +146,6 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
u32 ctrl_ext;
- s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_start_hw_generic");
@@ -165,7 +172,45 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* Clear adapter stopped flag */
hw->adapter_stopped = FALSE;
- return ret_val;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_start_hw_gen2 - Init sequence for common device family
+ * @hw: pointer to hw structure
+ *
+ * Performs the init sequence common to the second generation
+ * of 10 GbE devices.
+ * Devices in the second generation:
+ * 82599
+ **/
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 regval;
+
+ /* Clear the rate limiters */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
+ }
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Disable relaxed ordering */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+ regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+ }
+
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ return IXGBE_SUCCESS;
}
/**
@@ -180,7 +225,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
**/
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
- s32 status = IXGBE_SUCCESS;
+ s32 status;
DEBUGFUNC("ixgbe_init_hw_generic");
@@ -279,15 +324,194 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
IXGBE_READ_REG(hw, IXGBE_BPTC);
for (i = 0; i < 16; i++) {
IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- IXGBE_READ_REG(hw, IXGBE_QBRC(i));
IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ }
}
return IXGBE_SUCCESS;
}
/**
+ * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ DEBUGFUNC("ixgbe_read_pba_string_generic");
+
+ if (pba_num == NULL) {
+ DEBUGOUT("PBA string buffer was null\n");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /*
+	 * If data is not the pointer guard, the PBA must be in legacy format,
+	 * which means pba_ptr is actually our second data word for the PBA
+	 * number and we can decode it into an ASCII string.
+ */
+ if (data != IXGBE_PBANUM_PTR_GUARD) {
+ DEBUGOUT("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (data >> 12) & 0xF;
+ pba_num[1] = (data >> 8) & 0xF;
+ pba_num[2] = (data >> 4) & 0xF;
+ pba_num[3] = data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return IXGBE_SUCCESS;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return IXGBE_ERR_PBA_SECTION;
+ }
+
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return IXGBE_SUCCESS;
+}
+
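The legacy branch above decodes two EEPROM words nibble by nibble into a
ten-character "XXXXXX-0XX" part number. A minimal standalone sketch of just
that decode, using hypothetical word values rather than anything read from a
real EEPROM:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror of the legacy nibble extraction in ixgbe_read_pba_string_generic() */
    static void pba_legacy_decode(uint16_t data, uint16_t ptr, char out[11])
    {
        int i;

        out[0] = (data >> 12) & 0xF;
        out[1] = (data >> 8) & 0xF;
        out[2] = (data >> 4) & 0xF;
        out[3] = data & 0xF;
        out[4] = (ptr >> 12) & 0xF;
        out[5] = (ptr >> 8) & 0xF;
        out[6] = '-';
        out[7] = 0;                             /* third group leads with '0' */
        out[8] = (ptr >> 4) & 0xF;
        out[9] = ptr & 0xF;
        out[10] = '\0';

        /* switch every nibble except the '-' to a hex character */
        for (i = 0; i < 10; i++) {
            if (out[i] < 0xA)
                out[i] += '0';
            else if (out[i] < 0x10)
                out[i] += 'A' - 0xA;
        }
    }

    int main(void)
    {
        char pba[11];

        pba_legacy_decode(0x1234, 0x56F0, pba); /* hypothetical words */
        printf("%s\n", pba);                    /* prints "123456-0F0" */
        return 0;
    }
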
+/**
+ * ixgbe_read_pba_length_generic - Reads part number length from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number length from the EEPROM.
+ * Returns expected buffer size in pba_num_size
+ **/
+s32 ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, u32 *pba_num_size)
+{
+ s32 ret_val;
+ u16 data;
+ u16 pba_ptr;
+ u16 length;
+
+ DEBUGFUNC("ixgbe_read_pba_length_generic");
+
+ if (pba_num_size == NULL) {
+ DEBUGOUT("PBA buffer size was null\n");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* if data is not the ptr guard, the PBA must be in legacy format */
+ if (data != IXGBE_PBANUM_PTR_GUARD) {
+ *pba_num_size = 11;
+ return IXGBE_SUCCESS;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return IXGBE_ERR_PBA_SECTION;
+ }
+
+ /*
+ * Convert from length in u16 values to u8 chars, add 1 for NULL,
+ * and subtract 2 because length field is included in length.
+ */
+ *pba_num_size = ((u32)length * 2) - 1;
+
+ return IXGBE_SUCCESS;
+}
+
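The two PBA routines are meant to be paired: query the expected length first,
allocate, then read the string. A sketch of that pairing, assuming an
initialized struct ixgbe_hw *hw; malloc/printf are userland stand-ins for the
kernel allocator and logging:

    u32 size;
    u8 *pba;

    if (ixgbe_read_pba_length_generic(hw, &size) != IXGBE_SUCCESS)
        return;
    pba = malloc(size);
    if (pba == NULL)
        return;
    if (ixgbe_read_pba_string_generic(hw, pba, size) == IXGBE_SUCCESS)
        printf("PBA: %s\n", pba);
    free(pba);
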
+/**
* ixgbe_read_pba_num_generic - Reads part number from EEPROM
* @hw: pointer to hardware structure
* @pba_num: stores the part number from the EEPROM
@@ -305,6 +529,9 @@ s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
if (ret_val) {
DEBUGOUT("NVM Read Error\n");
return ret_val;
+ } else if (data == IXGBE_PBANUM_PTR_GUARD) {
+ DEBUGOUT("NVM Not supported\n");
+ return IXGBE_NOT_IMPLEMENTED;
}
*pba_num = (u32)(data << 16);
@@ -739,6 +966,49 @@ out:
}
/**
+ * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ u32 eewr;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_write_eewr_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START;
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom write EEWR timed out\n");
+ goto out;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom write EEWR timed out\n");
+ goto out;
+ }
+
+out:
+ return status;
+}
+
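The EEWR word packs the target word address, the data, and the start bit into
a single register write. Assuming the field layout from ixgbe_type.h (start
bit at position 0, address shifted left by 2, data in the top 16 bits),
writing data 0xBEEF to offset 0x10 encodes as 0xBEEF0041:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed EEWR layout: bit 0 START, bits [15:2] address, bits [31:16] data */
    #define RW_REG_START  0x00000001
    #define RW_ADDR_SHIFT 2
    #define RW_REG_DATA   16

    int main(void)
    {
        uint16_t offset = 0x10, data = 0xBEEF;
        uint32_t eewr = ((uint32_t)offset << RW_ADDR_SHIFT) |
            ((uint32_t)data << RW_REG_DATA) | RW_REG_START;

        printf("EEWR = 0x%08X\n", eewr);        /* EEWR = 0xBEEF0041 */
        return 0;
    }
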
+/**
* ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
* @hw: pointer to hardware structure
* @ee_reg: EEPROM flag for polling
@@ -810,15 +1080,15 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
status = IXGBE_ERR_EEPROM;
}
- }
- /* Setup EEPROM for Read/Write */
- if (status == IXGBE_SUCCESS) {
- /* Clear CS and SK */
- eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- IXGBE_WRITE_FLUSH(hw);
- usec_delay(1);
+ /* Setup EEPROM for Read/Write */
+ if (status == IXGBE_SUCCESS) {
+ /* Clear CS and SK */
+ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+ }
}
return status;
}
@@ -838,6 +1108,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_eeprom_semaphore");
+
/* Get SMBI software semaphore between device drivers first */
for (i = 0; i < timeout; i++) {
/*
@@ -1306,36 +1577,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
DEBUGFUNC("ixgbe_set_rar_generic");
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ DEBUGOUT1("RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
/* setup VMDq pool selection before this RAR gets enabled */
hw->mac.ops.set_vmdq(hw, index, vmdq);
- /* Make sure we are using a valid rar index range */
- if (index < rar_entries) {
- /*
- * HW expects these in little endian so we reverse the byte
- * order from network order (big endian) to little endian
- */
- rar_low = ((u32)addr[0] |
- ((u32)addr[1] << 8) |
- ((u32)addr[2] << 16) |
- ((u32)addr[3] << 24));
- /*
- * Some parts put the VMDq setting in the extra RAH bits,
- * so save everything except the lower 16 bits that hold part
- * of the address and the address valid bit.
- */
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
- rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
- rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+ /*
+ * HW expects these in little endian so we reverse the byte
+ * order from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] |
+ ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) |
+ ((u32)addr[3] << 24));
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+ rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
- if (enable_addr != 0)
- rar_high |= IXGBE_RAH_AV;
+ if (enable_addr != 0)
+ rar_high |= IXGBE_RAH_AV;
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
- } else {
- DEBUGOUT1("RAR index %d is out of range.\n", index);
- }
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
return IXGBE_SUCCESS;
}
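The byte reversal above stores the station address least significant byte
first: for a hypothetical MAC 00:1B:21:AA:BB:CC, RAL receives 0xAA211B00 and
the low 16 bits of RAH receive 0xCCBB. A standalone sketch of the packing:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* hypothetical station address in network (big endian) order */
        uint8_t addr[6] = { 0x00, 0x1B, 0x21, 0xAA, 0xBB, 0xCC };
        uint32_t rar_low, rar_high;

        /* HW expects little endian: addr[0] is the least significant byte */
        rar_low = (uint32_t)addr[0] |
            ((uint32_t)addr[1] << 8) |
            ((uint32_t)addr[2] << 16) |
            ((uint32_t)addr[3] << 24);
        rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

        printf("RAL = 0x%08X, RAH[15:0] = 0x%04X\n", rar_low, rar_high);
        return 0;       /* RAL = 0xAA211B00, RAH[15:0] = 0xCCBB */
    }
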
@@ -1355,21 +1627,22 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
DEBUGFUNC("ixgbe_clear_rar_generic");
/* Make sure we are using a valid rar index range */
- if (index < rar_entries) {
- /*
- * Some parts put the VMDq setting in the extra RAH bits,
- * so save everything except the lower 16 bits that hold part
- * of the address and the address valid bit.
- */
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
- rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
- } else {
+ if (index >= rar_entries) {
DEBUGOUT1("RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
}
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
/* clear VMDq pool/queue selection for this RAR */
hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
@@ -1598,7 +1871,6 @@ void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
u32 vector;
u32 vector_bit;
u32 vector_reg;
- u32 mta_reg;
DEBUGFUNC("ixgbe_set_mta");
@@ -1618,9 +1890,7 @@ void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
*/
vector_reg = (vector >> 5) & 0x7F;
vector_bit = vector & 0x1F;
- mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
- IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
/**
@@ -1650,18 +1920,21 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
hw->addr_ctrl.num_mc_addrs = mc_addr_count;
hw->addr_ctrl.mta_in_use = 0;
- /* Clear the MTA */
+ /* Clear mta_shadow */
DEBUGOUT(" Clearing MTA\n");
- for (i = 0; i < hw->mac.mcft_size; i++)
- IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
- /* Add the new addresses */
+ /* Update mta_shadow */
for (i = 0; i < mc_addr_count; i++) {
DEBUGOUT(" Adding the multicast addresses:\n");
ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
}
/* Enable mta */
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+ hw->mac.mta_shadow[i]);
+
if (hw->addr_ctrl.mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
@@ -1720,12 +1993,13 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 mflcn_reg, fccfg_reg;
u32 reg;
u32 rx_pba_size;
+ u32 fcrtl, fcrth;
DEBUGFUNC("ixgbe_fc_enable_generic");
/* Negotiate the fc mode to use */
ret_val = ixgbe_fc_autoneg(hw);
- if (ret_val)
+ if (ret_val == IXGBE_ERR_FLOW_CONTROL)
goto out;
/* Disable any previous flow control settings */
@@ -1747,7 +2021,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
*/
switch (hw->fc.current_mode) {
case ixgbe_fc_none:
- /* Flow control is disabled by software override or autoneg.
+ /*
+ * Flow control is disabled by software override or autoneg.
* The code below will actually disable it in the HW.
*/
break;
@@ -1786,39 +2061,21 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
- /* Thresholds are different for link flow control when in DCB mode */
- if (reg & IXGBE_MTQC_RT_ENA) {
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
- /* Always disable XON for LFC when in DCB mode */
- reg = (rx_pba_size >> 5) & 0xFFE0;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
+ fcrth = (rx_pba_size - hw->fc.high_water) << 10;
+ fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
- reg = (rx_pba_size >> 2) & 0xFFE0;
- if (hw->fc.current_mode & ixgbe_fc_tx_pause)
- reg |= IXGBE_FCRTH_FCEN;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
- } else {
- /* Set up and enable Rx high/low water mark thresholds,
- * enable XON. */
- if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon) {
- IXGBE_WRITE_REG(hw,
- IXGBE_FCRTL_82599(packetbuf_num),
- (hw->fc.low_water |
- IXGBE_FCRTL_XONE));
- } else {
- IXGBE_WRITE_REG(hw,
- IXGBE_FCRTL_82599(packetbuf_num),
- hw->fc.low_water);
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
- }
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ fcrth |= IXGBE_FCRTH_FCEN;
+ if (hw->fc.send_xon)
+ fcrtl |= IXGBE_FCRTL_XONE;
}
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+
/* Configure pause time (2 TCs per register) */
reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
if ((packetbuf_num & 1) == 0)
@@ -1842,14 +2099,15 @@ out:
**/
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
- s32 ret_val = IXGBE_SUCCESS;
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
ixgbe_link_speed speed;
- u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
- u32 links2, anlp1_reg, autoc_reg, links;
bool link_up;
DEBUGFUNC("ixgbe_fc_autoneg");
+ if (hw->fc.disable_fc_autoneg)
+ goto out;
+
/*
* AN should have completed when the cable was plugged in.
* Look for reasons to bail out. Bail out if:
@@ -1860,153 +2118,201 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
* So use link_up_wait_to_complete=FALSE.
*/
hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
-
- if (hw->fc.disable_fc_autoneg || (!link_up)) {
- hw->fc.fc_was_autonegged = FALSE;
- hw->fc.current_mode = hw->fc.requested_mode;
+ if (!link_up) {
+ ret_val = IXGBE_ERR_FLOW_CONTROL;
goto out;
}
- /*
- * On backplane, bail out if
- * - backplane autoneg was not completed, or if
- * - we are 82599 and link partner is not AN enabled
- */
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
- links = IXGBE_READ_REG(hw, IXGBE_LINKS);
- if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
- hw->fc.fc_was_autonegged = FALSE;
- hw->fc.current_mode = hw->fc.requested_mode;
- goto out;
- }
+ switch (hw->phy.media_type) {
+ /* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber:
+ if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+ ret_val = ixgbe_fc_autoneg_fiber(hw);
+ break;
- if (hw->mac.type == ixgbe_mac_82599EB) {
- links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
- if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
- hw->fc.fc_was_autonegged = FALSE;
- hw->fc.current_mode = hw->fc.requested_mode;
- goto out;
- }
- }
+ /* Autoneg flow control on backplane adapters */
+ case ixgbe_media_type_backplane:
+ ret_val = ixgbe_fc_autoneg_backplane(hw);
+ break;
+
+ /* Autoneg flow control on copper adapters */
+ case ixgbe_media_type_copper:
+ if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
+ ret_val = ixgbe_fc_autoneg_copper(hw);
+ break;
+
+ default:
+ break;
}
+out:
+ if (ret_val == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = TRUE;
+ } else {
+ hw->fc.fc_was_autonegged = FALSE;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37 on 1 gig fiber.
+ **/
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+ u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+ s32 ret_val;
+
/*
* On multispeed fiber at 1g, bail out if
* - link is up but AN did not complete, or if
* - link is up and AN completed but timed out
*/
- if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
- linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
- if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
- ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
- hw->fc.fc_was_autonegged = FALSE;
- hw->fc.current_mode = hw->fc.requested_mode;
- goto out;
- }
+
+ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
}
+ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+ ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+ pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE,
+ IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+ u32 links2, anlp1_reg, autoc_reg, links;
+ s32 ret_val;
+
/*
- * Bail out on
- * - copper or CX4 adapters
- * - fiber adapters running at 10gig
+ * On backplane, bail out if
+ * - backplane autoneg was not completed, or if
+ * - we are 82599 and link partner is not AN enabled
*/
- if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->phy.media_type == ixgbe_media_type_cx4) ||
- ((hw->phy.media_type == ixgbe_media_type_fiber) &&
- (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
+ links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
hw->fc.fc_was_autonegged = FALSE;
hw->fc.current_mode = hw->fc.requested_mode;
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
}
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+ hw->fc.fc_was_autonegged = FALSE;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+ }
/*
- * Read the AN advertisement and LP ability registers and resolve
+ * Read the 10g AN autoc and LP ability registers and resolve
* local flow control settings accordingly
*/
- if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
- (hw->phy.media_type != ixgbe_media_type_backplane)) {
- pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
- if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
- /*
- * Now we need to check if the user selected Rx ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == ixgbe_fc_full) {
- hw->fc.current_mode = ixgbe_fc_full;
- DEBUGOUT("Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control=RX PAUSE frames only\n");
- }
- } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_tx_pause;
- DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
- } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_none;
- DEBUGOUT("Flow Control = NONE.\n");
- }
- }
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+ anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+ IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+ u16 technology_ability_reg = 0;
+ u16 lp_technology_ability_reg = 0;
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &technology_ability_reg);
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &lp_technology_ability_reg);
+
+ return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+ (u32)lp_technology_ability_reg,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ * ixgbe_negotiate_fc - Negotiate flow control
+ * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
+ **/
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+ if ((!(adv_reg)) || (!(lp_reg)))
+ return IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+ if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
/*
- * Read the 10g AN autoc and LP ability registers and resolve
- * local flow control settings accordingly
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
*/
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-
- if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
- /*
- * Now we need to check if the user selected Rx ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == ixgbe_fc_full) {
- hw->fc.current_mode = ixgbe_fc_full;
- DEBUGOUT("Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control=RX PAUSE frames only\n");
- }
- } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
- (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_tx_pause;
- DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
- } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
- (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
- !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
} else {
- hw->fc.current_mode = ixgbe_fc_none;
- DEBUGOUT("Flow Control = NONE.\n");
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control=RX PAUSE frames only\n");
}
+ } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+ } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
}
- /* Record that current_mode is the result of a successful autoneg */
- hw->fc.fc_was_autonegged = TRUE;
-
-out:
- return ret_val;
+ return IXGBE_SUCCESS;
}
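The resolution above is the standard IEEE pause negotiation: symmetric pause
advertised on both sides yields full flow control (or rx-only when that is
all the user requested), and the two asymmetric intersections yield tx-only
or rx-only. A standalone model of the same truth table, with requested_mode
fixed to full and illustrative names:

    #include <stdio.h>

    enum fc { FC_NONE, FC_RX, FC_TX, FC_FULL };

    /* Model of ixgbe_negotiate_fc() with requested_mode == full */
    static enum fc resolve(int adv_sym, int adv_asm, int lp_sym, int lp_asm)
    {
        if (adv_sym && lp_sym)
            return FC_FULL;
        if (!adv_sym && adv_asm && lp_sym && lp_asm)
            return FC_TX;
        if (adv_sym && adv_asm && !lp_sym && lp_asm)
            return FC_RX;
        return FC_NONE;
    }

    int main(void)
    {
        static const char *name[] = { "none", "rx", "tx", "full" };
        int a, lp;

        for (a = 0; a < 4; a++)         /* local sym/asm bit pairs */
            for (lp = 0; lp < 4; lp++)  /* link partner sym/asm bit pairs */
                printf("adv=%d%d lp=%d%d -> %s\n",
                    (a >> 1) & 1, a & 1, (lp >> 1) & 1, lp & 1,
                    name[resolve((a >> 1) & 1, a & 1,
                    (lp >> 1) & 1, lp & 1)]);
        return 0;
    }
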
/**
@@ -2018,11 +2324,11 @@ out:
s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
{
s32 ret_val = IXGBE_SUCCESS;
- u32 reg;
+ u32 reg = 0, reg_bp = 0;
+ u16 reg_cu = 0;
DEBUGFUNC("ixgbe_setup_fc");
-
/* Validate the packetbuf configuration */
if (packetbuf_num < 0 || packetbuf_num > 7) {
DEBUGOUT1("Invalid packet buffer number [%d], expected range is"
@@ -2059,11 +2365,26 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
hw->fc.requested_mode = ixgbe_fc_full;
/*
- * Set up the 1G flow control advertisement registers so the HW will be
- * able to do fc autoneg once the cable is plugged in. If we end up
- * using 10g instead, this is harmless.
+ * Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do fc autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
*/
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_backplane:
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ break;
+
+ case ixgbe_media_type_copper:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
+ break;
+
+ default:
+ break;
+ }
/*
* The possible values of fc.requested_mode are:
@@ -2079,6 +2400,11 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
case ixgbe_fc_none:
/* Flow control completely disabled by software override. */
reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
break;
case ixgbe_fc_rx_pause:
/*
@@ -2090,6 +2416,11 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
* disable the adapter's ability to send PAUSE frames.
*/
reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
break;
case ixgbe_fc_tx_pause:
/*
@@ -2098,10 +2429,22 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
*/
reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
+ } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+ reg_cu |= (IXGBE_TAF_ASM_PAUSE);
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
+ }
break;
case ixgbe_fc_full:
/* Flow control (both Rx and Tx) is enabled by SW override. */
reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
break;
default:
DEBUGOUT("Flow control param set incorrectly\n");
@@ -2110,6 +2453,10 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
break;
}
+ /*
+ * Enable auto-negotiation between the MAC & PHY;
+ * the MAC will advertise clause 37 flow control.
+ */
IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
@@ -2121,64 +2468,20 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
/*
- * Set up the 10G flow control advertisement registers so the HW
- * can do fc autoneg once the cable is plugged in. If we end up
- * using 1g instead, this is harmless.
- */
- reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-
- /*
- * The possible values of fc.requested_mode are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but
- * we do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- * other: Invalid.
+ * AUTOC restart handles negotiation of 1G and 10G on backplane
+ * and copper. There is no need to set the PCS1GCTL register.
+ */
- switch (hw->fc.requested_mode) {
- case ixgbe_fc_none:
- /* Flow control completely disabled by software override. */
- reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
- break;
- case ixgbe_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is
- * disabled by software override. Since there really
- * isn't a way to advertise that we are capable of RX
- * Pause ONLY, we will advertise that we support both
- * symmetric and asymmetric Rx PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
- */
- reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
- break;
- case ixgbe_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
- * disabled by software override.
- */
- reg |= (IXGBE_AUTOC_ASM_PAUSE);
- reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
- break;
- case ixgbe_fc_full:
- /* Flow control (both Rx and Tx) is enabled by SW override. */
- reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
- break;
- default:
- DEBUGOUT("Flow control param set incorrectly\n");
- ret_val = IXGBE_ERR_CONFIG;
- goto out;
- break;
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+ } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+ (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
}
- /*
- * AUTOC restart handles negotiation of 1G and 10G. There is
- * no need to set the PCS1GCTL register.
- */
- reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
- DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+ DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
out:
return ret_val;
}
@@ -2371,7 +2674,6 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
if (!link_up) {
-
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
@@ -2632,37 +2934,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
DEBUGFUNC("ixgbe_clear_vmdq_generic");
- if (rar < rar_entries) {
- mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
- if (!mpsar_lo && !mpsar_hi)
- goto done;
+ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
- if (mpsar_lo) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
- mpsar_lo = 0;
- }
- if (mpsar_hi) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
- mpsar_hi = 0;
- }
- } else if (vmdq < 32) {
- mpsar_lo &= ~(1 << vmdq);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
- } else {
- mpsar_hi &= ~(1 << (vmdq - 32));
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
- }
+ if (!mpsar_lo && !mpsar_hi)
+ goto done;
- /* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
- hw->mac.ops.clear_rar(hw, rar);
+ if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+ if (mpsar_lo) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ mpsar_lo = 0;
+ }
+ if (mpsar_hi) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ mpsar_hi = 0;
+ }
+ } else if (vmdq < 32) {
+ mpsar_lo &= ~(1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
} else {
- DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ mpsar_hi &= ~(1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
}
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ hw->mac.ops.clear_rar(hw, rar);
done:
return IXGBE_SUCCESS;
}
@@ -2680,18 +2983,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
DEBUGFUNC("ixgbe_set_vmdq_generic");
- if (rar < rar_entries) {
- if (vmdq < 32) {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
- } else {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
- }
- } else {
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ if (vmdq < 32) {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar |= 1 << vmdq;
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+ } else {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ mpsar |= 1 << (vmdq - 32);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
}
return IXGBE_SUCCESS;
}
@@ -2980,8 +3285,11 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
IXGBE_LINKS_SPEED_1G_82599)
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- else
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_100_82599)
*speed = IXGBE_LINK_SPEED_100_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
/* if link is down, zero out the current_mode */
if (*link_up == FALSE) {
@@ -3038,3 +3346,138 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
wwn_prefix_out:
return IXGBE_SUCCESS;
}
+
+/**
+ * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
+ * @hw: pointer to hardware structure
+ * @bs: the fcoe boot status
+ *
+ * This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
+{
+ u16 offset, caps, flags;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
+
+ /* clear output first */
+ *bs = ixgbe_fcoe_bootstatus_unavailable;
+
+ /* check if FCOE IBA block is present */
+ offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
+ status = hw->eeprom.ops.read(hw, offset, &caps);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
+ goto out;
+
+ /* check if iSCSI FCOE block is populated */
+ status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if ((offset == 0) || (offset == 0xFFFF))
+ goto out;
+
+ /* read fcoe flags in iSCSI FCOE block */
+ offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
+ status = hw->eeprom.ops.read(hw, offset, &flags);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
+ *bs = ixgbe_fcoe_bootstatus_enabled;
+ else
+ *bs = ixgbe_fcoe_bootstatus_disabled;
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ * control
+ * @hw: pointer to hardware structure
+ *
+ * There are several phys that do not support autoneg flow control. This
+ * function check the device id to see if the associated phy supports
+ * autoneg flow control.
+ **/
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+ DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ return IXGBE_SUCCESS;
+ default:
+ return IXGBE_ERR_FC_NOT_SUPPORTED;
+ }
+}
+
+/**
+ * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for anti-spoofing
+ * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ *
+ **/
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+{
+ int j;
+ int pf_target_reg = pf >> 3;
+ int pf_target_shift = pf % 8;
+ u32 pfvfspoof = 0;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (enable)
+ pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
+
+ /*
+ * PFVFSPOOF register array is size 8 with 8 bits assigned to
+ * MAC anti-spoof enables in each register array element.
+ */
+ for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+ /* If not enabling anti-spoofing then done */
+ if (!enable)
+ return;
+
+ /*
+ * The PF should be allowed to spoof so that it can support
+ * emulation mode NICs. Reset the bit assigned to the PF
+ */
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
+ pfvfspoof ^= (1 << pf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+}
+
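The PFVFSPOOF indexing packs eight pools per 32-bit register: pool n maps to
register n >> 3 and MAC bit n % 8, with the VLAN enables sitting
IXGBE_SPOOF_VLANAS_SHIFT bits higher (8 is assumed here, matching the type
header). A quick standalone check of the mapping:

    #include <stdio.h>

    #define SPOOF_VLANAS_SHIFT 8    /* assumed IXGBE_SPOOF_VLANAS_SHIFT */

    int main(void)
    {
        int pool;

        for (pool = 0; pool < 16; pool++)
            printf("pool %2d: PFVFSPOOF(%d) MAC bit %d, VLAN bit %d\n",
                pool, pool >> 3, pool % 8,
                pool % 8 + SPOOF_VLANAS_SHIFT);
        return 0;
    }
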
+/**
+ * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ *
+ **/
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
+ u32 pfvfspoof;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
diff --git a/sys/dev/ixgbe/ixgbe_common.h b/sys/dev/ixgbe/ixgbe_common.h
index f0707e2..bd58b52 100644
--- a/sys/dev/ixgbe/ixgbe_common.h
+++ b/sys/dev/ixgbe/ixgbe_common.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -47,8 +47,12 @@ u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, u32 *pba_num_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -60,6 +64,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
@@ -111,4 +116,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
#endif /* IXGBE_COMMON */
diff --git a/sys/dev/ixgbe/ixgbe_mbx.c b/sys/dev/ixgbe/ixgbe_mbx.c
new file mode 100644
index 0000000..fbc951d
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,743 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2010, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixgbe_type.h"
+#include "ixgbe_mbx.h"
+
+/**
+ * ixgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_read_mbx");
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_mbx");
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_msg");
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_ack");
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_rst");
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("ixgbe_poll_for_msg");
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+out:
+ return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("ixgbe_poll_for_ack");
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+out:
+ return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_read_posted_mbx");
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_write_posted_mbx");
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
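A VF request/reply exchange is therefore one posted write followed by one
posted read. A sketch of how a caller might drive the posted API, assuming an
initialized hw with the VF mailbox ops installed; the request opcode is
hypothetical and mbx_id is unused on the VF side:

    u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
    s32 err;

    msgbuf[0] = 0x01;       /* hypothetical request opcode for the PF */
    err = ixgbe_write_posted_mbx(hw, msgbuf, 1, 0);
    if (err == IXGBE_SUCCESS)
        err = ixgbe_read_posted_mbx(hw, msgbuf, 1, 0);
    if (err == IXGBE_SUCCESS)
        DEBUGOUT1("PF replied 0x%08x\n", msgbuf[0]);
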
+/**
+ * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+/**
+ * ixgbe_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+static u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+ u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
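The VFMAILBOX status bits are read-to-clear, so a single hardware read could
silently consume a PFSTS or PFACK indication that a later check still needs.
The v2p_mailbox shadow accumulates the R2C bits across reads, and
ixgbe_check_for_bit_vf() consumes each bit only once it has been tested. A
standalone model of that accumulate-then-consume pattern, with a hypothetical
register value and masks:

    #include <stdio.h>
    #include <stdint.h>

    #define R2C_BITS 0x7            /* stand-in for IXGBE_VFMAILBOX_R2C_BITS */

    static uint32_t shadow;         /* models hw->mbx.v2p_mailbox */
    static uint32_t hw_reg = 0x5;   /* models the read-to-clear register */

    static uint32_t read_mailbox(void)
    {
        uint32_t v = hw_reg;

        hw_reg &= ~R2C_BITS;        /* hardware clears R2C bits on read */
        v |= shadow;                /* merge in bits seen by earlier reads */
        shadow |= v & R2C_BITS;     /* remember them for later tests */
        return v;
    }

    static int check_for_bit(uint32_t mask)
    {
        int set = (read_mailbox() & mask) != 0;

        shadow &= ~mask;            /* consume the bit once tested */
        return set;
    }

    int main(void)
    {
        /* both bits were latched by the first read; the second test
         * still sees its bit thanks to the shadow copy */
        printf("bit0: %d\n", check_for_bit(0x1));   /* 1 */
        printf("bit2: %d\n", check_for_bit(0x4));   /* 1 */
        return 0;
    }
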
+/**
+ * ixgbe_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw);
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = IXGBE_SUCCESS;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_msg_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_ack_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the reset done bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_rst_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = IXGBE_SUCCESS;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+
+ DEBUGFUNC("ixgbe_write_mbx_vf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_vf(hw, 0);
+ ixgbe_check_for_ack_vf(hw, 0);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_mbx_vf");
+ UNREFERENCED_PARAMETER(mbx_id);
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->ops.read = ixgbe_read_mbx_vf;
+ mbx->ops.write = ixgbe_write_mbx_vf;
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+ mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
+ mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
+ mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
+static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+ u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = IXGBE_SUCCESS;
+ IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ DEBUGFUNC("ixgbe_check_for_msg_pf");
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ DEBUGFUNC("ixgbe_check_for_ack_pf");
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has been reset (VFLRE set) or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ u32 reg_offset = (vf_number < 32) ? 0 : 1;
+ u32 vf_shift = vf_number % 32;
+ u32 vflre = 0;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_rst_pf");
+
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+
+ if (vflre & (1 << vf_shift)) {
+ ret_val = IXGBE_SUCCESS;
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ u32 p2v_mailbox;
+
+ DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ ret_val = IXGBE_SUCCESS;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_write_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_number);
+ ixgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer*/
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+}
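As a usage sketch, a PF-to-VF notification through this path needs only the message word; IXGBE_PF_CONTROL_MSG is defined in ixgbe_mbx.h below, and the vf index here is illustrative:

u16 vf = 0;			/* target VF index (illustrative) */
u32 msg = IXGBE_PF_CONTROL_MSG;

if (hw->mbx.ops.write(hw, &msg, 1, vf) != IXGBE_SUCCESS)
	DEBUGOUT("mailbox busy, VF notification not sent\n");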
+
+/**
+ * ixgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
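A minimal PF receive path built on these two routines, assuming check_for_msg has already indicated a pending VFREQ for vf, might be:

u16 vf = 0;			/* requesting VF index (illustrative) */
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

if (hw->mbx.ops.read(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, vf) ==
    IXGBE_SUCCESS) {
	/* ... service the request encoded in msgbuf[0] ... */
	msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;	/* grant the request */
	hw->mbx.ops.write(hw, msgbuf, 1, vf);
}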
+
+/**
+ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->ops.read = ixgbe_read_mbx_pf;
+ mbx->ops.write = ixgbe_write_mbx_pf;
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+ mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
+ mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
+ mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
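The PF driver is expected to call this once during attach, after the MAC type is known; on anything other than an 82599 PF it returns without touching hw->mbx:

ixgbe_init_mbx_params_pf(&adapter->hw);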
diff --git a/sys/dev/ixgbe/ixgbe_mbx.h b/sys/dev/ixgbe/ixgbe_mbx.h
new file mode 100644
index 0000000..0ebecec
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,113 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2010, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is TRUE if it is IXGBE_PF_*.
+ * Message results are the request value OR'd with the ACK (0x80000000)
+ * or NACK (0x40000000) bit.
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
+#endif /* _IXGBE_MBX_H_ */
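To make the encoding concrete, here is an illustration of a request word and its granted reply, using only the defines above (the PF may additionally set IXGBE_VT_MSGTYPE_CTS on replies):

/* VF asks the PF to program two multicast hashes */
u32 req   = IXGBE_VF_SET_MULTICAST | (2 << IXGBE_VT_MSGINFO_SHIFT);
/* PF grants: the original word OR'd with the ACK bit */
u32 reply = req | IXGBE_VT_MSGTYPE_ACK;

if ((reply & ~IXGBE_VT_MSGINFO_MASK) ==
    (IXGBE_VF_SET_MULTICAST | IXGBE_VT_MSGTYPE_ACK))
	; /* request was accepted */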
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index 151e63f..03ea5b5 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2009, Intel Corporation
+ Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -86,7 +86,8 @@
#define UNREFERENCED_PARAMETER(_p)
-#define IXGBE_HTONL htonl
+#define IXGBE_NTOHL(_i) ntohl(_i)
+#define IXGBE_NTOHS(_i) ntohs(_i)
typedef uint8_t u8;
typedef int8_t s8;
diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c
index 7ec2981..a2e3e38 100644
--- a/sys/dev/ixgbe/ixgbe_phy.c
+++ b/sys/dev/ixgbe/ixgbe_phy.c
@@ -78,7 +78,6 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
phy->sfp_type = ixgbe_sfp_type_unknown;
phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
- phy->ops.set_low_power_state = &ixgbe_tn_set_low_power_state;
return IXGBE_SUCCESS;
}
@@ -110,9 +109,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
IXGBE_MDIO_PMA_PMD_DEV_TYPE,
&ext_ability);
if (ext_ability &
- IXGBE_MDIO_PHY_10GBASET_ABILITY ||
- ext_ability &
- IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ (IXGBE_MDIO_PHY_10GBASET_ABILITY |
+ IXGBE_MDIO_PHY_1000BASET_ABILITY))
hw->phy.type =
ixgbe_phy_cu_unknown;
else
@@ -124,6 +122,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
break;
}
}
+ /* clear value if nothing found */
if (status != IXGBE_SUCCESS)
hw->phy.addr = 0;
} else {
@@ -234,6 +233,11 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none)
goto out;
+ /* Don't reset PHY if it's shut down due to overtemp. */
+ if (!hw->phy.reset_if_overtemp &&
+ (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+ goto out;
+
/*
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
@@ -460,10 +464,10 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
}
/**
- * ixgbe_setup_phy_link_generic - Set and restart autoneg
- * @hw: pointer to hardware structure
+ * ixgbe_setup_phy_link_generic - Set and restart autoneg
+ * @hw: pointer to hardware structure
*
- * Restart autonegotiation and PHY and waits for completion.
+ * Restarts autonegotiation and the PHY and waits for completion.
**/
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
@@ -774,7 +778,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
return status;
}
-
/**
* ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
* @hw: pointer to hardware structure
@@ -794,7 +797,6 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
return status;
}
-
/**
* ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
* @hw: pointer to hardware structure
@@ -938,20 +940,16 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
status = IXGBE_ERR_SFP_NOT_PRESENT;
goto out;
- }
+ }
- status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_IDENTIFIER,
&identifier);
- if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) {
- status = IXGBE_ERR_SFP_NOT_PRESENT;
- hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- if (hw->phy.type != ixgbe_phy_nl) {
- hw->phy.id = 0;
- hw->phy.type = ixgbe_phy_unknown;
- }
- goto out;
- }
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
/* LAN ID is needed for sfp_type determination */
hw->mac.ops.set_lan_id(hw);
@@ -960,12 +958,31 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
} else {
- hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
- &comp_codes_1g);
- hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
- &comp_codes_10g);
- hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
- &cable_tech);
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES,
+ &comp_codes_1g);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES,
+ &comp_codes_10g);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_CABLE_TECHNOLOGY,
+ &cable_tech);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
/* ID Module
* =========
@@ -978,6 +995,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 6 SFP_SR/LR_CORE1 - 82599-specific
* 7 SFP_act_lmt_DA_CORE0 - 82599-specific
* 8 SFP_act_lmt_DA_CORE1 - 82599-specific
+ * 9 SFP_1g_cu_CORE0 - 82599-specific
+ * 10 SFP_1g_cu_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -1001,25 +1020,33 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw, IXGBE_SFF_CABLE_SPEC_COMP,
&cable_spec);
if (cable_spec &
- IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+ IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_da_act_lmt_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_da_act_lmt_core1;
- } else
+ } else {
hw->phy.sfp_type =
- ixgbe_sfp_type_unknown;
+ ixgbe_sfp_type_unknown;
+ }
} else if (comp_codes_10g &
(IXGBE_SFF_10GBASESR_CAPABLE |
- IXGBE_SFF_10GBASELR_CAPABLE)) {
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@@ -1039,20 +1066,37 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* Determine PHY vendor */
if (hw->phy.type != ixgbe_phy_nl) {
hw->phy.id = identifier;
- hw->phy.ops.read_i2c_eeprom(hw,
+ status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE0,
&oui_bytes[0]);
- hw->phy.ops.read_i2c_eeprom(hw,
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]);
- hw->phy.ops.read_i2c_eeprom(hw,
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]);
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
vendor_oui =
- ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
- (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
- (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
switch (vendor_oui) {
case IXGBE_SFF_VENDOR_OUI_TYCO:
@@ -1092,8 +1136,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto out;
}
- /* 1G SFP modules are not supported */
- if (comp_codes_10g == 0) {
+ /* Verify supported 1G SFP modules */
+ if (comp_codes_10g == 0 &&
+ !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -1106,7 +1152,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
}
ixgbe_get_device_caps(hw, &enforce_sfp);
- if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+ !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = IXGBE_SUCCESS;
@@ -1122,6 +1170,14 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
out:
return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+ }
+ return IXGBE_ERR_SFP_NOT_PRESENT;
}
/**
@@ -1152,10 +1208,15 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
(hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
return IXGBE_ERR_SFP_NOT_SUPPORTED;
- /* Limiting active cables must be initialized as SR modules */
- if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0)
+ /*
+ * Limiting active cables and 1G Phys must be initialized as
+ * SR modules
+ */
+ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
- else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1)
+ else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
@@ -1259,7 +1320,6 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
else
swfw_mask = IXGBE_GSSR_PHY0_SM;
-
do {
if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
status = IXGBE_ERR_SWFW_SYNC;
@@ -1472,7 +1532,7 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
for (i = 7; i >= 0; i--) {
status = ixgbe_clock_in_i2c_bit(hw, &bit);
- *data |= bit<<i;
+ *data |= bit << i;
if (status != IXGBE_SUCCESS)
break;
@@ -1761,7 +1821,7 @@ void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
}
/**
- * ixgbe_check_overtemp - Checks if an overtemp occured.
+ * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
* @hw: pointer to hardware structure
*
* Checks if the LASI temp alarm status was triggered due to overtemp
@@ -1787,28 +1847,3 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
out:
return status;
}
-
-
-/**
- * ixgbe_set_tn_low_power_state - Sets the teranetics phy into low power state
- * @hw: pointer to hardware structure
- *
- * Sets the phy into low power mode when LASI temp alarm status is triggered
- **/
-s32 ixgbe_tn_set_low_power_state(struct ixgbe_hw *hw)
-{
- s32 status = IXGBE_SUCCESS;
- u16 phy_data = 0;
-
- DEBUGFUNC("ixgbe_set_tn_low_power_state");
-
- /* Set the phy into low power mode */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_CONTROL_ADDR,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
- phy_data |= IXGBE_MDIO_PHY_LOW_POWER_MODE;
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_PMD_CONTROL_ADDR,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, phy_data);
-
- return status;
-}
-
diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h
index 8f49aa8..5c5dfa6 100644
--- a/sys/dev/ixgbe/ixgbe_phy.h
+++ b/sys/dev/ixgbe/ixgbe_phy.h
@@ -52,9 +52,10 @@
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
@@ -64,6 +65,10 @@
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+/* Flow control defines */
+#define IXGBE_TAF_SYM_PAUSE 0x400
+#define IXGBE_TAF_ASM_PAUSE 0x800
+
/* Bit-shift macros */
#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
@@ -90,7 +95,6 @@
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
-
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
@@ -126,7 +130,6 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
-s32 ixgbe_tn_set_low_power_state(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index 2e1f062..3681001 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -54,14 +54,18 @@
#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
-#define IXGBE_DEV_ID_82599_KX4 0x10F7
-#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
-#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
-#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
-#define IXGBE_DEV_ID_82599_CX4 0x10F9
-#define IXGBE_DEV_ID_82599_SFP 0x10FB
-#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
-#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
+#define IXGBE_DEV_ID_82599_KX4 0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_82599_CX4 0x10F9
+#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
+#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
+#define IXGBE_DEV_ID_82599_VF 0x10ED
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -90,17 +94,17 @@
#define IXGBE_GRC 0x10200
/* General Receive Control */
-#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
+#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
#define IXGBE_VPDDIAG0 0x10204
#define IXGBE_VPDDIAG1 0x10208
/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN 0x00000001
-#define IXGBE_I2C_CLK_OUT 0x00000002
-#define IXGBE_I2C_DATA_IN 0x00000004
-#define IXGBE_I2C_DATA_OUT 0x00000008
+#define IXGBE_I2C_CLK_IN 0x00000001
+#define IXGBE_I2C_CLK_OUT 0x00000002
+#define IXGBE_I2C_DATA_IN 0x00000004
+#define IXGBE_I2C_DATA_OUT 0x00000008
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
@@ -109,19 +113,19 @@
#define IXGBE_EIMC 0x00888
#define IXGBE_EIAC 0x00810
#define IXGBE_EIAM 0x00890
-#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
-#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
-#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
-#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
+#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
/* 82599 EITR is only 12 bits, with the lower 3 always zero */
/*
* 82598 EITR is 16 bits but set the limits based on the max
* supported by all ixgbe hardware
*/
-#define IXGBE_MAX_INT_RATE 488281
-#define IXGBE_MIN_INT_RATE 956
-#define IXGBE_MAX_EITR 0x00000FF8
-#define IXGBE_MIN_EITR 8
+#define IXGBE_MAX_INT_RATE 488281
+#define IXGBE_MIN_INT_RATE 956
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
(0x012300 + (((_i) - 24) * 4)))
#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
@@ -231,9 +235,11 @@
#define IXGBE_MTQC 0x08120
#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VT_CTL 0x051B0
#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
#define IXGBE_QDE 0x2F04
#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
@@ -288,13 +294,14 @@
#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
#define IXGBE_DTXCTL 0x07E00
-#define IXGBE_DMATXCTL 0x04A80
-#define IXGBE_PFDTXGSWC 0x08220
-#define IXGBE_DTXMXSZRQ 0x08100
-#define IXGBE_DTXTCPFLGL 0x04A88
-#define IXGBE_DTXTCPFLGH 0x04A8C
-#define IXGBE_LBDRPEN 0x0CA00
-#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_PFDTXGSWC 0x08220
+#define IXGBE_DTXMXSZRQ 0x08100
+#define IXGBE_DTXTCPFLGL 0x04A88
+#define IXGBE_DTXTCPFLGH 0x04A8C
+#define IXGBE_LBDRPEN 0x0CA00
+#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
@@ -302,6 +309,12 @@
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
+
+/* Anti-spoofing defines */
+#define IXGBE_SPOOF_MACAS_MASK 0xFF
+#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_PFVFSPOOF_REG_COUNT 8
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
/* Tx DCA Control register : 128 of these (0-127) */
#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -652,6 +665,8 @@
#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
@@ -1019,6 +1034,8 @@
#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
@@ -1399,6 +1416,9 @@
#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
#define IXGBE_VLVF_ENTRIES 64
#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
@@ -1543,6 +1563,7 @@
#define IXGBE_ANLP1_PAUSE 0x0C00
#define IXGBE_ANLP1_SYM_PAUSE 0x0400
#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
/* SW Semaphore Register bitmasks */
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
@@ -1571,6 +1592,7 @@
#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
@@ -1580,7 +1602,11 @@
#define IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT 6
#define IXGBE_EEPROM_OPCODE_BITS 8
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
#define IXGBE_PCIE_ANALOG_PTR 0x03
@@ -1660,15 +1686,20 @@
#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
-#define IXGBE_FW_PATCH_VERSION_4 0x7
+#define IXGBE_FW_PATCH_VERSION_4 0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
/* PCI Bus Info */
#define IXGBE_PCI_DEVICE_STATUS 0xAA
@@ -2061,6 +2092,7 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_LAST 0x00000800
#define IXGBE_FDIRCMD_COLLISION 0x00001000
#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
#define IXGBE_FDIR_INIT_DONE_POLL 10
@@ -2238,34 +2270,41 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+/* Flow Control Macros */
+#define PAUSE_RTT 8
+#define PAUSE_MTU(MTU) (((MTU) + 1024 - 1) / 1024)
+
+#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
+ PAUSE_MTU(MTU))
+#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
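Expanding the macros for a standard 1500-byte MTU shows the integer arithmetic involved (the units, and how the results are scaled into the FCRTH/FCRTL registers, are defined elsewhere in the shared code):

/*
 * PAUSE_MTU(1500)     = (1500 + 1024 - 1) / 1024          = 2
 * FC_HIGH_WATER(1500) = (((8 + 2) * 144) + 99) / 100 + 2  = 17
 * FC_LOW_WATER(1500)  = 2 * (2 * 2 + 8)                   = 24
 */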
/* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
-
-/* Software ATR input stream offsets and masks */
-#define IXGBE_ATR_VLAN_OFFSET 0
-#define IXGBE_ATR_SRC_IPV6_OFFSET 2
-#define IXGBE_ATR_SRC_IPV4_OFFSET 14
-#define IXGBE_ATR_DST_IPV6_OFFSET 18
-#define IXGBE_ATR_DST_IPV4_OFFSET 30
-#define IXGBE_ATR_SRC_PORT_OFFSET 34
-#define IXGBE_ATR_DST_PORT_OFFSET 36
-#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
-#define IXGBE_ATR_VM_POOL_OFFSET 40
-#define IXGBE_ATR_L4TYPE_OFFSET 41
+#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK 0x7fff
#define IXGBE_ATR_L4TYPE_MASK 0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
#define IXGBE_ATR_L4TYPE_UDP 0x1
#define IXGBE_ATR_L4TYPE_TCP 0x2
#define IXGBE_ATR_L4TYPE_SCTP 0x3
-#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+enum ixgbe_atr_flow_type {
+ IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
/* Flow Director ATR input struct. */
-struct ixgbe_atr_input {
+union ixgbe_atr_input {
/* Byte layout in order, all values with MSB first:
*
+	 * rsvd0 - 2 bytes - space reserved, must be 0.
* vlan_id - 2 bytes
* src_ip - 16 bytes
* dst_ip - 16 bytes
@@ -2273,18 +2312,41 @@ struct ixgbe_atr_input {
* dst_port - 2 bytes
* flex_bytes - 2 bytes
* vm_pool - 1 byte
- * l4type - 1 byte
+ * flow_type - 1 byte
*/
- u8 byte_stream[42];
+ struct {
+ __be16 rsvd0;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ u8 vm_pool;
+ u8 flow_type;
+ } formatted;
+ __be32 dword_stream[11];
};
struct ixgbe_atr_input_masks {
- u32 src_ip_mask;
- u32 dst_ip_mask;
- u16 src_port_mask;
- u16 dst_port_mask;
- u16 vlan_id_mask;
- u16 data_mask;
+ __be16 rsvd0;
+ __be16 vlan_id_mask;
+ __be32 dst_ip_mask[4];
+ __be32 src_ip_mask[4];
+ __be16 src_port_mask;
+ __be16 dst_port_mask;
+ __be16 flex_mask;
+};
+
+/*
+ * Unavailable: The FCoE Boot Option ROM is not present in the flash.
+ * Disabled: Present; boot order is not set for any targets on the port.
+ * Enabled: Present; boot order is set for at least one target on the port.
+ */
+enum ixgbe_fcoe_boot_status {
+ ixgbe_fcoe_bootstatus_disabled = 0,
+ ixgbe_fcoe_bootstatus_enabled = 1,
+ ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
};
enum ixgbe_eeprom_type {
@@ -2298,6 +2360,7 @@ enum ixgbe_mac_type {
ixgbe_mac_unknown = 0,
ixgbe_mac_82598EB,
ixgbe_mac_82599EB,
+ ixgbe_mac_82599_vf,
ixgbe_num_macs
};
@@ -2345,6 +2408,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_srlr_core1 = 6,
ixgbe_sfp_type_da_act_lmt_core0 = 7,
ixgbe_sfp_type_da_act_lmt_core1 = 8,
+ ixgbe_sfp_type_1g_cu_core0 = 9,
+ ixgbe_sfp_type_1g_cu_core1 = 10,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -2491,8 +2556,6 @@ struct ixgbe_hw_stats {
u64 mptc;
u64 bptc;
u64 xec;
- u64 rqsmr[16];
- u64 tqsmr[8];
u64 qprc[16];
u64 qptc[16];
u64 qbrc[16];
@@ -2544,6 +2607,7 @@ struct ixgbe_mac_operations {
s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+ s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *);
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
void (*set_lan_id)(struct ixgbe_hw *);
@@ -2555,6 +2619,9 @@ struct ixgbe_mac_operations {
void (*release_swfw_sync)(struct ixgbe_hw *, u16);
/* Link */
+ void (*disable_tx_laser)(struct ixgbe_hw *);
+ void (*enable_tx_laser)(struct ixgbe_hw *);
+ void (*flap_tx_laser)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
@@ -2582,6 +2649,8 @@ struct ixgbe_mac_operations {
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
s32 (*init_uta_tables)(struct ixgbe_hw *);
+ void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *, s32);
@@ -2605,7 +2674,6 @@ struct ixgbe_phy_operations {
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
void (*i2c_bus_clear)(struct ixgbe_hw *);
s32 (*check_overtemp)(struct ixgbe_hw *);
- s32 (*set_low_power_state)(struct ixgbe_hw *);
};
struct ixgbe_eeprom_info {
@@ -2627,11 +2695,14 @@ struct ixgbe_mac_info {
u16 wwnn_prefix;
/* prefix for World Wide Port Name (WWPN) */
u16 wwpn_prefix;
+#define IXGBE_MAX_MTA 128
+ u32 mta_shadow[IXGBE_MAX_MTA];
s32 mc_filter_type;
u32 mcft_size;
u32 vft_size;
u32 num_rar_entries;
u32 rar_highwater;
+ u32 rx_pb_size;
u32 max_tx_queues;
u32 max_rx_queues;
u32 max_msix_vectors;
@@ -2657,6 +2728,38 @@ struct ixgbe_phy_info {
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
+ bool reset_if_overtemp;
+};
+
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+ void (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *, u16);
+ s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+ s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 v2p_mailbox;
+ u16 size;
};
struct ixgbe_hw {
@@ -2668,6 +2771,7 @@ struct ixgbe_hw {
struct ixgbe_phy_info phy;
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -2708,6 +2812,12 @@ struct ixgbe_hw {
#define IXGBE_ERR_EEPROM_VERSION -24
#define IXGBE_ERR_NO_SPACE -25
#define IXGBE_ERR_OVERTEMP -26
+#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED -28
+#define IXGBE_ERR_FLOW_CONTROL -29
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define IXGBE_ERR_PBA_SECTION -31
+#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c
new file mode 100644
index 0000000..2717899
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_vf.c
@@ -0,0 +1,495 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2010, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#include "ixgbe_api.h"
+#include "ixgbe_type.h"
+#include "ixgbe_vf.h"
+
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_stop_hw_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool autoneg_wait_to_complete);
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr);
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+
+#ifndef IXGBE_VFWRITE_REG
+#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
+#endif
+#ifndef IXGBE_VFREAD_REG
+#define IXGBE_VFREAD_REG IXGBE_READ_REG
+#endif
+
+/**
+ * ixgbe_init_ops_vf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This assigns function pointers; adapter-specific functions can
+ * override the generic assignments with their own implementations.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
+{
+ /* MAC */
+ hw->mac.ops.init_hw = ixgbe_init_hw_vf;
+ hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
+ hw->mac.ops.start_hw = ixgbe_start_hw_vf;
+ /* Cannot clear stats on VF */
+ hw->mac.ops.clear_hw_cntrs = NULL;
+ hw->mac.ops.get_media_type = NULL;
+ hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
+ hw->mac.ops.stop_adapter = ixgbe_stop_hw_vf;
+ hw->mac.ops.get_bus_info = NULL;
+
+ /* Link */
+ hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
+ hw->mac.ops.check_link = ixgbe_check_mac_link_vf;
+ hw->mac.ops.get_link_capabilities = NULL;
+
+ /* RAR, Multicast, VLAN */
+ hw->mac.ops.set_rar = ixgbe_set_rar_vf;
+ hw->mac.ops.init_rx_addrs = NULL;
+ hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
+ hw->mac.ops.enable_mc = NULL;
+ hw->mac.ops.disable_mc = NULL;
+ hw->mac.ops.clear_vfta = NULL;
+ hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
+
+ hw->mac.max_tx_queues = 1;
+ hw->mac.max_rx_queues = 1;
+
+ hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
+
+ return IXGBE_SUCCESS;
+}
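A hypothetical VF attach sequence using these ops (a sketch; it assumes the caller has already allocated hw and mapped hw->hw_addr):

ixgbe_init_ops_vf(hw);
hw->mbx.ops.init_params(hw);		/* ixgbe_init_mbx_params_vf */
if (hw->mac.ops.reset_hw(hw) == IXGBE_SUCCESS)
	hw->mac.ops.init_hw(hw);	/* copies perm_addr from the PF */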
+
+/**
+ * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * On the VF this only clears the adapter_stopped flag; bus info, counters,
+ * receive addresses, VLAN filters, link and flow control settings are all
+ * owned by the PF.
+ **/
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = FALSE;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw)
+{
+ s32 status = hw->mac.ops.start_hw(hw);
+
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, and
+ * masks and clears all interrupts.
+ **/
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+ s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ u32 ctrl, msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ DEBUGFUNC("ixgbevf_reset_hw_vf");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ DEBUGOUT("Issuing a function level reset to MAC\n");
+ ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL);
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, (ctrl | IXGBE_CTRL_RST));
+ IXGBE_WRITE_FLUSH(hw);
+
+ usec_delay(1);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+ timeout--;
+ usec_delay(5);
+ }
+
+ if (timeout) {
+ /* mailbox timeout can now become active */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = IXGBE_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1, 0);
+
+ msec_delay(10);
+
+ /* set our "perm_addr" based on info provided by PF */
+ /* also set up the mc_filter_type which is piggy backed
+ * on the mac address in word 3 */
+ ret_val = mbx->ops.read_posted(hw, msgbuf,
+ IXGBE_VF_PERMADDR_MSG_LEN, 0);
+ if (!ret_val) {
+ if (msgbuf[0] == (IXGBE_VF_RESET |
+ IXGBE_VT_MSGTYPE_ACK)) {
+ memcpy(hw->mac.perm_addr, addr,
+ IXGBE_ETH_LENGTH_OF_ADDRESS);
+ hw->mac.mc_filter_type =
+ msgbuf[IXGBE_VF_MC_TYPE_WORD];
+ } else {
+ ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ }
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_stop_hw_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_hw_vf(struct ixgbe_hw *hw)
+{
+ u32 number_of_queues;
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = TRUE;
+
+	/* Disable the receive unit by stopping each queue */
+ number_of_queues = hw->mac.max_rx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
+ if (reg_val & IXGBE_RXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+ }
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+
+	/* Clear the interrupt mask to stop interrupts from being generated */
+ IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts */
+ IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ number_of_queues = hw->mac.max_tx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFTXDCTL(i));
+ if (reg_val & IXGBE_TXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
+ }
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts 12 bits from a multicast address to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits
+ * from incoming rx multicast addresses to determine the bit-vector to
+ * check in the MTA. Which of the 4 combinations of 12 bits the hardware
+ * uses is set by the MO field of MCSTCTRL; the MO field is set during
+ * initialization to mc_filter_type.
+ **/
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ DEBUGOUT("MC filter type param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
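For instance, with filter type 0 and the all-hosts address 01:00:5E:00:00:01, mc_addr[4] = 0x00 and mc_addr[5] = 0x01, so:

/* vector = (0x00 >> 4) | ((u16)0x01 << 4) = 0x010 = 16; by the usual
 * MTA convention (not part of this patch) that selects register
 * 16 >> 5 = 0, bit 16 & 0x1F = 16. */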
+
+/**
+ * ixgbe_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ **/
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+ UNREFERENCED_PARAMETER(vmdq);
+ UNREFERENCED_PARAMETER(enable_addr);
+ UNREFERENCED_PARAMETER(index);
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+	memcpy(msg_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller supplied function to return next address in list
+ *
+ * Updates the Multicast Table Array.
+ **/
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr next)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ u16 *vector_list = (u16 *)&msgbuf[1];
+ u32 vector;
+ u32 cnt, i;
+ u32 vmdq;
+
+ DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
+
+	/* Each entry in the list uses one 16-bit word. We have 30
+	 * 16-bit words available in our HW msg buffer (minus 1 for the
+	 * msg type). That's 30 hash values if we pack 'em right. If
+	 * there are more than 30 MC addresses to add then punt the
+	 * extras for now and add code to handle more than 30 later.
+	 * It would be unusual for a server to request that many multicast
+	 * addresses except in large enterprise network environments.
+	 */
+
+ DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+
+ cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < cnt; i++) {
+ vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+ DEBUGOUT1("Hash value = 0x%03X\n", vector);
+ vector_list[i] = (u16)vector;
+ }
+
+ return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0);
+}
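The next argument is a caller-supplied ixgbe_mc_addr_itr; as its use above suggests, it takes (hw, &list, &vmdq) and returns the current address. A minimal iterator over a flat array of 6-byte addresses might look like this (sketch):

static u8 *
mc_array_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *addr = *mc_addr_ptr;

	UNREFERENCED_PARAMETER(hw);
	*vmdq = 0;	/* unused by the VF path */
	*mc_addr_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	return addr;
}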
+
+/**
+ * ixgbe_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if TRUE then set bit, else clear bit
+ **/
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+ UNREFERENCED_PARAMETER(vind);
+
+ msgbuf[0] = IXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+	/* Setting the 8-bit MSGINFO field to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+	return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
+}
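Usage is a single posted write; for example, asking the PF to pass VLAN 100 for this VF:

ixgbe_set_vfta_vf(hw, 100, 0, TRUE);	/* vind is ignored on the VF */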
+
+/**
+ * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_PARAMETER(hw);
+ return IXGBE_VF_MAX_TX_QUEUES;
+}
+
+/**
+ * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of receive queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_PARAMETER(hw);
+ return IXGBE_VF_MAX_RX_QUEUES;
+}
+
+/**
+ * ixgbe_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++)
+ mac_addr[i] = hw->mac.perm_addr[i];
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: TRUE if autonegotiation enabled
+ * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ * On the VF this is a no-op: link configuration is owned by the PF.
+ **/
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ UNREFERENCED_PARAMETER(hw);
+ UNREFERENCED_PARAMETER(speed);
+ UNREFERENCED_PARAMETER(autoneg);
+ UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: TRUE if link is up, FALSE otherwise
+ * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool autoneg_wait_to_complete)
+{
+ u32 links_reg;
+ UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+
+ if (!(hw->mbx.ops.check_for_rst(hw, 0))) {
+ *link_up = FALSE;
+ *speed = 0;
+ return -1;
+ }
+
+ links_reg = IXGBE_VFREAD_REG(hw, IXGBE_VFLINKS);
+
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = TRUE;
+ else
+ *link_up = FALSE;
+
+ if ((links_reg & IXGBE_LINKS_SPEED_10G_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ return IXGBE_SUCCESS;
+}
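A typical watchdog call through the ops table (sketch):

ixgbe_link_speed speed;
bool link_up;

if (hw->mac.ops.check_link(hw, &speed, &link_up, FALSE) == IXGBE_SUCCESS &&
    link_up)
	; /* speed is IXGBE_LINK_SPEED_10GB_FULL or _1GB_FULL */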
+
diff --git a/sys/dev/ixgbe/ixgbe_vf.h b/sys/dev/ixgbe/ixgbe_vf.h
new file mode 100644
index 0000000..d0c4b34
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_vf.h
@@ -0,0 +1,113 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2010, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#define IXGBE_VF_IRQ_CLEAR_MASK 7
+#define IXGBE_VF_MAX_TX_QUEUES 8
+#define IXGBE_VF_MAX_RX_QUEUES 8
+
+#define IXGBE_VFCTRL 0x00000
+#define IXGBE_VFSTATUS 0x00008
+#define IXGBE_VFLINKS 0x00010
+#define IXGBE_VFFRTIMER 0x00048
+#define IXGBE_VFRXMEMWRAP 0x03190
+#define IXGBE_VTEICR 0x00100
+#define IXGBE_VTEICS 0x00104
+#define IXGBE_VTEIMS 0x00108
+#define IXGBE_VTEIMC 0x0010C
+#define IXGBE_VTEIAC 0x00110
+#define IXGBE_VTEIAM 0x00114
+#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTIVAR_MISC 0x00140
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
+/* define IXGBE_VFPBACL still says TBD in EAS */
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VFPSRTYPE 0x00300
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFGPRC 0x0101C
+#define IXGBE_VFGPTC 0x0201C
+#define IXGBE_VFGORC_LSB 0x01020
+#define IXGBE_VFGORC_MSB 0x01024
+#define IXGBE_VFGOTC_LSB 0x02020
+#define IXGBE_VFGOTC_MSB 0x02024
+#define IXGBE_VFMPRC 0x01034
+
+
+struct ixgbevf_hw_stats {
+ u64 base_vfgprc;
+ u64 base_vfgptc;
+ u64 base_vfgorc;
+ u64 base_vfgotc;
+ u64 base_vfmprc;
+
+ u64 last_vfgprc;
+ u64 last_vfgptc;
+ u64 last_vfgorc;
+ u64 last_vfgotc;
+ u64 last_vfmprc;
+
+ u64 vfgprc;
+ u64 vfgptc;
+ u64 vfgorc;
+ u64 vfgotc;
+ u64 vfmprc;
+
+ u64 saved_reset_vfgprc;
+ u64 saved_reset_vfgptc;
+ u64 saved_reset_vfgorc;
+ u64 saved_reset_vfgotc;
+ u64 saved_reset_vfmprc;
+};
+
+#endif /* __IXGBE_VF_H__ */
+
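The base_/last_/saved_reset_ triplets exist because the VF counters are only 32 bits wide and wrap; a wrap-safe accumulator along these lines (illustrative, not part of this patch) keeps the 64-bit totals consistent:

static void
vf_update_stat_32(u32 cur, u32 *last, u64 *count)
{
	if (cur < *last)		/* 32-bit counter wrapped */
		*count += 0x100000000ULL;
	*last = cur;
	*count = (*count & 0xFFFFFFFF00000000ULL) | cur;
}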
diff --git a/sys/dev/ixgbe/ixv.c b/sys/dev/ixgbe/ixv.c
new file mode 100644
index 0000000..6f3ad16
--- /dev/null
+++ b/sys/dev/ixgbe/ixv.c
@@ -0,0 +1,3950 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2010, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#endif
+
+#include "ixv.h"
+
+/*********************************************************************
+ * Driver version
+ *********************************************************************/
+char ixv_driver_version[] = "1.0.0";
+
+/*********************************************************************
+ * PCI Device ID Table
+ *
+ * Used by probe to select devices to load on
+ * Last field stores an index into ixv_strings
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ *********************************************************************/
+
+static ixv_vendor_info_t ixv_vendor_info_array[] =
+{
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
+ /* required last entry */
+ {0, 0, 0, 0, 0}
+};
+
+/*********************************************************************
+ * Table of branding strings
+ *********************************************************************/
+
+static char *ixv_strings[] = {
+ "Intel(R) PRO/10GbE Virtual Function Network Driver"
+};
+
+/*********************************************************************
+ * Function prototypes
+ *********************************************************************/
+static int ixv_probe(device_t);
+static int ixv_attach(device_t);
+static int ixv_detach(device_t);
+static int ixv_shutdown(device_t);
+#if __FreeBSD_version < 800000
+static void ixv_start(struct ifnet *);
+static void ixv_start_locked(struct tx_ring *, struct ifnet *);
+#else
+static int ixv_mq_start(struct ifnet *, struct mbuf *);
+static int ixv_mq_start_locked(struct ifnet *,
+ struct tx_ring *, struct mbuf *);
+static void ixv_qflush(struct ifnet *);
+#endif
+static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
+static void ixv_init(void *);
+static void ixv_init_locked(struct adapter *);
+static void ixv_stop(void *);
+static void ixv_media_status(struct ifnet *, struct ifmediareq *);
+static int ixv_media_change(struct ifnet *);
+static void ixv_identify_hardware(struct adapter *);
+static int ixv_allocate_pci_resources(struct adapter *);
+static int ixv_allocate_msix(struct adapter *);
+static int ixv_allocate_queues(struct adapter *);
+static int ixv_setup_msix(struct adapter *);
+static void ixv_free_pci_resources(struct adapter *);
+static void ixv_local_timer(void *);
+static void ixv_setup_interface(device_t, struct adapter *);
+static void ixv_config_link(struct adapter *);
+
+static int ixv_allocate_transmit_buffers(struct tx_ring *);
+static int ixv_setup_transmit_structures(struct adapter *);
+static void ixv_setup_transmit_ring(struct tx_ring *);
+static void ixv_initialize_transmit_units(struct adapter *);
+static void ixv_free_transmit_structures(struct adapter *);
+static void ixv_free_transmit_buffers(struct tx_ring *);
+
+static int ixv_allocate_receive_buffers(struct rx_ring *);
+static int ixv_setup_receive_structures(struct adapter *);
+static int ixv_setup_receive_ring(struct rx_ring *);
+static void ixv_initialize_receive_units(struct adapter *);
+static void ixv_free_receive_structures(struct adapter *);
+static void ixv_free_receive_buffers(struct rx_ring *);
+
+static void ixv_enable_intr(struct adapter *);
+static void ixv_disable_intr(struct adapter *);
+static bool ixv_txeof(struct tx_ring *);
+static bool ixv_rxeof(struct ix_queue *, int);
+static void ixv_rx_checksum(u32, struct mbuf *, u32);
+static void ixv_set_multi(struct adapter *);
+static void ixv_update_link_status(struct adapter *);
+static void ixv_refresh_mbufs(struct rx_ring *, int);
+static int ixv_xmit(struct tx_ring *, struct mbuf **);
+static int ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
+static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
+static int ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int ixv_dma_malloc(struct adapter *, bus_size_t,
+ struct ixv_dma_alloc *, int);
+static void ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
+static void ixv_add_rx_process_limit(struct adapter *, const char *,
+ const char *, int *, int);
+static bool ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
+static bool ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
+static void ixv_set_ivar(struct adapter *, u8, u8, s8);
+static void ixv_configure_ivars(struct adapter *);
+static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
+
+static void ixv_setup_vlan_support(struct adapter *);
+static void ixv_register_vlan(void *, struct ifnet *, u16);
+static void ixv_unregister_vlan(void *, struct ifnet *, u16);
+
+static void ixv_save_stats(struct adapter *);
+static void ixv_init_stats(struct adapter *);
+static void ixv_update_stats(struct adapter *);
+
+static __inline void ixv_rx_discard(struct rx_ring *, int);
+static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
+ struct mbuf *, u32);
+
+/* The MSI/X Interrupt handlers */
+static void ixv_msix_que(void *);
+static void ixv_msix_mbx(void *);
+
+/* Deferred interrupt tasklets */
+static void ixv_handle_que(void *, int);
+static void ixv_handle_mbx(void *, int);
+
+/*********************************************************************
+ * FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+static device_method_t ixv_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ixv_probe),
+ DEVMETHOD(device_attach, ixv_attach),
+ DEVMETHOD(device_detach, ixv_detach),
+ DEVMETHOD(device_shutdown, ixv_shutdown),
+ {0, 0}
+};
+
+static driver_t ixv_driver = {
+ "ix", ixv_methods, sizeof(struct adapter),
+};
+
+extern devclass_t ixgbe_devclass;
+DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
+MODULE_DEPEND(ixv, pci, 1, 1, 1);
+MODULE_DEPEND(ixv, ether, 1, 1, 1);
+
+/*
+** TUNEABLE PARAMETERS:
+*/
+
+/*
+** AIM: Adaptive Interrupt Moderation,
+** which means that the interrupt rate
+** is varied over time based on the
+** traffic seen on that interrupt vector.
+*/
+static int ixv_enable_aim = FALSE;
+TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
+
+/* How many packets rxeof tries to clean at a time */
+static int ixv_rx_process_limit = 128;
+TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
+
+/* Flow control setting, default to full */
+static int ixv_flow_control = ixgbe_fc_full;
+TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
+
+/*
+ * Header split: this causes the hardware to DMA
+ * the header into a seperate mbuf from the payload,
+ * it can be a performance win in some workloads, but
+ * in others it actually hurts, its off by default.
+ */
+static bool ixv_header_split = FALSE;
+TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
+
+/*
+** Number of TX descriptors per ring,
+** setting higher than RX as this seems
+** the better performing choice.
+*/
+static int ixv_txd = DEFAULT_TXD;
+TUNABLE_INT("hw.ixv.txd", &ixv_txd);
+
+/* Number of RX descriptors per ring */
+static int ixv_rxd = DEFAULT_RXD;
+TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
+
+/*
+** Shadow VFTA table; this is needed because
+** the real filter table gets cleared during
+** a soft reset and we need to repopulate it.
+*/
+static u32 ixv_shadow_vfta[VFTA_SIZE];
+
+/*********************************************************************
+ * Device identification routine
+ *
+ * ixv_probe determines if the driver should be loaded on an
+ * adapter, based on the PCI vendor/device id of the adapter.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixv_probe(device_t dev)
+{
+ ixv_vendor_info_t *ent;
+
+ u16 pci_vendor_id = 0;
+ u16 pci_device_id = 0;
+ u16 pci_subvendor_id = 0;
+ u16 pci_subdevice_id = 0;
+ char adapter_name[256];
+
+
+ pci_vendor_id = pci_get_vendor(dev);
+ if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
+ return (ENXIO);
+
+ pci_device_id = pci_get_device(dev);
+ pci_subvendor_id = pci_get_subvendor(dev);
+ pci_subdevice_id = pci_get_subdevice(dev);
+
+ ent = ixv_vendor_info_array;
+ while (ent->vendor_id != 0) {
+ if ((pci_vendor_id == ent->vendor_id) &&
+ (pci_device_id == ent->device_id) &&
+
+ ((pci_subvendor_id == ent->subvendor_id) ||
+ (ent->subvendor_id == 0)) &&
+
+ ((pci_subdevice_id == ent->subdevice_id) ||
+ (ent->subdevice_id == 0))) {
+ sprintf(adapter_name, "%s, Version - %s",
+ ixv_strings[ent->index],
+ ixv_driver_version);
+ device_set_desc_copy(dev, adapter_name);
+ return (0);
+ }
+ ent++;
+ }
+ return (ENXIO);
+}
+
+/*********************************************************************
+ * Device initialization routine
+ *
+ * The attach entry point is called when the driver is being loaded.
+ * This routine identifies the type of hardware, allocates all resources
+ * and initializes the hardware.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixv_attach(device_t dev)
+{
+ struct adapter *adapter;
+ struct ixgbe_hw *hw;
+ int error = 0;
+
+ INIT_DEBUGOUT("ixv_attach: begin");
+
+ /* Allocate, clear, and link in our adapter structure */
+ adapter = device_get_softc(dev);
+ adapter->dev = adapter->osdep.dev = dev;
+ hw = &adapter->hw;
+
+ /* Core Lock Init*/
+ IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
+
+ /* SYSCTL APIs */
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
+ adapter, 0, ixv_sysctl_stats, "I", "Statistics");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
+ adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
+ adapter, 0, ixv_set_flowcntl, "I", "Flow Control");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
+ &ixv_enable_aim, 1, "Interrupt Moderation");
+
+ /* Set up the timer callout */
+ callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
+
+ /* Determine hardware revision */
+ ixv_identify_hardware(adapter);
+
+ /* Do base PCI setup - map BAR0 */
+ if (ixv_allocate_pci_resources(adapter)) {
+ device_printf(dev, "Allocation of PCI resources failed\n");
+ error = ENXIO;
+ goto err_out;
+ }
+
+ /* Do descriptor calc and sanity checks */
+ if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
+ ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
+ device_printf(dev, "TXD config issue, using default!\n");
+ adapter->num_tx_desc = DEFAULT_TXD;
+ } else
+ adapter->num_tx_desc = ixv_txd;
+
+ if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
+	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
+ device_printf(dev, "RXD config issue, using default!\n");
+ adapter->num_rx_desc = DEFAULT_RXD;
+ } else
+ adapter->num_rx_desc = ixv_rxd;
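+
+	/*
+	** Worked example of the checks above (illustrative, assuming
+	** DBA_ALIGN is 128 as in ixgbe.h): each advanced descriptor
+	** is 16 bytes, so the ring is DBA_ALIGN-sized only when the
+	** count is a multiple of 8; 1024 * 16 = 16384 and
+	** 16384 % 128 == 0, so 1024 passes, while 1020 would not.
+	*/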
+
+ /* Allocate our TX/RX Queues */
+ if (ixv_allocate_queues(adapter)) {
+ error = ENOMEM;
+ goto err_out;
+ }
+
+ /*
+	** Initialize the shared code; it is
+	** at this point that the mac type is set.
+ */
+ error = ixgbe_init_shared_code(hw);
+ if (error) {
+ device_printf(dev,"Shared Code Initialization Failure\n");
+ error = EIO;
+ goto err_late;
+ }
+
+ /* Setup the mailbox */
+ ixgbe_init_mbx_params_vf(hw);
+
+ ixgbe_reset_hw(hw);
+
+ /* Get Hardware Flow Control setting */
+ hw->fc.requested_mode = ixgbe_fc_full;
+ hw->fc.pause_time = IXV_FC_PAUSE;
+ hw->fc.low_water = IXV_FC_LO;
+ hw->fc.high_water = IXV_FC_HI;
+ hw->fc.send_xon = TRUE;
+
+ error = ixgbe_init_hw(hw);
+ if (error) {
+ device_printf(dev,"Hardware Initialization Failure\n");
+ error = EIO;
+ goto err_late;
+ }
+
+ error = ixv_allocate_msix(adapter);
+ if (error)
+ goto err_late;
+
+ /* Setup OS specific network interface */
+ ixv_setup_interface(dev, adapter);
+
+ /* Sysctl for limiting the amount of work done in the taskqueue */
+ ixv_add_rx_process_limit(adapter, "rx_processing_limit",
+ "max number of rx packets to process", &adapter->rx_process_limit,
+ ixv_rx_process_limit);
+
+ /* Do the stats setup */
+ ixv_save_stats(adapter);
+ ixv_init_stats(adapter);
+
+ /* Register for VLAN events */
+ adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+ ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+ adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+ ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+
+ INIT_DEBUGOUT("ixv_attach: end");
+ return (0);
+
+err_late:
+ ixv_free_transmit_structures(adapter);
+ ixv_free_receive_structures(adapter);
+err_out:
+ ixv_free_pci_resources(adapter);
+ return (error);
+
+}
+
+/*********************************************************************
+ * Device removal routine
+ *
+ * The detach entry point is called when the driver is being removed.
+ * This routine stops the adapter and deallocates all the resources
+ * that were allocated for driver operation.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixv_detach(device_t dev)
+{
+ struct adapter *adapter = device_get_softc(dev);
+ struct ix_queue *que = adapter->queues;
+
+ INIT_DEBUGOUT("ixv_detach: begin");
+
+ /* Make sure VLANS are not using driver */
+ if (adapter->ifp->if_vlantrunk != NULL) {
+ device_printf(dev,"Vlan in use, detach first\n");
+ return (EBUSY);
+ }
+
+ IXV_CORE_LOCK(adapter);
+ ixv_stop(adapter);
+ IXV_CORE_UNLOCK(adapter);
+
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ if (que->tq) {
+ taskqueue_drain(que->tq, &que->que_task);
+ taskqueue_free(que->tq);
+ }
+ }
+
+ /* Drain the Link queue */
+ if (adapter->tq) {
+ taskqueue_drain(adapter->tq, &adapter->mbx_task);
+ taskqueue_free(adapter->tq);
+ }
+
+ /* Unregister VLAN events */
+ if (adapter->vlan_attach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
+ if (adapter->vlan_detach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
+
+ ether_ifdetach(adapter->ifp);
+ callout_drain(&adapter->timer);
+ ixv_free_pci_resources(adapter);
+ bus_generic_detach(dev);
+ if_free(adapter->ifp);
+
+ ixv_free_transmit_structures(adapter);
+ ixv_free_receive_structures(adapter);
+
+ IXV_CORE_LOCK_DESTROY(adapter);
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Shutdown entry point
+ *
+ **********************************************************************/
+static int
+ixv_shutdown(device_t dev)
+{
+ struct adapter *adapter = device_get_softc(dev);
+ IXV_CORE_LOCK(adapter);
+ ixv_stop(adapter);
+ IXV_CORE_UNLOCK(adapter);
+ return (0);
+}
+
+#if __FreeBSD_version < 800000
+/*********************************************************************
+ * Transmit entry point
+ *
+ * ixv_start is called by the stack to initiate a transmit.
+ * The driver will remain in this routine as long as there are
+ * packets to transmit and transmit resources are available.
+ * In case resources are not available stack is notified and
+ * the packet is requeued.
+ **********************************************************************/
+static void
+ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
+{
+ struct mbuf *m_head;
+ struct adapter *adapter = txr->adapter;
+
+ IXV_TX_LOCK_ASSERT(txr);
+
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING)
+ return;
+ if (!adapter->link_active)
+ return;
+
+ while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+ if (m_head == NULL)
+ break;
+
+ if (ixv_xmit(txr, &m_head)) {
+ if (m_head == NULL)
+ break;
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+ break;
+ }
+ /* Send a copy of the frame to the BPF listener */
+ ETHER_BPF_MTAP(ifp, m_head);
+
+ /* Set watchdog on */
+ txr->watchdog_check = TRUE;
+ txr->watchdog_time = ticks;
+
+ }
+ return;
+}
+
+/*
+ * Legacy TX start - called by the stack, this
+ * always uses the first tx ring, and should
+ * not be used with multiqueue tx enabled.
+ */
+static void
+ixv_start(struct ifnet *ifp)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr = adapter->tx_rings;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXV_TX_LOCK(txr);
+ ixv_start_locked(txr, ifp);
+ IXV_TX_UNLOCK(txr);
+ }
+ return;
+}
+
+#else
+
+/*
+** Multiqueue Transmit driver
+**
+*/
+static int
+ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct ix_queue *que;
+ struct tx_ring *txr;
+ int i = 0, err = 0;
+
+ /* Which queue to use */
+ if ((m->m_flags & M_FLOWID) != 0)
+ i = m->m_pkthdr.flowid % adapter->num_queues;
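+	/*
+	** e.g. (illustrative): with num_queues == 2, a flowid of 7
+	** maps to queue 1, keeping each flow on a single ring.
+	*/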
+
+ txr = &adapter->tx_rings[i];
+ que = &adapter->queues[i];
+
+ if (IXV_TX_TRYLOCK(txr)) {
+ err = ixv_mq_start_locked(ifp, txr, m);
+ IXV_TX_UNLOCK(txr);
+ } else {
+ err = drbr_enqueue(ifp, txr->br, m);
+ taskqueue_enqueue(que->tq, &que->que_task);
+ }
+
+ return (err);
+}
+
+static int
+ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
+{
+ struct adapter *adapter = txr->adapter;
+ struct mbuf *next;
+ int enqueued, err = 0;
+
+ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING || adapter->link_active == 0) {
+ if (m != NULL)
+ err = drbr_enqueue(ifp, txr->br, m);
+ return (err);
+ }
+
+ /* Do a clean if descriptors are low */
+ if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
+ ixv_txeof(txr);
+
+ enqueued = 0;
+ if (m == NULL) {
+ next = drbr_dequeue(ifp, txr->br);
+ } else if (drbr_needs_enqueue(ifp, txr->br)) {
+ if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
+ return (err);
+ next = drbr_dequeue(ifp, txr->br);
+ } else
+ next = m;
+
+ /* Process the queue */
+ while (next != NULL) {
+ if ((err = ixv_xmit(txr, &next)) != 0) {
+ if (next != NULL)
+ err = drbr_enqueue(ifp, txr->br, next);
+ break;
+ }
+ enqueued++;
+ drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
+ /* Send a copy of the frame to the BPF listener */
+ ETHER_BPF_MTAP(ifp, next);
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+ if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ break;
+ }
+ next = drbr_dequeue(ifp, txr->br);
+ }
+
+ if (enqueued > 0) {
+ /* Set watchdog on */
+ txr->watchdog_check = TRUE;
+ txr->watchdog_time = ticks;
+ }
+
+ return (err);
+}
+
+/*
+** Flush all ring buffers
+*/
+static void
+ixv_qflush(struct ifnet *ifp)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct mbuf *m;
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ IXV_TX_LOCK(txr);
+ while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+ m_freem(m);
+ IXV_TX_UNLOCK(txr);
+ }
+ if_qflush(ifp);
+}
+
+#endif
+
+/*********************************************************************
+ * Ioctl entry point
+ *
+ * ixv_ioctl is called when the user wants to configure the
+ * interface.
+ *
+ * return 0 on success, positive on failure
+ **********************************************************************/
+
+static int
+ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *) data;
+ int error = 0;
+
+ switch (command) {
+
+ case SIOCSIFMTU:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
+ if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
+ error = EINVAL;
+ } else {
+ IXV_CORE_LOCK(adapter);
+ ifp->if_mtu = ifr->ifr_mtu;
+ adapter->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ ixv_init_locked(adapter);
+ IXV_CORE_UNLOCK(adapter);
+ }
+ break;
+ case SIOCSIFFLAGS:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
+ IXV_CORE_LOCK(adapter);
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ ixv_init_locked(adapter);
+ } else
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ ixv_stop(adapter);
+ adapter->if_flags = ifp->if_flags;
+ IXV_CORE_UNLOCK(adapter);
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXV_CORE_LOCK(adapter);
+ ixv_disable_intr(adapter);
+ ixv_set_multi(adapter);
+ ixv_enable_intr(adapter);
+ IXV_CORE_UNLOCK(adapter);
+ }
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
+ error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
+ break;
+ case SIOCSIFCAP:
+ {
+ int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
+ if (mask & IFCAP_HWCSUM)
+ ifp->if_capenable ^= IFCAP_HWCSUM;
+ if (mask & IFCAP_TSO4)
+ ifp->if_capenable ^= IFCAP_TSO4;
+ if (mask & IFCAP_LRO)
+ ifp->if_capenable ^= IFCAP_LRO;
+ if (mask & IFCAP_VLAN_HWTAGGING)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXV_CORE_LOCK(adapter);
+ ixv_init_locked(adapter);
+ IXV_CORE_UNLOCK(adapter);
+ }
+ VLAN_CAPABILITIES(ifp);
+ break;
+ }
+
+ default:
+ IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+/*********************************************************************
+ * Init entry point
+ *
+ * This routine is used in two ways. It is used by the stack as
+ * the init entry point in the network interface structure. It is also used
+ * by the driver as a hw/sw initialization routine to get to a
+ * consistent state.
+ **********************************************************************/
+#define IXGBE_MHADD_MFS_SHIFT 16
+
+static void
+ixv_init_locked(struct adapter *adapter)
+{
+ struct ifnet *ifp = adapter->ifp;
+ device_t dev = adapter->dev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mhadd, gpie;
+
+ INIT_DEBUGOUT("ixv_init: begin");
+ mtx_assert(&adapter->core_mtx, MA_OWNED);
+ hw->adapter_stopped = FALSE;
+ ixgbe_stop_adapter(hw);
+ callout_stop(&adapter->timer);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+	/* Get the latest mac address; the user may have set a LAA */
+ bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
+ IXGBE_ETH_LENGTH_OF_ADDRESS);
+ ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
+ hw->addr_ctrl.rar_used_count = 1;
+
+ /* Prepare transmit descriptors and buffers */
+ if (ixv_setup_transmit_structures(adapter)) {
+ device_printf(dev,"Could not setup transmit structures\n");
+ ixv_stop(adapter);
+ return;
+ }
+
+ ixgbe_reset_hw(hw);
+ ixv_initialize_transmit_units(adapter);
+
+ /* Setup Multicast table */
+ ixv_set_multi(adapter);
+
+ /*
+ ** Determine the correct mbuf pool
+ ** for doing jumbo/headersplit
+ */
+ if (ifp->if_mtu > ETHERMTU)
+ adapter->rx_mbuf_sz = MJUMPAGESIZE;
+ else
+ adapter->rx_mbuf_sz = MCLBYTES;
+
+ /* Prepare receive descriptors and buffers */
+ if (ixv_setup_receive_structures(adapter)) {
+ device_printf(dev,"Could not setup receive structures\n");
+ ixv_stop(adapter);
+ return;
+ }
+
+ /* Configure RX settings */
+ ixv_initialize_receive_units(adapter);
+
+ /* Enable Enhanced MSIX mode */
+ gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
+ gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
+ gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* Set the various hardware offload abilities */
+ ifp->if_hwassist = 0;
+ if (ifp->if_capenable & IFCAP_TSO4)
+ ifp->if_hwassist |= CSUM_TSO;
+ if (ifp->if_capenable & IFCAP_TXCSUM) {
+ ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+#if __FreeBSD_version >= 800000
+ ifp->if_hwassist |= CSUM_SCTP;
+#endif
+ }
+
+ /* Set MTU size */
+ if (ifp->if_mtu > ETHERMTU) {
+ mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+ mhadd &= ~IXGBE_MHADD_MFS_MASK;
+ mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+ }
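+
+	/*
+	** Illustrative arithmetic for the write above: an MTU of 9000
+	** gives a max_frame_size of 9000 + 14 (header) + 4 (CRC) =
+	** 9018, which lands in bits 31:16 of MHADD while the masked
+	** low word is preserved.
+	*/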
+
+ /* Set up VLAN offload and filter */
+ ixv_setup_vlan_support(adapter);
+
+ callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
+
+ /* Set up MSI/X routing */
+ ixv_configure_ivars(adapter);
+
+ /* Set up auto-mask */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
+
+ /* Set moderation on the Link interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
+
+ /* Stats init */
+ ixv_init_stats(adapter);
+
+ /* Config/Enable Link */
+ ixv_config_link(adapter);
+
+ /* And now turn on interrupts */
+ ixv_enable_intr(adapter);
+
+ /* Now inform the stack we're ready */
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ return;
+}
+
+static void
+ixv_init(void *arg)
+{
+ struct adapter *adapter = arg;
+
+ IXV_CORE_LOCK(adapter);
+ ixv_init_locked(adapter);
+ IXV_CORE_UNLOCK(adapter);
+ return;
+}
+
+
+/*
+**
+** MSIX Interrupt Handlers and Tasklets
+**
+*/
+
+static inline void
+ixv_enable_queue(struct adapter *adapter, u32 vector)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 queue = 1 << vector;
+ u32 mask;
+
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+}
+
+static inline void
+ixv_disable_queue(struct adapter *adapter, u32 vector)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+	u64 queue = (u64)1 << vector;
+ u32 mask;
+
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
+}
+
+static inline void
+ixv_rearm_queues(struct adapter *adapter, u64 queues)
+{
+ u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
+}
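+
+/*
+** Illustrative only: for vector 2 the helpers above compute the
+** queue bit (1 << 2) == 0x4, so writing 0x4 to VTEIMS unmasks that
+** queue's interrupt, writing it to VTEIMC masks it, and writing it
+** to VTEICS software-triggers it.
+*/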
+
+
+static void
+ixv_handle_que(void *context, int pending)
+{
+ struct ix_queue *que = context;
+ struct adapter *adapter = que->adapter;
+ struct tx_ring *txr = que->txr;
+ struct ifnet *ifp = adapter->ifp;
+ bool more;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ more = ixv_rxeof(que, adapter->rx_process_limit);
+ IXV_TX_LOCK(txr);
+ ixv_txeof(txr);
+#if __FreeBSD_version >= 800000
+ if (!drbr_empty(ifp, txr->br))
+ ixv_mq_start_locked(ifp, txr, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ ixv_start_locked(txr, ifp);
+#endif
+ IXV_TX_UNLOCK(txr);
+ if (more) {
+ taskqueue_enqueue(que->tq, &que->que_task);
+ return;
+ }
+ }
+
+ /* Reenable this interrupt */
+ ixv_enable_queue(adapter, que->msix);
+ return;
+}
+
+/*********************************************************************
+ *
+ * MSIX Queue Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixv_msix_que(void *arg)
+{
+ struct ix_queue *que = arg;
+ struct adapter *adapter = que->adapter;
+ struct tx_ring *txr = que->txr;
+ struct rx_ring *rxr = que->rxr;
+ bool more_tx, more_rx;
+ u32 newitr = 0;
+
+ ixv_disable_queue(adapter, que->msix);
+ ++que->irqs;
+
+ more_rx = ixv_rxeof(que, adapter->rx_process_limit);
+
+ IXV_TX_LOCK(txr);
+ more_tx = ixv_txeof(txr);
+ IXV_TX_UNLOCK(txr);
+
+
+ /* Do AIM now? */
+
+ if (ixv_enable_aim == FALSE)
+ goto no_calc;
+ /*
+ ** Do Adaptive Interrupt Moderation:
+ ** - Write out last calculated setting
+ ** - Calculate based on average size over
+ ** the last interval.
+ */
+ if (que->eitr_setting)
+ IXGBE_WRITE_REG(&adapter->hw,
+ IXGBE_VTEITR(que->msix),
+ que->eitr_setting);
+
+ que->eitr_setting = 0;
+
+ /* Idle, do nothing */
+ if ((txr->bytes == 0) && (rxr->bytes == 0))
+ goto no_calc;
+
+ if ((txr->bytes) && (txr->packets))
+ newitr = txr->bytes/txr->packets;
+ if ((rxr->bytes) && (rxr->packets))
+ newitr = max(newitr,
+ (rxr->bytes / rxr->packets));
+ newitr += 24; /* account for hardware frame, crc */
+
+ /* set an upper boundary */
+ newitr = min(newitr, 3000);
+
+ /* Be nice to the mid range */
+ if ((newitr > 300) && (newitr < 1200))
+ newitr = (newitr / 3);
+ else
+ newitr = (newitr / 2);
+
+ newitr |= newitr << 16;
+
+ /* save for next interrupt */
+ que->eitr_setting = newitr;
+
+ /* Reset state */
+ txr->bytes = 0;
+ txr->packets = 0;
+ rxr->bytes = 0;
+ rxr->packets = 0;
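+
+	/*
+	** Worked example of the moderation math above (illustrative):
+	** 64 TX packets totalling 96000 bytes average 1500 bytes, plus
+	** 24 for framing gives 1524; that is above the 1200 midrange
+	** cutoff, so it is halved to 762 and mirrored into the high
+	** word, 762 | (762 << 16), for the next EITR write.
+	*/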
+
+no_calc:
+ if (more_tx || more_rx)
+ taskqueue_enqueue(que->tq, &que->que_task);
+ else /* Reenable this interrupt */
+ ixv_enable_queue(adapter, que->msix);
+ return;
+}
+
+static void
+ixv_msix_mbx(void *arg)
+{
+ struct adapter *adapter = arg;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+
+ ++adapter->mbx_irq;
+
+ /* First get the cause */
+ reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ /* Clear interrupt with write */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
+
+ /* Link status change */
+ if (reg & IXGBE_EICR_LSC)
+ taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
+ return;
+}
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called whenever the user queries the status of
+ * the interface using ifconfig.
+ *
+ **********************************************************************/
+static void
+ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+ struct adapter *adapter = ifp->if_softc;
+
+ INIT_DEBUGOUT("ixv_media_status: begin");
+ IXV_CORE_LOCK(adapter);
+ ixv_update_link_status(adapter);
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (!adapter->link_active) {
+ IXV_CORE_UNLOCK(adapter);
+ return;
+ }
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_FDX;
+ break;
+ }
+
+ IXV_CORE_UNLOCK(adapter);
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called when the user changes speed/duplex using
+ * the media/mediaopt option with ifconfig.
+ *
+ **********************************************************************/
+static int
+ixv_media_change(struct ifnet * ifp)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct ifmedia *ifm = &adapter->media;
+
+ INIT_DEBUGOUT("ixv_media_change: begin");
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ switch (IFM_SUBTYPE(ifm->ifm_media)) {
+ case IFM_AUTO:
+ break;
+ default:
+ device_printf(adapter->dev, "Only auto media type\n");
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * This routine maps the mbufs to tx descriptors, allowing the
+ * TX engine to transmit the packets.
+ * - return 0 on success, positive on failure
+ *
+ **********************************************************************/
+
+static int
+ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
+{
+ struct adapter *adapter = txr->adapter;
+ u32 olinfo_status = 0, cmd_type_len;
+ u32 paylen = 0;
+ int i, j, error, nsegs;
+ int first, last = 0;
+ struct mbuf *m_head;
+ bus_dma_segment_t segs[32];
+ bus_dmamap_t map;
+ struct ixv_tx_buf *txbuf, *txbuf_mapped;
+ union ixgbe_adv_tx_desc *txd = NULL;
+
+ m_head = *m_headp;
+
+ /* Basic descriptor defines */
+ cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
+
+ if (m_head->m_flags & M_VLANTAG)
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+ /*
+ * Important to capture the first descriptor
+ * used because it will contain the index of
+ * the one we tell the hardware to report back
+ */
+ first = txr->next_avail_desc;
+ txbuf = &txr->tx_buffers[first];
+ txbuf_mapped = txbuf;
+ map = txbuf->map;
+
+ /*
+ * Map the packet for DMA.
+ */
+ error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
+ *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+ if (error == EFBIG) {
+ struct mbuf *m;
+
+ m = m_defrag(*m_headp, M_DONTWAIT);
+ if (m == NULL) {
+ adapter->mbuf_defrag_failed++;
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ *m_headp = m;
+
+ /* Try it again */
+ error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
+ *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+ if (error == ENOMEM) {
+ adapter->no_tx_dma_setup++;
+ return (error);
+ } else if (error != 0) {
+ adapter->no_tx_dma_setup++;
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (error);
+ }
+ } else if (error == ENOMEM) {
+ adapter->no_tx_dma_setup++;
+ return (error);
+ } else if (error != 0) {
+ adapter->no_tx_dma_setup++;
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (error);
+ }
+
+ /* Make certain there are enough descriptors */
+ if (nsegs > txr->tx_avail - 2) {
+ txr->no_desc_avail++;
+ error = ENOBUFS;
+ goto xmit_fail;
+ }
+ m_head = *m_headp;
+
+ /*
+	** Set up the appropriate offload context;
+ ** this becomes the first descriptor of
+ ** a packet.
+ */
+ if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
+ if (ixv_tso_setup(txr, m_head, &paylen)) {
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+ olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+ olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
+ ++adapter->tso_tx;
+ } else
+ return (ENXIO);
+ } else if (ixv_tx_ctx_setup(txr, m_head))
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+
+ /* Record payload length */
+ if (paylen == 0)
+ olinfo_status |= m_head->m_pkthdr.len <<
+ IXGBE_ADVTXD_PAYLEN_SHIFT;
+
+ i = txr->next_avail_desc;
+ for (j = 0; j < nsegs; j++) {
+ bus_size_t seglen;
+ bus_addr_t segaddr;
+
+ txbuf = &txr->tx_buffers[i];
+ txd = &txr->tx_base[i];
+ seglen = segs[j].ds_len;
+ segaddr = htole64(segs[j].ds_addr);
+
+ txd->read.buffer_addr = segaddr;
+ txd->read.cmd_type_len = htole32(txr->txd_cmd |
+ cmd_type_len |seglen);
+ txd->read.olinfo_status = htole32(olinfo_status);
+ last = i; /* descriptor that will get completion IRQ */
+
+ if (++i == adapter->num_tx_desc)
+ i = 0;
+
+ txbuf->m_head = NULL;
+ txbuf->eop_index = -1;
+ }
+
+ txd->read.cmd_type_len |=
+ htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
+ txr->tx_avail -= nsegs;
+ txr->next_avail_desc = i;
+
+ txbuf->m_head = m_head;
+ txbuf->map = map;
+ bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
+
+ /* Set the index of the descriptor that will be marked done */
+ txbuf = &txr->tx_buffers[first];
+ txbuf->eop_index = last;
+
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ /*
+	 * Advance the Transmit Descriptor Tail (TDT); this tells the
+ * hardware that this frame is available to transmit.
+ */
+ ++txr->total_packets;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
+
+ return (0);
+
+xmit_fail:
+ bus_dmamap_unload(txr->txtag, txbuf->map);
+ return (error);
+
+}
+
+
+/*********************************************************************
+ * Multicast Update
+ *
+ * This routine is called whenever multicast address list is updated.
+ *
+ **********************************************************************/
+#define IXGBE_RAR_ENTRIES 16
+
+static void
+ixv_set_multi(struct adapter *adapter)
+{
+ u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 *update_ptr;
+ struct ifmultiaddr *ifma;
+ int mcnt = 0;
+ struct ifnet *ifp = adapter->ifp;
+
+ IOCTL_DEBUGOUT("ixv_set_multi: begin");
+
+#if __FreeBSD_version < 800000
+ IF_ADDR_LOCK(ifp);
+#else
+ if_maddr_rlock(ifp);
+#endif
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+ &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
+ IXGBE_ETH_LENGTH_OF_ADDRESS);
+ mcnt++;
+ }
+#if __FreeBSD_version < 800000
+ IF_ADDR_UNLOCK(ifp);
+#else
+ if_maddr_runlock(ifp);
+#endif
+
+ update_ptr = mta;
+
+ ixgbe_update_mc_addr_list(&adapter->hw,
+ update_ptr, mcnt, ixv_mc_array_itr);
+
+ return;
+}
+
+/*
+ * This is an iterator function needed by the multicast shared
+ * code. It simply feeds the shared code routine the addresses
+ * built up in the mta array of ixv_set_multi(), one by one.
+ */
+static u8 *
+ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
+{
+ u8 *addr = *update_ptr;
+ u8 *newptr;
+ *vmdq = 0;
+
+ newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
+ *update_ptr = newptr;
+ return addr;
+}
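+
+/*
+** Illustrative only: the shared code invokes the iterator once per
+** address, so with mcnt == 2 ixgbe_update_mc_addr_list() effectively
+** does:
+**
+**	addr = ixv_mc_array_itr(hw, &update_ptr, &vmdq); // mta[0..5]
+**	addr = ixv_mc_array_itr(hw, &update_ptr, &vmdq); // mta[6..11]
+**
+** advancing update_ptr by IXGBE_ETH_LENGTH_OF_ADDRESS each call.
+*/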
+
+/*********************************************************************
+ * Timer routine
+ *
+ * This routine checks for link status, updates statistics,
+ * and runs the watchdog check.
+ *
+ **********************************************************************/
+
+static void
+ixv_local_timer(void *arg)
+{
+ struct adapter *adapter = arg;
+ device_t dev = adapter->dev;
+ struct tx_ring *txr = adapter->tx_rings;
+ int i;
+
+ mtx_assert(&adapter->core_mtx, MA_OWNED);
+
+ ixv_update_link_status(adapter);
+
+ /* Stats Update */
+ ixv_update_stats(adapter);
+
+ /*
+ * If the interface has been paused
+ * then don't do the watchdog check
+ */
+ if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
+ goto out;
+ /*
+ ** Check for time since any descriptor was cleaned
+ */
+ for (i = 0; i < adapter->num_queues; i++, txr++) {
+ IXV_TX_LOCK(txr);
+ if (txr->watchdog_check == FALSE) {
+ IXV_TX_UNLOCK(txr);
+ continue;
+ }
+ if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
+ goto hung;
+ IXV_TX_UNLOCK(txr);
+ }
+out:
+ ixv_rearm_queues(adapter, adapter->que_mask);
+ callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
+ return;
+
+hung:
+ device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
+ device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
+ IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
+ IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
+ device_printf(dev,"TX(%d) desc avail = %d,"
+	    " Next TX to Clean = %d\n",
+ txr->me, txr->tx_avail, txr->next_to_clean);
+ adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ adapter->watchdog_events++;
+ IXV_TX_UNLOCK(txr);
+ ixv_init_locked(adapter);
+}
+
+/*
+** Note: this routine updates the OS on the link state;
+** the real check of the hardware only happens with
+** a link interrupt.
+*/
+static void
+ixv_update_link_status(struct adapter *adapter)
+{
+ struct ifnet *ifp = adapter->ifp;
+ struct tx_ring *txr = adapter->tx_rings;
+ device_t dev = adapter->dev;
+
+
+ if (adapter->link_up){
+ if (adapter->link_active == FALSE) {
+ if (bootverbose)
+ device_printf(dev,"Link is up %d Gbps %s \n",
+ ((adapter->link_speed == 128)? 10:1),
+ "Full Duplex");
+ adapter->link_active = TRUE;
+ if_link_state_change(ifp, LINK_STATE_UP);
+ }
+ } else { /* Link down */
+ if (adapter->link_active == TRUE) {
+ if (bootverbose)
+ device_printf(dev,"Link is Down\n");
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ adapter->link_active = FALSE;
+ for (int i = 0; i < adapter->num_queues;
+ i++, txr++)
+ txr->watchdog_check = FALSE;
+ }
+ }
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC and deallocates TX/RX buffers.
+ *
+ **********************************************************************/
+
+static void
+ixv_stop(void *arg)
+{
+ struct ifnet *ifp;
+ struct adapter *adapter = arg;
+ struct ixgbe_hw *hw = &adapter->hw;
+ ifp = adapter->ifp;
+
+ mtx_assert(&adapter->core_mtx, MA_OWNED);
+
+ INIT_DEBUGOUT("ixv_stop: begin\n");
+ ixv_disable_intr(adapter);
+
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ ixgbe_reset_hw(hw);
+ adapter->hw.adapter_stopped = FALSE;
+ ixgbe_stop_adapter(hw);
+ callout_stop(&adapter->timer);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * Determine hardware revision.
+ *
+ **********************************************************************/
+static void
+ixv_identify_hardware(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ u16 pci_cmd_word;
+
+ /*
+	** Make sure BUSMASTER is set; on a VM under
+	** KVM it may not be, and that will break things.
+ */
+ pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
+ (pci_cmd_word & PCIM_CMD_MEMEN))) {
+ INIT_DEBUGOUT("Memory Access and/or Bus Master "
+ "bits were not set!\n");
+ pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
+ pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
+ }
+
+ /* Save off the information about this board */
+ adapter->hw.vendor_id = pci_get_vendor(dev);
+ adapter->hw.device_id = pci_get_device(dev);
+ adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ adapter->hw.subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ adapter->hw.subsystem_device_id =
+ pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * Setup MSIX Interrupt resources and handlers
+ *
+ **********************************************************************/
+static int
+ixv_allocate_msix(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ struct ix_queue *que = adapter->queues;
+ int error, rid, vector = 0;
+
+ for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
+ rid = vector + 1;
+ que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if (que->res == NULL) {
+ device_printf(dev,"Unable to allocate"
+ " bus resource: que interrupt [%d]\n", vector);
+ return (ENXIO);
+ }
+ /* Set the handler function */
+ error = bus_setup_intr(dev, que->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixv_msix_que, que, &que->tag);
+ if (error) {
+ que->res = NULL;
+ device_printf(dev, "Failed to register QUE handler");
+ return (error);
+ }
+#if __FreeBSD_version >= 800504
+ bus_describe_intr(dev, que->res, que->tag, "que %d", i);
+#endif
+ que->msix = vector;
+ adapter->que_mask |= (u64)(1 << que->msix);
+ /*
+		** Bind the msix vector, and thus the
+		** ring, to the corresponding cpu.
+ */
+ if (adapter->num_queues > 1)
+ bus_bind_intr(dev, que->res, i);
+
+ TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
+ que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
+ taskqueue_thread_enqueue, &que->tq);
+ taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+ device_get_nameunit(adapter->dev));
+ }
+
+ /* and Mailbox */
+ rid = vector + 1;
+ adapter->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (!adapter->res) {
+ device_printf(dev,"Unable to allocate"
+ " bus resource: MBX interrupt [%d]\n", rid);
+ return (ENXIO);
+ }
+ /* Set the mbx handler function */
+ error = bus_setup_intr(dev, adapter->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixv_msix_mbx, adapter, &adapter->tag);
+ if (error) {
+ adapter->res = NULL;
+ device_printf(dev, "Failed to register LINK handler");
+ return (error);
+ }
+#if __FreeBSD_version >= 800504
+ bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
+#endif
+ adapter->mbxvec = vector;
+ /* Tasklets for Mailbox */
+ TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
+ adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
+ taskqueue_thread_enqueue, &adapter->tq);
+ taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
+ device_get_nameunit(adapter->dev));
+ /*
+ ** Due to a broken design QEMU will fail to properly
+ ** enable the guest for MSIX unless the vectors in
+ ** the table are all set up, so we must rewrite the
+ ** ENABLE in the MSIX control register again at this
+ ** point to cause it to successfully initialize us.
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
+ int msix_ctrl;
+ pci_find_extcap(dev, PCIY_MSIX, &rid);
+ rid += PCIR_MSIX_CTRL;
+ msix_ctrl = pci_read_config(dev, rid, 2);
+ msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ pci_write_config(dev, rid, msix_ctrl, 2);
+ }
+
+ return (0);
+}
+
+/*
+ * Setup MSIX resources, note that the VF
+ * device MUST use MSIX, there is no fallback.
+ */
+static int
+ixv_setup_msix(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ int rid, vectors, want = 2;
+
+
+ /* First try MSI/X */
+ rid = PCIR_BAR(3);
+ adapter->msix_mem = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (!adapter->msix_mem) {
+ device_printf(adapter->dev,
+ "Unable to map MSIX table \n");
+ goto out;
+ }
+
+ vectors = pci_msix_count(dev);
+ if (vectors < 2) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rid, adapter->msix_mem);
+ adapter->msix_mem = NULL;
+ goto out;
+ }
+
+ /*
+ ** Want two vectors: one for a queue,
+ ** plus an additional for mailbox.
+ */
+ if (pci_alloc_msix(dev, &want) == 0) {
+ device_printf(adapter->dev,
+ "Using MSIX interrupts with %d vectors\n", want);
+ return (want);
+ }
+out:
+ device_printf(adapter->dev,"MSIX config error\n");
+ return (ENXIO);
+}
+
+
+static int
+ixv_allocate_pci_resources(struct adapter *adapter)
+{
+ int rid;
+ device_t dev = adapter->dev;
+
+ rid = PCIR_BAR(0);
+ adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+
+ if (!(adapter->pci_mem)) {
+ device_printf(dev,"Unable to allocate bus resource: memory\n");
+ return (ENXIO);
+ }
+
+ adapter->osdep.mem_bus_space_tag =
+ rman_get_bustag(adapter->pci_mem);
+ adapter->osdep.mem_bus_space_handle =
+ rman_get_bushandle(adapter->pci_mem);
+ adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
+
+ adapter->num_queues = 1;
+ adapter->hw.back = &adapter->osdep;
+
+ /*
+	** Now set up MSI/X; this should
+	** return the number of
+	** configured vectors.
+ */
+ adapter->msix = ixv_setup_msix(adapter);
+ if (adapter->msix == ENXIO)
+ return (ENXIO);
+ else
+ return (0);
+}
+
+static void
+ixv_free_pci_resources(struct adapter * adapter)
+{
+ struct ix_queue *que = adapter->queues;
+ device_t dev = adapter->dev;
+ int rid, memrid;
+
+ memrid = PCIR_BAR(MSIX_BAR);
+
+ /*
+ ** There is a slight possibility of a failure mode
+ ** in attach that will result in entering this function
+ ** before interrupt resources have been initialized, and
+	** in that case we do not want to execute the loops below.
+	** We can detect this reliably by the state of the adapter's
+	** res pointer.
+ */
+ if (adapter->res == NULL)
+ goto mem;
+
+ /*
+ ** Release all msix queue resources:
+ */
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ rid = que->msix + 1;
+ if (que->tag != NULL) {
+ bus_teardown_intr(dev, que->res, que->tag);
+ que->tag = NULL;
+ }
+ if (que->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+ }
+
+
+ /* Clean the Legacy or Link interrupt last */
+ if (adapter->mbxvec) /* we are doing MSIX */
+ rid = adapter->mbxvec + 1;
+ else
+		rid = (adapter->msix != 0) ? 1 : 0;
+
+ if (adapter->tag != NULL) {
+ bus_teardown_intr(dev, adapter->res, adapter->tag);
+ adapter->tag = NULL;
+ }
+ if (adapter->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
+
+mem:
+ if (adapter->msix)
+ pci_release_msi(dev);
+
+ if (adapter->msix_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ memrid, adapter->msix_mem);
+
+ if (adapter->pci_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(0), adapter->pci_mem);
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static void
+ixv_setup_interface(device_t dev, struct adapter *adapter)
+{
+ struct ifnet *ifp;
+
+ INIT_DEBUGOUT("ixv_setup_interface: begin");
+
+ ifp = adapter->ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL)
+ panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_baudrate = 1000000000;
+ ifp->if_init = ixv_init;
+ ifp->if_softc = adapter;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = ixv_ioctl;
+#if __FreeBSD_version >= 800000
+ ifp->if_transmit = ixv_mq_start;
+ ifp->if_qflush = ixv_qflush;
+#else
+ ifp->if_start = ixv_start;
+#endif
+ ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
+
+ ether_ifattach(ifp, adapter->hw.mac.addr);
+
+ adapter->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ /*
+ * Tell the upper layer(s) we support long frames.
+ */
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+ ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
+
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /*
+ * Specify the media types supported by this adapter and register
+ * callbacks to update media and link information
+ */
+ ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
+ ixv_media_status);
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
+
+ return;
+}
+
+static void
+ixv_config_link(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 autoneg, err = 0;
+ bool negotiate = TRUE;
+
+ if (hw->mac.ops.check_link)
+ err = hw->mac.ops.check_link(hw, &autoneg,
+ &adapter->link_up, FALSE);
+ if (err)
+ goto out;
+
+ if (hw->mac.ops.setup_link)
+ err = hw->mac.ops.setup_link(hw, autoneg,
+ negotiate, adapter->link_up);
+out:
+ return;
+}
+
+/********************************************************************
+ * Manage DMA'able memory.
+ *******************************************************************/
+static void
+ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
+{
+ if (error)
+ return;
+ *(bus_addr_t *) arg = segs->ds_addr;
+ return;
+}
+
+static int
+ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
+ struct ixv_dma_alloc *dma, int mapflags)
+{
+ device_t dev = adapter->dev;
+ int r;
+
+ r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
+ DBA_ALIGN, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ size, /* maxsize */
+ 1, /* nsegments */
+ size, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &dma->dma_tag);
+ if (r != 0) {
+ device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
+ "error %u\n", r);
+ goto fail_0;
+ }
+ r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
+ BUS_DMA_NOWAIT, &dma->dma_map);
+ if (r != 0) {
+ device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
+ "error %u\n", r);
+ goto fail_1;
+ }
+ r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
+ size,
+ ixv_dmamap_cb,
+ &dma->dma_paddr,
+ mapflags | BUS_DMA_NOWAIT);
+ if (r != 0) {
+ device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
+ "error %u\n", r);
+ goto fail_2;
+ }
+ dma->dma_size = size;
+ return (0);
+fail_2:
+ bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+fail_1:
+ bus_dma_tag_destroy(dma->dma_tag);
+fail_0:
+ dma->dma_map = NULL;
+ dma->dma_tag = NULL;
+ return (r);
+}
+
+static void
+ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
+{
+ bus_dmamap_sync(dma->dma_tag, dma->dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+ bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+ bus_dma_tag_destroy(dma->dma_tag);
+}
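+
+/*
+** Usage sketch for the pair above (illustrative; this mirrors how
+** ixv_allocate_queues() sizes a descriptor ring further below):
+**
+**	struct ixv_dma_alloc dma;
+**	int tsize = roundup2(adapter->num_tx_desc *
+**	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
+**	if (ixv_dma_malloc(adapter, tsize, &dma, BUS_DMA_NOWAIT) == 0) {
+**		... use dma.dma_vaddr and dma.dma_paddr ...
+**		ixv_dma_free(adapter, &dma);
+**	}
+*/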
+
+
+/*********************************************************************
+ *
+ * Allocate memory for the transmit and receive rings, and then
+ * the descriptors associated with each, called only once at attach.
+ *
+ **********************************************************************/
+static int
+ixv_allocate_queues(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ struct ix_queue *que;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ int rsize, tsize, error = 0;
+ int txconf = 0, rxconf = 0;
+
+ /* First allocate the top level queue structs */
+ if (!(adapter->queues =
+ (struct ix_queue *) malloc(sizeof(struct ix_queue) *
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate queue memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ /* First allocate the TX ring struct memory */
+ if (!(adapter->tx_rings =
+ (struct tx_ring *) malloc(sizeof(struct tx_ring) *
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate TX ring memory\n");
+ error = ENOMEM;
+ goto tx_fail;
+ }
+
+ /* Next allocate the RX */
+ if (!(adapter->rx_rings =
+ (struct rx_ring *) malloc(sizeof(struct rx_ring) *
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate RX ring memory\n");
+ error = ENOMEM;
+ goto rx_fail;
+ }
+
+ /* For the ring itself */
+ tsize = roundup2(adapter->num_tx_desc *
+ sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
+
+ /*
+	 * Now set up the TX queues; txconf is needed to handle the
+	 * possibility that things fail midcourse and we need to
+	 * undo memory gracefully.
+ */
+ for (int i = 0; i < adapter->num_queues; i++, txconf++) {
+ /* Set up some basics */
+ txr = &adapter->tx_rings[i];
+ txr->adapter = adapter;
+ txr->me = i;
+
+ /* Initialize the TX side lock */
+ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+ device_get_nameunit(dev), txr->me);
+ mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
+
+ if (ixv_dma_malloc(adapter, tsize,
+ &txr->txdma, BUS_DMA_NOWAIT)) {
+ device_printf(dev,
+ "Unable to allocate TX Descriptor memory\n");
+ error = ENOMEM;
+ goto err_tx_desc;
+ }
+ txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
+ bzero((void *)txr->tx_base, tsize);
+
+ /* Now allocate transmit buffers for the ring */
+ if (ixv_allocate_transmit_buffers(txr)) {
+ device_printf(dev,
+ "Critical Failure setting up transmit buffers\n");
+ error = ENOMEM;
+ goto err_tx_desc;
+ }
+#if __FreeBSD_version >= 800000
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
+ M_WAITOK, &txr->tx_mtx);
+ if (txr->br == NULL) {
+ device_printf(dev,
+ "Critical Failure setting up buf ring\n");
+ error = ENOMEM;
+ goto err_tx_desc;
+ }
+#endif
+ }
+
+ /*
+ * Next the RX queues...
+ */
+ rsize = roundup2(adapter->num_rx_desc *
+ sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
+ for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
+ rxr = &adapter->rx_rings[i];
+ /* Set up some basics */
+ rxr->adapter = adapter;
+ rxr->me = i;
+
+ /* Initialize the RX side lock */
+ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+ device_get_nameunit(dev), rxr->me);
+ mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+ if (ixv_dma_malloc(adapter, rsize,
+ &rxr->rxdma, BUS_DMA_NOWAIT)) {
+ device_printf(dev,
+ "Unable to allocate RxDescriptor memory\n");
+ error = ENOMEM;
+ goto err_rx_desc;
+ }
+ rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
+ bzero((void *)rxr->rx_base, rsize);
+
+ /* Allocate receive buffers for the ring*/
+ if (ixv_allocate_receive_buffers(rxr)) {
+ device_printf(dev,
+ "Critical Failure setting up receive buffers\n");
+ error = ENOMEM;
+ goto err_rx_desc;
+ }
+ }
+
+ /*
+ ** Finally set up the queue holding structs
+ */
+ for (int i = 0; i < adapter->num_queues; i++) {
+ que = &adapter->queues[i];
+ que->adapter = adapter;
+ que->txr = &adapter->tx_rings[i];
+ que->rxr = &adapter->rx_rings[i];
+ }
+
+ return (0);
+
+err_rx_desc:
+ for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
+ ixv_dma_free(adapter, &rxr->rxdma);
+err_tx_desc:
+ for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
+ ixv_dma_free(adapter, &txr->txdma);
+ free(adapter->rx_rings, M_DEVBUF);
+rx_fail:
+ free(adapter->tx_rings, M_DEVBUF);
+tx_fail:
+ free(adapter->queues, M_DEVBUF);
+fail:
+ return (error);
+}
+
+
+/*********************************************************************
+ *
+ * Allocate memory for tx_buffer structures. The tx_buffer stores all
+ * the information needed to transmit a packet on the wire. This is
+ * called only once at attach; setup is done on every reset.
+ *
+ **********************************************************************/
+static int
+ixv_allocate_transmit_buffers(struct tx_ring *txr)
+{
+ struct adapter *adapter = txr->adapter;
+ device_t dev = adapter->dev;
+ struct ixv_tx_buf *txbuf;
+ int error, i;
+
+ /*
+ * Setup DMA descriptor areas.
+ */
+	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ IXV_TSO_SIZE, /* maxsize */
+ 32, /* nsegments */
+ PAGE_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &txr->txtag))) {
+ device_printf(dev,"Unable to allocate TX DMA tag\n");
+ goto fail;
+ }
+
+ if (!(txr->tx_buffers =
+ (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
+ adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate tx_buffer memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ /* Create the descriptor buffer dma maps */
+ txbuf = txr->tx_buffers;
+ for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+ error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
+ if (error != 0) {
+ device_printf(dev, "Unable to create TX DMA map\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+	/* Free everything; the routine handles a partially completed setup */
+ ixv_free_transmit_structures(adapter);
+ return (error);
+}
+
+/*********************************************************************
+ *
+ * Initialize a transmit ring.
+ *
+ **********************************************************************/
+static void
+ixv_setup_transmit_ring(struct tx_ring *txr)
+{
+ struct adapter *adapter = txr->adapter;
+ struct ixv_tx_buf *txbuf;
+ int i;
+
+ /* Clear the old ring contents */
+ IXV_TX_LOCK(txr);
+ bzero((void *)txr->tx_base,
+ (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
+ /* Reset indices */
+ txr->next_avail_desc = 0;
+ txr->next_to_clean = 0;
+
+ /* Free any existing tx buffers. */
+ txbuf = txr->tx_buffers;
+ for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+ if (txbuf->m_head != NULL) {
+ bus_dmamap_sync(txr->txtag, txbuf->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag, txbuf->map);
+ m_freem(txbuf->m_head);
+ txbuf->m_head = NULL;
+ }
+ /* Clear the EOP index */
+ txbuf->eop_index = -1;
+ }
+
+ /* Set number of descriptors available */
+ txr->tx_avail = adapter->num_tx_desc;
+
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ IXV_TX_UNLOCK(txr);
+}
+
+/*********************************************************************
+ *
+ * Initialize all transmit rings.
+ *
+ **********************************************************************/
+static int
+ixv_setup_transmit_structures(struct adapter *adapter)
+{
+ struct tx_ring *txr = adapter->tx_rings;
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++)
+ ixv_setup_transmit_ring(txr);
+
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Enable transmit unit.
+ *
+ **********************************************************************/
+static void
+ixv_initialize_transmit_units(struct adapter *adapter)
+{
+ struct tx_ring *txr = adapter->tx_rings;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ u64 tdba = txr->txdma.dma_paddr;
+ u32 txctrl, txdctl;
+
+ /* Set WTHRESH to 8, burst writeback */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl |= (8 << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+ /* Now enable */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+
+ /* Set the HW Tx Head and Tail indices */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
+
+ /* Setup Transmit Descriptor Cmd Settings */
+ txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
+ txr->watchdog_check = FALSE;
+
+ /* Set Ring parameters */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
+ (tdba & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
+ adapter->num_tx_desc *
+ sizeof(struct ixgbe_legacy_tx_desc));
+ txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
+ }
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * Free all transmit rings.
+ *
+ **********************************************************************/
+static void
+ixv_free_transmit_structures(struct adapter *adapter)
+{
+ struct tx_ring *txr = adapter->tx_rings;
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ IXV_TX_LOCK(txr);
+ ixv_free_transmit_buffers(txr);
+ ixv_dma_free(adapter, &txr->txdma);
+ IXV_TX_UNLOCK(txr);
+ IXV_TX_LOCK_DESTROY(txr);
+ }
+ free(adapter->tx_rings, M_DEVBUF);
+}
+
+/*********************************************************************
+ *
+ * Free transmit ring related data structures.
+ *
+ **********************************************************************/
+static void
+ixv_free_transmit_buffers(struct tx_ring *txr)
+{
+ struct adapter *adapter = txr->adapter;
+ struct ixv_tx_buf *tx_buffer;
+ int i;
+
+ INIT_DEBUGOUT("free_transmit_ring: begin");
+
+ if (txr->tx_buffers == NULL)
+ return;
+
+ tx_buffer = txr->tx_buffers;
+ for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
+ if (tx_buffer->m_head != NULL) {
+ bus_dmamap_sync(txr->txtag, tx_buffer->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag,
+ tx_buffer->map);
+ m_freem(tx_buffer->m_head);
+ tx_buffer->m_head = NULL;
+ if (tx_buffer->map != NULL) {
+ bus_dmamap_destroy(txr->txtag,
+ tx_buffer->map);
+ tx_buffer->map = NULL;
+ }
+ } else if (tx_buffer->map != NULL) {
+ bus_dmamap_unload(txr->txtag,
+ tx_buffer->map);
+ bus_dmamap_destroy(txr->txtag,
+ tx_buffer->map);
+ tx_buffer->map = NULL;
+ }
+ }
+#if __FreeBSD_version >= 800000
+ if (txr->br != NULL)
+ buf_ring_free(txr->br, M_DEVBUF);
+#endif
+ if (txr->tx_buffers != NULL) {
+ free(txr->tx_buffers, M_DEVBUF);
+ txr->tx_buffers = NULL;
+ }
+ if (txr->txtag != NULL) {
+ bus_dma_tag_destroy(txr->txtag);
+ txr->txtag = NULL;
+ }
+ return;
+}
+
+/*********************************************************************
+ *
+ * Advanced Context Descriptor setup for VLAN or CSUM
+ *
+ **********************************************************************/
+
+static boolean_t
+ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
+{
+ struct adapter *adapter = txr->adapter;
+ struct ixgbe_adv_tx_context_desc *TXD;
+ struct ixv_tx_buf *tx_buffer;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+ struct ether_vlan_header *eh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ int ehdrlen, ip_hlen = 0;
+ u16 etype;
+ u8 ipproto = 0;
+ bool offload = TRUE;
+ int ctxd = txr->next_avail_desc;
+ u16 vtag = 0;
+
+
+ if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
+ offload = FALSE;
+
+
+ tx_buffer = &txr->tx_buffers[ctxd];
+ TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+ /*
+ ** In advanced descriptors the vlan tag must
+ ** be placed into the descriptor itself.
+ */
+ if (mp->m_flags & M_VLANTAG) {
+ vtag = htole16(mp->m_pkthdr.ether_vtag);
+ vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
+ } else if (offload == FALSE)
+ return FALSE;
+
+ /*
+ * Determine where frame payload starts.
+ * Jump over vlan headers if already present,
+ * helpful for QinQ too.
+ */
+ eh = mtod(mp, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ etype = ntohs(eh->evl_proto);
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ } else {
+ etype = ntohs(eh->evl_encap_proto);
+ ehdrlen = ETHER_HDR_LEN;
+ }
+
+ /* Set the ether header length */
+ vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+
+ switch (etype) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+ ip_hlen = ip->ip_hl << 2;
+ if (mp->m_len < ehdrlen + ip_hlen)
+ return (FALSE);
+ ipproto = ip->ip_p;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ break;
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+ ip_hlen = sizeof(struct ip6_hdr);
+ if (mp->m_len < ehdrlen + ip_hlen)
+ return (FALSE);
+ ipproto = ip6->ip6_nxt;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
+ break;
+ default:
+ offload = FALSE;
+ break;
+ }
+
+ vlan_macip_lens |= ip_hlen;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+ switch (ipproto) {
+ case IPPROTO_TCP:
+ if (mp->m_pkthdr.csum_flags & CSUM_TCP)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+
+ case IPPROTO_UDP:
+ if (mp->m_pkthdr.csum_flags & CSUM_UDP)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
+ break;
+
+#if __FreeBSD_version >= 800000
+ case IPPROTO_SCTP:
+ if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ break;
+#endif
+ default:
+ offload = FALSE;
+ break;
+ }
+
+ /* Now copy bits into descriptor */
+ TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+ TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+ TXD->seqnum_seed = htole32(0);
+ TXD->mss_l4len_idx = htole32(0);
+
+ tx_buffer->m_head = NULL;
+ tx_buffer->eop_index = -1;
+
+ /* We've consumed the first desc, adjust counters */
+ if (++ctxd == adapter->num_tx_desc)
+ ctxd = 0;
+ txr->next_avail_desc = ctxd;
+ --txr->tx_avail;
+
+ return (offload);
+}
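+
+/*
+** Illustrative sketch of the packing done above, assuming the
+** standard shift definitions: for an untagged IPv4/TCP frame,
+** ehdrlen = 14 and ip_hlen = 20, so vlan_macip_lens becomes
+** (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20, and type_tucmd_mlhl
+** carries DEXT | CTXT | IPV4 | L4T_TCP.
+*/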
+
+/**********************************************************************
+ *
+ * Setup work for hardware segmentation offload (TSO) on
+ * adapters using advanced tx descriptors
+ *
+ **********************************************************************/
+static boolean_t
+ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
+{
+ struct adapter *adapter = txr->adapter;
+ struct ixgbe_adv_tx_context_desc *TXD;
+ struct ixv_tx_buf *tx_buffer;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+ u32 mss_l4len_idx = 0;
+ u16 vtag = 0;
+ int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
+ struct ether_vlan_header *eh;
+ struct ip *ip;
+ struct tcphdr *th;
+
+
+ /*
+ * Determine where frame payload starts.
+ * Jump over vlan headers if already present
+ */
+ eh = mtod(mp, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ else
+ ehdrlen = ETHER_HDR_LEN;
+
+ /* Ensure we have at least the IP+TCP header in the first mbuf. */
+ if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
+ return FALSE;
+
+ ctxd = txr->next_avail_desc;
+ tx_buffer = &txr->tx_buffers[ctxd];
+ TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+ if (ip->ip_p != IPPROTO_TCP)
+		return FALSE;
+ ip->ip_sum = 0;
+ ip_hlen = ip->ip_hl << 2;
+ th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+ th->th_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+ tcp_hlen = th->th_off << 2;
+ hdrlen = ehdrlen + ip_hlen + tcp_hlen;
+
+ /* This is used in the transmit desc in encap */
+ *paylen = mp->m_pkthdr.len - hdrlen;
+
+ /* VLAN MACLEN IPLEN */
+ if (mp->m_flags & M_VLANTAG) {
+ vtag = htole16(mp->m_pkthdr.ether_vtag);
+ vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
+ }
+
+ vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= ip_hlen;
+ TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+
+ /* ADV DTYPE TUCMD */
+ type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
+ TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+ TXD->seqnum_seed = htole32(0);
+ tx_buffer->m_head = NULL;
+ tx_buffer->eop_index = -1;
+
+ if (++ctxd == adapter->num_tx_desc)
+ ctxd = 0;
+
+ txr->tx_avail--;
+ txr->next_avail_desc = ctxd;
+ return TRUE;
+}
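+
+/*
+** Example of the MSS/L4LEN packing above (illustrative values):
+** with a TSO segment size of 1448 and a 20 byte TCP header,
+** mss_l4len_idx becomes (1448 << IXGBE_ADVTXD_MSS_SHIFT) |
+** (20 << IXGBE_ADVTXD_L4LEN_SHIFT).
+*/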
+
+
+/**********************************************************************
+ *
+ * Examine each tx_buffer in the used queue. If the hardware is done
+ * processing the packet then free associated resources. The
+ * tx_buffer is put back on the free queue.
+ *
+ **********************************************************************/
+static boolean_t
+ixv_txeof(struct tx_ring *txr)
+{
+ struct adapter *adapter = txr->adapter;
+ struct ifnet *ifp = adapter->ifp;
+ u32 first, last, done;
+ struct ixv_tx_buf *tx_buffer;
+ struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
+
+ mtx_assert(&txr->tx_mtx, MA_OWNED);
+
+ if (txr->tx_avail == adapter->num_tx_desc)
+ return FALSE;
+
+ first = txr->next_to_clean;
+ tx_buffer = &txr->tx_buffers[first];
+ /* For cleanup we just use legacy struct */
+ tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
+ last = tx_buffer->eop_index;
+ if (last == -1)
+ return FALSE;
+ eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
+
+ /*
+ ** Get the index of the first descriptor
+ ** BEYOND the EOP and call that 'done'.
+ ** I do this so the comparison in the
+ ** inner while loop below can be simple
+ */
+ if (++last == adapter->num_tx_desc) last = 0;
+ done = last;
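+	/*
+	** Example (illustrative values): in a 256 descriptor ring
+	** with first = 10 and the EOP at index 12, done becomes 13
+	** and the loop below cleans descriptors 10 through 12.
+	*/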
+
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
+ /*
+ ** Only the EOP descriptor of a packet now has the DD
+ ** bit set, this is what we look for...
+ */
+ while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
+ /* We clean the range of the packet */
+ while (first != done) {
+ tx_desc->upper.data = 0;
+ tx_desc->lower.data = 0;
+ tx_desc->buffer_addr = 0;
+ ++txr->tx_avail;
+
+ if (tx_buffer->m_head) {
+ bus_dmamap_sync(txr->txtag,
+ tx_buffer->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag,
+ tx_buffer->map);
+ m_freem(tx_buffer->m_head);
+ tx_buffer->m_head = NULL;
+ tx_buffer->map = NULL;
+ }
+ tx_buffer->eop_index = -1;
+ txr->watchdog_time = ticks;
+
+ if (++first == adapter->num_tx_desc)
+ first = 0;
+
+ tx_buffer = &txr->tx_buffers[first];
+ tx_desc =
+ (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
+ }
+ ++ifp->if_opackets;
+ /* See if there is more work now */
+ last = tx_buffer->eop_index;
+ if (last != -1) {
+ eop_desc =
+ (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
+ /* Get next done point */
+ if (++last == adapter->num_tx_desc) last = 0;
+ done = last;
+ } else
+ break;
+ }
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ txr->next_to_clean = first;
+
+ /*
+ * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
+ * it is OK to send packets. If there are no pending descriptors,
+ * clear the timeout. Otherwise, if some descriptors have been freed,
+ * restart the timeout.
+ */
+ if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ if (txr->tx_avail == adapter->num_tx_desc) {
+ txr->watchdog_check = FALSE;
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+/*********************************************************************
+ *
+ * Refresh mbuf buffers for RX descriptor rings
+ * - now keeps its own state so discards due to resource
+ * exhaustion are unnecessary, if an mbuf cannot be obtained
+ * it just returns, keeping its placeholder, thus it can simply
+ * be recalled to try again.
+ *
+ **********************************************************************/
+static void
+ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
+{
+ struct adapter *adapter = rxr->adapter;
+ bus_dma_segment_t hseg[1];
+ bus_dma_segment_t pseg[1];
+ struct ixv_rx_buf *rxbuf;
+ struct mbuf *mh, *mp;
+ int i, nsegs, error, cleaned;
+
+ i = rxr->next_to_refresh;
+ cleaned = -1; /* Signify no completions */
+ while (i != limit) {
+ rxbuf = &rxr->rx_buffers[i];
+ if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
+ mh = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (mh == NULL)
+ goto update;
+			mh->m_pkthdr.len = mh->m_len = MHLEN;
+ mh->m_flags |= M_PKTHDR;
+ m_adj(mh, ETHER_ALIGN);
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->htag,
+ rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ printf("GET BUF: dmamap load"
+ " failure - %d\n", error);
+ m_free(mh);
+ goto update;
+ }
+ rxbuf->m_head = mh;
+ bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+ BUS_DMASYNC_PREREAD);
+ rxr->rx_base[i].read.hdr_addr =
+ htole64(hseg[0].ds_addr);
+ }
+
+ if (rxbuf->m_pack == NULL) {
+ mp = m_getjcl(M_DONTWAIT, MT_DATA,
+ M_PKTHDR, adapter->rx_mbuf_sz);
+ if (mp == NULL)
+ goto update;
+ mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+ rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ printf("GET BUF: dmamap load"
+ " failure - %d\n", error);
+ m_free(mp);
+ goto update;
+ }
+ rxbuf->m_pack = mp;
+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+ BUS_DMASYNC_PREREAD);
+ rxr->rx_base[i].read.pkt_addr =
+ htole64(pseg[0].ds_addr);
+ }
+
+ cleaned = i;
+ /* Calculate next index */
+ if (++i == adapter->num_rx_desc)
+ i = 0;
+ /* This is the work marker for refresh */
+ rxr->next_to_refresh = i;
+ }
+update:
+ if (cleaned != -1) /* If we refreshed some, bump tail */
+ IXGBE_WRITE_REG(&adapter->hw,
+ IXGBE_VFRDT(rxr->me), cleaned);
+ return;
+}
+
+/*********************************************************************
+ *
+ * Allocate memory for rx_buffer structures. Since we use one
+ * rx_buffer per received packet, the maximum number of rx_buffers
+ * that we'll need is equal to the number of receive descriptors
+ * that we've allocated.
+ *
+ **********************************************************************/
+static int
+ixv_allocate_receive_buffers(struct rx_ring *rxr)
+{
+ struct adapter *adapter = rxr->adapter;
+ device_t dev = adapter->dev;
+ struct ixv_rx_buf *rxbuf;
+ int i, bsize, error;
+
+ bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
+ if (!(rxr->rx_buffers =
+ (struct ixv_rx_buf *) malloc(bsize,
+ M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate rx_buffer memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MSIZE, /* maxsize */
+ 1, /* nsegments */
+ MSIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &rxr->htag))) {
+ device_printf(dev, "Unable to create RX DMA tag\n");
+ goto fail;
+ }
+
+ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MJUMPAGESIZE, /* maxsize */
+ 1, /* nsegments */
+ MJUMPAGESIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &rxr->ptag))) {
+ device_printf(dev, "Unable to create RX DMA tag\n");
+ goto fail;
+ }
+
+	for (i = 0; i < adapter->num_rx_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
+ error = bus_dmamap_create(rxr->htag,
+ BUS_DMA_NOWAIT, &rxbuf->hmap);
+ if (error) {
+ device_printf(dev, "Unable to create RX head map\n");
+ goto fail;
+ }
+ error = bus_dmamap_create(rxr->ptag,
+ BUS_DMA_NOWAIT, &rxbuf->pmap);
+ if (error) {
+ device_printf(dev, "Unable to create RX pkt map\n");
+ goto fail;
+ }
+ }
+
+ return (0);
+
+fail:
+ /* Frees all, but can handle partial completion */
+ ixv_free_receive_structures(adapter);
+ return (error);
+}
+
+static void
+ixv_free_receive_ring(struct rx_ring *rxr)
+{
+ struct adapter *adapter;
+ struct ixv_rx_buf *rxbuf;
+ int i;
+
+ adapter = rxr->adapter;
+ for (i = 0; i < adapter->num_rx_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
+ if (rxbuf->m_head != NULL) {
+ bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+ rxbuf->m_head->m_flags |= M_PKTHDR;
+ m_freem(rxbuf->m_head);
+ }
+ if (rxbuf->m_pack != NULL) {
+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+ rxbuf->m_pack->m_flags |= M_PKTHDR;
+ m_freem(rxbuf->m_pack);
+ }
+ rxbuf->m_head = NULL;
+ rxbuf->m_pack = NULL;
+ }
+}
+
+
+/*********************************************************************
+ *
+ * Initialize a receive ring and its buffers.
+ *
+ **********************************************************************/
+static int
+ixv_setup_receive_ring(struct rx_ring *rxr)
+{
+ struct adapter *adapter;
+ struct ifnet *ifp;
+ device_t dev;
+ struct ixv_rx_buf *rxbuf;
+ bus_dma_segment_t pseg[1], hseg[1];
+ struct lro_ctrl *lro = &rxr->lro;
+ int rsize, nsegs, error = 0;
+
+ adapter = rxr->adapter;
+ ifp = adapter->ifp;
+ dev = adapter->dev;
+
+ /* Clear the ring contents */
+ IXV_RX_LOCK(rxr);
+ rsize = roundup2(adapter->num_rx_desc *
+ sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
+ bzero((void *)rxr->rx_base, rsize);
+
+ /* Free current RX buffer structs and their mbufs */
+ ixv_free_receive_ring(rxr);
+
+ /* Configure header split? */
+ if (ixv_header_split)
+ rxr->hdr_split = TRUE;
+
+ /* Now replenish the mbufs */
+ for (int j = 0; j != adapter->num_rx_desc; ++j) {
+ struct mbuf *mh, *mp;
+
+ rxbuf = &rxr->rx_buffers[j];
+ /*
+		** Don't allocate header mbufs when not
+		** doing header split; it's wasteful
+ */
+ if (rxr->hdr_split == FALSE)
+ goto skip_head;
+
+ /* First the header */
+ rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
+ if (rxbuf->m_head == NULL) {
+ error = ENOBUFS;
+ goto fail;
+ }
+		mh = rxbuf->m_head;
+		mh->m_len = mh->m_pkthdr.len = MHLEN;
+		m_adj(mh, ETHER_ALIGN);
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->htag,
+ rxbuf->hmap, rxbuf->m_head, hseg,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) /* Nothing elegant to do here */
+ goto fail;
+ bus_dmamap_sync(rxr->htag,
+ rxbuf->hmap, BUS_DMASYNC_PREREAD);
+ /* Update descriptor */
+ rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
+
+skip_head:
+ /* Now the payload cluster */
+ rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
+ M_PKTHDR, adapter->rx_mbuf_sz);
+ if (rxbuf->m_pack == NULL) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ mp = rxbuf->m_pack;
+ mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+ rxbuf->pmap, mp, pseg,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0)
+ goto fail;
+ bus_dmamap_sync(rxr->ptag,
+ rxbuf->pmap, BUS_DMASYNC_PREREAD);
+ /* Update descriptor */
+ rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
+ }
+
+
+ /* Setup our descriptor indices */
+ rxr->next_to_check = 0;
+ rxr->next_to_refresh = 0;
+ rxr->lro_enabled = FALSE;
+ rxr->rx_split_packets = 0;
+ rxr->rx_bytes = 0;
+
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /*
+ ** Now set up the LRO interface:
+ */
+ if (ifp->if_capenable & IFCAP_LRO) {
+ int err = tcp_lro_init(lro);
+ if (err) {
+ device_printf(dev, "LRO Initialization failed!\n");
+ goto fail;
+ }
+		INIT_DEBUGOUT("RX Soft LRO Initialized");
+ rxr->lro_enabled = TRUE;
+ lro->ifp = adapter->ifp;
+ }
+
+ IXV_RX_UNLOCK(rxr);
+ return (0);
+
+fail:
+ ixv_free_receive_ring(rxr);
+ IXV_RX_UNLOCK(rxr);
+ return (error);
+}
+
+/*********************************************************************
+ *
+ * Initialize all receive rings.
+ *
+ **********************************************************************/
+static int
+ixv_setup_receive_structures(struct adapter *adapter)
+{
+ struct rx_ring *rxr = adapter->rx_rings;
+ int j;
+
+ for (j = 0; j < adapter->num_queues; j++, rxr++)
+ if (ixv_setup_receive_ring(rxr))
+ goto fail;
+
+ return (0);
+fail:
+ /*
+	 * Free the RX buffers allocated so far; we only handle
+	 * the rings that completed, since the failing case has
+	 * cleaned up after itself. Ring 'j' failed, so it is the terminus.
+ */
+ for (int i = 0; i < j; ++i) {
+ rxr = &adapter->rx_rings[i];
+ ixv_free_receive_ring(rxr);
+ }
+
+ return (ENOBUFS);
+}
+
+/*********************************************************************
+ *
+ * Setup receive registers and features.
+ *
+ **********************************************************************/
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+static void
+ixv_initialize_receive_units(struct adapter *adapter)
+{
+ struct rx_ring *rxr = adapter->rx_rings;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ifnet *ifp = adapter->ifp;
+ u32 bufsz, fctrl, rxcsum, hlreg;
+
+
+ /* Enable broadcasts */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_BAM;
+ fctrl |= IXGBE_FCTRL_DPF;
+ fctrl |= IXGBE_FCTRL_PMCF;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+ /* Set for Jumbo Frames? */
+ hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ if (ifp->if_mtu > ETHERMTU) {
+ hlreg |= IXGBE_HLREG0_JUMBOEN;
+ bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ } else {
+ hlreg &= ~IXGBE_HLREG0_JUMBOEN;
+ bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ }
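+	/*
+	** The SRRCTL packet buffer size field is in 1KB units
+	** (assuming the standard IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10),
+	** so 2048 above encodes as 2 and 4096 as 4.
+	*/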
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
+
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+ u64 rdba = rxr->rxdma.dma_paddr;
+ u32 reg, rxdctl;
+
+ /* Do the queue enabling first */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+ for (int k = 0; k < 10; k++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+ IXGBE_RXDCTL_ENABLE)
+ break;
+ else
+ msec_delay(1);
+ }
+ wmb();
+
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
+ (rdba & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
+ (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
+ adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+
+ /* Set up the SRRCTL register */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+ reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+ reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+ reg |= bufsz;
+ if (rxr->hdr_split) {
+ /* Use a standard mbuf for the header */
+ reg |= ((IXV_RX_HDR <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
+ & IXGBE_SRRCTL_BSIZEHDR_MASK);
+ reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ } else
+ reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
+ adapter->num_rx_desc - 1);
+ }
+
+ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+
+ if (ifp->if_capenable & IFCAP_RXCSUM)
+ rxcsum |= IXGBE_RXCSUM_PCSD;
+
+ if (!(rxcsum & IXGBE_RXCSUM_PCSD))
+ rxcsum |= IXGBE_RXCSUM_IPPCSE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * Free all receive rings.
+ *
+ **********************************************************************/
+static void
+ixv_free_receive_structures(struct adapter *adapter)
+{
+ struct rx_ring *rxr = adapter->rx_rings;
+
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+ struct lro_ctrl *lro = &rxr->lro;
+ ixv_free_receive_buffers(rxr);
+ /* Free LRO memory */
+ tcp_lro_free(lro);
+ /* Free the ring memory as well */
+ ixv_dma_free(adapter, &rxr->rxdma);
+ }
+
+ free(adapter->rx_rings, M_DEVBUF);
+}
+
+
+/*********************************************************************
+ *
+ * Free receive ring data structures
+ *
+ **********************************************************************/
+static void
+ixv_free_receive_buffers(struct rx_ring *rxr)
+{
+ struct adapter *adapter = rxr->adapter;
+ struct ixv_rx_buf *rxbuf;
+
+ INIT_DEBUGOUT("free_receive_structures: begin");
+
+ /* Cleanup any existing buffers */
+ if (rxr->rx_buffers != NULL) {
+ for (int i = 0; i < adapter->num_rx_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
+ if (rxbuf->m_head != NULL) {
+ bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+ rxbuf->m_head->m_flags |= M_PKTHDR;
+ m_freem(rxbuf->m_head);
+ }
+ if (rxbuf->m_pack != NULL) {
+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+ rxbuf->m_pack->m_flags |= M_PKTHDR;
+ m_freem(rxbuf->m_pack);
+ }
+ rxbuf->m_head = NULL;
+ rxbuf->m_pack = NULL;
+ if (rxbuf->hmap != NULL) {
+ bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
+ rxbuf->hmap = NULL;
+ }
+ if (rxbuf->pmap != NULL) {
+ bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
+ rxbuf->pmap = NULL;
+ }
+ }
+ if (rxr->rx_buffers != NULL) {
+ free(rxr->rx_buffers, M_DEVBUF);
+ rxr->rx_buffers = NULL;
+ }
+ }
+
+ if (rxr->htag != NULL) {
+ bus_dma_tag_destroy(rxr->htag);
+ rxr->htag = NULL;
+ }
+ if (rxr->ptag != NULL) {
+ bus_dma_tag_destroy(rxr->ptag);
+ rxr->ptag = NULL;
+ }
+
+ return;
+}
+
+static __inline void
+ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
+{
+
+ /*
+	 * At the moment LRO is only done for IPv4/TCP packets whose
+	 * TCP checksum has been computed by hardware and which carry
+	 * no VLAN tag in the Ethernet header.
+ */
+ if (rxr->lro_enabled &&
+ (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+ (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
+ (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
+ (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+ (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+        /*
+        ** Send directly to the stack if:
+        **  - LRO is not enabled, or
+        **  - there are no LRO resources, or
+        **  - the LRO enqueue fails
+        */
+ if (rxr->lro.lro_cnt != 0)
+ if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
+ return;
+ }
+ (*ifp->if_input)(ifp, m);
+}
+
+static __inline void
+ixv_rx_discard(struct rx_ring *rxr, int i)
+{
+ struct adapter *adapter = rxr->adapter;
+ struct ixv_rx_buf *rbuf;
+ struct mbuf *mh, *mp;
+
+ rbuf = &rxr->rx_buffers[i];
+ if (rbuf->fmp != NULL) /* Partial chain ? */
+ m_freem(rbuf->fmp);
+
+ mh = rbuf->m_head;
+ mp = rbuf->m_pack;
+
+ /* Reuse loaded DMA map and just update mbuf chain */
+ mh->m_len = MHLEN;
+ mh->m_flags |= M_PKTHDR;
+ mh->m_next = NULL;
+
+ mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
+ mp->m_data = mp->m_ext.ext_buf;
+ mp->m_next = NULL;
+ return;
+}
+
+
+/*********************************************************************
+ *
+ *  This routine executes in interrupt context. It replenishes
+ *  the mbufs in the descriptor ring and passes data which has
+ *  been DMA'd into host memory up to the upper layer.
+ *
+ * We loop at most count times if count is > 0, or until done if
+ * count < 0.
+ *
+ * Return TRUE for more work, FALSE for all clean.
+ *********************************************************************/
+static bool
+ixv_rxeof(struct ix_queue *que, int count)
+{
+ struct adapter *adapter = que->adapter;
+ struct rx_ring *rxr = que->rxr;
+ struct ifnet *ifp = adapter->ifp;
+ struct lro_ctrl *lro = &rxr->lro;
+ struct lro_entry *queued;
+ int i, nextp, processed = 0;
+ u32 staterr = 0;
+ union ixgbe_adv_rx_desc *cur;
+ struct ixv_rx_buf *rbuf, *nbuf;
+
+ IXV_RX_LOCK(rxr);
+
+ for (i = rxr->next_to_check; count != 0;) {
+ struct mbuf *sendmp, *mh, *mp;
+ u32 rsc, ptype;
+ u16 hlen, plen, hdr, vtag;
+ bool eop;
+
+ /* Sync the ring. */
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ cur = &rxr->rx_base[i];
+ staterr = le32toh(cur->wb.upper.status_error);
+
+ if ((staterr & IXGBE_RXD_STAT_DD) == 0)
+ break;
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+
+ count--;
+ sendmp = NULL;
+ nbuf = NULL;
+ rsc = 0;
+ cur->wb.upper.status_error = 0;
+ rbuf = &rxr->rx_buffers[i];
+ mh = rbuf->m_head;
+ mp = rbuf->m_pack;
+
+ plen = le16toh(cur->wb.upper.length);
+ ptype = le32toh(cur->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_PKTTYPE_MASK;
+ hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
+ vtag = le16toh(cur->wb.upper.vlan);
+ eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
+
+ /* Make sure all parts of a bad packet are discarded */
+ if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
+ (rxr->discard)) {
+ ifp->if_ierrors++;
+ rxr->rx_discarded++;
+ if (!eop)
+ rxr->discard = TRUE;
+ else
+ rxr->discard = FALSE;
+ ixv_rx_discard(rxr, i);
+ goto next_desc;
+ }
+
+ if (!eop) {
+ nextp = i + 1;
+ if (nextp == adapter->num_rx_desc)
+ nextp = 0;
+ nbuf = &rxr->rx_buffers[nextp];
+ prefetch(nbuf);
+ }
+ /*
+ ** The header mbuf is ONLY used when header
+		** split is enabled; otherwise we get normal
+		** behavior, i.e., both header and payload
+ ** are DMA'd into the payload buffer.
+ **
+ ** Rather than using the fmp/lmp global pointers
+ ** we now keep the head of a packet chain in the
+ ** buffer struct and pass this along from one
+ ** descriptor to the next, until we get EOP.
+ */
+ if (rxr->hdr_split && (rbuf->fmp == NULL)) {
+ /* This must be an initial descriptor */
+ hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > IXV_RX_HDR)
+ hlen = IXV_RX_HDR;
+ mh->m_len = hlen;
+ mh->m_flags |= M_PKTHDR;
+ mh->m_next = NULL;
+ mh->m_pkthdr.len = mh->m_len;
+ /* Null buf pointer so it is refreshed */
+ rbuf->m_head = NULL;
+ /*
+			** Check the payload length; this
+			** could be zero if it's a small
+ ** packet.
+ */
+ if (plen > 0) {
+ mp->m_len = plen;
+ mp->m_next = NULL;
+ mp->m_flags &= ~M_PKTHDR;
+ mh->m_next = mp;
+ mh->m_pkthdr.len += mp->m_len;
+ /* Null buf pointer so it is refreshed */
+ rbuf->m_pack = NULL;
+ rxr->rx_split_packets++;
+ }
+ /*
+ ** Now create the forward
+			** chain so that when complete
+			** we won't have to.
+ */
+ if (eop == 0) {
+ /* stash the chain head */
+ nbuf->fmp = mh;
+ /* Make forward chain */
+ if (plen)
+ mp->m_next = nbuf->m_pack;
+ else
+ mh->m_next = nbuf->m_pack;
+ } else {
+ /* Singlet, prepare to send */
+ sendmp = mh;
+ if (staterr & IXGBE_RXD_STAT_VP) {
+ sendmp->m_pkthdr.ether_vtag = vtag;
+ sendmp->m_flags |= M_VLANTAG;
+ }
+ }
+ } else {
+ /*
+ ** Either no header split, or a
+ ** secondary piece of a fragmented
+ ** split packet.
+ */
+ mp->m_len = plen;
+ /*
+ ** See if there is a stored head
+ ** that determines what we are
+ */
+ sendmp = rbuf->fmp;
+ rbuf->m_pack = rbuf->fmp = NULL;
+
+ if (sendmp != NULL) /* secondary frag */
+ sendmp->m_pkthdr.len += mp->m_len;
+ else {
+ /* first desc of a non-ps chain */
+ sendmp = mp;
+ sendmp->m_flags |= M_PKTHDR;
+ sendmp->m_pkthdr.len = mp->m_len;
+ if (staterr & IXGBE_RXD_STAT_VP) {
+ sendmp->m_pkthdr.ether_vtag = vtag;
+ sendmp->m_flags |= M_VLANTAG;
+ }
+ }
+ /* Pass the head pointer on */
+ if (eop == 0) {
+ nbuf->fmp = sendmp;
+ sendmp = NULL;
+ mp->m_next = nbuf->m_pack;
+ }
+ }
+ ++processed;
+ /* Sending this frame? */
+ if (eop) {
+ sendmp->m_pkthdr.rcvif = ifp;
+ ifp->if_ipackets++;
+ rxr->rx_packets++;
+ /* capture data for AIM */
+ rxr->bytes += sendmp->m_pkthdr.len;
+ rxr->rx_bytes += sendmp->m_pkthdr.len;
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ ixv_rx_checksum(staterr, sendmp, ptype);
+#if __FreeBSD_version >= 800000
+ sendmp->m_pkthdr.flowid = que->msix;
+ sendmp->m_flags |= M_FLOWID;
+#endif
+ }
+next_desc:
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /* Advance our pointers to the next descriptor. */
+ if (++i == adapter->num_rx_desc)
+ i = 0;
+
+ /* Now send to the stack or do LRO */
+ if (sendmp != NULL)
+ ixv_rx_input(rxr, ifp, sendmp, ptype);
+
+ /* Every 8 descriptors we go to refresh mbufs */
+ if (processed == 8) {
+ ixv_refresh_mbufs(rxr, i);
+ processed = 0;
+ }
+ }
+
+ /* Refresh any remaining buf structs */
+ if (processed != 0) {
+ ixv_refresh_mbufs(rxr, i);
+ processed = 0;
+ }
+
+ rxr->next_to_check = i;
+
+ /*
+ * Flush any outstanding LRO work
+ */
+ while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+ SLIST_REMOVE_HEAD(&lro->lro_active, next);
+ tcp_lro_flush(lro, queued);
+ }
+
+ IXV_RX_UNLOCK(rxr);
+
+ /*
+ ** We still have cleaning to do?
+ ** Schedule another interrupt if so.
+ */
+ if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
+		ixv_rearm_queues(adapter, (u64)1 << que->msix);
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
+
+/*********************************************************************
+ *
+ * Verify that the hardware indicated that the checksum is valid.
+ *  Inform the stack about the status of the checksum so that it
+ *  doesn't spend time re-verifying it.
+ *
+ *********************************************************************/
+static void
+ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
+{
+ u16 status = (u16) staterr;
+ u8 errors = (u8) (staterr >> 24);
+ bool sctp = FALSE;
+
+ if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+ (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
+ sctp = TRUE;
+
+ if (status & IXGBE_RXD_STAT_IPCS) {
+ if (!(errors & IXGBE_RXD_ERR_IPE)) {
+ /* IP Checksum Good */
+ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+ mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+
+ } else
+ mp->m_pkthdr.csum_flags = 0;
+ }
+ if (status & IXGBE_RXD_STAT_L4CS) {
+ u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+#if __FreeBSD_version >= 800000
+ if (sctp)
+ type = CSUM_SCTP_VALID;
+#endif
+ if (!(errors & IXGBE_RXD_ERR_TCPE)) {
+ mp->m_pkthdr.csum_flags |= type;
+ if (!sctp)
+ mp->m_pkthdr.csum_data = htons(0xffff);
+ }
+ }
+ return;
+}
+
+static void
+ixv_setup_vlan_support(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ctrl, vid, vfta, retry;
+
+
+ /*
+	** We get here through init_locked, meaning
+	** a soft reset has already cleared the VFTA
+	** and other state; if no VLANs have been
+	** registered there is nothing to do.
+ */
+ if (adapter->num_vlans == 0)
+ return;
+
+ /* Enable the queues */
+ for (int i = 0; i < adapter->num_queues; i++) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
+ }
+
+ /*
+ ** A soft reset zero's out the VFTA, so
+ ** we need to repopulate it now.
+ */
+ for (int i = 0; i < VFTA_SIZE; i++) {
+ if (ixv_shadow_vfta[i] == 0)
+ continue;
+ vfta = ixv_shadow_vfta[i];
+ /*
+		** Reconstruct the VLAN IDs
+		** based on the bits set in each
+		** of the array entries.
+ */
+		for (int j = 0; j < 32; j++) {
+ retry = 0;
+ if ((vfta & (1 << j)) == 0)
+ continue;
+ vid = (i * 32) + j;
+ /* Call the shared code mailbox routine */
+ while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
+ if (++retry > 5)
+ break;
+ }
+ }
+ }
+}
+
+/*
+** This routine is run via a vlan config EVENT;
+** it enables us to use the HW Filter table since
+** we can get the vlan id. This just creates the
+** entry in the soft version of the VFTA; init will
+** repopulate the real table.
+*/
+static void
+ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct adapter *adapter = ifp->if_softc;
+ u16 index, bit;
+
+ if (ifp->if_softc != arg) /* Not our event */
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
+ ixv_shadow_vfta[index] |= (1 << bit);
+ ++adapter->num_vlans;
+ /* Re-init to load the changes */
+ ixv_init(adapter);
+}
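+
+/*
+** For example, registering VLAN id 100 gives index = 100 >> 5 = 3
+** and bit = 100 & 0x1F = 4, so bit 4 of ixv_shadow_vfta[3] is set;
+** the re-init then repopulates the hardware table from the shadow.
+*/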
+
+/*
+** This routine is run via a vlan
+** unconfig EVENT; it removes our entry
+** from the soft VFTA.
+*/
+static void
+ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct adapter *adapter = ifp->if_softc;
+ u16 index, bit;
+
+ if (ifp->if_softc != arg)
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
+ ixv_shadow_vfta[index] &= ~(1 << bit);
+ --adapter->num_vlans;
+ /* Re-init to load the changes */
+ ixv_init(adapter);
+}
+
+static void
+ixv_enable_intr(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ix_queue *que = adapter->queues;
+ u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+
+ for (int i = 0; i < adapter->num_queues; i++, que++)
+ ixv_enable_queue(adapter, que->msix);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ return;
+}
+
+static void
+ixv_disable_intr(struct adapter *adapter)
+{
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ return;
+}
+
+/*
+** Setup the correct IVAR register for a particular MSIX interrupt
+** - entry is the register array entry
+** - vector is the MSIX vector for this queue
+** - type is RX/TX/MISC
+*/
+static void
+ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ivar, index;
+
+ vector |= IXGBE_IVAR_ALLOC_VAL;
+
+ if (type == -1) { /* MISC IVAR */
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+ } else { /* RX/TX IVARS */
+ index = (16 * (entry & 1)) + (8 * type);
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
+ }
+}
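+
+/*
+** Worked example of the index math above: for RX/TX entry 3 with
+** type 1 (TX), the vector lands in VTIVAR(1) at bit offset
+** 16 * (3 & 1) + 8 * 1 = 24, i.e. the top byte of that register.
+*/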
+
+static void
+ixv_configure_ivars(struct adapter *adapter)
+{
+ struct ix_queue *que = adapter->queues;
+
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ /* First the RX queue entry */
+ ixv_set_ivar(adapter, i, que->msix, 0);
+ /* ... and the TX */
+ ixv_set_ivar(adapter, i, que->msix, 1);
+ /* Set an initial value in EITR */
+ IXGBE_WRITE_REG(&adapter->hw,
+ IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
+ }
+
+ /* For the Link interrupt */
+ ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
+}
+
+
+/*
+** Tasklet handler for MSIX MBX interrupts
+**  - run outside interrupt context since it might sleep
+*/
+static void
+ixv_handle_mbx(void *context, int pending)
+{
+ struct adapter *adapter = context;
+
+ ixgbe_check_link(&adapter->hw,
+ &adapter->link_speed, &adapter->link_up, 0);
+ ixv_update_link_status(adapter);
+}
+
+/*
+** The VF stats registers never have a truly virgin
+** starting point, so this routine tries to make an
+** artificial one, marking ground zero on attach,
+** as it were.
+*/
+static void
+ixv_save_stats(struct adapter *adapter)
+{
+ if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
+ adapter->stats.saved_reset_vfgprc +=
+ adapter->stats.vfgprc - adapter->stats.base_vfgprc;
+ adapter->stats.saved_reset_vfgptc +=
+ adapter->stats.vfgptc - adapter->stats.base_vfgptc;
+ adapter->stats.saved_reset_vfgorc +=
+ adapter->stats.vfgorc - adapter->stats.base_vfgorc;
+ adapter->stats.saved_reset_vfgotc +=
+ adapter->stats.vfgotc - adapter->stats.base_vfgotc;
+ adapter->stats.saved_reset_vfmprc +=
+ adapter->stats.vfmprc - adapter->stats.base_vfmprc;
+ }
+}
+
+static void
+ixv_init_stats(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+ adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+ adapter->stats.last_vfgorc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+
+ adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+ adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+ adapter->stats.last_vfgotc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+
+ adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+ adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
+ adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
+ adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
+ adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
+ adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+}
+
+#define UPDATE_STAT_32(reg, last, count) \
+{ \
+ u32 current = IXGBE_READ_REG(hw, reg); \
+ if (current < last) \
+ count += 0x100000000LL; \
+ last = current; \
+ count &= 0xFFFFFFFF00000000LL; \
+ count |= current; \
+}
+
+#define UPDATE_STAT_36(lsb, msb, last, count) \
+{ \
+ u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
+ u64 cur_msb = IXGBE_READ_REG(hw, msb); \
+ u64 current = ((cur_msb << 32) | cur_lsb); \
+ if (current < last) \
+ count += 0x1000000000LL; \
+ last = current; \
+ count &= 0xFFFFFFF000000000LL; \
+ count |= current; \
+}
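+
+/*
+** Rollover example for UPDATE_STAT_32 (illustrative values):
+** if last = 0xFFFFFFF5 and the register now reads 0x0A, the
+** 32-bit counter wrapped, so 2^32 is added to the running count
+** before its low 32 bits are replaced with the new reading.
+** UPDATE_STAT_36 does the same for the 36-bit octet counters.
+*/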
+
+/*
+** ixv_update_stats - Update the board statistics counters.
+*/
+void
+ixv_update_stats(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
+ adapter->stats.vfgprc);
+ UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
+ adapter->stats.vfgptc);
+ UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+ adapter->stats.last_vfgorc, adapter->stats.vfgorc);
+ UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+ adapter->stats.last_vfgotc, adapter->stats.vfgotc);
+ UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
+ adapter->stats.vfmprc);
+}
+
+/**********************************************************************
+ *
+ * This routine is called via the stats sysctl handler.
+ * It provides a way to take a look at important statistics
+ * maintained by the driver and hardware.
+ *
+ **********************************************************************/
+static void
+ixv_print_hw_stats(struct adapter * adapter)
+{
+ device_t dev = adapter->dev;
+
+ device_printf(dev,"Std Mbuf Failed = %lu\n",
+ adapter->mbuf_defrag_failed);
+ device_printf(dev,"Driver dropped packets = %lu\n",
+ adapter->dropped_pkts);
+ device_printf(dev, "watchdog timeouts = %ld\n",
+ adapter->watchdog_events);
+
+ device_printf(dev,"Good Packets Rcvd = %llu\n",
+ (long long)adapter->stats.vfgprc);
+ device_printf(dev,"Good Packets Xmtd = %llu\n",
+ (long long)adapter->stats.vfgptc);
+ device_printf(dev,"TSO Transmissions = %lu\n",
+ adapter->tso_tx);
+
+}
+
+/**********************************************************************
+ *
+ * This routine is called via the debug sysctl handler.
+ * It provides a way to take a look at important statistics
+ * maintained by the driver and hardware.
+ *
+ **********************************************************************/
+static void
+ixv_print_debug_info(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ix_queue *que = adapter->queues;
+ struct rx_ring *rxr;
+ struct tx_ring *txr;
+ struct lro_ctrl *lro;
+
+ device_printf(dev,"Error Byte Count = %u \n",
+ IXGBE_READ_REG(hw, IXGBE_ERRBC));
+
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ txr = que->txr;
+ rxr = que->rxr;
+ lro = &rxr->lro;
+ device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
+ que->msix, (long)que->irqs);
+ device_printf(dev,"RX(%d) Packets Received: %lld\n",
+ rxr->me, (long long)rxr->rx_packets);
+ device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
+ rxr->me, (long long)rxr->rx_split_packets);
+ device_printf(dev,"RX(%d) Bytes Received: %lu\n",
+ rxr->me, (long)rxr->rx_bytes);
+ device_printf(dev,"RX(%d) LRO Queued= %d\n",
+ rxr->me, lro->lro_queued);
+ device_printf(dev,"RX(%d) LRO Flushed= %d\n",
+ rxr->me, lro->lro_flushed);
+ device_printf(dev,"TX(%d) Packets Sent: %lu\n",
+ txr->me, (long)txr->total_packets);
+ device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
+ txr->me, (long)txr->no_desc_avail);
+ }
+
+ device_printf(dev,"MBX IRQ Handled: %lu\n",
+ (long)adapter->mbx_irq);
+ return;
+}
+
+static int
+ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int result;
+ struct adapter *adapter;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1) {
+ adapter = (struct adapter *) arg1;
+ ixv_print_hw_stats(adapter);
+ }
+ return error;
+}
+
+static int
+ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
+{
+ int error, result;
+ struct adapter *adapter;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1) {
+ adapter = (struct adapter *) arg1;
+ ixv_print_debug_info(adapter);
+ }
+ return error;
+}
+
+/*
+** Set flow control using sysctl:
+** Flow control values:
+** 0 - off
+** 1 - rx pause
+** 2 - tx pause
+** 3 - full
+*/
+static int
+ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ struct adapter *adapter;
+
+ error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
+
+ if (error)
+ return (error);
+
+ adapter = (struct adapter *) arg1;
+ switch (ixv_flow_control) {
+ case ixgbe_fc_rx_pause:
+ case ixgbe_fc_tx_pause:
+ case ixgbe_fc_full:
+ adapter->hw.fc.requested_mode = ixv_flow_control;
+ break;
+ case ixgbe_fc_none:
+ default:
+ adapter->hw.fc.requested_mode = ixgbe_fc_none;
+ }
+
+ ixgbe_fc_enable(&adapter->hw, 0);
+ return error;
+}
+
+static void
+ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
+ const char *description, int *limit, int value)
+{
+ *limit = value;
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
+ OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
+}
+
diff --git a/sys/dev/ixgbe/ixv.h b/sys/dev/ixgbe/ixv.h
new file mode 100644
index 0000000..7ae913d
--- /dev/null
+++ b/sys/dev/ixgbe/ixv.h
@@ -0,0 +1,414 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2010, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifndef _IXV_H_
+#define _IXV_H_
+
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/bpf.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
+#include <netinet/udp.h>
+
+#include <machine/in_cksum.h>
+
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/clock.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <machine/smp.h>
+
+#include "ixgbe_api.h"
+#include "ixgbe_vf.h"
+
+/* Tunables */
+
+/*
+ * TxDescriptors Valid Range: 64-4096 Default Value: 1024 This value is the
+ * number of transmit descriptors allocated by the driver. Increasing this
+ * value allows the driver to queue more transmits. Each descriptor is 16
+ * bytes. Performance tests have shown the 2K value to be optimal for top
+ * performance.
+ */
+#define DEFAULT_TXD 1024
+#define PERFORM_TXD 2048
+#define MAX_TXD 4096
+#define MIN_TXD 64
+
+/*
+ * RxDescriptors Valid Range: 64-4096 Default Value: 1024 This value is the
+ * number of receive descriptors allocated for each RX queue. Increasing this
+ * value allows the driver to buffer more incoming packets. Each descriptor
+ * is 16 bytes. A receive buffer is also allocated for each descriptor.
+ *
+ * Note: with 8 rings and a dual port card, it is possible to bump up
+ * against the system mbuf pool limit; you can tune nmbclusters
+ * to adjust for this.
+ */
+#define DEFAULT_RXD 1024
+#define PERFORM_RXD 2048
+#define MAX_RXD 4096
+#define MIN_RXD 64
+
+/* Alignment for rings */
+#define DBA_ALIGN 128
+
+/*
+ * This parameter controls the maximum number of times the driver will loop
+ * in the ISR. Minimum Value = 1
+ */
+#define MAX_LOOP 10
+
+/*
+ * This is the max watchdog interval, i.e. the time that can
+ * pass between any two TX clean operations; these only happen
+ * while the TX hardware is functioning.
+ */
+#define IXV_WATCHDOG (10 * hz)
+
+/*
+ * These parameters control when the driver calls the routine to reclaim
+ * transmit descriptors.
+ */
+#define IXV_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
+#define IXV_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
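+
+/*
+ * With the default of 1024 TX descriptors, these work out to a
+ * cleanup threshold of 128 free descriptors and an OP threshold
+ * of 32.
+ */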
+
+#define IXV_MAX_FRAME_SIZE 0x3F00
+
+/* Flow control constants */
+#define IXV_FC_PAUSE 0xFFFF
+#define IXV_FC_HI 0x20000
+#define IXV_FC_LO 0x10000
+
+/* Defines for printing debug information */
+#define DEBUG_INIT 0
+#define DEBUG_IOCTL 0
+#define DEBUG_HW 0
+
+#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
+#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
+#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
+#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
+#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
+#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
+#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
+#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
+#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
+
+#define MAX_NUM_MULTICAST_ADDRESSES 128
+#define IXV_EITR_DEFAULT 128
+#define IXV_SCATTER 32
+#define IXV_RX_HDR 128
+#define MSIX_BAR 3
+#define IXV_TSO_SIZE 65535
+#define IXV_BR_SIZE 4096
+#define IXV_LINK_ITR 2000
+#define TX_BUFFER_SIZE ((u32) 1514)
+#define VFTA_SIZE 128
+
+/* Offload bits in mbuf flag */
+#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
+
+/*
+ *****************************************************************************
+ * vendor_info_array
+ *
+ * This array contains the list of Subvendor/Subdevice IDs on which the driver
+ * should load.
+ *
+ *****************************************************************************
+ */
+typedef struct _ixv_vendor_info_t {
+ unsigned int vendor_id;
+ unsigned int device_id;
+ unsigned int subvendor_id;
+ unsigned int subdevice_id;
+ unsigned int index;
+} ixv_vendor_info_t;
+
+
+struct ixv_tx_buf {
+ u32 eop_index;
+ struct mbuf *m_head;
+ bus_dmamap_t map;
+};
+
+struct ixv_rx_buf {
+ struct mbuf *m_head;
+ struct mbuf *m_pack;
+ struct mbuf *fmp;
+ bus_dmamap_t hmap;
+ bus_dmamap_t pmap;
+};
+
+/*
+ * Bus dma allocation structure used by ixv_dma_malloc and ixv_dma_free.
+ */
+struct ixv_dma_alloc {
+ bus_addr_t dma_paddr;
+ caddr_t dma_vaddr;
+ bus_dma_tag_t dma_tag;
+ bus_dmamap_t dma_map;
+ bus_dma_segment_t dma_seg;
+ bus_size_t dma_size;
+ int dma_nseg;
+};
+
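+/*
+ * Typical lifecycle sketch for this structure, assuming the helpers
+ * named above follow the PF driver's signatures ("tsize" is a
+ * hypothetical byte count):
+ *
+ *	struct ixv_dma_alloc dma;
+ *	if (ixv_dma_malloc(adapter, tsize, &dma, BUS_DMA_NOWAIT) == 0) {
+ *		txr->tx_base = (union ixgbe_adv_tx_desc *)dma.dma_vaddr;
+ *		...
+ *		ixv_dma_free(adapter, &dma);
+ *	}
+ */
+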
+/*
+** Driver queue struct: this is the interrupt container
+** for the associated tx and rx ring.
+*/
+struct ix_queue {
+ struct adapter *adapter;
+ u32 msix; /* This queue's MSIX vector */
+ u32 eims; /* This queue's EIMS bit */
+ u32 eitr_setting;
+ u32 eitr; /* cached reg */
+ struct resource *res;
+ void *tag;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ struct task que_task;
+ struct taskqueue *tq;
+ u64 irqs;
+};
+
+/*
+ * The transmit ring, one per queue
+ */
+struct tx_ring {
+ struct adapter *adapter;
+ struct mtx tx_mtx;
+ u32 me;
+ bool watchdog_check;
+ int watchdog_time;
+ union ixgbe_adv_tx_desc *tx_base;
+ struct ixv_dma_alloc txdma;
+ u32 next_avail_desc;
+ u32 next_to_clean;
+ struct ixv_tx_buf *tx_buffers;
+ volatile u16 tx_avail;
+ u32 txd_cmd;
+ bus_dma_tag_t txtag;
+ char mtx_name[16];
+ struct buf_ring *br;
+ /* Soft Stats */
+ u32 bytes;
+ u32 packets;
+ u64 no_desc_avail;
+ u64 total_packets;
+};
+
+
+/*
+ * The receive ring, one per RX queue
+ */
+struct rx_ring {
+ struct adapter *adapter;
+ struct mtx rx_mtx;
+ u32 me;
+ union ixgbe_adv_rx_desc *rx_base;
+ struct ixv_dma_alloc rxdma;
+ struct lro_ctrl lro;
+ bool lro_enabled;
+ bool hdr_split;
+ bool discard;
+ u32 next_to_refresh;
+ u32 next_to_check;
+ char mtx_name[16];
+ struct ixv_rx_buf *rx_buffers;
+ bus_dma_tag_t htag;
+ bus_dma_tag_t ptag;
+
+ u32 bytes; /* Used for AIM calc */
+ u32 packets;
+
+ /* Soft stats */
+ u64 rx_irq;
+ u64 rx_split_packets;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_discarded;
+};
+
+/* Our adapter structure */
+struct adapter {
+ struct ifnet *ifp;
+ struct ixgbe_hw hw;
+
+ struct ixgbe_osdep osdep;
+ struct device *dev;
+
+ struct resource *pci_mem;
+ struct resource *msix_mem;
+
+	/*
+	 * Interrupt resources: this set is
+	 * used either for legacy interrupts,
+	 * or for the mailbox/link vector
+	 * when doing MSIX
+	 */
+ void *tag;
+ struct resource *res;
+
+ struct ifmedia media;
+ struct callout timer;
+ int msix;
+ int if_flags;
+
+ struct mtx core_mtx;
+
+ eventhandler_tag vlan_attach;
+ eventhandler_tag vlan_detach;
+
+ u16 num_vlans;
+ u16 num_queues;
+
+ /* Info about the board itself */
+ bool link_active;
+ u16 max_frame_size;
+ u32 link_speed;
+ bool link_up;
+ u32 mbxvec;
+
+ /* Mbuf cluster size */
+ u32 rx_mbuf_sz;
+
+	/* Deferred mailbox interrupt handling */
+ struct task mbx_task; /* Mailbox tasklet */
+ struct taskqueue *tq;
+
+	/*
+	** Queues:
+	** This is the IRQ holder; it has
+	** an RX/TX pair of rings
+	** associated with it.
+	*/
+ struct ix_queue *queues;
+
+ /*
+ * Transmit rings:
+ * Allocated at run time, an array of rings.
+ */
+ struct tx_ring *tx_rings;
+ int num_tx_desc;
+
+ /*
+ * Receive rings:
+ * Allocated at run time, an array of rings.
+ */
+ struct rx_ring *rx_rings;
+ int num_rx_desc;
+ u64 que_mask;
+ u32 rx_process_limit;
+
+ /* Misc stats maintained by the driver */
+ unsigned long dropped_pkts;
+ unsigned long mbuf_defrag_failed;
+ unsigned long mbuf_header_failed;
+ unsigned long mbuf_packet_failed;
+ unsigned long no_tx_map_avail;
+ unsigned long no_tx_dma_setup;
+ unsigned long watchdog_events;
+ unsigned long tso_tx;
+ unsigned long mbx_irq;
+
+ struct ixgbevf_hw_stats stats;
+};
+
+
+#define IXV_CORE_LOCK_INIT(_sc, _name) \
+ mtx_init(&(_sc)->core_mtx, _name, "IXV Core Lock", MTX_DEF)
+#define IXV_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
+#define IXV_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
+#define IXV_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
+#define IXV_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
+#define IXV_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
+#define IXV_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
+#define IXV_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
+#define IXV_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
+#define IXV_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
+#define IXV_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
+#define IXV_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
+#define IXV_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
+
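+/*
+ * Minimal usage sketch: the core lock guards whole-adapter state while
+ * the TX/RX macros take the per-ring structures; "ixv_init_locked" is
+ * assumed to be the driver's locked init routine.
+ *
+ *	IXV_CORE_LOCK(adapter);
+ *	ixv_init_locked(adapter);
+ *	IXV_CORE_UNLOCK(adapter);
+ */
+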
+/* Workaround to make 8.0 buildable */
+#if __FreeBSD_version < 800504
+static __inline int
+drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
+{
+#ifdef ALTQ
+ if (ALTQ_IS_ENABLED(&ifp->if_snd))
+ return (1);
+#endif
+ return (!buf_ring_empty(br));
+}
+#endif
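+
+/*
+ * Sketch of where the shim above is consulted in a multiqueue
+ * transmit start routine (simplified, hypothetical):
+ *
+ *	if (drbr_needs_enqueue(ifp, txr->br))
+ *		return (drbr_enqueue(ifp, txr->br, m));
+ */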
+#endif /* _IXV_H_ */