author     jfv <jfv@FreeBSD.org>    2015-03-17 18:32:28 +0000
committer  jfv <jfv@FreeBSD.org>    2015-03-17 18:32:28 +0000
commit     06710b088448f3def482afa52f72ce3aac5cb61a (patch)
tree       8b4de7f1d675fe05d6b77387b2cf8fd4603882bd
parent     c00b4703949f8f1b7a7d2bd81919b332794cbd7f (diff)
Update to the Intel ixgbe driver:
- Split the driver into independent pf and vf loadables. This is in
  preparation for SR-IOV support, which will be following shortly. This
  also allows us to keep separate revision control over the two parts,
  making for easier sustaining.
- Make the TX/RX code a shared, separate file; in the old code base the
  ixv code would miss fixes that went into ixgbe. This model eliminates
  that problem.
- The driver loadables now match the device names, something that has
  been requested for some time.
- Rather than a modules/ixgbe there is now modules/ix and modules/ixv.
- It is also possible to build a static kernel with only one driver or
  the other for streamlined installs, or with both (see the example
  below).

Enjoy!

Submitted by:	jfv and erj
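As a rough sketch of what the split looks like from the user side (the
'device' lines are taken from the GENERIC change below; the loader.conf
module names assume the renamed modules/ix and new modules/ixv Makefiles
install if_ix.ko and if_ixv.ko, which is not verified here):

	# Static kernel: pick the PF driver, the VF driver, or both
	device		ix	# Intel PRO/10GbE PCIE PF Ethernet
	device		ixv	# Intel PRO/10GbE PCIE VF Ethernet

	# Or load the matching modules at boot from /boot/loader.conf
	if_ix_load="YES"
	if_ixv_load="YES"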
-rw-r--r--  sys/amd64/conf/GENERIC                                     |    3
-rw-r--r--  sys/conf/NOTES                                             |    3
-rw-r--r--  sys/conf/files                                             |   28
-rw-r--r--  sys/dev/ixgbe/LICENSE                                      |    2
-rw-r--r--  sys/dev/ixgbe/if_ix.c (renamed from sys/dev/ixgbe/ixgbe.c) | 2990
-rw-r--r--  sys/dev/ixgbe/if_ixv.c                                     | 2107
-rw-r--r--  sys/dev/ixgbe/ix_txrx.c                                    | 2259
-rw-r--r--  sys/dev/ixgbe/ixgbe.h                                      |  137
-rw-r--r--  sys/dev/ixgbe/ixgbe_82598.c                                |   92
-rw-r--r--  sys/dev/ixgbe/ixgbe_82598.h                                |    3
-rw-r--r--  sys/dev/ixgbe/ixgbe_82599.c                                | 1115
-rw-r--r--  sys/dev/ixgbe/ixgbe_82599.h                                |    9
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c                                  |  330
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h                                  |   45
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c                               |  800
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.h                               |   27
-rw-r--r--  sys/dev/ixgbe/ixgbe_dcb.c                                  |   32
-rw-r--r--  sys/dev/ixgbe/ixgbe_dcb.h                                  |    3
-rw-r--r--  sys/dev/ixgbe/ixgbe_dcb_82598.c                            |    4
-rw-r--r--  sys/dev/ixgbe/ixgbe_dcb_82598.h                            |    2
-rw-r--r--  sys/dev/ixgbe/ixgbe_dcb_82599.c                            |   14
-rw-r--r--  sys/dev/ixgbe/ixgbe_dcb_82599.h                            |    2
-rw-r--r--  sys/dev/ixgbe/ixgbe_mbx.c                                  |   27
-rw-r--r--  sys/dev/ixgbe/ixgbe_mbx.h                                  |    2
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.c                                  |  887
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.h                                  |   34
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h                                 |  677
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.c                                   |   70
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.h                                   |    5
-rw-r--r--  sys/dev/ixgbe/ixgbe_x540.c                                 |  339
-rw-r--r--  sys/dev/ixgbe/ixgbe_x540.h                                 |    8
-rw-r--r--  sys/dev/ixgbe/ixv.c                                        | 4006
-rw-r--r--  sys/dev/ixgbe/ixv.h                                        |  438
-rw-r--r--  sys/modules/ix/Makefile (renamed from sys/modules/ixgbe/Makefile) | 4
-rw-r--r--  sys/modules/ixv/Makefile                                   |   15
35 files changed, 8390 insertions, 8129 deletions
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index dff36ac..bdaca33 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -216,7 +216,8 @@ device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE
device de # DEC/Intel DC21x4x (``Tulip'')
device em # Intel PRO/1000 Gigabit Ethernet Family
device igb # Intel PRO/1000 PCIE Server Gigabit Family
-device ixgbe # Intel PRO/10GbE PCIE Ethernet Family
+device ix # Intel PRO/10GbE PCIE PF Ethernet
+device ixv # Intel PRO/10GbE PCIE VF Ethernet
device ixl # Intel XL710 40Gbe PCIE Ethernet
device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
device le # AMD Am7900 LANCE and Am79C9xx PCnet
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index c0d5879..057ed09 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -2100,7 +2100,8 @@ device de # DEC/Intel DC21x4x (``Tulip'')
device em # Intel Pro/1000 Gigabit Ethernet
device igb # Intel Pro/1000 PCIE Gigabit Ethernet
device ixgb # Intel Pro/10Gbe PCI-X Ethernet
-device ixgbe # Intel Pro/10Gbe PCIE Ethernet
+device ix # Intel Pro/10Gbe PCIE Ethernet
+device ixv # Intel Pro/10Gbe PCIE Ethernet VF
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device mxge # Myricom Myri-10G 10GbE NIC
device nxge # Neterion Xframe 10GbE Server/Storage Adapter
diff --git a/sys/conf/files b/sys/conf/files
index a351e9f..f5da73b 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1769,31 +1769,31 @@ iwn6050.fw optional iwn6050fw | iwnfw \
dev/ixgb/if_ixgb.c optional ixgb
dev/ixgb/ixgb_ee.c optional ixgb
dev/ixgb/ixgb_hw.c optional ixgb
-dev/ixgbe/ixgbe.c optional ixgbe inet \
+dev/ixgbe/if_ix.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP"
-dev/ixgbe/ixv.c optional ixgbe inet \
- compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_phy.c optional ixgbe inet \
+dev/ixgbe/if_ixv.c optional ixv inet \
+ compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP"
+dev/ixgbe/ixgbe_phy.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_api.c optional ixgbe inet \
+dev/ixgbe/ixgbe_api.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_common.c optional ixgbe inet \
+dev/ixgbe/ixgbe_common.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_mbx.c optional ixgbe inet \
+dev/ixgbe/ixgbe_mbx.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_vf.c optional ixgbe inet \
+dev/ixgbe/ixgbe_vf.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_82598.c optional ixgbe inet \
+dev/ixgbe/ixgbe_82598.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_82599.c optional ixgbe inet \
+dev/ixgbe/ixgbe_82599.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_x540.c optional ixgbe inet \
+dev/ixgbe/ixgbe_x540.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_dcb.c optional ixgbe inet \
+dev/ixgbe/ixgbe_dcb.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_dcb_82598.c optional ixgbe inet \
+dev/ixgbe/ixgbe_dcb_82598.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
-dev/ixgbe/ixgbe_dcb_82599.c optional ixgbe inet \
+dev/ixgbe/ixgbe_dcb_82599.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/jme/if_jme.c optional jme pci
dev/joy/joy.c optional joy
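Note that the shared code above (phy, api, common, mbx, vf, and the MAC
and DCB files) is now marked "optional ix ixv inet", so it is built
whenever either driver is configured, while if_ix.c and if_ixv.c build
only for their own option. A hypothetical VF-only guest config
illustrating the new options (a sketch only, not part of this commit):

	include		GENERIC
	ident		VFGUEST
	nodevice	ix	# drop the PF driver; 'device ixv' is
				# inherited from GENERIC above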
diff --git a/sys/dev/ixgbe/LICENSE b/sys/dev/ixgbe/LICENSE
index d446282..394b094 100644
--- a/sys/dev/ixgbe/LICENSE
+++ b/sys/dev/ixgbe/LICENSE
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/if_ix.c
index db202fd..3c16330 100644
--- a/sys/dev/ixgbe/ixgbe.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,13 +33,16 @@
/*$FreeBSD$*/
+#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
+#endif
+
#include "ixgbe.h"
#ifdef RSS
-#include <net/rss_config.h>
+#include <netinet/in_rss.h>
#endif
/*********************************************************************
@@ -50,7 +53,7 @@ int ixgbe_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
-char ixgbe_driver_version[] = "2.5.15";
+char ixgbe_driver_version[] = "2.7.4";
/*********************************************************************
* PCI Device ID Table
@@ -87,7 +90,13 @@ static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -107,20 +116,14 @@ static int ixgbe_probe(device_t);
static int ixgbe_attach(device_t);
static int ixgbe_detach(device_t);
static int ixgbe_shutdown(device_t);
-#ifdef IXGBE_LEGACY_TX
-static void ixgbe_start(struct ifnet *);
-static void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
-#else /* ! IXGBE_LEGACY_TX */
-static int ixgbe_mq_start(struct ifnet *, struct mbuf *);
-static int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
-static void ixgbe_qflush(struct ifnet *);
-static void ixgbe_deferred_mq_start(void *, int);
-#endif /* IXGBE_LEGACY_TX */
static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void ixgbe_init(void *);
static void ixgbe_init_locked(struct adapter *);
static void ixgbe_stop(void *);
+#if __FreeBSD_version >= 1100036
static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
+#endif
+static void ixgbe_add_media_types(struct adapter *);
static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int ixgbe_media_change(struct ifnet *);
static void ixgbe_identify_hardware(struct adapter *);
@@ -128,49 +131,27 @@ static int ixgbe_allocate_pci_resources(struct adapter *);
static void ixgbe_get_slot_info(struct ixgbe_hw *);
static int ixgbe_allocate_msix(struct adapter *);
static int ixgbe_allocate_legacy(struct adapter *);
-static int ixgbe_allocate_queues(struct adapter *);
static int ixgbe_setup_msix(struct adapter *);
static void ixgbe_free_pci_resources(struct adapter *);
static void ixgbe_local_timer(void *);
static int ixgbe_setup_interface(device_t, struct adapter *);
static void ixgbe_config_link(struct adapter *);
+static void ixgbe_rearm_queues(struct adapter *, u64);
-static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
-static int ixgbe_setup_transmit_structures(struct adapter *);
-static void ixgbe_setup_transmit_ring(struct tx_ring *);
static void ixgbe_initialize_transmit_units(struct adapter *);
-static void ixgbe_free_transmit_structures(struct adapter *);
-static void ixgbe_free_transmit_buffers(struct tx_ring *);
-
-static int ixgbe_allocate_receive_buffers(struct rx_ring *);
-static int ixgbe_setup_receive_structures(struct adapter *);
-static int ixgbe_setup_receive_ring(struct rx_ring *);
static void ixgbe_initialize_receive_units(struct adapter *);
-static void ixgbe_free_receive_structures(struct adapter *);
-static void ixgbe_free_receive_buffers(struct rx_ring *);
-static void ixgbe_setup_hw_rsc(struct rx_ring *);
+static void ixgbe_enable_rx_drop(struct adapter *);
+static void ixgbe_disable_rx_drop(struct adapter *);
static void ixgbe_enable_intr(struct adapter *);
static void ixgbe_disable_intr(struct adapter *);
static void ixgbe_update_stats_counters(struct adapter *);
-static void ixgbe_txeof(struct tx_ring *);
-static bool ixgbe_rxeof(struct ix_queue *);
-static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
static void ixgbe_set_promisc(struct adapter *);
static void ixgbe_set_multi(struct adapter *);
static void ixgbe_update_link_status(struct adapter *);
-static void ixgbe_refresh_mbufs(struct rx_ring *, int);
-static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
static int ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS);
-static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
- struct ixgbe_dma_alloc *, int);
-static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
-static int ixgbe_tx_ctx_setup(struct tx_ring *,
- struct mbuf *, u32 *, u32 *);
-static int ixgbe_tso_setup(struct tx_ring *,
- struct mbuf *, u32 *, u32 *);
static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void ixgbe_configure_ivars(struct adapter *);
static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
@@ -181,13 +162,6 @@ static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
static void ixgbe_add_hw_stats(struct adapter *adapter);
-static __inline void ixgbe_rx_discard(struct rx_ring *, int);
-static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
- struct mbuf *, u32);
-
-static void ixgbe_enable_rx_drop(struct adapter *);
-static void ixgbe_disable_rx_drop(struct adapter *);
-
/* Support for pluggable optic modules */
static bool ixgbe_sfp_probe(struct adapter *);
static void ixgbe_setup_optics(struct adapter *);
@@ -206,10 +180,10 @@ static void ixgbe_handle_msf(void *, int);
static void ixgbe_handle_mod(void *, int);
#ifdef IXGBE_FDIR
-static void ixgbe_atr(struct tx_ring *, struct mbuf *);
static void ixgbe_reinit_fdir(void *, int);
#endif
+
/* Missing shared code prototype */
extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
@@ -259,6 +233,7 @@ SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
+TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
&ixgbe_rx_process_limit, 0,
"Maximum number of received packets to process at a time,"
@@ -266,6 +241,7 @@ SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
+TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
&ixgbe_tx_process_limit, 0,
"Maximum number of sent packets to process at a time,"
@@ -320,34 +296,10 @@ SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
static int allow_unsupported_sfp = FALSE;
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
-/*
-** HW RSC control:
-** this feature only works with
-** IPv4, and only on 82599 and later.
-** Also this will cause IP forwarding to
-** fail and that can't be controlled by
-** the stack as LRO can. For all these
-** reasons I've deemed it best to leave
-** this off and not bother with a tuneable
-** interface, this would need to be compiled
-** to enable.
-*/
-static bool ixgbe_rsc_enable = FALSE;
-
/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;
#ifdef IXGBE_FDIR
-/*
-** For Flow Director: this is the
-** number of TX packets we sample
-** for the filter pool, this means
-** every 20th packet will be probed.
-**
-** This feature can be disabled by
-** setting this to 0.
-*/
-static int atr_sample_rate = 20;
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
@@ -452,11 +404,10 @@ ixgbe_attach(device_t dev)
IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
/* SYSCTL APIs */
-
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
+ adapter, 0, ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
@@ -471,7 +422,7 @@ ixgbe_attach(device_t dev)
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixgbe_set_advertise, "I", "Link Speed");
+ adapter, 0, ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
@@ -504,7 +455,7 @@ ixgbe_attach(device_t dev)
** system mbuf allocation. Tuning nmbclusters
** can alleviate this.
*/
- if (nmbclusters > 0 ) {
+ if (nmbclusters > 0) {
int s;
s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
if (s > nmbclusters) {
@@ -612,8 +563,9 @@ ixgbe_attach(device_t dev)
*/
ixgbe_get_slot_info(hw);
+
/* Set an initial default flow control value */
- adapter->fc = ixgbe_fc_full;
+ adapter->fc = ixgbe_fc_full;
/* let hardware know driver is loaded */
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
@@ -627,6 +579,7 @@ ixgbe_attach(device_t dev)
#endif /* DEV_NETMAP */
INIT_DEBUGOUT("ixgbe_attach: end");
return (0);
+
err_late:
ixgbe_free_transmit_structures(adapter);
ixgbe_free_receive_structures(adapter);
@@ -636,7 +589,6 @@ err_out:
ixgbe_free_pci_resources(adapter);
free(adapter->mta, M_DEVBUF);
return (error);
-
}
/*********************************************************************
@@ -735,219 +687,6 @@ ixgbe_shutdown(device_t dev)
}
-#ifdef IXGBE_LEGACY_TX
-/*********************************************************************
- * Transmit entry point
- *
- * ixgbe_start is called by the stack to initiate a transmit.
- * The driver will remain in this routine as long as there are
- * packets to transmit and transmit resources are available.
- * In case resources are not available stack is notified and
- * the packet is requeued.
- **********************************************************************/
-
-static void
-ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
-{
- struct mbuf *m_head;
- struct adapter *adapter = txr->adapter;
-
- IXGBE_TX_LOCK_ASSERT(txr);
-
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
- if (!adapter->link_active)
- return;
-
- while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
- if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
- break;
-
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
- if (m_head == NULL)
- break;
-
- if (ixgbe_xmit(txr, &m_head)) {
- if (m_head != NULL)
- IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
- break;
- }
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, m_head);
-
- /* Set watchdog on */
- txr->watchdog_time = ticks;
- txr->queue_status = IXGBE_QUEUE_WORKING;
-
- }
- return;
-}
-
-/*
- * Legacy TX start - called by the stack, this
- * always uses the first tx ring, and should
- * not be used with multiqueue tx enabled.
- */
-static void
-ixgbe_start(struct ifnet *ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr = adapter->tx_rings;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXGBE_TX_LOCK(txr);
- ixgbe_start_locked(txr, ifp);
- IXGBE_TX_UNLOCK(txr);
- }
- return;
-}
-
-#else /* ! IXGBE_LEGACY_TX */
-
-/*
-** Multiqueue Transmit driver
-**
-*/
-static int
-ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ix_queue *que;
- struct tx_ring *txr;
- int i, err = 0;
-#ifdef RSS
- uint32_t bucket_id;
-#endif
-
- /* Which queue to use */
- /*
- * When doing RSS, map it to the same outbound queue
- * as the incoming flow would be mapped to.
- *
- * If everything is setup correctly, it should be the
- * same bucket that the current CPU we're on is.
- */
- if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
-#ifdef RSS
- if (rss_hash2bucket(m->m_pkthdr.flowid,
- M_HASHTYPE_GET(m), &bucket_id) == 0) {
- /* XXX TODO: spit out something if bucket_id > num_queues? */
- i = bucket_id % adapter->num_queues;
- } else {
-#endif
- i = m->m_pkthdr.flowid % adapter->num_queues;
-#ifdef RSS
- }
-#endif
- } else {
- i = curcpu % adapter->num_queues;
- }
-
- txr = &adapter->tx_rings[i];
- que = &adapter->queues[i];
-
- err = drbr_enqueue(ifp, txr->br, m);
- if (err)
- return (err);
- if (IXGBE_TX_TRYLOCK(txr)) {
- ixgbe_mq_start_locked(ifp, txr);
- IXGBE_TX_UNLOCK(txr);
- } else
- taskqueue_enqueue(que->tq, &txr->txq_task);
-
- return (0);
-}
-
-static int
-ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct mbuf *next;
- int enqueued = 0, err = 0;
-
- if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
- adapter->link_active == 0)
- return (ENETDOWN);
-
- /* Process the queue */
-#if __FreeBSD_version < 901504
- next = drbr_dequeue(ifp, txr->br);
- while (next != NULL) {
- if ((err = ixgbe_xmit(txr, &next)) != 0) {
- if (next != NULL)
- err = drbr_enqueue(ifp, txr->br, next);
-#else
- while ((next = drbr_peek(ifp, txr->br)) != NULL) {
- if ((err = ixgbe_xmit(txr, &next)) != 0) {
- if (next == NULL) {
- drbr_advance(ifp, txr->br);
- } else {
- drbr_putback(ifp, txr->br, next);
- }
-#endif
- break;
- }
-#if __FreeBSD_version >= 901504
- drbr_advance(ifp, txr->br);
-#endif
- enqueued++;
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, next);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
-#if __FreeBSD_version < 901504
- next = drbr_dequeue(ifp, txr->br);
-#endif
- }
-
- if (enqueued > 0) {
- /* Set watchdog on */
- txr->queue_status = IXGBE_QUEUE_WORKING;
- txr->watchdog_time = ticks;
- }
-
- if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
- ixgbe_txeof(txr);
-
- return (err);
-}
-
-/*
- * Called from a taskqueue to drain queued transmit packets.
- */
-static void
-ixgbe_deferred_mq_start(void *arg, int pending)
-{
- struct tx_ring *txr = arg;
- struct adapter *adapter = txr->adapter;
- struct ifnet *ifp = adapter->ifp;
-
- IXGBE_TX_LOCK(txr);
- if (!drbr_empty(ifp, txr->br))
- ixgbe_mq_start_locked(ifp, txr);
- IXGBE_TX_UNLOCK(txr);
-}
-
-/*
-** Flush all ring buffers
-*/
-static void
-ixgbe_qflush(struct ifnet *ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr = adapter->tx_rings;
- struct mbuf *m;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IXGBE_TX_LOCK(txr);
- while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
- m_freem(m);
- IXGBE_TX_UNLOCK(txr);
- }
- if_qflush(ifp);
-}
-#endif /* IXGBE_LEGACY_TX */
-
/*********************************************************************
* Ioctl entry point
*
@@ -961,7 +700,6 @@ static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
struct adapter *adapter = ifp->if_softc;
- struct ixgbe_hw *hw = &adapter->hw;
struct ifreq *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
struct ifaddr *ifa = (struct ifaddr *)data;
@@ -1067,8 +805,10 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
VLAN_CAPABILITIES(ifp);
break;
}
+#if __FreeBSD_version >= 1100036
case SIOCGI2C:
{
+ struct ixgbe_hw *hw = &adapter->hw;
struct ifi2creq i2c;
int i;
IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
@@ -1090,6 +830,7 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
break;
}
+#endif
default:
IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
error = ether_ioctl(ifp, command, data);
@@ -1186,15 +927,15 @@ ixgbe_init_locked(struct adapter *adapter)
gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
/* Enable Fan Failure Interrupt */
- gpie |= IXGBE_SDP1_GPIEN;
+ gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
/* Add for Module detection */
if (hw->mac.type == ixgbe_mac_82599EB)
- gpie |= IXGBE_SDP2_GPIEN;
+ gpie |= IXGBE_SDP2_GPIEN_BY_MAC(hw);
/* Thermal Failure Detection */
if (hw->mac.type == ixgbe_mac_X540)
- gpie |= IXGBE_SDP0_GPIEN;
+ gpie |= IXGBE_SDP0_GPIEN_BY_MAC(hw);
if (adapter->msix > 1) {
/* Enable Enhanced MSIX mode */
@@ -1328,7 +1069,7 @@ ixgbe_init_locked(struct adapter *adapter)
}
/* Set moderation on the Link interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
/* Config/Enable Link */
ixgbe_config_link(adapter);
@@ -1340,19 +1081,33 @@ ixgbe_init_locked(struct adapter *adapter)
frame = adapter->max_frame_size;
/* Calculate High Water */
- if (hw->mac.type == ixgbe_mac_X540)
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_X550EM_x:
tmp = IXGBE_DV_X540(frame, frame);
- else
+ break;
+ default:
tmp = IXGBE_DV(frame, frame);
+ break;
+ }
size = IXGBE_BT2KB(tmp);
rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
hw->fc.high_water[0] = rxpb - size;
/* Now calculate Low Water */
- if (hw->mac.type == ixgbe_mac_X540)
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_X550EM_x:
tmp = IXGBE_LOW_DV_X540(frame);
- else
+ break;
+ default:
tmp = IXGBE_LOW_DV(frame);
+ break;
+ }
hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
hw->fc.requested_mode = adapter->fc;
@@ -1505,10 +1260,10 @@ ixgbe_legacy_irq(void *arg)
/* Check for fan failure */
if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
+ (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
"REPLACE IMMEDIATELY!!\n");
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
}
/* Link status change */
@@ -1625,7 +1380,7 @@ ixgbe_msix_link(void *arg)
struct ixgbe_hw *hw = &adapter->hw;
u32 reg_eicr;
- ++adapter->link_irq;
+ ++adapter->vector_irq;
/* First get the cause */
reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
@@ -1655,32 +1410,40 @@ ixgbe_msix_link(void *arg)
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
} else
- if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
- /* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
- taskqueue_enqueue(adapter->tq, &adapter->msf_task);
- } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
- /* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
- taskqueue_enqueue(adapter->tq, &adapter->mod_task);
+ if (ixgbe_is_sfp(hw)) {
+ if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
+ taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+ } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));
+ taskqueue_enqueue(adapter->tq, &adapter->mod_task);
+ }
}
}
/* Check for fan failure */
if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
- (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
+ (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
"REPLACE IMMEDIATELY!!\n");
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
}
/* Check for over temp condition */
- if ((hw->mac.type == ixgbe_mac_X540) &&
- (reg_eicr & IXGBE_EICR_TS)) {
- device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
- "PHY IS SHUT DOWN!!\n");
- device_printf(adapter->dev, "System shutdown required\n");
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_a:
+ if (reg_eicr & IXGBE_EICR_TS) {
+ device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
+ "PHY IS SHUT DOWN!!\n");
+ device_printf(adapter->dev, "System shutdown required\n");
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
+ }
+ break;
+ default:
+ /* Other MACs have no thermal sensor interrupt */
+ break;
}
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
@@ -1700,6 +1463,7 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
struct adapter *adapter = ifp->if_softc;
struct ixgbe_hw *hw = &adapter->hw;
+ int layer;
INIT_DEBUGOUT("ixgbe_media_status: begin");
IXGBE_CORE_LOCK(adapter);
@@ -1714,29 +1478,98 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
}
ifmr->ifm_status |= IFM_ACTIVE;
+ layer = ixgbe_get_supported_physical_layer(hw);
- /*
- * Not all NIC are 1000baseSX as an example X540T.
- * We must set properly the media based on NIC model.
- */
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X540T:
- if (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL)
- ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
- else if (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL)
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
+ layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
+ layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
- else if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)
- ifmr->ifm_active |= adapter->optics | IFM_FDX;
- break;
- default:
- if (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL)
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
- else if (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL)
+ break;
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
+ layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
+ break;
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
+ break;
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
+ break;
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
+ layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
- else if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)
- ifmr->ifm_active |= adapter->optics | IFM_FDX;
- break;
- }
+ break;
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
+ break;
+ }
+ /*
+ ** XXX: These need to use the proper media types once
+ ** they're added.
+ */
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10_T | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_10_5 | IFM_FDX;
+ break;
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
+ || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10_2 | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_10_5 | IFM_FDX;
+ break;
+ }
+
+ /* If nothing is recognized... */
+ if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
+ ifmr->ifm_active |= IFM_UNKNOWN;
+
+#if __FreeBSD_version >= 900025
+ /* Flow control setting */
+ if (adapter->fc == ixgbe_fc_rx_pause || adapter->fc == ixgbe_fc_full)
+ ifmr->ifm_active |= IFM_ETH_RXPAUSE;
+ if (adapter->fc == ixgbe_fc_tx_pause || adapter->fc == ixgbe_fc_full)
+ ifmr->ifm_active |= IFM_ETH_TXPAUSE;
+#endif
IXGBE_CORE_UNLOCK(adapter);
@@ -1756,185 +1589,59 @@ ixgbe_media_change(struct ifnet * ifp)
{
struct adapter *adapter = ifp->if_softc;
struct ifmedia *ifm = &adapter->media;
+ struct ixgbe_hw *hw = &adapter->hw;
+ ixgbe_link_speed speed = 0;
INIT_DEBUGOUT("ixgbe_media_change: begin");
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
- switch (IFM_SUBTYPE(ifm->ifm_media)) {
- case IFM_10G_T:
- case IFM_AUTO:
- adapter->hw.phy.autoneg_advertised =
- IXGBE_LINK_SPEED_100_FULL |
- IXGBE_LINK_SPEED_1GB_FULL |
- IXGBE_LINK_SPEED_10GB_FULL;
- break;
- default:
- device_printf(adapter->dev, "Only auto media type\n");
- return (EINVAL);
- }
-
- return (0);
-}
-
-/*********************************************************************
- *
- * This routine maps the mbufs to tx descriptors, allowing the
- * TX engine to transmit the packets.
- * - return 0 on success, positive on failure
- *
- **********************************************************************/
-
-static int
-ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
-{
- struct adapter *adapter = txr->adapter;
- u32 olinfo_status = 0, cmd_type_len;
- int i, j, error, nsegs;
- int first;
- bool remap = TRUE;
- struct mbuf *m_head;
- bus_dma_segment_t segs[adapter->num_segs];
- bus_dmamap_t map;
- struct ixgbe_tx_buf *txbuf;
- union ixgbe_adv_tx_desc *txd = NULL;
-
- m_head = *m_headp;
-
- /* Basic descriptor defines */
- cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
-
- if (m_head->m_flags & M_VLANTAG)
- cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
-
- /*
- * Important to capture the first descriptor
- * used because it will contain the index of
- * the one we tell the hardware to report back
- */
- first = txr->next_avail_desc;
- txbuf = &txr->tx_buffers[first];
- map = txbuf->map;
-
- /*
- * Map the packet for DMA.
- */
-retry:
- error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
- *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
- if (__predict_false(error)) {
- struct mbuf *m;
-
- switch (error) {
- case EFBIG:
- /* Try it again? - one try */
- if (remap == TRUE) {
- remap = FALSE;
- m = m_defrag(*m_headp, M_NOWAIT);
- if (m == NULL) {
- adapter->mbuf_defrag_failed++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (ENOBUFS);
- }
- *m_headp = m;
- goto retry;
- } else
- return (error);
- case ENOMEM:
- txr->no_tx_dma_setup++;
- return (error);
- default:
- txr->no_tx_dma_setup++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
- }
-
- /* Make certain there are enough descriptors */
- if (nsegs > txr->tx_avail - 2) {
- txr->no_desc_avail++;
- bus_dmamap_unload(txr->txtag, map);
- return (ENOBUFS);
- }
- m_head = *m_headp;
-
/*
- ** Set up the appropriate offload context
- ** this will consume the first descriptor
+ ** We don't actually need to check against the supported
+ ** media types of the adapter; ifmedia will take care of
+ ** that for us.
+ ** NOTE: this relies on falling thru the switch
+ ** to get all the values set, it can be confusing.
*/
- error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
- if (__predict_false(error)) {
- if (error == ENOBUFS)
- *m_headp = NULL;
- return (error);
- }
-
-#ifdef IXGBE_FDIR
- /* Do the flow director magic */
- if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
- ++txr->atr_count;
- if (txr->atr_count >= atr_sample_rate) {
- ixgbe_atr(txr, m_head);
- txr->atr_count = 0;
- }
- }
-#endif
-
- i = txr->next_avail_desc;
- for (j = 0; j < nsegs; j++) {
- bus_size_t seglen;
- bus_addr_t segaddr;
-
- txbuf = &txr->tx_buffers[i];
- txd = &txr->tx_base[i];
- seglen = segs[j].ds_len;
- segaddr = htole64(segs[j].ds_addr);
-
- txd->read.buffer_addr = segaddr;
- txd->read.cmd_type_len = htole32(txr->txd_cmd |
- cmd_type_len |seglen);
- txd->read.olinfo_status = htole32(olinfo_status);
-
- if (++i == txr->num_desc)
- i = 0;
+ switch (IFM_SUBTYPE(ifm->ifm_media)) {
+ case IFM_AUTO:
+ case IFM_10G_T:
+ speed |= IXGBE_LINK_SPEED_100_FULL;
+ case IFM_10G_LRM:
+ case IFM_10G_SR: /* KR, too */
+ case IFM_10G_LR:
+ case IFM_10G_CX4: /* KX4 for now */
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ case IFM_10G_TWINAX:
+ speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IFM_1000_T:
+ speed |= IXGBE_LINK_SPEED_100_FULL;
+ case IFM_1000_LX:
+ case IFM_1000_SX:
+ case IFM_1000_CX: /* KX until there's real support */
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IFM_100_TX:
+ speed |= IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
+ goto invalid;
}
- txd->read.cmd_type_len |=
- htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
- txr->tx_avail -= nsegs;
- txr->next_avail_desc = i;
-
- txbuf->m_head = m_head;
- /*
- ** Here we swap the map so the last descriptor,
- ** which gets the completion interrupt has the
- ** real map, and the first descriptor gets the
- ** unused map from this descriptor.
- */
- txr->tx_buffers[first].map = txbuf->map;
- txbuf->map = map;
- bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
-
- /* Set the EOP descriptor that will be marked done */
- txbuf = &txr->tx_buffers[first];
- txbuf->eop = txd;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- /*
- * Advance the Transmit Descriptor Tail (Tdt), this tells the
- * hardware that this frame is available to transmit.
- */
- ++txr->total_packets;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
+ hw->mac.autotry_restart = TRUE;
+ hw->mac.ops.setup_link(hw, speed, TRUE);
+ adapter->advertise =
+ ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
+ ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
+ ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
return (0);
+invalid:
+ device_printf(adapter->dev, "Invalid media type\n");
+ return (EINVAL);
}
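Since media selection is now delegated to ifmedia and
hw->mac.ops.setup_link(), a fixed media type can be requested instead of
only autoselect; a sketch of the usage (the exact media word depends on
which types ixgbe_add_media_types() registered for the hardware at
hand):

	# ifconfig ix0 media 10Gbase-Twinax
	# ifconfig ix0 media autoselect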
static void
@@ -2083,8 +1790,8 @@ ixgbe_local_timer(void *arg)
struct adapter *adapter = arg;
device_t dev = adapter->dev;
struct ix_queue *que = adapter->queues;
- struct tx_ring *txr = adapter->tx_rings;
- int hung = 0, paused = 0;
+ u64 queues = 0;
+ int hung = 0;
mtx_assert(&adapter->core_mtx, MA_OWNED);
@@ -2097,26 +1804,44 @@ ixgbe_local_timer(void *arg)
ixgbe_update_stats_counters(adapter);
/*
- * If the interface has been paused
- * then don't do the watchdog check
- */
- if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
- paused = 1;
-
- /*
** Check the TX queues status
+ ** - mark hung queues so we don't schedule on them
** - watchdog only if all queues show hung
*/
- for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
- if ((txr->queue_status == IXGBE_QUEUE_HUNG) &&
- (paused == 0))
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ /* Keep track of queues with work for soft irq */
+ if (que->txr->busy)
+ queues |= ((u64)1 << que->me);
+ /*
+ ** Each time txeof runs without cleaning, but there
+ ** are uncleaned descriptors it increments busy. If
+ ** we get to the MAX we declare it hung.
+ */
+ if (que->busy == IXGBE_QUEUE_HUNG) {
++hung;
- else if (txr->queue_status == IXGBE_QUEUE_WORKING)
- taskqueue_enqueue(que->tq, &txr->txq_task);
- }
- /* Only truely watchdog if all queues show hung */
- if (hung == adapter->num_queues)
- goto watchdog;
+ /* Mark the queue as inactive */
+ adapter->active_queues &= ~((u64)1 << que->me);
+ continue;
+ } else {
+ /* Check if we've come back from hung */
+ if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
+ adapter->active_queues |= ((u64)1 << que->me);
+ }
+ if (que->busy >= IXGBE_MAX_TX_BUSY) {
+ device_printf(dev,"Warning queue %d "
+ "appears to be hung!\n", i);
+ que->txr->busy = IXGBE_QUEUE_HUNG;
+ ++hung;
+ }
+
+ }
+
+ /* Only truly watchdog if all queues show hung */
+ if (hung == adapter->num_queues)
+ goto watchdog;
+ else if (queues != 0) { /* Force an IRQ on queues with work */
+ ixgbe_rearm_queues(adapter, queues);
+ }
out:
callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
@@ -2124,12 +1849,6 @@ out:
watchdog:
device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
- device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
- IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
- IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
- device_printf(dev,"TX(%d) desc avail = %d,"
- "Next TX to Clean = %d\n",
- txr->me, txr->tx_avail, txr->next_to_clean);
adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
adapter->watchdog_events++;
ixgbe_init_locked(adapter);
@@ -2234,6 +1953,11 @@ ixgbe_identify_hardware(struct adapter *adapter)
hw->subsystem_device_id =
pci_read_config(dev, PCIR_SUBDEV_0, 2);
+ /*
+ ** Make sure BUSMASTER is set
+ */
+ pci_enable_busmaster(dev);
+
/* We need this here to set the num_segs below */
ixgbe_set_mac_type(hw);
@@ -2367,7 +2091,7 @@ ixgbe_allocate_legacy(struct adapter *adapter)
return (error);
}
/* For simplicity in the handlers */
- adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
+ adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
return (0);
}
@@ -2386,9 +2110,6 @@ ixgbe_allocate_msix(struct adapter *adapter)
struct tx_ring *txr = adapter->tx_rings;
int error, rid, vector = 0;
int cpu_id = 0;
-#ifdef RSS
- cpuset_t cpu_mask;
-#endif
#ifdef RSS
/*
@@ -2436,7 +2157,7 @@ ixgbe_allocate_msix(struct adapter *adapter)
bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
que->msix = vector;
- adapter->que_mask |= (u64)(1 << que->msix);
+ adapter->active_queues |= (u64)(1 << que->msix);
#ifdef RSS
/*
* The queue ID is used as the RSS layer bucket ID.
@@ -2463,10 +2184,12 @@ ixgbe_allocate_msix(struct adapter *adapter)
"Bound RSS bucket %d to CPU %d\n",
i, cpu_id);
#else
+#if 0 // This is too noisy
device_printf(dev,
"Bound queue %d to cpu %d\n",
i, cpu_id);
#endif
+#endif
#ifndef IXGBE_LEGACY_TX
@@ -2476,9 +2199,8 @@ ixgbe_allocate_msix(struct adapter *adapter)
que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
- CPU_SETOF(cpu_id, &cpu_mask);
- taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
- &cpu_mask,
+ taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
+ cpu_id,
"%s (bucket %d)",
device_get_nameunit(adapter->dev),
cpu_id);
@@ -2509,7 +2231,7 @@ ixgbe_allocate_msix(struct adapter *adapter)
#if __FreeBSD_version >= 800504
bus_describe_intr(dev, adapter->res, adapter->tag, "link");
#endif
- adapter->linkvec = vector;
+ adapter->vector = vector;
/* Tasklets for Link, SFP and Multispeed Fiber */
TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
@@ -2560,16 +2282,15 @@ ixgbe_setup_msix(struct adapter *adapter)
/* Figure out a reasonable auto config value */
queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
- /* Override based on tuneable */
- if (ixgbe_num_queues != 0)
- queues = ixgbe_num_queues;
-
#ifdef RSS
/* If we're doing RSS, clamp at the number of RSS buckets */
if (queues > rss_getnumbuckets())
queues = rss_getnumbuckets();
#endif
+ if (ixgbe_num_queues != 0)
+ queues = ixgbe_num_queues;
+
/* reflect correct sysctl value */
ixgbe_num_queues = queues;
@@ -2687,8 +2408,8 @@ ixgbe_free_pci_resources(struct adapter * adapter)
/* Clean the Legacy or Link interrupt last */
- if (adapter->linkvec) /* we are doing MSIX */
- rid = adapter->linkvec + 1;
+ if (adapter->vector) /* we are doing MSIX */
+ rid = adapter->vector + 1;
else
(adapter->msix != 0) ? (rid = 1):(rid = 0);
@@ -2722,7 +2443,6 @@ mem:
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
struct ifnet *ifp;
INIT_DEBUGOUT("ixgbe_setup_interface: begin");
@@ -2738,7 +2458,9 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_softc = adapter;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ixgbe_ioctl;
- ifp->if_get_counter = ixgbe_get_counter;
+#if __FreeBSD_version >= 1100036
+ if_setgetcounterfn(ifp, ixgbe_get_counter);
+#endif
#ifndef IXGBE_LEGACY_TX
ifp->if_transmit = ixgbe_mq_start;
ifp->if_qflush = ixgbe_qflush;
@@ -2783,19 +2505,84 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
* callbacks to update media and link information
*/
ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
- ixgbe_media_status);
- ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
- ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
+ ixgbe_media_status);
+
+ ixgbe_add_media_types(adapter);
+
+ /* Autoselect media by default */
+ ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
+
+ return (0);
+}
+
+static void
+ixgbe_add_media_types(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ device_t dev = adapter->dev;
+ int layer;
+
+ layer = ixgbe_get_supported_physical_layer(hw);
+
+ /* Media types with matching FreeBSD media defines */
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+
+ if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
+ layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+#if 0
+ if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_LX)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
+#endif
+
+ /*
+ ** Other (no matching FreeBSD media type):
+ ** To workaround this, we'll assign these completely
+ ** inappropriate media types.
+ */
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
+ device_printf(dev, "Media supported: 10GbaseKR\n");
+ device_printf(dev, "10GbaseKR mapped to 10baseT\n");
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
+ device_printf(dev, "Media supported: 10GbaseKX4\n");
+ device_printf(dev, "10GbaseKX4 mapped to 10base2\n");
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_2, 0, NULL);
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
+ device_printf(dev, "Media supported: 1000baseKX\n");
+ device_printf(dev, "1000baseKX mapped to 10base5\n");
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_5, 0, NULL);
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
+ /* Someday, someone will care about you... */
+ device_printf(dev, "Media supported: 1000baseBX\n");
+ }
+
+ /* Very old */
if (hw->device_id == IXGBE_DEV_ID_82598AT) {
ifmedia_add(&adapter->media,
IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
ifmedia_add(&adapter->media,
IFM_ETHER | IFM_1000_T, 0, NULL);
}
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
- return (0);
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
}
static void
@@ -2834,392 +2621,10 @@ out:
return;
}
-/********************************************************************
- * Manage DMA'able memory.
- *******************************************************************/
-static void
-ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
-{
- if (error)
- return;
- *(bus_addr_t *) arg = segs->ds_addr;
- return;
-}
-
-static int
-ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
- struct ixgbe_dma_alloc *dma, int mapflags)
-{
- device_t dev = adapter->dev;
- int r;
-
- r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
- DBA_ALIGN, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- size, /* maxsize */
- 1, /* nsegments */
- size, /* maxsegsize */
- BUS_DMA_ALLOCNOW, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &dma->dma_tag);
- if (r != 0) {
- device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
- "error %u\n", r);
- goto fail_0;
- }
- r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
- BUS_DMA_NOWAIT, &dma->dma_map);
- if (r != 0) {
- device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
- "error %u\n", r);
- goto fail_1;
- }
- r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
- size,
- ixgbe_dmamap_cb,
- &dma->dma_paddr,
- mapflags | BUS_DMA_NOWAIT);
- if (r != 0) {
- device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
- "error %u\n", r);
- goto fail_2;
- }
- dma->dma_size = size;
- return (0);
-fail_2:
- bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
-fail_1:
- bus_dma_tag_destroy(dma->dma_tag);
-fail_0:
- dma->dma_tag = NULL;
- return (r);
-}
-
-static void
-ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
-{
- bus_dmamap_sync(dma->dma_tag, dma->dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(dma->dma_tag, dma->dma_map);
- bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
- bus_dma_tag_destroy(dma->dma_tag);
-}
-
/*********************************************************************
*
- * Allocate memory for the transmit and receive rings, and then
- * the descriptors associated with each, called only once at attach.
- *
- **********************************************************************/
-static int
-ixgbe_allocate_queues(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize, error = IXGBE_SUCCESS;
- int txconf = 0, rxconf = 0;
-
- /* First allocate the top level queue structs */
- if (!(adapter->queues =
- (struct ix_queue *) malloc(sizeof(struct ix_queue) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* First allocate the TX ring struct memory */
- if (!(adapter->tx_rings =
- (struct tx_ring *) malloc(sizeof(struct tx_ring) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate TX ring memory\n");
- error = ENOMEM;
- goto tx_fail;
- }
-
- /* Next allocate the RX */
- if (!(adapter->rx_rings =
- (struct rx_ring *) malloc(sizeof(struct rx_ring) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate RX ring memory\n");
- error = ENOMEM;
- goto rx_fail;
- }
-
- /* For the ring itself */
- tsize = roundup2(adapter->num_tx_desc *
- sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
-
- /*
- * Now set up the TX queues, txconf is needed to handle the
- * possibility that things fail midcourse and we need to
- * undo memory gracefully
- */
- for (int i = 0; i < adapter->num_queues; i++, txconf++) {
- /* Set up some basics */
- txr = &adapter->tx_rings[i];
- txr->adapter = adapter;
- txr->me = i;
- txr->num_desc = adapter->num_tx_desc;
-
- /* Initialize the TX side lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), txr->me);
- mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
-
- if (ixgbe_dma_malloc(adapter, tsize,
- &txr->txdma, BUS_DMA_NOWAIT)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
- txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
- bzero((void *)txr->tx_base, tsize);
-
- /* Now allocate transmit buffers for the ring */
- if (ixgbe_allocate_transmit_buffers(txr)) {
- device_printf(dev,
- "Critical Failure setting up transmit buffers\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
-#ifndef IXGBE_LEGACY_TX
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
- M_WAITOK, &txr->tx_mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up buf ring\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
-#endif
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
- for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
- rxr = &adapter->rx_rings[i];
- /* Set up some basics */
- rxr->adapter = adapter;
- rxr->me = i;
- rxr->num_desc = adapter->num_rx_desc;
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), rxr->me);
- mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (ixgbe_dma_malloc(adapter, rsize,
- &rxr->rxdma, BUS_DMA_NOWAIT)) {
- device_printf(dev,
- "Unable to allocate RxDescriptor memory\n");
- error = ENOMEM;
- goto err_rx_desc;
- }
- rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
- bzero((void *)rxr->rx_base, rsize);
-
- /* Allocate receive buffers for the ring*/
- if (ixgbe_allocate_receive_buffers(rxr)) {
- device_printf(dev,
- "Critical Failure setting up receive buffers\n");
- error = ENOMEM;
- goto err_rx_desc;
- }
- }
-
- /*
- ** Finally set up the queue holding structs
- */
- for (int i = 0; i < adapter->num_queues; i++) {
- que = &adapter->queues[i];
- que->adapter = adapter;
- que->txr = &adapter->tx_rings[i];
- que->rxr = &adapter->rx_rings[i];
- }
-
- return (0);
-
-err_rx_desc:
- for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
- ixgbe_dma_free(adapter, &rxr->rxdma);
-err_tx_desc:
- for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
- ixgbe_dma_free(adapter, &txr->txdma);
- free(adapter->rx_rings, M_DEVBUF);
-rx_fail:
- free(adapter->tx_rings, M_DEVBUF);
-tx_fail:
- free(adapter->queues, M_DEVBUF);
-fail:
- return (error);
-}
-
-/*********************************************************************
- *
- * Allocate memory for tx_buffer structures. The tx_buffer stores all
- * the information needed to transmit a packet on the wire. This is
- * called only once at attach, setup is done every reset.
- *
- **********************************************************************/
-static int
-ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- device_t dev = adapter->dev;
- struct ixgbe_tx_buf *txbuf;
- int error, i;
-
- /*
- * Setup DMA descriptor areas.
- */
- if ((error = bus_dma_tag_create(
- bus_get_dma_tag(adapter->dev), /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- IXGBE_TSO_SIZE, /* maxsize */
- adapter->num_segs, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &txr->txtag))) {
- device_printf(dev,"Unable to allocate TX DMA tag\n");
- goto fail;
- }
-
- if (!(txr->tx_buffers =
- (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
- adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate tx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* Create the descriptor buffer dma maps */
- txbuf = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
- error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
- if (error != 0) {
- device_printf(dev, "Unable to create TX DMA map\n");
- goto fail;
- }
- }
-
- return 0;
-fail:
- /* We free all, it handles case where we are in the middle */
- ixgbe_free_transmit_structures(adapter);
- return (error);
-}
-
-/*********************************************************************
- *
- * Initialize a transmit ring.
- *
- **********************************************************************/
-static void
-ixgbe_setup_transmit_ring(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_tx_buf *txbuf;
- int i;
-#ifdef DEV_NETMAP
- struct netmap_adapter *na = NA(adapter->ifp);
- struct netmap_slot *slot;
-#endif /* DEV_NETMAP */
-
- /* Clear the old ring contents */
- IXGBE_TX_LOCK(txr);
-#ifdef DEV_NETMAP
- /*
- * (under lock): if in netmap mode, do some consistency
- * checks and set slot to entry 0 of the netmap ring.
- */
- slot = netmap_reset(na, NR_TX, txr->me, 0);
-#endif /* DEV_NETMAP */
- bzero((void *)txr->tx_base,
- (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
- /* Reset indices */
- txr->next_avail_desc = 0;
- txr->next_to_clean = 0;
-
- /* Free any existing tx buffers. */
- txbuf = txr->tx_buffers;
- for (i = 0; i < txr->num_desc; i++, txbuf++) {
- if (txbuf->m_head != NULL) {
- bus_dmamap_sync(txr->txtag, txbuf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag, txbuf->map);
- m_freem(txbuf->m_head);
- txbuf->m_head = NULL;
- }
-#ifdef DEV_NETMAP
- /*
- * In netmap mode, set the map for the packet buffer.
- * NOTE: Some drivers (not this one) also need to set
- * the physical buffer address in the NIC ring.
- * Slots in the netmap ring (indexed by "si") are
- * kring->nkr_hwofs positions "ahead" wrt the
- * corresponding slot in the NIC ring. In some drivers
- * (not here) nkr_hwofs can be negative. Function
- * netmap_idx_n2k() handles wraparounds properly.
- */
- if (slot) {
- int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
- netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si));
- }
-#endif /* DEV_NETMAP */
- /* Clear the EOP descriptor pointer */
- txbuf->eop = NULL;
- }
-
-#ifdef IXGBE_FDIR
- /* Set the rate at which we sample packets */
- if (adapter->hw.mac.type != ixgbe_mac_82598EB)
- txr->atr_sample = atr_sample_rate;
-#endif
-
- /* Set number of descriptors available */
- txr->tx_avail = adapter->num_tx_desc;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- IXGBE_TX_UNLOCK(txr);
-}
-
-/*********************************************************************
- *
- * Initialize all transmit rings.
- *
- **********************************************************************/
-static int
-ixgbe_setup_transmit_structures(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++)
- ixgbe_setup_transmit_ring(txr);
-
- return (0);
-}
-
-/*********************************************************************
- *
- * Enable transmit unit.
+ * Enable transmit units.
*
**********************************************************************/
static void
@@ -3232,7 +2637,7 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
for (int i = 0; i < adapter->num_queues; i++, txr++) {
u64 tdba = txr->txdma.dma_paddr;
- u32 txctrl;
+ u32 txctrl = 0;
IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
(tdba & 0x00000000ffffffffULL));
@@ -3244,9 +2649,8 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
- /* Setup Transmit Descriptor Cmd Settings */
- txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
- txr->queue_status = IXGBE_QUEUE_IDLE;
+ /* Cache the tail address */
+ txr->tail = IXGBE_TDT(txr->me);
/* Set the processing limit */
txr->process_limit = ixgbe_tx_process_limit;
@@ -3293,933 +2697,6 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
return;
}
-/*********************************************************************
- *
- * Free all transmit rings.
- *
- **********************************************************************/
-static void
-ixgbe_free_transmit_structures(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IXGBE_TX_LOCK(txr);
- ixgbe_free_transmit_buffers(txr);
- ixgbe_dma_free(adapter, &txr->txdma);
- IXGBE_TX_UNLOCK(txr);
- IXGBE_TX_LOCK_DESTROY(txr);
- }
- free(adapter->tx_rings, M_DEVBUF);
-}
-
-/*********************************************************************
- *
- * Free transmit ring related data structures.
- *
- **********************************************************************/
-static void
-ixgbe_free_transmit_buffers(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_tx_buf *tx_buffer;
- int i;
-
- INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");
-
- if (txr->tx_buffers == NULL)
- return;
-
- tx_buffer = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
- if (tx_buffer->m_head != NULL) {
- bus_dmamap_sync(txr->txtag, tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag,
- tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- if (tx_buffer->map != NULL) {
- bus_dmamap_destroy(txr->txtag,
- tx_buffer->map);
- tx_buffer->map = NULL;
- }
- } else if (tx_buffer->map != NULL) {
- bus_dmamap_unload(txr->txtag,
- tx_buffer->map);
- bus_dmamap_destroy(txr->txtag,
- tx_buffer->map);
- tx_buffer->map = NULL;
- }
- }
-#ifdef IXGBE_LEGACY_TX
- if (txr->br != NULL)
- buf_ring_free(txr->br, M_DEVBUF);
-#endif
- if (txr->tx_buffers != NULL) {
- free(txr->tx_buffers, M_DEVBUF);
- txr->tx_buffers = NULL;
- }
- if (txr->txtag != NULL) {
- bus_dma_tag_destroy(txr->txtag);
- txr->txtag = NULL;
- }
- return;
-}
-
-/*********************************************************************
- *
- * Advanced Context Descriptor setup for VLAN, CSUM or TSO
- *
- **********************************************************************/
-
-static int
-ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
- u32 *cmd_type_len, u32 *olinfo_status)
-{
- struct ixgbe_adv_tx_context_desc *TXD;
- struct ether_vlan_header *eh;
- struct ip *ip;
- struct ip6_hdr *ip6;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- int ehdrlen, ip_hlen = 0;
- u16 etype;
- u8 ipproto = 0;
- int offload = TRUE;
- int ctxd = txr->next_avail_desc;
- u16 vtag = 0;
-
- /* First check if TSO is to be used */
- if (mp->m_pkthdr.csum_flags & CSUM_TSO)
- return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
-
- if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
- offload = FALSE;
-
- /* Indicate the whole packet as payload when not doing TSO */
- *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
-
- /* Now ready a context descriptor */
- TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
-
- /*
- ** In advanced descriptors the vlan tag must
- ** be placed into the context descriptor. Hence
- ** we need to make one even if not doing offloads.
- */
- if (mp->m_flags & M_VLANTAG) {
- vtag = htole16(mp->m_pkthdr.ether_vtag);
- vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
- } else if (offload == FALSE) /* ... no offload to do */
- return (0);
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present,
- * helpful for QinQ too.
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- etype = ntohs(eh->evl_proto);
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- } else {
- etype = ntohs(eh->evl_encap_proto);
- ehdrlen = ETHER_HDR_LEN;
- }
-
- /* Set the ether header length */
- vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
-
- switch (etype) {
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + ehdrlen);
- ip_hlen = ip->ip_hl << 2;
- ipproto = ip->ip_p;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- break;
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- ip_hlen = sizeof(struct ip6_hdr);
- /* XXX-BZ this will go badly in case of ext hdrs. */
- ipproto = ip6->ip6_nxt;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
- break;
- default:
- offload = FALSE;
- break;
- }
-
- vlan_macip_lens |= ip_hlen;
- type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
-
- switch (ipproto) {
- case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags & CSUM_TCP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
-
- case IPPROTO_UDP:
- if (mp->m_pkthdr.csum_flags & CSUM_UDP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
- break;
-
-#if __FreeBSD_version >= 800000
- case IPPROTO_SCTP:
- if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- break;
-#endif
- default:
- offload = FALSE;
- break;
- }
-
- if (offload) /* For the TX descriptor setup */
- *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
-
- /* Now copy bits into descriptor */
- TXD->vlan_macip_lens = htole32(vlan_macip_lens);
- TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
- TXD->seqnum_seed = htole32(0);
- TXD->mss_l4len_idx = htole32(0);
-
- /* We've consumed the first desc, adjust counters */
- if (++ctxd == txr->num_desc)
- ctxd = 0;
- txr->next_avail_desc = ctxd;
- --txr->tx_avail;
-
- return (0);
-}
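
For reference, the vlan_macip_lens word assembled above packs three fields into one 32-bit value: the VLAN tag in bits 31:16, the MAC header length in bits 15:9, and the IP header length in bits 8:0. A standalone sketch of the packing; the shift values mirror IXGBE_ADVTXD_VLAN_SHIFT and IXGBE_ADVTXD_MACLEN_SHIFT from ixgbe_type.h:

#include <stdint.h>
#include <stdio.h>

#define ADVTXD_VLAN_SHIFT	16	/* VLAN tag, bits 31:16 */
#define ADVTXD_MACLEN_SHIFT	 9	/* MAC header length, bits 15:9 */
					/* IP header length in bits 8:0 */

static uint32_t
pack_vlan_macip_lens(uint16_t vtag, int ehdrlen, int ip_hlen)
{
	return (((uint32_t)vtag << ADVTXD_VLAN_SHIFT) |
	    ((uint32_t)ehdrlen << ADVTXD_MACLEN_SHIFT) |
	    (uint32_t)ip_hlen);
}

int
main(void)
{
	/* Untagged IPv4 frame: 14-byte ether header, 20-byte IP header */
	printf("0x%08x\n", pack_vlan_macip_lens(0, 14, 20));
	return (0);
}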
-
-/**********************************************************************
- *
- * Setup work for hardware segmentation offload (TSO) on
- * adapters using advanced tx descriptors
- *
- **********************************************************************/
-static int
-ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp,
- u32 *cmd_type_len, u32 *olinfo_status)
-{
- struct ixgbe_adv_tx_context_desc *TXD;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- u32 mss_l4len_idx = 0, paylen;
- u16 vtag = 0, eh_type;
- int ctxd, ehdrlen, ip_hlen, tcp_hlen;
- struct ether_vlan_header *eh;
-#ifdef INET6
- struct ip6_hdr *ip6;
-#endif
-#ifdef INET
- struct ip *ip;
-#endif
- struct tcphdr *th;
-
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- eh_type = eh->evl_proto;
- } else {
- ehdrlen = ETHER_HDR_LEN;
- eh_type = eh->evl_encap_proto;
- }
-
- switch (ntohs(eh_type)) {
-#ifdef INET6
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- /* XXX-BZ For now we do not pretend to support ext. hdrs. */
- if (ip6->ip6_nxt != IPPROTO_TCP)
- return (ENXIO);
- ip_hlen = sizeof(struct ip6_hdr);
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
- th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
- break;
-#endif
-#ifdef INET
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + ehdrlen);
- if (ip->ip_p != IPPROTO_TCP)
- return (ENXIO);
- ip->ip_sum = 0;
- ip_hlen = ip->ip_hl << 2;
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
- th->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP));
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- /* Tell transmit desc to also do IPv4 checksum. */
- *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
- break;
-#endif
- default:
- panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
- __func__, ntohs(eh_type));
- break;
- }
-
- ctxd = txr->next_avail_desc;
- TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
-
- tcp_hlen = th->th_off << 2;
-
- /* This is used in the transmit desc in encap */
- paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
-
- /* VLAN MACLEN IPLEN */
- if (mp->m_flags & M_VLANTAG) {
- vtag = htole16(mp->m_pkthdr.ether_vtag);
- vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
- }
-
- vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= ip_hlen;
- TXD->vlan_macip_lens = htole32(vlan_macip_lens);
-
- /* ADV DTYPE TUCMD */
- type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
-
- /* MSS L4LEN IDX */
- mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
- TXD->mss_l4len_idx = htole32(mss_l4len_idx);
-
- TXD->seqnum_seed = htole32(0);
-
- if (++ctxd == txr->num_desc)
- ctxd = 0;
-
- txr->tx_avail--;
- txr->next_avail_desc = ctxd;
- *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
- *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
- *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
- ++txr->tso_tx;
- return (0);
-}
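
Two derived values in the TSO path deserve a worked example: paylen, the TCP payload with all headers subtracted (reported to the hardware through olinfo_status), and mss_l4len_idx, which carries the MSS in bits 31:16 and the TCP header length in bits 15:8. A self-contained sketch of the arithmetic, with shift values mirroring ixgbe_type.h:

#include <stdint.h>
#include <stdio.h>

#define ADVTXD_MSS_SHIFT	16	/* MSS, bits 31:16 */
#define ADVTXD_L4LEN_SHIFT	 8	/* L4 header length, bits 15:8 */

int
main(void)
{
	int pkt_len = 65226, ehdrlen = 14, ip_hlen = 20, tcp_hlen = 32;
	uint16_t mss = 1448;

	/* Payload excludes the ethernet, IP and TCP headers */
	uint32_t paylen = pkt_len - ehdrlen - ip_hlen - tcp_hlen;
	uint32_t mss_l4len_idx = ((uint32_t)mss << ADVTXD_MSS_SHIFT) |
	    ((uint32_t)tcp_hlen << ADVTXD_L4LEN_SHIFT);

	printf("paylen=%u mss_l4len_idx=0x%08x\n", paylen, mss_l4len_idx);
	return (0);
}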
-
-#ifdef IXGBE_FDIR
-/*
-** This routine parses packet headers so that Flow
-** Director can make a hashed filter table entry
-** allowing traffic flows to be identified and kept
-** on the same cpu. This would be a performance
-** hit, but we only do it on one out of every
-** IXGBE_FDIR_RATE packets.
-*/
-static void
-ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
-{
- struct adapter *adapter = txr->adapter;
- struct ix_queue *que;
- struct ip *ip;
- struct tcphdr *th;
- struct udphdr *uh;
- struct ether_vlan_header *eh;
- union ixgbe_atr_hash_dword input = {.dword = 0};
- union ixgbe_atr_hash_dword common = {.dword = 0};
- int ehdrlen, ip_hlen;
- u16 etype;
-
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- etype = eh->evl_proto;
- } else {
- ehdrlen = ETHER_HDR_LEN;
- etype = eh->evl_encap_proto;
- }
-
- /* Only handling IPv4 */
- if (etype != htons(ETHERTYPE_IP))
- return;
-
- ip = (struct ip *)(mp->m_data + ehdrlen);
- ip_hlen = ip->ip_hl << 2;
-
- /* check if we're UDP or TCP */
- switch (ip->ip_p) {
- case IPPROTO_TCP:
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
- /* src and dst are inverted */
- common.port.dst ^= th->th_sport;
- common.port.src ^= th->th_dport;
- input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
- break;
- case IPPROTO_UDP:
- uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
- /* src and dst are inverted */
- common.port.dst ^= uh->uh_sport;
- common.port.src ^= uh->uh_dport;
- input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
- break;
- default:
- return;
- }
-
- input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
- if (mp->m_pkthdr.ether_vtag)
- common.flex_bytes ^= htons(ETHERTYPE_VLAN);
- else
- common.flex_bytes ^= etype;
- common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
-
- que = &adapter->queues[txr->me];
- /*
- ** This assumes the Rx queue and Tx
- ** queue are bound to the same CPU
- */
- ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
- input, common, que->msix);
-}
-#endif /* IXGBE_FDIR */
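
A condensed model of the folding ixgbe_atr() performs: for a TCP or UDP IPv4 flow it XORs the (deliberately swapped) L4 ports into one dword and both IP addresses into another, so the two directions of a connection produce related signatures. A user-space sketch under those assumptions; the real union layouts are defined in ixgbe_type.h:

#include <stdint.h>
#include <stdio.h>

struct atr_common {
	uint16_t port_src, port_dst;	/* folded L4 ports */
	uint32_t ip;			/* folded IP addresses */
};

static struct atr_common
atr_fold(uint32_t ip_src, uint32_t ip_dst, uint16_t sport, uint16_t dport)
{
	struct atr_common c = { 0, 0, 0 };

	/* src and dst are deliberately swapped, as in ixgbe_atr() */
	c.port_dst ^= sport;
	c.port_src ^= dport;
	c.ip ^= ip_src ^ ip_dst;
	return (c);
}

int
main(void)
{
	struct atr_common c = atr_fold(0x0a000001, 0x0a000002, 12345, 80);

	printf("ports %u/%u ip 0x%08x\n", c.port_src, c.port_dst, c.ip);
	return (0);
}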
-
-/**********************************************************************
- *
- * Examine each tx_buffer in the used queue. If the hardware is done
- * processing the packet then free associated resources. The
- * tx_buffer is put back on the free queue.
- *
- **********************************************************************/
-static void
-ixgbe_txeof(struct tx_ring *txr)
-{
-#ifdef DEV_NETMAP
- struct adapter *adapter = txr->adapter;
- struct ifnet *ifp = adapter->ifp;
-#endif
- u32 work, processed = 0;
- u16 limit = txr->process_limit;
- struct ixgbe_tx_buf *buf;
- union ixgbe_adv_tx_desc *txd;
-
- mtx_assert(&txr->tx_mtx, MA_OWNED);
-
-#ifdef DEV_NETMAP
- if (ifp->if_capenable & IFCAP_NETMAP) {
- struct netmap_adapter *na = NA(ifp);
- struct netmap_kring *kring = &na->tx_rings[txr->me];
- txd = txr->tx_base;
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_POSTREAD);
- /*
- * In netmap mode, all the work is done in the context
- * of the client thread. Interrupt handlers only wake up
- * clients, which may be sleeping on individual rings
- * or on a global resource for all rings.
- * To implement tx interrupt mitigation, we wake up the client
- * thread roughly every half ring, even if the NIC interrupts
- * more frequently. This is implemented as follows:
- * - ixgbe_txsync() sets kring->nr_kflags with the index of
- * the slot that should wake up the thread (nkr_num_slots
- * means the user thread should not be woken up);
- * - the driver ignores tx interrupts unless netmap_mitigate=0
- * or the slot has the DD bit set.
- */
- if (!netmap_mitigate ||
- (kring->nr_kflags < kring->nkr_num_slots &&
- txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
- netmap_tx_irq(ifp, txr->me);
- }
- return;
- }
-#endif /* DEV_NETMAP */
-
- if (txr->tx_avail == txr->num_desc) {
- txr->queue_status = IXGBE_QUEUE_IDLE;
- return;
- }
-
- /* Get work starting point */
- work = txr->next_to_clean;
- buf = &txr->tx_buffers[work];
- txd = &txr->tx_base[work];
- work -= txr->num_desc; /* The distance to ring end */
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_POSTREAD);
-
- do {
- union ixgbe_adv_tx_desc *eop= buf->eop;
- if (eop == NULL) /* No work */
- break;
-
- if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
- break; /* I/O not complete */
-
- if (buf->m_head) {
- txr->bytes +=
- buf->m_head->m_pkthdr.len;
- bus_dmamap_sync(txr->txtag,
- buf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag,
- buf->map);
- m_freem(buf->m_head);
- buf->m_head = NULL;
- buf->map = NULL;
- }
- buf->eop = NULL;
- ++txr->tx_avail;
-
- /* We clean the range if multi segment */
- while (txd != eop) {
- ++txd;
- ++buf;
- ++work;
- /* wrap the ring? */
- if (__predict_false(!work)) {
- work -= txr->num_desc;
- buf = txr->tx_buffers;
- txd = txr->tx_base;
- }
- if (buf->m_head) {
- txr->bytes +=
- buf->m_head->m_pkthdr.len;
- bus_dmamap_sync(txr->txtag,
- buf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag,
- buf->map);
- m_freem(buf->m_head);
- buf->m_head = NULL;
- buf->map = NULL;
- }
- ++txr->tx_avail;
- buf->eop = NULL;
-
- }
- ++txr->packets;
- ++processed;
- txr->watchdog_time = ticks;
-
- /* Try the next packet */
- ++txd;
- ++buf;
- ++work;
- /* reset with a wrap */
- if (__predict_false(!work)) {
- work -= txr->num_desc;
- buf = txr->tx_buffers;
- txd = txr->tx_base;
- }
- prefetch(txd);
- } while (__predict_true(--limit));
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- work += txr->num_desc;
- txr->next_to_clean = work;
-
- /*
-	** Watchdog calculation: we know there's
-	** work outstanding or the first return
-	** would have been taken, so nothing processed
-	** for too long indicates a hang.
- */
- if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
- txr->queue_status = IXGBE_QUEUE_HUNG;
-
- if (txr->tx_avail == txr->num_desc)
- txr->queue_status = IXGBE_QUEUE_IDLE;
-
- return;
-}
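
The cleanup loop above relies on a small indexing trick: 'work' is biased by subtracting num_desc up front, so it counts through negative values and reaches zero exactly at the ring boundary, making the wrap test a cheap '!work'. A runnable sketch of the same walk:

#include <stdio.h>

int
main(void)
{
	const int num_desc = 8;
	int start = 5, steps = 6;

	/* Bias the index: it is now the (negative) distance to ring end */
	int work = start - num_desc;

	for (int n = 0; n < steps; n++) {
		++work;
		if (!work)		/* reached the end: wrap */
			work -= num_desc;
		printf("slot %d\n", work + num_desc);
	}
	/* walks slots 6, 7, 0, 1, 2, 3 */
	return (0);
}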
-
-/*********************************************************************
- *
- * Refresh mbuf buffers for RX descriptor rings
- * - now keeps its own state so discards due to resource
- *  exhaustion are unnecessary; if an mbuf cannot be obtained
- *  it just returns, keeping its placeholder, so it can simply
- *  be recalled to try again.
- *
- **********************************************************************/
-static void
-ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
-{
- struct adapter *adapter = rxr->adapter;
- bus_dma_segment_t seg[1];
- struct ixgbe_rx_buf *rxbuf;
- struct mbuf *mp;
- int i, j, nsegs, error;
- bool refreshed = FALSE;
-
- i = j = rxr->next_to_refresh;
- /* Control the loop with one beyond */
- if (++j == rxr->num_desc)
- j = 0;
-
- while (j != limit) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->buf == NULL) {
- mp = m_getjcl(M_NOWAIT, MT_DATA,
- M_PKTHDR, rxr->mbuf_sz);
- if (mp == NULL)
- goto update;
- if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
- m_adj(mp, ETHER_ALIGN);
- } else
- mp = rxbuf->buf;
-
- mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
-
- /* If we're dealing with an mbuf that was copied rather
- * than replaced, there's no need to go through busdma.
- */
- if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- rxbuf->pmap, mp, seg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("Refresh mbufs: payload dmamap load"
- " failure - %d\n", error);
- m_free(mp);
- rxbuf->buf = NULL;
- goto update;
- }
- rxbuf->buf = mp;
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_PREREAD);
- rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
- htole64(seg[0].ds_addr);
- } else {
- rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
- rxbuf->flags &= ~IXGBE_RX_COPY;
- }
-
- refreshed = TRUE;
- /* Next is precalculated */
- i = j;
- rxr->next_to_refresh = i;
- if (++j == rxr->num_desc)
- j = 0;
- }
-update:
- if (refreshed) /* Update hardware tail index */
- IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_RDT(rxr->me), rxr->next_to_refresh);
- return;
-}
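
The refresh loop above steers itself with two indices: i is the slot being refreshed and j is always one ahead, so the loop stops before the next slot would collide with 'limit' and the ring is never filled completely. A minimal sketch of the pattern:

#include <stdio.h>

int
main(void)
{
	const int num_desc = 8;
	int next_to_refresh = 6, limit = 3;

	int i = next_to_refresh;
	int j = (i + 1) % num_desc;	/* control the loop with one beyond */

	while (j != limit) {
		printf("refresh slot %d\n", i);
		i = j;			/* next is precalculated */
		j = (j + 1) % num_desc;
	}
	printf("tail left at %d\n", i);	/* one slot always stays empty */
	return (0);
}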
-
-/*********************************************************************
- *
- * Allocate memory for rx_buffer structures. Since we use one
- * rx_buffer per received packet, the maximum number of rx_buffers
- * that we'll need is equal to the number of receive descriptors
- * that we've allocated.
- *
- **********************************************************************/
-static int
-ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- device_t dev = adapter->dev;
- struct ixgbe_rx_buf *rxbuf;
- int i, bsize, error;
-
- bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
- if (!(rxr->rx_buffers =
- (struct ixgbe_rx_buf *) malloc(bsize,
- M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate rx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MJUM16BYTES, /* maxsize */
- 1, /* nsegments */
- MJUM16BYTES, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->ptag))) {
- device_printf(dev, "Unable to create RX DMA tag\n");
- goto fail;
- }
-
- for (i = 0; i < rxr->num_desc; i++, rxbuf++) {
- rxbuf = &rxr->rx_buffers[i];
- error = bus_dmamap_create(rxr->ptag,
- BUS_DMA_NOWAIT, &rxbuf->pmap);
- if (error) {
- device_printf(dev, "Unable to create RX dma map\n");
- goto fail;
- }
- }
-
- return (0);
-
-fail:
- /* Frees all, but can handle partial completion */
- ixgbe_free_receive_structures(adapter);
- return (error);
-}
-
-/*
-** Used to detect a descriptor that has
-** been merged by Hardware RSC.
-*/
-static inline u32
-ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
-{
- return (le32toh(rx->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
-}
-
-/*********************************************************************
- *
- * Initialize Hardware RSC (LRO) feature on 82599
- * for an RX ring, this is toggled by the LRO capability
- * even though it is transparent to the stack.
- *
- * NOTE: since this HW feature only works with IPv4, and
- *  our testing has shown soft LRO to be as effective,
- *  I have decided to disable this by default.
- *
- **********************************************************************/
-static void
-ixgbe_setup_hw_rsc(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 rscctrl, rdrxctl;
-
- /* If turning LRO/RSC off we need to disable it */
- if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
- rscctrl &= ~IXGBE_RSCCTL_RSCEN;
- return;
- }
-
- rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
-#ifdef DEV_NETMAP /* crcstrip is optional in netmap */
- if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
-#endif /* DEV_NETMAP */
- rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
- rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
-
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
- rscctrl |= IXGBE_RSCCTL_RSCEN;
- /*
- ** Limit the total number of descriptors that
- ** can be combined, so it does not exceed 64K
- */
- if (rxr->mbuf_sz == MCLBYTES)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (rxr->mbuf_sz == MJUMPAGESIZE)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
- else if (rxr->mbuf_sz == MJUM9BYTES)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
- else /* Using 16K cluster */
- rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
-
- IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
-
- /* Enable TCP header recognition */
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
- (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
- IXGBE_PSRTYPE_TCPHDR));
-
- /* Disable RSC for ACK packets */
- IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
- (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
-
- rxr->hw_rsc = TRUE;
-}
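
The MAXDESC selection above keeps mbuf_sz times the coalesced descriptor count at or below the hardware's 64KB RSC limit. A quick sketch of the choice, assuming the usual FreeBSD cluster sizes (2KB, 4KB, 9KB, 16KB):

#include <stdio.h>

static int
rsc_maxdesc(int mbuf_sz)
{
	/* Largest supported descriptor count (16/8/4/1) whose
	 * total buffer size stays at or below 64KB. */
	if (mbuf_sz == 2048)
		return (16);	/* 16 * 2KB = 32KB */
	else if (mbuf_sz == 4096)
		return (8);	/*  8 * 4KB = 32KB */
	else if (mbuf_sz == 9216)
		return (4);	/*  4 * 9KB = 36KB */
	return (1);		/* 16KB clusters: no coalescing headroom */
}

int
main(void)
{
	int sizes[] = { 2048, 4096, 9216, 16384 };

	for (int k = 0; k < 4; k++)
		printf("%5d -> %d descriptors\n", sizes[k],
		    rsc_maxdesc(sizes[k]));
	return (0);
}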
-
-
-static void
-ixgbe_free_receive_ring(struct rx_ring *rxr)
-{
- struct ixgbe_rx_buf *rxbuf;
- int i;
-
- for (i = 0; i < rxr->num_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->buf != NULL) {
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
- rxbuf->buf->m_flags |= M_PKTHDR;
- m_freem(rxbuf->buf);
- rxbuf->buf = NULL;
- rxbuf->flags = 0;
- }
- }
-}
-
-
-/*********************************************************************
- *
- * Initialize a receive ring and its buffers.
- *
- **********************************************************************/
-static int
-ixgbe_setup_receive_ring(struct rx_ring *rxr)
-{
- struct adapter *adapter;
- struct ifnet *ifp;
- device_t dev;
- struct ixgbe_rx_buf *rxbuf;
- bus_dma_segment_t seg[1];
- struct lro_ctrl *lro = &rxr->lro;
- int rsize, nsegs, error = 0;
-#ifdef DEV_NETMAP
- struct netmap_adapter *na = NA(rxr->adapter->ifp);
- struct netmap_slot *slot;
-#endif /* DEV_NETMAP */
-
- adapter = rxr->adapter;
- ifp = adapter->ifp;
- dev = adapter->dev;
-
- /* Clear the ring contents */
- IXGBE_RX_LOCK(rxr);
-#ifdef DEV_NETMAP
- /* same as in ixgbe_setup_transmit_ring() */
- slot = netmap_reset(na, NR_RX, rxr->me, 0);
-#endif /* DEV_NETMAP */
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
- bzero((void *)rxr->rx_base, rsize);
- /* Cache the size */
- rxr->mbuf_sz = adapter->rx_mbuf_sz;
-
- /* Free current RX buffer structs and their mbufs */
- ixgbe_free_receive_ring(rxr);
-
- /* Now replenish the mbufs */
- for (int j = 0; j != rxr->num_desc; ++j) {
- struct mbuf *mp;
-
- rxbuf = &rxr->rx_buffers[j];
-#ifdef DEV_NETMAP
- /*
- * In netmap mode, fill the map and set the buffer
- * address in the NIC ring, considering the offset
- * between the netmap and NIC rings (see comment in
- * ixgbe_setup_transmit_ring() ). No need to allocate
- * an mbuf, so end the block with a continue;
- */
- if (slot) {
- int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
- uint64_t paddr;
- void *addr;
-
- addr = PNMB(na, slot + sj, &paddr);
- netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
- /* Update descriptor and the cached value */
- rxr->rx_base[j].read.pkt_addr = htole64(paddr);
- rxbuf->addr = htole64(paddr);
- continue;
- }
-#endif /* DEV_NETMAP */
- rxbuf->flags = 0;
- rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA,
- M_PKTHDR, adapter->rx_mbuf_sz);
- if (rxbuf->buf == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- mp = rxbuf->buf;
- mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- rxbuf->pmap, mp, seg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0)
- goto fail;
- bus_dmamap_sync(rxr->ptag,
- rxbuf->pmap, BUS_DMASYNC_PREREAD);
- /* Update the descriptor and the cached value */
- rxr->rx_base[j].read.pkt_addr = htole64(seg[0].ds_addr);
- rxbuf->addr = htole64(seg[0].ds_addr);
- }
-
-
- /* Setup our descriptor indices */
- rxr->next_to_check = 0;
- rxr->next_to_refresh = 0;
- rxr->lro_enabled = FALSE;
- rxr->rx_copies = 0;
- rxr->rx_bytes = 0;
- rxr->vtag_strip = FALSE;
-
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /*
- ** Now set up the LRO interface:
- */
- if (ixgbe_rsc_enable)
- ixgbe_setup_hw_rsc(rxr);
- else if (ifp->if_capenable & IFCAP_LRO) {
- int err = tcp_lro_init(lro);
- if (err) {
- device_printf(dev, "LRO Initialization failed!\n");
- goto fail;
- }
- INIT_DEBUGOUT("RX Soft LRO Initialized\n");
- rxr->lro_enabled = TRUE;
- lro->ifp = adapter->ifp;
- }
-
- IXGBE_RX_UNLOCK(rxr);
- return (0);
-
-fail:
- ixgbe_free_receive_ring(rxr);
- IXGBE_RX_UNLOCK(rxr);
- return (error);
-}
-
-/*********************************************************************
- *
- * Initialize all receive rings.
- *
- **********************************************************************/
-static int
-ixgbe_setup_receive_structures(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
- int j;
-
- for (j = 0; j < adapter->num_queues; j++, rxr++)
- if (ixgbe_setup_receive_ring(rxr))
- goto fail;
-
- return (0);
-fail:
- /*
-	 * Free RX buffers allocated so far; we will only handle
-	 * the rings that completed, since the failing case will have
-	 * cleaned up for itself. 'j' failed, so it's the terminus.
- */
- for (int i = 0; i < j; ++i) {
- rxr = &adapter->rx_rings[i];
- ixgbe_free_receive_ring(rxr);
- }
-
- return (ENOBUFS);
-}
-
static void
ixgbe_initialise_rss_mapping(struct adapter *adapter)
{
@@ -4340,7 +2817,7 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
struct rx_ring *rxr = adapter->rx_rings;
struct ixgbe_hw *hw = &adapter->hw;
struct ifnet *ifp = adapter->ifp;
- u32 bufsz, rxctrl, fctrl, srrctl, rxcsum;
+ u32 bufsz, fctrl, srrctl, rxcsum;
u32 hlreg;
@@ -4348,9 +2825,7 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
* Make sure receives are disabled while
* setting up the descriptor ring
*/
- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
- rxctrl & ~IXGBE_RXCTRL_RXEN);
+ ixgbe_disable_rx(hw);
/* Enable broadcasts */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -4401,7 +2876,7 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
* this code is moved elsewhere.
*/
if (adapter->num_queues > 1 &&
- adapter->fc == ixgbe_fc_none) {
+ adapter->hw.fc.requested_mode == ixgbe_fc_none) {
srrctl |= IXGBE_SRRCTL_DROP_EN;
} else {
srrctl &= ~IXGBE_SRRCTL_DROP_EN;
@@ -4415,6 +2890,9 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
/* Set the processing limit */
rxr->process_limit = ixgbe_rx_process_limit;
+
+ /* Set the driver rx tail address */
+ rxr->tail = IXGBE_RDT(rxr->me);
}
if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
@@ -4445,449 +2923,6 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
return;
}
-/*********************************************************************
- *
- * Free all receive rings.
- *
- **********************************************************************/
-static void
-ixgbe_free_receive_structures(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
-
- INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
-
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- struct lro_ctrl *lro = &rxr->lro;
- ixgbe_free_receive_buffers(rxr);
- /* Free LRO memory */
- tcp_lro_free(lro);
- /* Free the ring memory as well */
- ixgbe_dma_free(adapter, &rxr->rxdma);
- }
-
- free(adapter->rx_rings, M_DEVBUF);
-}
-
-
-/*********************************************************************
- *
- * Free receive ring data structures
- *
- **********************************************************************/
-static void
-ixgbe_free_receive_buffers(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- struct ixgbe_rx_buf *rxbuf;
-
- INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
-
- /* Cleanup any existing buffers */
- if (rxr->rx_buffers != NULL) {
- for (int i = 0; i < adapter->num_rx_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->buf != NULL) {
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
- rxbuf->buf->m_flags |= M_PKTHDR;
- m_freem(rxbuf->buf);
- }
- rxbuf->buf = NULL;
- if (rxbuf->pmap != NULL) {
- bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
- rxbuf->pmap = NULL;
- }
- }
- if (rxr->rx_buffers != NULL) {
- free(rxr->rx_buffers, M_DEVBUF);
- rxr->rx_buffers = NULL;
- }
- }
-
- if (rxr->ptag != NULL) {
- bus_dma_tag_destroy(rxr->ptag);
- rxr->ptag = NULL;
- }
-
- return;
-}
-
-static __inline void
-ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
-{
-
- /*
-	 * At the moment LRO is only for IP/TCP packets, and the TCP
-	 * checksum of the packet should be computed by hardware. Also,
-	 * it should not have a VLAN tag in the ethernet header. In case
-	 * of IPv6 we do not yet support ext. hdrs.
- */
- if (rxr->lro_enabled &&
- (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
- (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
- ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
- (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
- (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
- (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
- (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
- (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
- /*
- * Send to the stack if:
-	 *  - LRO not enabled, or
-	 *  - no LRO resources, or
-	 *  - lro enqueue fails
- */
- if (rxr->lro.lro_cnt != 0)
- if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
- return;
- }
- IXGBE_RX_UNLOCK(rxr);
- (*ifp->if_input)(ifp, m);
- IXGBE_RX_LOCK(rxr);
-}
-
-static __inline void
-ixgbe_rx_discard(struct rx_ring *rxr, int i)
-{
- struct ixgbe_rx_buf *rbuf;
-
- rbuf = &rxr->rx_buffers[i];
-
-
- /*
- ** With advanced descriptors the writeback
-	** clobbers the buffer addrs, so it's easier
- ** to just free the existing mbufs and take
- ** the normal refresh path to get new buffers
- ** and mapping.
- */
-
- if (rbuf->fmp != NULL) {/* Partial chain ? */
- rbuf->fmp->m_flags |= M_PKTHDR;
- m_freem(rbuf->fmp);
- rbuf->fmp = NULL;
- rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
- } else if (rbuf->buf) {
- m_free(rbuf->buf);
- rbuf->buf = NULL;
- }
-
- rbuf->flags = 0;
-
- return;
-}
-
-
-/*********************************************************************
- *
- * This routine executes in interrupt context. It replenishes
- * the mbufs in the descriptor ring and sends data which has been
- * DMA'ed into host memory to the upper layer.
- *
- * We loop at most count times if count is > 0, or until done if
- * count < 0.
- *
- * Return TRUE for more work, FALSE for all clean.
- *********************************************************************/
-static bool
-ixgbe_rxeof(struct ix_queue *que)
-{
- struct adapter *adapter = que->adapter;
- struct rx_ring *rxr = que->rxr;
- struct ifnet *ifp = adapter->ifp;
- struct lro_ctrl *lro = &rxr->lro;
- struct lro_entry *queued;
- int i, nextp, processed = 0;
- u32 staterr = 0;
- u16 count = rxr->process_limit;
- union ixgbe_adv_rx_desc *cur;
- struct ixgbe_rx_buf *rbuf, *nbuf;
- u16 pkt_info;
-
- IXGBE_RX_LOCK(rxr);
-
-#ifdef DEV_NETMAP
- /* Same as the txeof routine: wakeup clients on intr. */
- if (netmap_rx_irq(ifp, rxr->me, &processed)) {
- IXGBE_RX_UNLOCK(rxr);
- return (FALSE);
- }
-#endif /* DEV_NETMAP */
-
- for (i = rxr->next_to_check; count != 0;) {
- struct mbuf *sendmp, *mp;
- u32 rsc, ptype;
- u16 len;
- u16 vtag = 0;
- bool eop;
-
- /* Sync the ring. */
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- cur = &rxr->rx_base[i];
- staterr = le32toh(cur->wb.upper.status_error);
- pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
-
- if ((staterr & IXGBE_RXD_STAT_DD) == 0)
- break;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
-
- count--;
- sendmp = NULL;
- nbuf = NULL;
- rsc = 0;
- cur->wb.upper.status_error = 0;
- rbuf = &rxr->rx_buffers[i];
- mp = rbuf->buf;
-
- len = le16toh(cur->wb.upper.length);
- ptype = le32toh(cur->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_PKTTYPE_MASK;
- eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
-
- /* Make sure bad packets are discarded */
- if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
- rxr->rx_discarded++;
- ixgbe_rx_discard(rxr, i);
- goto next_desc;
- }
-
- /*
-	** On the 82599, which supports a hardware
-	** LRO (called HW RSC), packets need
-	** not be fragmented across sequential
-	** descriptors; rather, the next descriptor
-	** is indicated in bits of the descriptor.
-	** This also means that we might process
-	** more than one packet at a time, something
-	** that has never been true before; it
-	** required eliminating global chain pointers
-	** in favor of what we are doing here. -jfv
- */
- if (!eop) {
- /*
- ** Figure out the next descriptor
- ** of this frame.
- */
- if (rxr->hw_rsc == TRUE) {
- rsc = ixgbe_rsc_count(cur);
- rxr->rsc_num += (rsc - 1);
- }
- if (rsc) { /* Get hardware index */
- nextp = ((staterr &
- IXGBE_RXDADV_NEXTP_MASK) >>
- IXGBE_RXDADV_NEXTP_SHIFT);
- } else { /* Just sequential */
- nextp = i + 1;
- if (nextp == adapter->num_rx_desc)
- nextp = 0;
- }
- nbuf = &rxr->rx_buffers[nextp];
- prefetch(nbuf);
- }
- /*
- ** Rather than using the fmp/lmp global pointers
- ** we now keep the head of a packet chain in the
- ** buffer struct and pass this along from one
- ** descriptor to the next, until we get EOP.
- */
- mp->m_len = len;
- /*
-		** See if there is a stored head on this
-		** buffer; if so, this is a continuation fragment
- */
- sendmp = rbuf->fmp;
- if (sendmp != NULL) { /* secondary frag */
- rbuf->buf = rbuf->fmp = NULL;
- mp->m_flags &= ~M_PKTHDR;
- sendmp->m_pkthdr.len += mp->m_len;
- } else {
- /*
- * Optimize. This might be a small packet,
- * maybe just a TCP ACK. Do a fast copy that
- * is cache aligned into a new mbuf, and
- * leave the old mbuf+cluster for re-use.
- */
- if (eop && len <= IXGBE_RX_COPY_LEN) {
- sendmp = m_gethdr(M_NOWAIT, MT_DATA);
- if (sendmp != NULL) {
- sendmp->m_data +=
- IXGBE_RX_COPY_ALIGN;
- ixgbe_bcopy(mp->m_data,
- sendmp->m_data, len);
- sendmp->m_len = len;
- rxr->rx_copies++;
- rbuf->flags |= IXGBE_RX_COPY;
- }
- }
- if (sendmp == NULL) {
- rbuf->buf = rbuf->fmp = NULL;
- sendmp = mp;
- }
-
- /* first desc of a non-ps chain */
- sendmp->m_flags |= M_PKTHDR;
- sendmp->m_pkthdr.len = mp->m_len;
- }
- ++processed;
-
- /* Pass the head pointer on */
- if (eop == 0) {
- nbuf->fmp = sendmp;
- sendmp = NULL;
- mp->m_next = nbuf->buf;
- } else { /* Sending this frame */
- sendmp->m_pkthdr.rcvif = ifp;
- rxr->rx_packets++;
- /* capture data for AIM */
- rxr->bytes += sendmp->m_pkthdr.len;
- rxr->rx_bytes += sendmp->m_pkthdr.len;
- /* Process vlan info */
- if ((rxr->vtag_strip) &&
- (staterr & IXGBE_RXD_STAT_VP))
- vtag = le16toh(cur->wb.upper.vlan);
- if (vtag) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
- ixgbe_rx_checksum(staterr, sendmp, ptype);
-#if __FreeBSD_version >= 800000
-#ifdef RSS
- sendmp->m_pkthdr.flowid =
- le32toh(cur->wb.lower.hi_dword.rss);
- switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
- case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV4);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV4:
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV4);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV6);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV6_EX);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6:
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV6);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV6_EX);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV4);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV6);
- break;
- case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV6_EX);
- break;
- default:
- /* XXX fallthrough */
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
- break;
- }
-#else /* RSS */
- sendmp->m_pkthdr.flowid = que->msix;
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
-#endif /* RSS */
-#endif /* FreeBSD_version */
- }
-next_desc:
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /* Advance our pointers to the next descriptor. */
- if (++i == rxr->num_desc)
- i = 0;
-
- /* Now send to the stack or do LRO */
- if (sendmp != NULL) {
- rxr->next_to_check = i;
- ixgbe_rx_input(rxr, ifp, sendmp, ptype);
- i = rxr->next_to_check;
- }
-
- /* Every 8 descriptors we go to refresh mbufs */
- if (processed == 8) {
- ixgbe_refresh_mbufs(rxr, i);
- processed = 0;
- }
- }
-
- /* Refresh any remaining buf structs */
- if (ixgbe_rx_unrefreshed(rxr))
- ixgbe_refresh_mbufs(rxr, i);
-
- rxr->next_to_check = i;
-
- /*
- * Flush any outstanding LRO work
- */
- while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
- SLIST_REMOVE_HEAD(&lro->lro_active, next);
- tcp_lro_flush(lro, queued);
- }
-
- IXGBE_RX_UNLOCK(rxr);
-
- /*
- ** Still have cleaning to do?
- */
- if ((staterr & IXGBE_RXD_STAT_DD) != 0)
- return (TRUE);
- else
- return (FALSE);
-}
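
One receive-path detail worth a worked example is the small-packet copy seen above: frames at or below IXGBE_RX_COPY_LEN are copied into a fresh header mbuf (offset so the payload stays cache-aligned) and the original cluster is recycled in place, skipping a busdma reload. A rough user-space sketch of the decision; the cutoff and alignment values here are illustrative, not the driver's constants:

#include <stdio.h>
#include <string.h>

#define RX_COPY_LEN	160	/* illustrative small-packet cutoff */
#define RX_COPY_ALIGN	8	/* illustrative alignment pad */

/* Returns 1 if the frame was copied (cluster can be recycled in place) */
static int
rx_maybe_copy(const char *cluster, int len, char *small, int small_sz)
{
	if (len > RX_COPY_LEN || len + RX_COPY_ALIGN > small_sz)
		return (0);		/* hand up the cluster itself */
	memcpy(small + RX_COPY_ALIGN, cluster, len);
	return (1);
}

int
main(void)
{
	char cluster[2048] = "tiny ack";
	char small[256];

	printf("copied=%d\n", rx_maybe_copy(cluster, 64, small,
	    sizeof(small)));
	printf("copied=%d\n", rx_maybe_copy(cluster, 1500, small,
	    sizeof(small)));
	return (0);
}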
-
-
-/*********************************************************************
- *
- * Verify that the hardware indicated that the checksum is valid.
- * Inform the stack about the status of the checksum so that the
- * stack doesn't spend time verifying it.
- *
- *********************************************************************/
-static void
-ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
-{
- u16 status = (u16) staterr;
- u8 errors = (u8) (staterr >> 24);
- bool sctp = FALSE;
-
- if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
- (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
- sctp = TRUE;
-
- if (status & IXGBE_RXD_STAT_IPCS) {
- if (!(errors & IXGBE_RXD_ERR_IPE)) {
- /* IP Checksum Good */
- mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
- mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
-
- } else
- mp->m_pkthdr.csum_flags = 0;
- }
- if (status & IXGBE_RXD_STAT_L4CS) {
- u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
-#if __FreeBSD_version >= 800000
- if (sctp)
- type = CSUM_SCTP_VALID;
-#endif
- if (!(errors & IXGBE_RXD_ERR_TCPE)) {
- mp->m_pkthdr.csum_flags |= type;
- if (!sctp)
- mp->m_pkthdr.csum_data = htons(0xffff);
- }
- }
- return;
-}
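
The handler above splits the 32-bit staterr word into a 16-bit status field (the low bits) and an 8-bit error field (bits 31:24) before deciding which checksum flags to report. A standalone sketch of the decode; the bit values shown follow the advanced descriptor layout used here but should be treated as illustrative:

#include <stdint.h>
#include <stdio.h>

#define RXD_STAT_IPCS	0x40	/* IP checksum was calculated */
#define RXD_STAT_L4CS	0x20	/* L4 checksum was calculated */
#define RXD_ERR_IPE	0x80	/* IP checksum error (in the error byte) */
#define RXD_ERR_TCPE	0x40	/* L4 checksum error */

int
main(void)
{
	uint32_t staterr = 0x00000060;	/* IPCS | L4CS, no errors */
	uint16_t status = (uint16_t)staterr;		/* low 16 bits */
	uint8_t errors = (uint8_t)(staterr >> 24);	/* bits 31:24 */

	if ((status & RXD_STAT_IPCS) && !(errors & RXD_ERR_IPE))
		printf("IP checksum good\n");
	if ((status & RXD_STAT_L4CS) && !(errors & RXD_ERR_TCPE))
		printf("L4 checksum good\n");
	return (0);
}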
-
/*
 ** This routine is run via a vlan config EVENT,
@@ -5006,24 +3041,32 @@ ixgbe_enable_intr(struct adapter *adapter)
mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
/* Enable Fan Failure detection */
if (hw->device_id == IXGBE_DEV_ID_82598AT)
- mask |= IXGBE_EIMS_GPI_SDP1;
+ mask |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
mask |= IXGBE_EIMS_ECC;
- mask |= IXGBE_EIMS_GPI_SDP0;
- mask |= IXGBE_EIMS_GPI_SDP1;
- mask |= IXGBE_EIMS_GPI_SDP2;
+ /* Temperature sensor on some adapters */
+ mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
+ /* SFP+ (RX_LOS_N & MOD_ABS_N) */
+ mask |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
+ mask |= IXGBE_EIMS_GPI_SDP2_BY_MAC(hw);
#ifdef IXGBE_FDIR
mask |= IXGBE_EIMS_FLOW_DIR;
#endif
break;
case ixgbe_mac_X540:
- mask |= IXGBE_EIMS_ECC;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_X550EM_x:
/* Detect if Thermal Sensor is enabled */
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
if (fwsm & IXGBE_FWSM_TS_ENABLED)
mask |= IXGBE_EIMS_TS;
+ /* XXX: Which SFP mode line does this look at? */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
+ mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
mask |= IXGBE_EIMS_FLOW_DIR;
#endif
@@ -5072,26 +3115,6 @@ ixgbe_disable_intr(struct adapter *adapter)
return;
}
-u16
-ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
-{
- u16 value;
-
- value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
- reg, 2);
-
- return (value);
-}
-
-void
-ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
-{
- pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
- reg, value, 2);
-
- return;
-}
-
/*
** Get the width and transaction speed of
** the slot this adapter is plugged into.
@@ -5107,6 +3130,10 @@ ixgbe_get_slot_info(struct ixgbe_hw *hw)
/* For most devices simply call the shared code routine */
if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
ixgbe_get_bus_info(hw);
+ /* These devices don't use PCI-E */
+ if (hw->mac.type == ixgbe_mac_X550EM_x
+ || hw->mac.type == ixgbe_mac_X550EM_a)
+ return;
goto display;
}
@@ -5228,6 +3255,9 @@ ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_X550EM_x:
if (type == -1) { /* MISC IVAR */
index = (entry & 1) * 8;
ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
@@ -5269,7 +3299,7 @@ ixgbe_configure_ivars(struct adapter *adapter)
}
/* For the Link interrupt */
- ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
+ ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
}
/*
@@ -5357,12 +3387,22 @@ ixgbe_handle_msf(void *context, int pending)
struct ixgbe_hw *hw = &adapter->hw;
u32 autoneg;
bool negotiate;
+ int err;
+
+ err = hw->phy.ops.identify_sfp(hw);
+ if (!err) {
+ ixgbe_setup_optics(adapter);
+ INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
+ }
autoneg = hw->phy.autoneg_advertised;
if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, autoneg, TRUE);
+
+ ifmedia_removeall(&adapter->media);
+ ixgbe_add_media_types(adapter);
return;
}
@@ -5397,12 +3437,13 @@ static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 missed_rx = 0, bprc, lxon, lxoff, total;
+ u32 missed_rx = 0, bprc, lxon, lxoff, total;
+ u64 total_missed_rx = 0;
- adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
- adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
- adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
- adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
+ adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+ adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+ adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
+ adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
/*
** Note: these are for the 8 possible traffic classes,
@@ -5415,57 +3456,60 @@ ixgbe_update_stats_counters(struct adapter *adapter)
/* missed_rx tallies misses for the gprc workaround */
missed_rx += mp;
/* global total per queue */
- adapter->stats.mpc[i] += mp;
+ adapter->stats.pf.mpc[i] += mp;
+ /* total for stats display */
+ total_missed_rx += adapter->stats.pf.mpc[i];
if (hw->mac.type == ixgbe_mac_82598EB) {
- adapter->stats.rnbc[i] +=
+ adapter->stats.pf.rnbc[i] +=
IXGBE_READ_REG(hw, IXGBE_RNBC(i));
- adapter->stats.qbtc[i] +=
+ adapter->stats.pf.qbtc[i] +=
IXGBE_READ_REG(hw, IXGBE_QBTC(i));
- adapter->stats.qbrc[i] +=
+ adapter->stats.pf.qbrc[i] +=
IXGBE_READ_REG(hw, IXGBE_QBRC(i));
- adapter->stats.pxonrxc[i] +=
+ adapter->stats.pf.pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
} else
- adapter->stats.pxonrxc[i] +=
+ adapter->stats.pf.pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
- adapter->stats.pxontxc[i] +=
+ adapter->stats.pf.pxontxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- adapter->stats.pxofftxc[i] +=
+ adapter->stats.pf.pxofftxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
- adapter->stats.pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
- adapter->stats.pxon2offc[i] +=
+ if (hw->mac.type != ixgbe_mac_X550EM_x)
+ adapter->stats.pf.pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ adapter->stats.pf.pxon2offc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
}
for (int i = 0; i < 16; i++) {
- adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
}
- adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
- adapter->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
- adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+ adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
+ adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
+ adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
/* Hardware workaround, gprc counts missed packets */
- adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
- adapter->stats.gprc -= missed_rx;
+ adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+ adapter->stats.pf.gprc -= missed_rx;
if (hw->mac.type != ixgbe_mac_82598EB) {
- adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
+ adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
- adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
+ adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
- adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
+ adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
- adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
- adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
} else {
- adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
/* 82598 only has a counter in the high register */
- adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
- adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
- adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
}
/*
@@ -5473,94 +3517,104 @@ ixgbe_update_stats_counters(struct adapter *adapter)
* broadcasts, so for now we subtract those.
*/
bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
- adapter->stats.bprc += bprc;
- adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+ adapter->stats.pf.bprc += bprc;
+ adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
if (hw->mac.type == ixgbe_mac_82598EB)
- adapter->stats.mprc -= bprc;
+ adapter->stats.pf.mprc -= bprc;
- adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
- adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
- adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
- adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
- adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
- adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+ adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+ adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+ adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+ adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+ adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
- adapter->stats.lxontxc += lxon;
+ adapter->stats.pf.lxontxc += lxon;
lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
- adapter->stats.lxofftxc += lxoff;
+ adapter->stats.pf.lxofftxc += lxoff;
total = lxon + lxoff;
- adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
- adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
- adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
- adapter->stats.gptc -= total;
- adapter->stats.mptc -= total;
- adapter->stats.ptc64 -= total;
- adapter->stats.gotc -= total * ETHER_MIN_LEN;
-
- adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
- adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
- adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
- adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
- adapter->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
- adapter->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
- adapter->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
- adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
- adapter->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
- adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
- adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
- adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
- adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
- adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
- adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
- adapter->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
- adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
- adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
+ adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+ adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+ adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+ adapter->stats.pf.gptc -= total;
+ adapter->stats.pf.mptc -= total;
+ adapter->stats.pf.ptc64 -= total;
+ adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
+
+ adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+ adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+ adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+ adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+ adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+ adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+ adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+ adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+ adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
+ adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+ adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+ adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+ adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+ adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
+ adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+ adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
/* Only read FCOE on 82599 */
if (hw->mac.type != ixgbe_mac_82598EB) {
- adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
- adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
- adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
- adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
- adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
- }
+ adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+ adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+ adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+ adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+ adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+ }
+
+ /* Fill out the OS statistics structure */
+ IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
+ IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
+ IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
+ IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
+ IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
+ IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
+ IXGBE_SET_COLLISIONS(adapter, 0);
+ IXGBE_SET_IQDROPS(adapter, total_missed_rx);
+ IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
+ + adapter->stats.pf.rlec);
}
+#if __FreeBSD_version >= 1100036
static uint64_t
ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
{
struct adapter *adapter;
- uint64_t rv;
adapter = if_getsoftc(ifp);
switch (cnt) {
case IFCOUNTER_IPACKETS:
- return (adapter->stats.gprc);
+ return (adapter->ipackets);
case IFCOUNTER_OPACKETS:
- return (adapter->stats.gptc);
+ return (adapter->opackets);
case IFCOUNTER_IBYTES:
- return (adapter->stats.gorc);
+ return (adapter->ibytes);
case IFCOUNTER_OBYTES:
- return (adapter->stats.gotc);
+ return (adapter->obytes);
case IFCOUNTER_IMCASTS:
- return (adapter->stats.mprc);
+ return (adapter->imcasts);
case IFCOUNTER_OMCASTS:
- return (adapter->stats.mptc);
+ return (adapter->omcasts);
case IFCOUNTER_COLLISIONS:
return (0);
case IFCOUNTER_IQDROPS:
- rv = 0;
- for (int i = 0; i < 8; i++)
- rv += adapter->stats.mpc[i];
- return (rv);
+ return (adapter->iqdrops);
case IFCOUNTER_IERRORS:
- return (adapter->stats.crcerrs + adapter->stats.rlec);
+ return (adapter->ierrors);
default:
return (if_get_counter_default(ifp, cnt));
}
}
+#endif
/** ixgbe_sysctl_tdh_handler - Handler function
* Retrieves the TDH value from the hardware
@@ -5668,7 +3722,6 @@ ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
static void
ixgbe_add_hw_stats(struct adapter *adapter)
{
-
device_t dev = adapter->dev;
struct tx_ring *txr = adapter->tx_rings;
@@ -5677,7 +3730,7 @@ ixgbe_add_hw_stats(struct adapter *adapter)
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct ixgbe_hw_stats *stats = &adapter->stats;
+ struct ixgbe_hw_stats *stats = &adapter->stats.pf;
struct sysctl_oid *stat_node, *queue_node;
struct sysctl_oid_list *stat_list, *queue_list;
@@ -5696,7 +3749,7 @@ ixgbe_add_hw_stats(struct adapter *adapter)
CTLFLAG_RD, &adapter->watchdog_events,
"Watchdog timeouts");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
- CTLFLAG_RD, &adapter->link_irq,
+ CTLFLAG_RD, &adapter->vector_irq,
"Link MSIX IRQ Handled");
for (int i = 0; i < adapter->num_queues; i++, txr++) {
@@ -5959,57 +4012,67 @@ ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
return error;
}
-
/*
-** Control link advertise speed:
-** 1 - advertise only 1G
-** 2 - advertise 100Mb
-** 3 - advertise normal
+** Control advertised link speed:
+** Flags:
+** 0x1 - advertise 100 Mb
+** 0x2 - advertise 1G
+** 0x4 - advertise 10G
*/
static int
ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
{
- int error = 0;
+ int error = 0, requested;
struct adapter *adapter;
device_t dev;
struct ixgbe_hw *hw;
- ixgbe_link_speed speed, last;
+ ixgbe_link_speed speed = 0;
adapter = (struct adapter *) arg1;
dev = adapter->dev;
hw = &adapter->hw;
- last = adapter->advertise;
- error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
+ requested = adapter->advertise;
+ error = sysctl_handle_int(oidp, &requested, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
- if (adapter->advertise == last) /* no change */
+ /* Checks to validate new value */
+ if (adapter->advertise == requested) /* no change */
return (0);
if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->phy.multispeed_fiber)))
+ (hw->phy.multispeed_fiber))) {
+ device_printf(dev,
+ "Advertised speed can only be set on copper or "
+ "multispeed fiber media types.\n");
return (EINVAL);
+ }
- if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) {
- device_printf(dev, "Set Advertise: 100Mb on X540 only\n");
+ if (requested < 0x1 || requested > 0x7) {
+ device_printf(dev,
+ "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
return (EINVAL);
}
- if (adapter->advertise == 1)
- speed = IXGBE_LINK_SPEED_1GB_FULL;
- else if (adapter->advertise == 2)
- speed = IXGBE_LINK_SPEED_100_FULL;
- else if (adapter->advertise == 3)
- speed = IXGBE_LINK_SPEED_1GB_FULL |
- IXGBE_LINK_SPEED_10GB_FULL;
- else { /* bogus value */
- adapter->advertise = last;
+ if ((requested & 0x1)
+ && (hw->mac.type != ixgbe_mac_X540)
+ && (hw->mac.type != ixgbe_mac_X550)) {
+ device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
return (EINVAL);
}
+ /* Set new value and report new advertised mode */
+ if (requested & 0x1)
+ speed |= IXGBE_LINK_SPEED_100_FULL;
+ if (requested & 0x2)
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (requested & 0x4)
+ speed |= IXGBE_LINK_SPEED_10GB_FULL;
+
hw->mac.autotry_restart = TRUE;
hw->mac.ops.setup_link(hw, speed, TRUE);
+ adapter->advertise = requested;
return (error);
}
@@ -6027,7 +4090,7 @@ ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS)
struct ixgbe_hw *hw = &adapter->hw;
- if (hw->mac.type != ixgbe_mac_X540)
+ if (hw->mac.type < ixgbe_mac_X540)
return (0);
error = sysctl_handle_int(oidp, &fire, 0, req);
@@ -6073,3 +4136,28 @@ ixgbe_disable_rx_drop(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
}
}
+
+static void
+ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
+{
+ u32 mask;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = (IXGBE_EIMS_RTX_QUEUE & queues);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ mask = (queues & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
+ mask = (queues >> 32);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+}
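+/*
+** Split sketch (illustrative): for queues = (1ULL << 40) | (1ULL << 3),
+** EICS_EX(0) is written with 0x8 and EICS_EX(1) with 0x100, i.e.
+** queue 40 becomes bit 8 of the upper register.
+*/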
+
+
diff --git a/sys/dev/ixgbe/if_ixv.c b/sys/dev/ixgbe/if_ixv.c
new file mode 100644
index 0000000..f8f3c7f
--- /dev/null
+++ b/sys/dev/ixgbe/if_ixv.c
@@ -0,0 +1,2107 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifndef IXGBE_STANDALONE_BUILD
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#endif
+
+#include "ixgbe.h"
+
+/*********************************************************************
+ * Driver version
+ *********************************************************************/
+char ixv_driver_version[] = "1.2.5";
+
+/*********************************************************************
+ * PCI Device ID Table
+ *
+ * Used by probe to select devices to load on
+ * Last field stores an index into ixv_strings
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ *********************************************************************/
+
+static ixgbe_vendor_info_t ixv_vendor_info_array[] =
+{
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
+ /* required last entry */
+ {0, 0, 0, 0, 0}
+};
+
+/*********************************************************************
+ * Table of branding strings
+ *********************************************************************/
+
+static char *ixv_strings[] = {
+ "Intel(R) PRO/10GbE Virtual Function Network Driver"
+};
+
+/*********************************************************************
+ * Function prototypes
+ *********************************************************************/
+static int ixv_probe(device_t);
+static int ixv_attach(device_t);
+static int ixv_detach(device_t);
+static int ixv_shutdown(device_t);
+static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
+static void ixv_init(void *);
+static void ixv_init_locked(struct adapter *);
+static void ixv_stop(void *);
+static void ixv_media_status(struct ifnet *, struct ifmediareq *);
+static int ixv_media_change(struct ifnet *);
+static void ixv_identify_hardware(struct adapter *);
+static int ixv_allocate_pci_resources(struct adapter *);
+static int ixv_allocate_msix(struct adapter *);
+static int ixv_setup_msix(struct adapter *);
+static void ixv_free_pci_resources(struct adapter *);
+static void ixv_local_timer(void *);
+static void ixv_setup_interface(device_t, struct adapter *);
+static void ixv_config_link(struct adapter *);
+
+static void ixv_initialize_transmit_units(struct adapter *);
+static void ixv_initialize_receive_units(struct adapter *);
+
+static void ixv_enable_intr(struct adapter *);
+static void ixv_disable_intr(struct adapter *);
+static void ixv_set_multi(struct adapter *);
+static void ixv_update_link_status(struct adapter *);
+static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
+static void ixv_set_ivar(struct adapter *, u8, u8, s8);
+static void ixv_configure_ivars(struct adapter *);
+static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
+
+static void ixv_setup_vlan_support(struct adapter *);
+static void ixv_register_vlan(void *, struct ifnet *, u16);
+static void ixv_unregister_vlan(void *, struct ifnet *, u16);
+
+static void ixv_save_stats(struct adapter *);
+static void ixv_init_stats(struct adapter *);
+static void ixv_update_stats(struct adapter *);
+static void ixv_add_stats_sysctls(struct adapter *);
+
+/* The MSI/X Interrupt handlers */
+static void ixv_msix_que(void *);
+static void ixv_msix_mbx(void *);
+
+/* Deferred interrupt tasklets */
+static void ixv_handle_que(void *, int);
+static void ixv_handle_mbx(void *, int);
+
+/*********************************************************************
+ * FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+static device_method_t ixv_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ixv_probe),
+ DEVMETHOD(device_attach, ixv_attach),
+ DEVMETHOD(device_detach, ixv_detach),
+ DEVMETHOD(device_shutdown, ixv_shutdown),
+ DEVMETHOD_END
+};
+
+static driver_t ixv_driver = {
+ "ixv", ixv_methods, sizeof(struct adapter),
+};
+
+devclass_t ixgbe_devclass;
+DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
+MODULE_DEPEND(ixv, pci, 1, 1, 1);
+MODULE_DEPEND(ixv, ether, 1, 1, 1);
+
+/*
+** TUNABLE PARAMETERS:
+*/
+
+/*
+** AIM: Adaptive Interrupt Moderation
+** which means that the interrupt rate
+** is varied over time based on the
+** traffic for that interrupt vector
+*/
+static int ixv_enable_aim = FALSE;
+TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
+
+/* How many packets rxeof tries to clean at a time */
+static int ixv_rx_process_limit = 256;
+TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
+
+/* How many packets txeof tries to clean at a time */
+static int ixv_tx_process_limit = 256;
+TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
+
+/* Flow control setting, default to full */
+static int ixv_flow_control = ixgbe_fc_full;
+TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
+
+/*
+ * Header split: this causes the hardware to DMA
+ * the header into a separate mbuf from the payload.
+ * It can be a performance win in some workloads, but
+ * in others it actually hurts; it is off by default.
+ */
+static int ixv_header_split = FALSE;
+TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
+
+/*
+** Number of TX descriptors per ring;
+** set higher than RX, as this seems to
+** be the better performing choice.
+*/
+static int ixv_txd = DEFAULT_TXD;
+TUNABLE_INT("hw.ixv.txd", &ixv_txd);
+
+/* Number of RX descriptors per ring */
+static int ixv_rxd = DEFAULT_RXD;
+TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
+
+/*
+** Shadow VFTA table: this is needed because
+** the real filter table gets cleared during
+** a soft reset, and we need to repopulate it.
+*/
+static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
+
+/*********************************************************************
+ * Device identification routine
+ *
+ * ixv_probe determines if the driver should be loaded on an
+ * adapter, based on the adapter's PCI vendor/device ID.
+ *
+ * return BUS_PROBE_DEFAULT on success, positive on failure
+ *********************************************************************/
+
+static int
+ixv_probe(device_t dev)
+{
+ ixgbe_vendor_info_t *ent;
+
+ u16 pci_vendor_id = 0;
+ u16 pci_device_id = 0;
+ u16 pci_subvendor_id = 0;
+ u16 pci_subdevice_id = 0;
+ char adapter_name[256];
+
+
+ pci_vendor_id = pci_get_vendor(dev);
+ if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
+ return (ENXIO);
+
+ pci_device_id = pci_get_device(dev);
+ pci_subvendor_id = pci_get_subvendor(dev);
+ pci_subdevice_id = pci_get_subdevice(dev);
+
+ ent = ixv_vendor_info_array;
+ while (ent->vendor_id != 0) {
+ if ((pci_vendor_id == ent->vendor_id) &&
+ (pci_device_id == ent->device_id) &&
+
+ ((pci_subvendor_id == ent->subvendor_id) ||
+ (ent->subvendor_id == 0)) &&
+
+ ((pci_subdevice_id == ent->subdevice_id) ||
+ (ent->subdevice_id == 0))) {
+ sprintf(adapter_name, "%s, Version - %s",
+ ixv_strings[ent->index],
+ ixv_driver_version);
+ device_set_desc_copy(dev, adapter_name);
+ return (BUS_PROBE_DEFAULT);
+ }
+ ent++;
+ }
+ return (ENXIO);
+}
+
+/*********************************************************************
+ * Device initialization routine
+ *
+ * The attach entry point is called when the driver is being loaded.
+ * This routine identifies the type of hardware, allocates all resources
+ * and initializes the hardware.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixv_attach(device_t dev)
+{
+ struct adapter *adapter;
+ struct ixgbe_hw *hw;
+ int error = 0;
+
+ INIT_DEBUGOUT("ixv_attach: begin");
+
+ /* Allocate, clear, and link in our adapter structure */
+ adapter = device_get_softc(dev);
+ adapter->dev = adapter->osdep.dev = dev;
+ hw = &adapter->hw;
+
+ /* Core Lock Init*/
+ IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
+
+ /* SYSCTL APIs */
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
+ adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
+ &ixv_enable_aim, 1, "Interrupt Moderation");
+
+ /* Set up the timer callout */
+ callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
+
+ /* Determine hardware revision */
+ ixv_identify_hardware(adapter);
+
+ /* Do base PCI setup - map BAR0 */
+ if (ixv_allocate_pci_resources(adapter)) {
+ device_printf(dev, "Allocation of PCI resources failed\n");
+ error = ENXIO;
+ goto err_out;
+ }
+
+ /* Do descriptor calc and sanity checks */
+ if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
+ ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
+ device_printf(dev, "TXD config issue, using default!\n");
+ adapter->num_tx_desc = DEFAULT_TXD;
+ } else
+ adapter->num_tx_desc = ixv_txd;
+
+ if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
+ ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
+ device_printf(dev, "RXD config issue, using default!\n");
+ adapter->num_rx_desc = DEFAULT_RXD;
+ } else
+ adapter->num_rx_desc = ixv_rxd;
+
+ /* Allocate our TX/RX Queues */
+ if (ixgbe_allocate_queues(adapter)) {
+ error = ENOMEM;
+ goto err_out;
+ }
+
+ /*
+	** Initialize the shared code: it is at
+	** this point that the mac type is set.
+ */
+ error = ixgbe_init_shared_code(hw);
+ if (error) {
+ device_printf(dev,"Shared Code Initialization Failure\n");
+ error = EIO;
+ goto err_late;
+ }
+
+ /* Setup the mailbox */
+ ixgbe_init_mbx_params_vf(hw);
+
+ ixgbe_reset_hw(hw);
+
+ error = ixgbe_init_hw(hw);
+ if (error) {
+ device_printf(dev,"Hardware Initialization Failure\n");
+ error = EIO;
+ goto err_late;
+ }
+
+ error = ixv_allocate_msix(adapter);
+ if (error)
+ goto err_late;
+
+ /* If no mac address was assigned, make a random one */
+ if (!ixv_check_ether_addr(hw->mac.addr)) {
+ u8 addr[ETHER_ADDR_LEN];
+ arc4rand(&addr, sizeof(addr), 0);
+ addr[0] &= 0xFE;
+ addr[0] |= 0x02;
+ bcopy(addr, hw->mac.addr, sizeof(addr));
+ }
+
+ /* Setup OS specific network interface */
+ ixv_setup_interface(dev, adapter);
+
+ /* Do the stats setup */
+ ixv_save_stats(adapter);
+ ixv_init_stats(adapter);
+ ixv_add_stats_sysctls(adapter);
+
+ /* Register for VLAN events */
+ adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+ ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+ adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+ ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+
+ INIT_DEBUGOUT("ixv_attach: end");
+ return (0);
+
+err_late:
+ ixgbe_free_transmit_structures(adapter);
+ ixgbe_free_receive_structures(adapter);
+err_out:
+ ixv_free_pci_resources(adapter);
+ return (error);
+
+}
+
+/*********************************************************************
+ * Device removal routine
+ *
+ * The detach entry point is called when the driver is being removed.
+ * This routine stops the adapter and deallocates all the resources
+ * that were allocated for driver operation.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixv_detach(device_t dev)
+{
+ struct adapter *adapter = device_get_softc(dev);
+ struct ix_queue *que = adapter->queues;
+
+ INIT_DEBUGOUT("ixv_detach: begin");
+
+	/* Make sure VLANs are not using the driver */
+ if (adapter->ifp->if_vlantrunk != NULL) {
+ device_printf(dev,"Vlan in use, detach first\n");
+ return (EBUSY);
+ }
+
+ IXGBE_CORE_LOCK(adapter);
+ ixv_stop(adapter);
+ IXGBE_CORE_UNLOCK(adapter);
+
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ if (que->tq) {
+ struct tx_ring *txr = que->txr;
+ taskqueue_drain(que->tq, &txr->txq_task);
+ taskqueue_drain(que->tq, &que->que_task);
+ taskqueue_free(que->tq);
+ }
+ }
+
+ /* Drain the Mailbox(link) queue */
+ if (adapter->tq) {
+ taskqueue_drain(adapter->tq, &adapter->link_task);
+ taskqueue_free(adapter->tq);
+ }
+
+ /* Unregister VLAN events */
+ if (adapter->vlan_attach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
+ if (adapter->vlan_detach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
+
+ ether_ifdetach(adapter->ifp);
+ callout_drain(&adapter->timer);
+ ixv_free_pci_resources(adapter);
+ bus_generic_detach(dev);
+ if_free(adapter->ifp);
+
+ ixgbe_free_transmit_structures(adapter);
+ ixgbe_free_receive_structures(adapter);
+
+ IXGBE_CORE_LOCK_DESTROY(adapter);
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Shutdown entry point
+ *
+ **********************************************************************/
+static int
+ixv_shutdown(device_t dev)
+{
+ struct adapter *adapter = device_get_softc(dev);
+ IXGBE_CORE_LOCK(adapter);
+ ixv_stop(adapter);
+ IXGBE_CORE_UNLOCK(adapter);
+ return (0);
+}
+
+
+/*********************************************************************
+ * Ioctl entry point
+ *
+ * ixv_ioctl is called when the user wants to configure the
+ * interface.
+ *
+ * return 0 on success, positive on failure
+ **********************************************************************/
+
+static int
+ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *) data;
+#if defined(INET) || defined(INET6)
+ struct ifaddr *ifa = (struct ifaddr *) data;
+ bool avoid_reset = FALSE;
+#endif
+ int error = 0;
+
+ switch (command) {
+
+ case SIOCSIFADDR:
+#ifdef INET
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ avoid_reset = TRUE;
+#endif
+#ifdef INET6
+ if (ifa->ifa_addr->sa_family == AF_INET6)
+ avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+ /*
+ ** Calling init results in link renegotiation,
+ ** so we avoid doing it when possible.
+ */
+ if (avoid_reset) {
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ ixv_init(adapter);
+ if (!(ifp->if_flags & IFF_NOARP))
+ arp_ifinit(ifp, ifa);
+ } else
+ error = ether_ioctl(ifp, command, data);
+ break;
+#endif
+ case SIOCSIFMTU:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
+ if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
+ error = EINVAL;
+ } else {
+ IXGBE_CORE_LOCK(adapter);
+ ifp->if_mtu = ifr->ifr_mtu;
+ adapter->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ ixv_init_locked(adapter);
+ IXGBE_CORE_UNLOCK(adapter);
+ }
+ break;
+ case SIOCSIFFLAGS:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
+ IXGBE_CORE_LOCK(adapter);
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ ixv_init_locked(adapter);
+ } else
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ ixv_stop(adapter);
+ adapter->if_flags = ifp->if_flags;
+ IXGBE_CORE_UNLOCK(adapter);
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXGBE_CORE_LOCK(adapter);
+ ixv_disable_intr(adapter);
+ ixv_set_multi(adapter);
+ ixv_enable_intr(adapter);
+ IXGBE_CORE_UNLOCK(adapter);
+ }
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
+ error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
+ break;
+ case SIOCSIFCAP:
+ {
+ int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
+ if (mask & IFCAP_HWCSUM)
+ ifp->if_capenable ^= IFCAP_HWCSUM;
+ if (mask & IFCAP_TSO4)
+ ifp->if_capenable ^= IFCAP_TSO4;
+ if (mask & IFCAP_LRO)
+ ifp->if_capenable ^= IFCAP_LRO;
+ if (mask & IFCAP_VLAN_HWTAGGING)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXGBE_CORE_LOCK(adapter);
+ ixv_init_locked(adapter);
+ IXGBE_CORE_UNLOCK(adapter);
+ }
+ VLAN_CAPABILITIES(ifp);
+ break;
+ }
+
+ default:
+ IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+/*********************************************************************
+ * Init entry point
+ *
+ * This routine is used in two ways. It is used by the stack as the
+ * init entry point in the network interface structure. It is also used
+ * by the driver as a hw/sw initialization routine to get to a
+ * consistent state.
+ *
+ * return 0 on success, positive on failure
+ **********************************************************************/
+#define IXGBE_MHADD_MFS_SHIFT 16
+
+static void
+ixv_init_locked(struct adapter *adapter)
+{
+ struct ifnet *ifp = adapter->ifp;
+ device_t dev = adapter->dev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mhadd, gpie;
+
+ INIT_DEBUGOUT("ixv_init: begin");
+ mtx_assert(&adapter->core_mtx, MA_OWNED);
+ hw->adapter_stopped = FALSE;
+ ixgbe_stop_adapter(hw);
+ callout_stop(&adapter->timer);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+	/* Get the latest mac address; the user can use a LAA */
+ bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
+ IXGBE_ETH_LENGTH_OF_ADDRESS);
+ ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
+ hw->addr_ctrl.rar_used_count = 1;
+
+ /* Prepare transmit descriptors and buffers */
+ if (ixgbe_setup_transmit_structures(adapter)) {
+ device_printf(dev,"Could not setup transmit structures\n");
+ ixv_stop(adapter);
+ return;
+ }
+
+ ixgbe_reset_hw(hw);
+ ixv_initialize_transmit_units(adapter);
+
+ /* Setup Multicast table */
+ ixv_set_multi(adapter);
+
+ /*
+ ** Determine the correct mbuf pool
+ ** for doing jumbo/headersplit
+ */
+ if (ifp->if_mtu > ETHERMTU)
+ adapter->rx_mbuf_sz = MJUMPAGESIZE;
+ else
+ adapter->rx_mbuf_sz = MCLBYTES;
+
+ /* Prepare receive descriptors and buffers */
+ if (ixgbe_setup_receive_structures(adapter)) {
+ device_printf(dev,"Could not setup receive structures\n");
+ ixv_stop(adapter);
+ return;
+ }
+
+ /* Configure RX settings */
+ ixv_initialize_receive_units(adapter);
+
+ /* Enable Enhanced MSIX mode */
+ gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
+ gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
+ gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* Set the various hardware offload abilities */
+ ifp->if_hwassist = 0;
+ if (ifp->if_capenable & IFCAP_TSO4)
+ ifp->if_hwassist |= CSUM_TSO;
+ if (ifp->if_capenable & IFCAP_TXCSUM) {
+ ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+#if __FreeBSD_version >= 800000
+ ifp->if_hwassist |= CSUM_SCTP;
+#endif
+ }
+
+ /* Set MTU size */
+ if (ifp->if_mtu > ETHERMTU) {
+ mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+ mhadd &= ~IXGBE_MHADD_MFS_MASK;
+ mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+ }
+
+ /* Set up VLAN offload and filter */
+ ixv_setup_vlan_support(adapter);
+
+ callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
+
+ /* Set up MSI/X routing */
+ ixv_configure_ivars(adapter);
+
+ /* Set up auto-mask */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
+
+ /* Set moderation on the Link interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
+
+ /* Stats init */
+ ixv_init_stats(adapter);
+
+ /* Config/Enable Link */
+ ixv_config_link(adapter);
+
+ /* And now turn on interrupts */
+ ixv_enable_intr(adapter);
+
+ /* Now inform the stack we're ready */
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ return;
+}
+
+static void
+ixv_init(void *arg)
+{
+ struct adapter *adapter = arg;
+
+ IXGBE_CORE_LOCK(adapter);
+ ixv_init_locked(adapter);
+ IXGBE_CORE_UNLOCK(adapter);
+ return;
+}
+
+
+/*
+**
+** MSIX Interrupt Handlers and Tasklets
+**
+*/
+
+static inline void
+ixv_enable_queue(struct adapter *adapter, u32 vector)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 queue = 1 << vector;
+ u32 mask;
+
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+}
+
+static inline void
+ixv_disable_queue(struct adapter *adapter, u32 vector)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+	u64 queue = (u64)1 << vector;	/* shift as 64-bit to avoid sign extension */
+ u32 mask;
+
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
+}
+
+static inline void
+ixv_rearm_queues(struct adapter *adapter, u64 queues)
+{
+ u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
+}
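+
+/*
+** Mask sketch (illustrative): vector 2 yields queue bit 0x4; ANDed
+** with IXGBE_EIMS_RTX_QUEUE this selects only that queue's bit in
+** the VTEIMS/VTEIMC/VTEICS writes above.
+*/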
+
+
+static void
+ixv_handle_que(void *context, int pending)
+{
+ struct ix_queue *que = context;
+ struct adapter *adapter = que->adapter;
+ struct tx_ring *txr = que->txr;
+ struct ifnet *ifp = adapter->ifp;
+ bool more;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ more = ixgbe_rxeof(que);
+ IXGBE_TX_LOCK(txr);
+ ixgbe_txeof(txr);
+#if __FreeBSD_version >= 800000
+ if (!drbr_empty(ifp, txr->br))
+ ixgbe_mq_start_locked(ifp, txr);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ ixgbe_start_locked(txr, ifp);
+#endif
+ IXGBE_TX_UNLOCK(txr);
+ if (more) {
+ taskqueue_enqueue(que->tq, &que->que_task);
+ return;
+ }
+ }
+
+ /* Reenable this interrupt */
+ ixv_enable_queue(adapter, que->msix);
+ return;
+}
+
+/*********************************************************************
+ *
+ * MSIX Queue Interrupt Service routine
+ *
+ **********************************************************************/
+static void
+ixv_msix_que(void *arg)
+{
+ struct ix_queue *que = arg;
+ struct adapter *adapter = que->adapter;
+ struct ifnet *ifp = adapter->ifp;
+ struct tx_ring *txr = que->txr;
+ struct rx_ring *rxr = que->rxr;
+ bool more;
+ u32 newitr = 0;
+
+ ixv_disable_queue(adapter, que->msix);
+ ++que->irqs;
+
+ more = ixgbe_rxeof(que);
+
+ IXGBE_TX_LOCK(txr);
+ ixgbe_txeof(txr);
+ /*
+ ** Make certain that if the stack
+ ** has anything queued the task gets
+ ** scheduled to handle it.
+ */
+#ifdef IXGBE_LEGACY_TX
+ if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
+ ixgbe_start_locked(txr, ifp);
+#else
+ if (!drbr_empty(adapter->ifp, txr->br))
+ ixgbe_mq_start_locked(ifp, txr);
+#endif
+ IXGBE_TX_UNLOCK(txr);
+
+ /* Do AIM now? */
+
+ if (ixv_enable_aim == FALSE)
+ goto no_calc;
+ /*
+ ** Do Adaptive Interrupt Moderation:
+ ** - Write out last calculated setting
+ ** - Calculate based on average size over
+ ** the last interval.
+ */
+ if (que->eitr_setting)
+ IXGBE_WRITE_REG(&adapter->hw,
+ IXGBE_VTEITR(que->msix),
+ que->eitr_setting);
+
+ que->eitr_setting = 0;
+
+ /* Idle, do nothing */
+ if ((txr->bytes == 0) && (rxr->bytes == 0))
+ goto no_calc;
+
+ if ((txr->bytes) && (txr->packets))
+ newitr = txr->bytes/txr->packets;
+ if ((rxr->bytes) && (rxr->packets))
+ newitr = max(newitr,
+ (rxr->bytes / rxr->packets));
+ newitr += 24; /* account for hardware frame, crc */
+
+ /* set an upper boundary */
+ newitr = min(newitr, 3000);
+
+ /* Be nice to the mid range */
+ if ((newitr > 300) && (newitr < 1200))
+ newitr = (newitr / 3);
+ else
+ newitr = (newitr / 2);
+
+ newitr |= newitr << 16;
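+	/*
+	** Worked example (sketch): an average frame of 1500 bytes
+	** gives newitr = 1500 + 24 = 1524; that is above the mid
+	** range, so it is halved to 762 and mirrored into both
+	** halves of the EITR value written on the next interrupt.
+	*/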
+
+ /* save for next interrupt */
+ que->eitr_setting = newitr;
+
+ /* Reset state */
+ txr->bytes = 0;
+ txr->packets = 0;
+ rxr->bytes = 0;
+ rxr->packets = 0;
+
+no_calc:
+ if (more)
+ taskqueue_enqueue(que->tq, &que->que_task);
+ else /* Reenable this interrupt */
+ ixv_enable_queue(adapter, que->msix);
+ return;
+}
+
+static void
+ixv_msix_mbx(void *arg)
+{
+ struct adapter *adapter = arg;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+
+ ++adapter->vector_irq;
+
+ /* First get the cause */
+ reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ /* Clear interrupt with write */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
+
+ /* Link status change */
+ if (reg & IXGBE_EICR_LSC)
+ taskqueue_enqueue(adapter->tq, &adapter->link_task);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
+ return;
+}
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called whenever the user queries the status of
+ * the interface using ifconfig.
+ *
+ **********************************************************************/
+static void
+ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+ struct adapter *adapter = ifp->if_softc;
+
+ INIT_DEBUGOUT("ixv_media_status: begin");
+ IXGBE_CORE_LOCK(adapter);
+ ixv_update_link_status(adapter);
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (!adapter->link_active) {
+ IXGBE_CORE_UNLOCK(adapter);
+ return;
+ }
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_FDX;
+ break;
+ }
+
+ IXGBE_CORE_UNLOCK(adapter);
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called when the user changes speed/duplex using
+ * the media/mediaopt option with ifconfig.
+ *
+ **********************************************************************/
+static int
+ixv_media_change(struct ifnet * ifp)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct ifmedia *ifm = &adapter->media;
+
+ INIT_DEBUGOUT("ixv_media_change: begin");
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ switch (IFM_SUBTYPE(ifm->ifm_media)) {
+ case IFM_AUTO:
+ break;
+ default:
+ device_printf(adapter->dev, "Only auto media type\n");
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+
+/*********************************************************************
+ * Multicast Update
+ *
+ * This routine is called whenever the multicast address list is updated.
+ *
+ **********************************************************************/
+#define IXGBE_RAR_ENTRIES 16
+
+static void
+ixv_set_multi(struct adapter *adapter)
+{
+ u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 *update_ptr;
+ struct ifmultiaddr *ifma;
+ int mcnt = 0;
+ struct ifnet *ifp = adapter->ifp;
+
+ IOCTL_DEBUGOUT("ixv_set_multi: begin");
+
+#if __FreeBSD_version < 800000
+ IF_ADDR_LOCK(ifp);
+#else
+ if_maddr_rlock(ifp);
+#endif
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+ &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
+ IXGBE_ETH_LENGTH_OF_ADDRESS);
+ mcnt++;
+ }
+#if __FreeBSD_version < 800000
+ IF_ADDR_UNLOCK(ifp);
+#else
+ if_maddr_runlock(ifp);
+#endif
+
+ update_ptr = mta;
+
+ ixgbe_update_mc_addr_list(&adapter->hw,
+ update_ptr, mcnt, ixv_mc_array_itr, TRUE);
+
+ return;
+}
+
+/*
+ * This is an iterator function needed by the multicast
+ * shared code. It simply feeds the shared code routine the
+ * addresses built by ixv_set_multi(), one by one.
+ */
+static u8 *
+ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
+{
+ u8 *addr = *update_ptr;
+ u8 *newptr;
+ *vmdq = 0;
+
+ newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
+ *update_ptr = newptr;
+ return addr;
+}
+
+/*********************************************************************
+ * Timer routine
+ *
+ * This routine checks for link status, updates statistics,
+ * and runs the watchdog check.
+ *
+ **********************************************************************/
+
+static void
+ixv_local_timer(void *arg)
+{
+ struct adapter *adapter = arg;
+ device_t dev = adapter->dev;
+ struct ix_queue *que = adapter->queues;
+ u64 queues = 0;
+ int hung = 0;
+
+ mtx_assert(&adapter->core_mtx, MA_OWNED);
+
+ ixv_update_link_status(adapter);
+
+ /* Stats Update */
+ ixv_update_stats(adapter);
+
+ /*
+ ** Check the TX queues status
+ ** - mark hung queues so we don't schedule on them
+ ** - watchdog only if all queues show hung
+ */
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ /* Keep track of queues with work for soft irq */
+ if (que->txr->busy)
+ queues |= ((u64)1 << que->me);
+ /*
+		** Each time txeof runs without cleaning while there
+		** are uncleaned descriptors, it increments busy. If
+		** we get to the MAX we declare it hung.
+ */
+ if (que->busy == IXGBE_QUEUE_HUNG) {
+ ++hung;
+ /* Mark the queue as inactive */
+ adapter->active_queues &= ~((u64)1 << que->me);
+ continue;
+ } else {
+ /* Check if we've come back from hung */
+ if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
+ adapter->active_queues |= ((u64)1 << que->me);
+ }
+ if (que->busy >= IXGBE_MAX_TX_BUSY) {
+ device_printf(dev,"Warning queue %d "
+ "appears to be hung!\n", i);
+ que->txr->busy = IXGBE_QUEUE_HUNG;
+ ++hung;
+ }
+
+ }
+
+	/* Only truly watchdog if all queues show hung */
+ if (hung == adapter->num_queues)
+ goto watchdog;
+ else if (queues != 0) { /* Force an IRQ on queues with work */
+ ixv_rearm_queues(adapter, queues);
+ }
+
+ callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
+ return;
+
+watchdog:
+ device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
+ adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ adapter->watchdog_events++;
+ ixv_init_locked(adapter);
+}
+
+/*
+** Note: this routine updates the OS on the link state;
+** the real check of the hardware only happens with
+** a link interrupt.
+*/
+static void
+ixv_update_link_status(struct adapter *adapter)
+{
+ struct ifnet *ifp = adapter->ifp;
+ device_t dev = adapter->dev;
+
+ if (adapter->link_up){
+ if (adapter->link_active == FALSE) {
+ if (bootverbose)
+ device_printf(dev,"Link is up %d Gbps %s \n",
+ ((adapter->link_speed == 128)? 10:1),
+ "Full Duplex");
+ adapter->link_active = TRUE;
+ if_link_state_change(ifp, LINK_STATE_UP);
+ }
+ } else { /* Link down */
+ if (adapter->link_active == TRUE) {
+ if (bootverbose)
+ device_printf(dev,"Link is Down\n");
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ adapter->link_active = FALSE;
+ }
+ }
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC and deallocates TX/RX buffers.
+ *
+ **********************************************************************/
+
+static void
+ixv_stop(void *arg)
+{
+ struct ifnet *ifp;
+ struct adapter *adapter = arg;
+ struct ixgbe_hw *hw = &adapter->hw;
+ ifp = adapter->ifp;
+
+ mtx_assert(&adapter->core_mtx, MA_OWNED);
+
+ INIT_DEBUGOUT("ixv_stop: begin\n");
+ ixv_disable_intr(adapter);
+
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ ixgbe_reset_hw(hw);
+ adapter->hw.adapter_stopped = FALSE;
+ ixgbe_stop_adapter(hw);
+ callout_stop(&adapter->timer);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * Determine hardware revision.
+ *
+ **********************************************************************/
+static void
+ixv_identify_hardware(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /*
+	** Make sure BUSMASTER is set; on a VM under
+	** KVM it may not be, and that will break things.
+ */
+ pci_enable_busmaster(dev);
+
+ /* Save off the information about this board */
+ hw->vendor_id = pci_get_vendor(dev);
+ hw->device_id = pci_get_device(dev);
+ hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ hw->subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ hw->subsystem_device_id =
+ pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+ /* We need this to determine device-specific things */
+ ixgbe_set_mac_type(hw);
+
+ /* Set the right number of segments */
+ adapter->num_segs = IXGBE_82599_SCATTER;
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * Setup MSIX Interrupt resources and handlers
+ *
+ **********************************************************************/
+static int
+ixv_allocate_msix(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ struct ix_queue *que = adapter->queues;
+ struct tx_ring *txr = adapter->tx_rings;
+ int error, rid, vector = 0;
+
+ for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
+ rid = vector + 1;
+ que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if (que->res == NULL) {
+ device_printf(dev,"Unable to allocate"
+ " bus resource: que interrupt [%d]\n", vector);
+ return (ENXIO);
+ }
+ /* Set the handler function */
+ error = bus_setup_intr(dev, que->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixv_msix_que, que, &que->tag);
+ if (error) {
+ que->res = NULL;
+ device_printf(dev, "Failed to register QUE handler");
+ return (error);
+ }
+#if __FreeBSD_version >= 800504
+ bus_describe_intr(dev, que->res, que->tag, "que %d", i);
+#endif
+ que->msix = vector;
+ adapter->active_queues |= (u64)(1 << que->msix);
+ /*
+ ** Bind the msix vector, and thus the
+ ** ring to the corresponding cpu.
+ */
+ if (adapter->num_queues > 1)
+ bus_bind_intr(dev, que->res, i);
+ TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
+ TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
+ que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
+ taskqueue_thread_enqueue, &que->tq);
+ taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+ device_get_nameunit(adapter->dev));
+ }
+
+ /* and Mailbox */
+ rid = vector + 1;
+ adapter->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (!adapter->res) {
+ device_printf(dev,"Unable to allocate"
+ " bus resource: MBX interrupt [%d]\n", rid);
+ return (ENXIO);
+ }
+ /* Set the mbx handler function */
+ error = bus_setup_intr(dev, adapter->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixv_msix_mbx, adapter, &adapter->tag);
+ if (error) {
+ adapter->res = NULL;
+ device_printf(dev, "Failed to register LINK handler");
+ return (error);
+ }
+#if __FreeBSD_version >= 800504
+ bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
+#endif
+ adapter->vector = vector;
+ /* Tasklets for Mailbox */
+ TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
+ adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
+ taskqueue_thread_enqueue, &adapter->tq);
+ taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
+ device_get_nameunit(adapter->dev));
+ /*
+	** Due to a broken design, QEMU will fail to properly
+	** enable the guest for MSIX unless the vectors in
+	** the table are all set up, so we must rewrite the
+	** ENABLE bit in the MSIX control register again at
+	** this point to cause it to successfully initialize us.
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
+ int msix_ctrl;
+ pci_find_cap(dev, PCIY_MSIX, &rid);
+ rid += PCIR_MSIX_CTRL;
+ msix_ctrl = pci_read_config(dev, rid, 2);
+ msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ pci_write_config(dev, rid, msix_ctrl, 2);
+ }
+
+ return (0);
+}
+
+/*
+ * Set up MSIX resources; note that the VF
+ * device MUST use MSIX, there is no fallback.
+ */
+static int
+ixv_setup_msix(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ int rid, want;
+
+
+ /* First try MSI/X */
+ rid = PCIR_BAR(3);
+ adapter->msix_mem = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (adapter->msix_mem == NULL) {
+ device_printf(adapter->dev,
+ "Unable to map MSIX table \n");
+ goto out;
+ }
+
+ /*
+ ** Want two vectors: one for a queue,
+ ** plus an additional for mailbox.
+ */
+ want = 2;
+ if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) {
+ device_printf(adapter->dev,
+ "Using MSIX interrupts with %d vectors\n", want);
+ return (want);
+ }
+ /* Release in case alloc was insufficient */
+ pci_release_msi(dev);
+out:
+ if (adapter->msix_mem != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rid, adapter->msix_mem);
+ adapter->msix_mem = NULL;
+ }
+ device_printf(adapter->dev,"MSIX config error\n");
+ return (ENXIO);
+}
+
+
+static int
+ixv_allocate_pci_resources(struct adapter *adapter)
+{
+ int rid;
+ device_t dev = adapter->dev;
+
+ rid = PCIR_BAR(0);
+ adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+
+ if (!(adapter->pci_mem)) {
+ device_printf(dev,"Unable to allocate bus resource: memory\n");
+ return (ENXIO);
+ }
+
+ adapter->osdep.mem_bus_space_tag =
+ rman_get_bustag(adapter->pci_mem);
+ adapter->osdep.mem_bus_space_handle =
+ rman_get_bushandle(adapter->pci_mem);
+ adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
+
+ adapter->num_queues = 1;
+ adapter->hw.back = &adapter->osdep;
+
+ /*
+	** Now set up MSI/X; this should
+	** return the number of
+	** configured vectors.
+ */
+ adapter->msix = ixv_setup_msix(adapter);
+ if (adapter->msix == ENXIO)
+ return (ENXIO);
+ else
+ return (0);
+}
+
+static void
+ixv_free_pci_resources(struct adapter * adapter)
+{
+ struct ix_queue *que = adapter->queues;
+ device_t dev = adapter->dev;
+ int rid, memrid;
+
+ memrid = PCIR_BAR(MSIX_82598_BAR);
+
+ /*
+ ** There is a slight possibility of a failure mode
+ ** in attach that will result in entering this function
+ ** before interrupt resources have been initialized, and
+	** in that case we do not want to execute the loops below.
+	** We can detect this reliably by the state of the adapter's
+	** res pointer.
+ */
+ if (adapter->res == NULL)
+ goto mem;
+
+ /*
+ ** Release all msix queue resources:
+ */
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ rid = que->msix + 1;
+ if (que->tag != NULL) {
+ bus_teardown_intr(dev, que->res, que->tag);
+ que->tag = NULL;
+ }
+ if (que->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+ }
+
+
+ /* Clean the Legacy or Link interrupt last */
+ if (adapter->vector) /* we are doing MSIX */
+ rid = adapter->vector + 1;
+ else
+ (adapter->msix != 0) ? (rid = 1):(rid = 0);
+
+ if (adapter->tag != NULL) {
+ bus_teardown_intr(dev, adapter->res, adapter->tag);
+ adapter->tag = NULL;
+ }
+ if (adapter->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
+
+mem:
+ if (adapter->msix)
+ pci_release_msi(dev);
+
+ if (adapter->msix_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ memrid, adapter->msix_mem);
+
+ if (adapter->pci_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(0), adapter->pci_mem);
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static void
+ixv_setup_interface(device_t dev, struct adapter *adapter)
+{
+ struct ifnet *ifp;
+
+ INIT_DEBUGOUT("ixv_setup_interface: begin");
+
+ ifp = adapter->ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL)
+ panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_baudrate = 1000000000;
+ ifp->if_init = ixv_init;
+ ifp->if_softc = adapter;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = ixv_ioctl;
+#if __FreeBSD_version >= 800000
+ ifp->if_transmit = ixgbe_mq_start;
+ ifp->if_qflush = ixgbe_qflush;
+#else
+ ifp->if_start = ixgbe_start;
+#endif
+ ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
+
+ ether_ifattach(ifp, adapter->hw.mac.addr);
+
+ adapter->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ /*
+ * Tell the upper layer(s) we support long frames.
+ */
+ ifp->if_hdrlen = sizeof(struct ether_vlan_header);
+
+ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
+ ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+ | IFCAP_VLAN_HWTSO
+ | IFCAP_VLAN_MTU;
+ ifp->if_capabilities |= IFCAP_LRO;
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /*
+ * Specify the media types supported by this adapter and register
+ * callbacks to update media and link information
+ */
+ ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
+ ixv_media_status);
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
+
+ return;
+}
+
+static void
+ixv_config_link(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 autoneg, err = 0;
+
+ if (hw->mac.ops.check_link)
+ err = hw->mac.ops.check_link(hw, &autoneg,
+ &adapter->link_up, FALSE);
+ if (err)
+ goto out;
+
+ if (hw->mac.ops.setup_link)
+ err = hw->mac.ops.setup_link(hw,
+ autoneg, adapter->link_up);
+out:
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * Enable transmit unit.
+ *
+ **********************************************************************/
+static void
+ixv_initialize_transmit_units(struct adapter *adapter)
+{
+ struct tx_ring *txr = adapter->tx_rings;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ u64 tdba = txr->txdma.dma_paddr;
+ u32 txctrl, txdctl;
+
+ /* Set WTHRESH to 8, burst writeback */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl |= (8 << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+
+ /* Set the HW Tx Head and Tail indices */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
+
+ /* Set Tx Tail register */
+ txr->tail = IXGBE_VFTDT(i);
+
+ /* Set the processing limit */
+ txr->process_limit = ixv_tx_process_limit;
+
+ /* Set Ring parameters */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
+ (tdba & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
+ adapter->num_tx_desc *
+ sizeof(struct ixgbe_legacy_tx_desc));
+ txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
+
+ /* Now enable */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+ }
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * Setup receive registers and features.
+ *
+ **********************************************************************/
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+static void
+ixv_initialize_receive_units(struct adapter *adapter)
+{
+ struct rx_ring *rxr = adapter->rx_rings;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ifnet *ifp = adapter->ifp;
+ u32 bufsz, fctrl, rxcsum, hlreg;
+
+
+ /* Enable broadcasts */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_BAM;
+ fctrl |= IXGBE_FCTRL_DPF;
+ fctrl |= IXGBE_FCTRL_PMCF;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+ /* Set for Jumbo Frames? */
+ hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ if (ifp->if_mtu > ETHERMTU) {
+ hlreg |= IXGBE_HLREG0_JUMBOEN;
+ bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ } else {
+ hlreg &= ~IXGBE_HLREG0_JUMBOEN;
+ bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
+
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+ u64 rdba = rxr->rxdma.dma_paddr;
+ u32 reg, rxdctl;
+
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
+ (rdba & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
+ (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
+ adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+
+ /* Set up the SRRCTL register */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+ reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+ reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+ reg |= bufsz;
+ reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
+ adapter->num_rx_desc - 1);
+ /* Set the processing limit */
+ rxr->process_limit = ixv_rx_process_limit;
+
+ /* Set Rx Tail register */
+ rxr->tail = IXGBE_VFRDT(rxr->me);
+
+ /* Do the queue enabling last */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+ for (int k = 0; k < 10; k++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+ IXGBE_RXDCTL_ENABLE)
+ break;
+ else
+ msec_delay(1);
+ }
+ wmb();
+ }
+
+ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+
+ if (ifp->if_capenable & IFCAP_RXCSUM)
+ rxcsum |= IXGBE_RXCSUM_PCSD;
+
+ if (!(rxcsum & IXGBE_RXCSUM_PCSD))
+ rxcsum |= IXGBE_RXCSUM_IPPCSE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+ return;
+}
+
+static void
+ixv_setup_vlan_support(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ctrl, vid, vfta, retry;
+
+
+ /*
+	** We get here through init_locked, meaning
+	** a soft reset; that has already cleared
+	** the VFTA and other state, so if no
+	** vlans have been registered, do nothing.
+ */
+ if (adapter->num_vlans == 0)
+ return;
+
+ /* Enable the queues */
+ for (int i = 0; i < adapter->num_queues; i++) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
+ }
+
+ /*
+	** A soft reset zeros out the VFTA, so
+ ** we need to repopulate it now.
+ */
+ for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
+ if (ixv_shadow_vfta[i] == 0)
+ continue;
+ vfta = ixv_shadow_vfta[i];
+ /*
+		** Reconstruct the vlan ids
+		** based on the bits set in each
+		** of the array words.
+		*/
+		for (int j = 0; j < 32; j++) {
+ retry = 0;
+ if ((vfta & (1 << j)) == 0)
+ continue;
+ vid = (i * 32) + j;
+ /* Call the shared code mailbox routine */
+ while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
+ if (++retry > 5)
+ break;
+ }
+ }
+ }
+}
+
+/*
+** This routine is run via a vlan config EVENT;
+** it enables us to use the HW Filter table since
+** we can get the vlan id. This just creates the
+** entry in the soft version of the VFTA; init will
+** repopulate the real table.
+*/
+static void
+ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct adapter *adapter = ifp->if_softc;
+ u16 index, bit;
+
+ if (ifp->if_softc != arg) /* Not our event */
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ IXGBE_CORE_LOCK(adapter);
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
+ ixv_shadow_vfta[index] |= (1 << bit);
+ ++adapter->num_vlans;
+ /* Re-init to load the changes */
+ ixv_init_locked(adapter);
+ IXGBE_CORE_UNLOCK(adapter);
+}
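+
+/*
+** Index/bit sketch: vtag 70 gives index = (70 >> 5) & 0x7F = 2 and
+** bit = 70 & 0x1F = 6, so VLAN 70 is tracked in bit 6 of shadow
+** word 2; ixv_setup_vlan_support() reverses this mapping.
+*/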
+
+/*
+** This routine is run via a vlan
+** unconfig EVENT; it removes our entry
+** from the soft VFTA.
+*/
+static void
+ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct adapter *adapter = ifp->if_softc;
+ u16 index, bit;
+
+ if (ifp->if_softc != arg)
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ IXGBE_CORE_LOCK(adapter);
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
+ ixv_shadow_vfta[index] &= ~(1 << bit);
+ --adapter->num_vlans;
+ /* Re-init to load the changes */
+ ixv_init_locked(adapter);
+ IXGBE_CORE_UNLOCK(adapter);
+}
+
+static void
+ixv_enable_intr(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ix_queue *que = adapter->queues;
+ u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+
+ for (int i = 0; i < adapter->num_queues; i++, que++)
+ ixv_enable_queue(adapter, que->msix);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ return;
+}
+
+static void
+ixv_disable_intr(struct adapter *adapter)
+{
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ return;
+}
+
+/*
+** Setup the correct IVAR register for a particular MSIX interrupt
+** - entry is the register array entry
+** - vector is the MSIX vector for this queue
+** - type is RX/TX/MISC
+*/
+static void
+ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ivar, index;
+
+ vector |= IXGBE_IVAR_ALLOC_VAL;
+
+ if (type == -1) { /* MISC IVAR */
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+ } else { /* RX/TX IVARS */
+ index = (16 * (entry & 1)) + (8 * type);
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
+ }
+}
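+
+/*
+** Worked example (sketch): entry 3, type 1 (TX) lands in VTIVAR(1)
+** at bit offset (16 * 1) + (8 * 1) = 24, so the vector occupies
+** bits 31:24 of that register; entry 3, type 0 (RX) would use
+** bits 23:16.
+*/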
+
+static void
+ixv_configure_ivars(struct adapter *adapter)
+{
+ struct ix_queue *que = adapter->queues;
+
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ /* First the RX queue entry */
+ ixv_set_ivar(adapter, i, que->msix, 0);
+ /* ... and the TX */
+ ixv_set_ivar(adapter, i, que->msix, 1);
+ /* Set an initial value in EITR */
+ IXGBE_WRITE_REG(&adapter->hw,
+ IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
+ }
+
+ /* For the mailbox interrupt */
+ ixv_set_ivar(adapter, 1, adapter->vector, -1);
+}
+
+
+/*
+** Tasklet handler for MSIX MBX interrupts
+** - run outside interrupt context since it might sleep
+*/
+static void
+ixv_handle_mbx(void *context, int pending)
+{
+ struct adapter *adapter = context;
+
+ ixgbe_check_link(&adapter->hw,
+ &adapter->link_speed, &adapter->link_up, 0);
+ ixv_update_link_status(adapter);
+}
+
+/*
+** The VF stats registers never have a truly virgin
+** starting point, so this routine tries to make an
+** artificial one, marking ground zero on attach, as
+** it were.
+*/
+static void
+ixv_save_stats(struct adapter *adapter)
+{
+ if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
+ adapter->stats.vf.saved_reset_vfgprc +=
+ adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
+ adapter->stats.vf.saved_reset_vfgptc +=
+ adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
+ adapter->stats.vf.saved_reset_vfgorc +=
+ adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
+ adapter->stats.vf.saved_reset_vfgotc +=
+ adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
+ adapter->stats.vf.saved_reset_vfmprc +=
+ adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
+ }
+}
+
+static void
+ixv_init_stats(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+ adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+ adapter->stats.vf.last_vfgorc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+
+ adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+ adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+ adapter->stats.vf.last_vfgotc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+
+ adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+ adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
+ adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
+ adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
+ adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
+ adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
+}
+
+#define UPDATE_STAT_32(reg, last, count) \
+{ \
+ u32 current = IXGBE_READ_REG(hw, reg); \
+ if (current < last) \
+ count += 0x100000000LL; \
+ last = current; \
+ count &= 0xFFFFFFFF00000000LL; \
+ count |= current; \
+}
+
+#define UPDATE_STAT_36(lsb, msb, last, count) \
+{ \
+ u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
+ u64 cur_msb = IXGBE_READ_REG(hw, msb); \
+ u64 current = ((cur_msb << 32) | cur_lsb); \
+ if (current < last) \
+ count += 0x1000000000LL; \
+ last = current; \
+ count &= 0xFFFFFFF000000000LL; \
+ count |= current; \
+}
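+
+/*
+** Rollover sketch: in UPDATE_STAT_32, if last == 0xFFFFFFF0 and the
+** register now reads 0x10, current < last, so 2^32 is added to the
+** running count before its low 32 bits are replaced with 0x10; the
+** 36-bit variant carries 2^36 in the same way.
+*/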
+
+/*
+** ixv_update_stats - Update the board statistics counters.
+*/
+static void
+ixv_update_stats(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
+ adapter->stats.vf.vfgprc);
+ UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
+ adapter->stats.vf.vfgptc);
+ UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+ adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
+ UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+ adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
+ UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
+ adapter->stats.vf.vfmprc);
+}
+
+/*
+ * Add statistic sysctls for the VF.
+ */
+static void
+ixv_add_stats_sysctls(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ struct ix_queue *que = &adapter->queues[0];
+ struct tx_ring *txr = que->txr;
+ struct rx_ring *rxr = que->rxr;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+ struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
+
+ struct sysctl_oid *stat_node, *queue_node;
+ struct sysctl_oid_list *stat_list, *queue_list;
+
+ /* Driver Statistics */
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "dropped",
+ CTLFLAG_RD, &adapter->dropped_pkts,
+ "Driver dropped packets");
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_defrag_failed",
+ CTLFLAG_RD, &adapter->mbuf_defrag_failed,
+ "m_defrag() failed");
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
+ CTLFLAG_RD, &adapter->watchdog_events,
+ "Watchdog timeouts");
+
+ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
+ CTLFLAG_RD, NULL,
+ "VF Statistics (read from HW registers)");
+ stat_list = SYSCTL_CHILDREN(stat_node);
+
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
+ CTLFLAG_RD, &stats->vfgprc,
+ "Good Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
+ CTLFLAG_RD, &stats->vfgorc,
+ "Good Octets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
+ CTLFLAG_RD, &stats->vfmprc,
+ "Multicast Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+ CTLFLAG_RD, &stats->vfgptc,
+ "Good Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+ CTLFLAG_RD, &stats->vfgotc,
+ "Good Octets Transmitted");
+
+ queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
+ CTLFLAG_RD, NULL,
+ "Queue Statistics (collected by SW)");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+ CTLFLAG_RD, &(que->irqs),
+ "IRQs on queue");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
+ CTLFLAG_RD, &(rxr->rx_irq),
+ "RX irqs on queue");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+ CTLFLAG_RD, &(rxr->rx_packets),
+ "RX packets");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+ CTLFLAG_RD, &(rxr->rx_bytes),
+ "RX bytes");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
+ CTLFLAG_RD, &(rxr->rx_discarded),
+ "Discarded RX packets");
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+ CTLFLAG_RD, &(txr->total_packets),
+ "TX Packets");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
+ CTLFLAG_RD, &(txr->tx_bytes),
+ "TX Bytes");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
+ CTLFLAG_RD, &(txr->no_desc_avail),
+ "# of times not enough descriptors were available during TX");
+}
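+/*
+ * With the tree above in place the counters can be read from
+ * userland; for example, assuming the first ixv instance:
+ *
+ *	sysctl dev.ixv.0.mac.good_pkts_rcvd
+ *	sysctl dev.ixv.0.que.tx_no_desc
+ */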
+
+/**********************************************************************
+ *
+ * This routine is called only when the debug sysctl for this
+ * device is enabled. It provides a way to examine important
+ * statistics maintained by the driver and hardware.
+ *
+ **********************************************************************/
+static void
+ixv_print_debug_info(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ix_queue *que = adapter->queues;
+ struct rx_ring *rxr;
+ struct tx_ring *txr;
+ struct lro_ctrl *lro;
+
+ device_printf(dev,"Error Byte Count = %u \n",
+ IXGBE_READ_REG(hw, IXGBE_ERRBC));
+
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ txr = que->txr;
+ rxr = que->rxr;
+ lro = &rxr->lro;
+ device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
+ que->msix, (long)que->irqs);
+ device_printf(dev,"RX(%d) Packets Received: %lld\n",
+ rxr->me, (long long)rxr->rx_packets);
+ device_printf(dev,"RX(%d) Bytes Received: %lu\n",
+ rxr->me, (long)rxr->rx_bytes);
+ device_printf(dev,"RX(%d) LRO Queued= %d\n",
+ rxr->me, lro->lro_queued);
+ device_printf(dev,"RX(%d) LRO Flushed= %d\n",
+ rxr->me, lro->lro_flushed);
+ device_printf(dev,"TX(%d) Packets Sent: %lu\n",
+ txr->me, (long)txr->total_packets);
+ device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
+ txr->me, (long)txr->no_desc_avail);
+ }
+
+ device_printf(dev,"MBX IRQ Handled: %lu\n",
+ (long)adapter->vector_irq);
+ return;
+}
+
+static int
+ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
+{
+ int error, result;
+ struct adapter *adapter;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1) {
+ adapter = (struct adapter *) arg1;
+ ixv_print_debug_info(adapter);
+ }
+	return (error);
+}
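+/*
+ * The handler above only acts when 1 is written to the sysctl it
+ * backs (registered elsewhere in the driver); assuming an OID named
+ * "debug" on the first ixv instance, something like
+ * `sysctl dev.ixv.0.debug=1` would dump the state above, and any
+ * other value is ignored.
+ */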
+
diff --git a/sys/dev/ixgbe/ix_txrx.c b/sys/dev/ixgbe/ix_txrx.c
new file mode 100644
index 0000000..b16301d
--- /dev/null
+++ b/sys/dev/ixgbe/ix_txrx.c
@@ -0,0 +1,2259 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifndef IXGBE_STANDALONE_BUILD
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_rss.h"
+#endif
+
+#include "ixgbe.h"
+
+#ifdef RSS
+#include <netinet/in_rss.h>
+#endif
+
+/*
+** HW RSC control:
+** this feature only works with
+** IPv4, and only on 82599 and later.
+** It also causes IP forwarding to
+** fail, and unlike LRO that cannot
+** be controlled by the stack. For
+** these reasons it is best left off
+** by default, with no tunable
+** interface; enabling it requires
+** a recompile with this set TRUE.
+*/
+static bool ixgbe_rsc_enable = FALSE;
+
+#ifdef IXGBE_FDIR
+/*
+** For Flow Director: this is the
+** number of TX packets between
+** samples taken for the filter
+** pool; at the default rate every
+** 20th packet is probed.
+**
+** Setting this to 0 disables the
+** feature.
+*/
+static int atr_sample_rate = 20;
+#endif
+
+/* Shared PCI config read/write */
+inline u16
+ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
+{
+ u16 value;
+
+ value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
+ reg, 2);
+
+ return (value);
+}
+
+inline void
+ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
+{
+ pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
+ reg, value, 2);
+
+ return;
+}
+
+/*********************************************************************
+ * Local Function prototypes
+ *********************************************************************/
+static void ixgbe_setup_transmit_ring(struct tx_ring *);
+static void ixgbe_free_transmit_buffers(struct tx_ring *);
+static int ixgbe_setup_receive_ring(struct rx_ring *);
+static void ixgbe_free_receive_buffers(struct rx_ring *);
+
+static void ixgbe_rx_checksum(u32, struct mbuf *, u32);
+static void ixgbe_refresh_mbufs(struct rx_ring *, int);
+static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
+static int ixgbe_tx_ctx_setup(struct tx_ring *,
+ struct mbuf *, u32 *, u32 *);
+static int ixgbe_tso_setup(struct tx_ring *,
+ struct mbuf *, u32 *, u32 *);
+#ifdef IXGBE_FDIR
+static void ixgbe_atr(struct tx_ring *, struct mbuf *);
+#endif
+static __inline void ixgbe_rx_discard(struct rx_ring *, int);
+static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
+ struct mbuf *, u32);
+
+#ifdef IXGBE_LEGACY_TX
+/*********************************************************************
+ * Transmit entry point
+ *
+ * ixgbe_start is called by the stack to initiate a transmit.
+ * The driver will remain in this routine as long as there are
+ * packets to transmit and transmit resources are available.
+ * If resources are not available, the stack is notified and
+ * the packet is requeued.
+ **********************************************************************/
+
+void
+ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
+{
+ struct mbuf *m_head;
+ struct adapter *adapter = txr->adapter;
+
+ IXGBE_TX_LOCK_ASSERT(txr);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+ if (!adapter->link_active)
+ return;
+
+ while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+ if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
+ break;
+
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+ if (m_head == NULL)
+ break;
+
+ if (ixgbe_xmit(txr, &m_head)) {
+ if (m_head != NULL)
+ IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+ break;
+ }
+ /* Send a copy of the frame to the BPF listener */
+ ETHER_BPF_MTAP(ifp, m_head);
+ }
+ return;
+}
+
+/*
+ * Legacy TX start - called by the stack; this
+ * always uses the first tx ring, and should
+ * not be used with multiqueue tx enabled.
+ */
+void
+ixgbe_start(struct ifnet *ifp)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr = adapter->tx_rings;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXGBE_TX_LOCK(txr);
+ ixgbe_start_locked(txr, ifp);
+ IXGBE_TX_UNLOCK(txr);
+ }
+ return;
+}
+
+#else /* ! IXGBE_LEGACY_TX */
+
+/*
+** Multiqueue Transmit driver
+*/
+int
+ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct ix_queue *que;
+ struct tx_ring *txr;
+ int i, err = 0;
+#ifdef RSS
+ uint32_t bucket_id;
+#endif
+
+	/*
+	 * When doing RSS, map the flow to the same outbound
+	 * queue its inbound traffic would be steered to.
+	 *
+	 * If everything is set up correctly, that will be the
+	 * same RSS bucket the current CPU belongs to.
+	 */
+ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
+#ifdef RSS
+ if (rss_hash2bucket(m->m_pkthdr.flowid,
+ M_HASHTYPE_GET(m), &bucket_id) == 0)
+ /* TODO: spit out something if bucket_id > num_queues? */
+ i = bucket_id % adapter->num_queues;
+ else
+#endif
+ i = m->m_pkthdr.flowid % adapter->num_queues;
+ } else
+ i = curcpu % adapter->num_queues;
+
+ /* Check for a hung queue and pick alternative */
+ if (((1 << i) & adapter->active_queues) == 0)
+ i = ffsl(adapter->active_queues);
+
+ txr = &adapter->tx_rings[i];
+ que = &adapter->queues[i];
+
+ err = drbr_enqueue(ifp, txr->br, m);
+ if (err)
+ return (err);
+ if (IXGBE_TX_TRYLOCK(txr)) {
+ ixgbe_mq_start_locked(ifp, txr);
+ IXGBE_TX_UNLOCK(txr);
+ } else
+ taskqueue_enqueue(que->tq, &txr->txq_task);
+
+ return (0);
+}
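+/*
+** Queue selection example for the routine above: with 4 queues and
+** an RSS/flowid hash of 42, the frame lands on ring 42 % 4 = 2;
+** without a hash, curcpu is used the same way. If that queue's bit
+** in active_queues is clear (the queue is suspected hung), ffsl()
+** redirects the frame to an active queue instead.
+*/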
+
+int
+ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
+{
+ struct adapter *adapter = txr->adapter;
+ struct mbuf *next;
+ int enqueued = 0, err = 0;
+
+ if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
+ adapter->link_active == 0)
+ return (ENETDOWN);
+
+ /* Process the queue */
+#if __FreeBSD_version < 901504
+ next = drbr_dequeue(ifp, txr->br);
+ while (next != NULL) {
+ if ((err = ixgbe_xmit(txr, &next)) != 0) {
+ if (next != NULL)
+ err = drbr_enqueue(ifp, txr->br, next);
+#else
+ while ((next = drbr_peek(ifp, txr->br)) != NULL) {
+ if ((err = ixgbe_xmit(txr, &next)) != 0) {
+ if (next == NULL) {
+ drbr_advance(ifp, txr->br);
+ } else {
+ drbr_putback(ifp, txr->br, next);
+ }
+#endif
+ break;
+ }
+#if __FreeBSD_version >= 901504
+ drbr_advance(ifp, txr->br);
+#endif
+ enqueued++;
+#if 0 // this is VF-only
+#if __FreeBSD_version >= 1100036
+ if (next->m_flags & M_MCAST)
+ if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
+#endif
+#endif
+ /* Send a copy of the frame to the BPF listener */
+ ETHER_BPF_MTAP(ifp, next);
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+#if __FreeBSD_version < 901504
+ next = drbr_dequeue(ifp, txr->br);
+#endif
+ }
+
+ if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
+ ixgbe_txeof(txr);
+
+ return (err);
+}
+
+/*
+ * Called from a taskqueue to drain queued transmit packets.
+ */
+void
+ixgbe_deferred_mq_start(void *arg, int pending)
+{
+ struct tx_ring *txr = arg;
+ struct adapter *adapter = txr->adapter;
+ struct ifnet *ifp = adapter->ifp;
+
+ IXGBE_TX_LOCK(txr);
+ if (!drbr_empty(ifp, txr->br))
+ ixgbe_mq_start_locked(ifp, txr);
+ IXGBE_TX_UNLOCK(txr);
+}
+
+/*
+** Flush all ring buffers
+*/
+void
+ixgbe_qflush(struct ifnet *ifp)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct mbuf *m;
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ IXGBE_TX_LOCK(txr);
+ while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+ m_freem(m);
+ IXGBE_TX_UNLOCK(txr);
+ }
+ if_qflush(ifp);
+}
+#endif /* IXGBE_LEGACY_TX */
+
+
+/*********************************************************************
+ *
+ * This routine maps the mbufs to tx descriptors, allowing the
+ * TX engine to transmit the packets.
+ * - return 0 on success, positive on failure
+ *
+ **********************************************************************/
+
+static int
+ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
+{
+ struct adapter *adapter = txr->adapter;
+ u32 olinfo_status = 0, cmd_type_len;
+ int i, j, error, nsegs;
+ int first;
+ bool remap = TRUE;
+ struct mbuf *m_head;
+ bus_dma_segment_t segs[adapter->num_segs];
+ bus_dmamap_t map;
+ struct ixgbe_tx_buf *txbuf;
+ union ixgbe_adv_tx_desc *txd = NULL;
+
+ m_head = *m_headp;
+
+ /* Basic descriptor defines */
+ cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
+
+ if (m_head->m_flags & M_VLANTAG)
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+ /*
+ * Important to capture the first descriptor
+ * used because it will contain the index of
+ * the one we tell the hardware to report back
+ */
+ first = txr->next_avail_desc;
+ txbuf = &txr->tx_buffers[first];
+ map = txbuf->map;
+
+ /*
+ * Map the packet for DMA.
+ */
+retry:
+ error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
+ *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+ if (__predict_false(error)) {
+ struct mbuf *m;
+
+ switch (error) {
+ case EFBIG:
+ /* Try it again? - one try */
+ if (remap == TRUE) {
+ remap = FALSE;
+ m = m_defrag(*m_headp, M_NOWAIT);
+ if (m == NULL) {
+ adapter->mbuf_defrag_failed++;
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ *m_headp = m;
+ goto retry;
+ } else
+ return (error);
+ case ENOMEM:
+ txr->no_tx_dma_setup++;
+ return (error);
+ default:
+ txr->no_tx_dma_setup++;
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (error);
+ }
+ }
+
+ /* Make certain there are enough descriptors */
+ if (nsegs > txr->tx_avail - 2) {
+ txr->no_desc_avail++;
+ bus_dmamap_unload(txr->txtag, map);
+ return (ENOBUFS);
+ }
+ m_head = *m_headp;
+
+	/*
+	** Set up the appropriate offload context;
+	** this will consume the first descriptor.
+	*/
+ error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
+ if (__predict_false(error)) {
+ if (error == ENOBUFS)
+ *m_headp = NULL;
+ return (error);
+ }
+
+#ifdef IXGBE_FDIR
+ /* Do the flow director magic */
+ if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
+ ++txr->atr_count;
+ if (txr->atr_count >= atr_sample_rate) {
+ ixgbe_atr(txr, m_head);
+ txr->atr_count = 0;
+ }
+ }
+#endif
+
+ olinfo_status |= IXGBE_ADVTXD_CC;
+ i = txr->next_avail_desc;
+ for (j = 0; j < nsegs; j++) {
+ bus_size_t seglen;
+ bus_addr_t segaddr;
+
+ txbuf = &txr->tx_buffers[i];
+ txd = &txr->tx_base[i];
+ seglen = segs[j].ds_len;
+ segaddr = htole64(segs[j].ds_addr);
+
+ txd->read.buffer_addr = segaddr;
+		txd->read.cmd_type_len = htole32(txr->txd_cmd |
+		    cmd_type_len | seglen);
+ txd->read.olinfo_status = htole32(olinfo_status);
+
+ if (++i == txr->num_desc)
+ i = 0;
+ }
+
+ txd->read.cmd_type_len |=
+ htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
+ txr->tx_avail -= nsegs;
+ txr->next_avail_desc = i;
+
+ txbuf->m_head = m_head;
+	/*
+	** Here we swap the map so the last descriptor,
+	** which gets the completion interrupt, has the
+	** real map, and the first descriptor gets the
+	** unused map from this descriptor.
+	*/
+ txr->tx_buffers[first].map = txbuf->map;
+ txbuf->map = map;
+ bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
+
+ /* Set the EOP descriptor that will be marked done */
+ txbuf = &txr->tx_buffers[first];
+ txbuf->eop = txd;
+
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+	/*
+	 * Advance the Transmit Descriptor Tail (TDT); this tells the
+	 * hardware that this frame is available to transmit.
+	 */
+ ++txr->total_packets;
+ IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
+
+ /* Mark queue as having work */
+ if (txr->busy == 0)
+ txr->busy = 1;
+
+ return (0);
+
+}
+
+
+/*********************************************************************
+ *
+ * Allocate memory for tx_buffer structures. The tx_buffer stores all
+ * the information needed to transmit a packet on the wire. This is
+ * called only once at attach, setup is done every reset.
+ *
+ **********************************************************************/
+int
+ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
+{
+ struct adapter *adapter = txr->adapter;
+ device_t dev = adapter->dev;
+ struct ixgbe_tx_buf *txbuf;
+ int error, i;
+
+ /*
+ * Setup DMA descriptor areas.
+ */
+ if ((error = bus_dma_tag_create(
+ bus_get_dma_tag(adapter->dev), /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ IXGBE_TSO_SIZE, /* maxsize */
+ adapter->num_segs, /* nsegments */
+ PAGE_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &txr->txtag))) {
+ device_printf(dev,"Unable to allocate TX DMA tag\n");
+ goto fail;
+ }
+
+ if (!(txr->tx_buffers =
+ (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
+ adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate tx_buffer memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ /* Create the descriptor buffer dma maps */
+ txbuf = txr->tx_buffers;
+ for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+ error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
+ if (error != 0) {
+ device_printf(dev, "Unable to create TX DMA map\n");
+ goto fail;
+ }
+ }
+
+	return (0);
+fail:
+ /* We free all, it handles case where we are in the middle */
+ ixgbe_free_transmit_structures(adapter);
+ return (error);
+}
+
+/*********************************************************************
+ *
+ * Initialize a transmit ring.
+ *
+ **********************************************************************/
+static void
+ixgbe_setup_transmit_ring(struct tx_ring *txr)
+{
+ struct adapter *adapter = txr->adapter;
+ struct ixgbe_tx_buf *txbuf;
+ int i;
+#ifdef DEV_NETMAP
+ struct netmap_adapter *na = NA(adapter->ifp);
+ struct netmap_slot *slot;
+#endif /* DEV_NETMAP */
+
+ /* Clear the old ring contents */
+ IXGBE_TX_LOCK(txr);
+#ifdef DEV_NETMAP
+ /*
+ * (under lock): if in netmap mode, do some consistency
+ * checks and set slot to entry 0 of the netmap ring.
+ */
+ slot = netmap_reset(na, NR_TX, txr->me, 0);
+#endif /* DEV_NETMAP */
+ bzero((void *)txr->tx_base,
+ (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
+ /* Reset indices */
+ txr->next_avail_desc = 0;
+ txr->next_to_clean = 0;
+
+ /* Free any existing tx buffers. */
+ txbuf = txr->tx_buffers;
+ for (i = 0; i < txr->num_desc; i++, txbuf++) {
+ if (txbuf->m_head != NULL) {
+ bus_dmamap_sync(txr->txtag, txbuf->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag, txbuf->map);
+ m_freem(txbuf->m_head);
+ txbuf->m_head = NULL;
+ }
+#ifdef DEV_NETMAP
+ /*
+ * In netmap mode, set the map for the packet buffer.
+ * NOTE: Some drivers (not this one) also need to set
+ * the physical buffer address in the NIC ring.
+ * Slots in the netmap ring (indexed by "si") are
+ * kring->nkr_hwofs positions "ahead" wrt the
+ * corresponding slot in the NIC ring. In some drivers
+ * (not here) nkr_hwofs can be negative. Function
+ * netmap_idx_n2k() handles wraparounds properly.
+ */
+ if (slot) {
+ int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
+ netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si));
+ }
+#endif /* DEV_NETMAP */
+ /* Clear the EOP descriptor pointer */
+ txbuf->eop = NULL;
+ }
+
+#ifdef IXGBE_FDIR
+ /* Set the rate at which we sample packets */
+ if (adapter->hw.mac.type != ixgbe_mac_82598EB)
+ txr->atr_sample = atr_sample_rate;
+#endif
+
+ /* Set number of descriptors available */
+ txr->tx_avail = adapter->num_tx_desc;
+
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ IXGBE_TX_UNLOCK(txr);
+}
+
+/*********************************************************************
+ *
+ * Initialize all transmit rings.
+ *
+ **********************************************************************/
+int
+ixgbe_setup_transmit_structures(struct adapter *adapter)
+{
+ struct tx_ring *txr = adapter->tx_rings;
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++)
+ ixgbe_setup_transmit_ring(txr);
+
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Free all transmit rings.
+ *
+ **********************************************************************/
+void
+ixgbe_free_transmit_structures(struct adapter *adapter)
+{
+ struct tx_ring *txr = adapter->tx_rings;
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ IXGBE_TX_LOCK(txr);
+ ixgbe_free_transmit_buffers(txr);
+ ixgbe_dma_free(adapter, &txr->txdma);
+ IXGBE_TX_UNLOCK(txr);
+ IXGBE_TX_LOCK_DESTROY(txr);
+ }
+ free(adapter->tx_rings, M_DEVBUF);
+}
+
+/*********************************************************************
+ *
+ * Free transmit ring related data structures.
+ *
+ **********************************************************************/
+static void
+ixgbe_free_transmit_buffers(struct tx_ring *txr)
+{
+ struct adapter *adapter = txr->adapter;
+ struct ixgbe_tx_buf *tx_buffer;
+ int i;
+
+ INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");
+
+ if (txr->tx_buffers == NULL)
+ return;
+
+ tx_buffer = txr->tx_buffers;
+ for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
+ if (tx_buffer->m_head != NULL) {
+ bus_dmamap_sync(txr->txtag, tx_buffer->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag,
+ tx_buffer->map);
+ m_freem(tx_buffer->m_head);
+ tx_buffer->m_head = NULL;
+ if (tx_buffer->map != NULL) {
+ bus_dmamap_destroy(txr->txtag,
+ tx_buffer->map);
+ tx_buffer->map = NULL;
+ }
+ } else if (tx_buffer->map != NULL) {
+ bus_dmamap_unload(txr->txtag,
+ tx_buffer->map);
+ bus_dmamap_destroy(txr->txtag,
+ tx_buffer->map);
+ tx_buffer->map = NULL;
+ }
+ }
+#ifdef IXGBE_LEGACY_TX
+ if (txr->br != NULL)
+ buf_ring_free(txr->br, M_DEVBUF);
+#endif
+ if (txr->tx_buffers != NULL) {
+ free(txr->tx_buffers, M_DEVBUF);
+ txr->tx_buffers = NULL;
+ }
+ if (txr->txtag != NULL) {
+ bus_dma_tag_destroy(txr->txtag);
+ txr->txtag = NULL;
+ }
+ return;
+}
+
+/*********************************************************************
+ *
+ * Advanced Context Descriptor setup for VLAN, CSUM or TSO
+ *
+ **********************************************************************/
+
+static int
+ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
+ u32 *cmd_type_len, u32 *olinfo_status)
+{
+ struct ixgbe_adv_tx_context_desc *TXD;
+ struct ether_vlan_header *eh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+ int ehdrlen, ip_hlen = 0;
+ u16 etype;
+ u8 ipproto = 0;
+ int offload = TRUE;
+ int ctxd = txr->next_avail_desc;
+ u16 vtag = 0;
+
+ /* First check if TSO is to be used */
+ if (mp->m_pkthdr.csum_flags & CSUM_TSO)
+ return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
+
+ if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
+ offload = FALSE;
+
+ /* Indicate the whole packet as payload when not doing TSO */
+ *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
+
+ /* Now ready a context descriptor */
+ TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+ /*
+ ** In advanced descriptors the vlan tag must
+ ** be placed into the context descriptor. Hence
+ ** we need to make one even if not doing offloads.
+ */
+ if (mp->m_flags & M_VLANTAG) {
+ vtag = htole16(mp->m_pkthdr.ether_vtag);
+ vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
+ }
+
+ /*
+ * Determine where frame payload starts.
+ * Jump over vlan headers if already present,
+ * helpful for QinQ too.
+ */
+ eh = mtod(mp, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ etype = ntohs(eh->evl_proto);
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ } else {
+ etype = ntohs(eh->evl_encap_proto);
+ ehdrlen = ETHER_HDR_LEN;
+ }
+
+ /* Set the ether header length */
+ vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+
+ if (offload == FALSE)
+ goto no_offloads;
+
+ switch (etype) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+ ip_hlen = ip->ip_hl << 2;
+ ipproto = ip->ip_p;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ break;
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+ ip_hlen = sizeof(struct ip6_hdr);
+ /* XXX-BZ this will go badly in case of ext hdrs. */
+ ipproto = ip6->ip6_nxt;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
+ break;
+ default:
+ offload = FALSE;
+ break;
+ }
+
+ vlan_macip_lens |= ip_hlen;
+
+ switch (ipproto) {
+ case IPPROTO_TCP:
+ if (mp->m_pkthdr.csum_flags & CSUM_TCP)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+
+ case IPPROTO_UDP:
+ if (mp->m_pkthdr.csum_flags & CSUM_UDP)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
+ break;
+
+#if __FreeBSD_version >= 800000
+ case IPPROTO_SCTP:
+ if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ break;
+#endif
+ default:
+ offload = FALSE;
+ break;
+ }
+
+ if (offload) /* For the TX descriptor setup */
+ *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+
+no_offloads:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+ /* Now copy bits into descriptor */
+ TXD->vlan_macip_lens = htole32(vlan_macip_lens);
+ TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
+ TXD->seqnum_seed = htole32(0);
+ TXD->mss_l4len_idx = htole32(0);
+
+ /* We've consumed the first desc, adjust counters */
+ if (++ctxd == txr->num_desc)
+ ctxd = 0;
+ txr->next_avail_desc = ctxd;
+ --txr->tx_avail;
+
+ return (0);
+}
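+/*
+** An example of the packing done above: an untagged IPv4/TCP frame
+** with a 14-byte ethernet header and a 20-byte IP header yields
+** vlan_macip_lens = (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20, while
+** type_tucmd_mlhl carries DTYP_CTXT | DCMD_DEXT | TUCMD_IPV4 |
+** TUCMD_L4T_TCP when TCP checksum offload was requested.
+*/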
+
+/**********************************************************************
+ *
+ * Setup work for hardware segmentation offload (TSO) on
+ * adapters using advanced tx descriptors
+ *
+ **********************************************************************/
+static int
+ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp,
+ u32 *cmd_type_len, u32 *olinfo_status)
+{
+ struct ixgbe_adv_tx_context_desc *TXD;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+ u32 mss_l4len_idx = 0, paylen;
+ u16 vtag = 0, eh_type;
+ int ctxd, ehdrlen, ip_hlen, tcp_hlen;
+ struct ether_vlan_header *eh;
+#ifdef INET6
+ struct ip6_hdr *ip6;
+#endif
+#ifdef INET
+ struct ip *ip;
+#endif
+ struct tcphdr *th;
+
+
+ /*
+ * Determine where frame payload starts.
+ * Jump over vlan headers if already present
+ */
+ eh = mtod(mp, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ eh_type = eh->evl_proto;
+ } else {
+ ehdrlen = ETHER_HDR_LEN;
+ eh_type = eh->evl_encap_proto;
+ }
+
+ switch (ntohs(eh_type)) {
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+ /* XXX-BZ For now we do not pretend to support ext. hdrs. */
+ if (ip6->ip6_nxt != IPPROTO_TCP)
+ return (ENXIO);
+ ip_hlen = sizeof(struct ip6_hdr);
+ ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+ th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
+ th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
+ break;
+#endif
+#ifdef INET
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+ if (ip->ip_p != IPPROTO_TCP)
+ return (ENXIO);
+ ip->ip_sum = 0;
+ ip_hlen = ip->ip_hl << 2;
+ th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+ th->th_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ /* Tell transmit desc to also do IPv4 checksum. */
+ *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
+ break;
+#endif
+ default:
+ panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
+ __func__, ntohs(eh_type));
+ break;
+ }
+
+ ctxd = txr->next_avail_desc;
+ TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+ tcp_hlen = th->th_off << 2;
+
+ /* This is used in the transmit desc in encap */
+ paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
+
+ /* VLAN MACLEN IPLEN */
+ if (mp->m_flags & M_VLANTAG) {
+ vtag = htole16(mp->m_pkthdr.ether_vtag);
+ vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
+ }
+
+ vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= ip_hlen;
+ TXD->vlan_macip_lens = htole32(vlan_macip_lens);
+
+ /* ADV DTYPE TUCMD */
+ type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
+ TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+ TXD->seqnum_seed = htole32(0);
+
+ if (++ctxd == txr->num_desc)
+ ctxd = 0;
+
+ txr->tx_avail--;
+ txr->next_avail_desc = ctxd;
+ *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+ *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+ *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
+ ++txr->tso_tx;
+ return (0);
+}
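+/*
+** An example of the TSO fields set above: at a 1500-byte MTU,
+** tso_segsz is typically 1448 (20-byte IP and TCP headers plus 12
+** bytes of TCP options), so mss_l4len_idx packs
+** (1448 << IXGBE_ADVTXD_MSS_SHIFT) | (32 << IXGBE_ADVTXD_L4LEN_SHIFT),
+** and paylen excludes all headers so the hardware can replicate
+** them for each segment it carves out.
+*/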
+
+
+/**********************************************************************
+ *
+ * Examine each tx_buffer in the used queue. If the hardware is done
+ * processing the packet then free associated resources. The
+ * tx_buffer is put back on the free queue.
+ *
+ **********************************************************************/
+void
+ixgbe_txeof(struct tx_ring *txr)
+{
+#ifdef DEV_NETMAP
+ struct adapter *adapter = txr->adapter;
+ struct ifnet *ifp = adapter->ifp;
+#endif
+ u32 work, processed = 0;
+ u16 limit = txr->process_limit;
+ struct ixgbe_tx_buf *buf;
+ union ixgbe_adv_tx_desc *txd;
+
+ mtx_assert(&txr->tx_mtx, MA_OWNED);
+
+#ifdef DEV_NETMAP
+ if (ifp->if_capenable & IFCAP_NETMAP) {
+ struct netmap_adapter *na = NA(ifp);
+ struct netmap_kring *kring = &na->tx_rings[txr->me];
+ txd = txr->tx_base;
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
+ /*
+ * In netmap mode, all the work is done in the context
+ * of the client thread. Interrupt handlers only wake up
+ * clients, which may be sleeping on individual rings
+ * or on a global resource for all rings.
+ * To implement tx interrupt mitigation, we wake up the client
+ * thread roughly every half ring, even if the NIC interrupts
+ * more frequently. This is implemented as follows:
+ * - ixgbe_txsync() sets kring->nr_kflags with the index of
+ * the slot that should wake up the thread (nkr_num_slots
+ * means the user thread should not be woken up);
+ * - the driver ignores tx interrupts unless netmap_mitigate=0
+ * or the slot has the DD bit set.
+ */
+ if (!netmap_mitigate ||
+ (kring->nr_kflags < kring->nkr_num_slots &&
+ txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
+ netmap_tx_irq(ifp, txr->me);
+ }
+ return;
+ }
+#endif /* DEV_NETMAP */
+
+ if (txr->tx_avail == txr->num_desc) {
+ txr->busy = 0;
+ return;
+ }
+
+ /* Get work starting point */
+ work = txr->next_to_clean;
+ buf = &txr->tx_buffers[work];
+ txd = &txr->tx_base[work];
+ work -= txr->num_desc; /* The distance to ring end */
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+ do {
+		union ixgbe_adv_tx_desc *eop = buf->eop;
+ if (eop == NULL) /* No work */
+ break;
+
+ if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
+ break; /* I/O not complete */
+
+ if (buf->m_head) {
+ txr->bytes +=
+ buf->m_head->m_pkthdr.len;
+ bus_dmamap_sync(txr->txtag,
+ buf->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag,
+ buf->map);
+ m_freem(buf->m_head);
+ buf->m_head = NULL;
+ buf->map = NULL;
+ }
+ buf->eop = NULL;
+ ++txr->tx_avail;
+
+ /* We clean the range if multi segment */
+ while (txd != eop) {
+ ++txd;
+ ++buf;
+ ++work;
+ /* wrap the ring? */
+ if (__predict_false(!work)) {
+ work -= txr->num_desc;
+ buf = txr->tx_buffers;
+ txd = txr->tx_base;
+ }
+ if (buf->m_head) {
+ txr->bytes +=
+ buf->m_head->m_pkthdr.len;
+ bus_dmamap_sync(txr->txtag,
+ buf->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag,
+ buf->map);
+ m_freem(buf->m_head);
+ buf->m_head = NULL;
+ buf->map = NULL;
+ }
+ ++txr->tx_avail;
+ buf->eop = NULL;
+
+ }
+ ++txr->packets;
+ ++processed;
+
+ /* Try the next packet */
+ ++txd;
+ ++buf;
+ ++work;
+ /* reset with a wrap */
+ if (__predict_false(!work)) {
+ work -= txr->num_desc;
+ buf = txr->tx_buffers;
+ txd = txr->tx_base;
+ }
+ prefetch(txd);
+ } while (__predict_true(--limit));
+
+ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ work += txr->num_desc;
+ txr->next_to_clean = work;
+
+	/*
+	** Queue hang detection: we know there is work
+	** outstanding, or the first return above would
+	** have been taken, so if nothing was cleaned
+	** this pass, increment busy. local_timer checks
+	** this count and marks the queue HUNG once it
+	** exceeds the maximum number of attempts.
+	*/
+	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
+		++txr->busy;
+	/*
+	** If anything was cleaned we reset state to 1;
+	** note this also clears HUNG if it was set.
+	*/
+	if (processed)
+		txr->busy = 1;
+
+ if (txr->tx_avail == txr->num_desc)
+ txr->busy = 0;
+
+ return;
+}
+
+
+#ifdef IXGBE_FDIR
+/*
+** This routine parses packet headers so that Flow
+** Director can make a hashed filter table entry
+** allowing traffic flows to be identified and kept
+** on the same cpu. Doing this for every packet
+** would be a performance hit, so we only sample
+** at the IXGBE_FDIR_RATE of packets.
+*/
+static void
+ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
+{
+ struct adapter *adapter = txr->adapter;
+ struct ix_queue *que;
+ struct ip *ip;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ struct ether_vlan_header *eh;
+ union ixgbe_atr_hash_dword input = {.dword = 0};
+ union ixgbe_atr_hash_dword common = {.dword = 0};
+ int ehdrlen, ip_hlen;
+ u16 etype;
+
+ eh = mtod(mp, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ etype = eh->evl_proto;
+ } else {
+ ehdrlen = ETHER_HDR_LEN;
+ etype = eh->evl_encap_proto;
+ }
+
+ /* Only handling IPv4 */
+ if (etype != htons(ETHERTYPE_IP))
+ return;
+
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+ ip_hlen = ip->ip_hl << 2;
+
+ /* check if we're UDP or TCP */
+ switch (ip->ip_p) {
+ case IPPROTO_TCP:
+ th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+ /* src and dst are inverted */
+ common.port.dst ^= th->th_sport;
+ common.port.src ^= th->th_dport;
+ input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case IPPROTO_UDP:
+ uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
+ /* src and dst are inverted */
+ common.port.dst ^= uh->uh_sport;
+ common.port.src ^= uh->uh_dport;
+ input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ default:
+ return;
+ }
+
+ input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
+ if (mp->m_pkthdr.ether_vtag)
+ common.flex_bytes ^= htons(ETHERTYPE_VLAN);
+ else
+ common.flex_bytes ^= etype;
+ common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
+
+ que = &adapter->queues[txr->me];
+ /*
+ ** This assumes the Rx queue and Tx
+ ** queue are bound to the same CPU
+ */
+ ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
+ input, common, que->msix);
+}
+#endif /* IXGBE_FDIR */
+
+/*
+** Used to detect a descriptor that has
+** been merged by Hardware RSC.
+*/
+static inline u32
+ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
+{
+ return (le32toh(rx->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
+}
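+/*
+** A nonzero return from the helper above is the number of frames
+** hardware RSC has merged into this descriptor chain; the rxeof
+** loop below credits (rsc - 1) to rsc_num and then follows the
+** NEXTP field instead of assuming the next sequential descriptor.
+*/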
+
+/*********************************************************************
+ *
+ * Initialize Hardware RSC (LRO) feature on 82599
+ * for an RX ring, this is toggled by the LRO capability
+ * even though it is transparent to the stack.
+ *
+ * NOTE: since this HW feature only works with IPv4 and
+ * our testing has shown soft LRO to be as effective,
+ * it is disabled by default.
+ *
+ **********************************************************************/
+static void
+ixgbe_setup_hw_rsc(struct rx_ring *rxr)
+{
+ struct adapter *adapter = rxr->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rscctrl, rdrxctl;
+
+	/* If turning LRO/RSC off we need to disable it */
+	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
+		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
+		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+		/* The cleared enable bit must be written back */
+		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
+		return;
+	}
+
+ rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+#ifdef DEV_NETMAP /* crcstrip is optional in netmap */
+ if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
+#endif /* DEV_NETMAP */
+ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+ rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
+ rscctrl |= IXGBE_RSCCTL_RSCEN;
+ /*
+ ** Limit the total number of descriptors that
+ ** can be combined, so it does not exceed 64K
+ */
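+	/*
+	** For example, with standard 2K clusters the
+	** MAXDESC_16 setting caps a merged frame at
+	** 16 * 2K = 32K, safely under that limit, while
+	** 16K clusters get MAXDESC_1 and so are not
+	** merged beyond a single buffer.
+	*/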
+ if (rxr->mbuf_sz == MCLBYTES)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+ else if (rxr->mbuf_sz == MJUMPAGESIZE)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+ else if (rxr->mbuf_sz == MJUM9BYTES)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+ else /* Using 16K cluster */
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
+
+ /* Enable TCP header recognition */
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
+ (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
+ IXGBE_PSRTYPE_TCPHDR));
+
+ /* Disable RSC for ACK packets */
+ IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+ (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+
+ rxr->hw_rsc = TRUE;
+}
+
+/*********************************************************************
+ *
+ * Refresh mbuf buffers for RX descriptor rings
+ *  - now keeps its own state, so discards due to resource
+ *    exhaustion are unnecessary; if an mbuf cannot be obtained
+ *    it just returns, keeping its placeholder, and can simply
+ *    be called again to retry.
+ *
+ **********************************************************************/
+static void
+ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
+{
+ struct adapter *adapter = rxr->adapter;
+ bus_dma_segment_t seg[1];
+ struct ixgbe_rx_buf *rxbuf;
+ struct mbuf *mp;
+ int i, j, nsegs, error;
+ bool refreshed = FALSE;
+
+ i = j = rxr->next_to_refresh;
+ /* Control the loop with one beyond */
+ if (++j == rxr->num_desc)
+ j = 0;
+
+ while (j != limit) {
+ rxbuf = &rxr->rx_buffers[i];
+ if (rxbuf->buf == NULL) {
+ mp = m_getjcl(M_NOWAIT, MT_DATA,
+ M_PKTHDR, rxr->mbuf_sz);
+ if (mp == NULL)
+ goto update;
+ if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+ m_adj(mp, ETHER_ALIGN);
+ } else
+ mp = rxbuf->buf;
+
+ mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
+
+ /* If we're dealing with an mbuf that was copied rather
+ * than replaced, there's no need to go through busdma.
+ */
+ if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+ rxbuf->pmap, mp, seg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ printf("Refresh mbufs: payload dmamap load"
+ " failure - %d\n", error);
+ m_free(mp);
+ rxbuf->buf = NULL;
+ goto update;
+ }
+ rxbuf->buf = mp;
+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+ BUS_DMASYNC_PREREAD);
+ rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
+ htole64(seg[0].ds_addr);
+ } else {
+ rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
+ rxbuf->flags &= ~IXGBE_RX_COPY;
+ }
+
+ refreshed = TRUE;
+ /* Next is precalculated */
+ i = j;
+ rxr->next_to_refresh = i;
+ if (++j == rxr->num_desc)
+ j = 0;
+ }
+update:
+ if (refreshed) /* Update hardware tail index */
+ IXGBE_WRITE_REG(&adapter->hw,
+ rxr->tail, rxr->next_to_refresh);
+ return;
+}
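+/*
+ * The i/j pair above is the "one beyond" control mentioned in the
+ * header: j leads i by one slot (modulo num_desc), the loop stops
+ * when the lead index reaches 'limit', and next_to_refresh is only
+ * advanced once a slot has successfully been given a buffer.
+ */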
+
+/*********************************************************************
+ *
+ * Allocate memory for rx_buffer structures. Since we use one
+ * rx_buffer per received packet, the maximum number of rx_buffer's
+ * that we'll need is equal to the number of receive descriptors
+ * that we've allocated.
+ *
+ **********************************************************************/
+int
+ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
+{
+ struct adapter *adapter = rxr->adapter;
+ device_t dev = adapter->dev;
+ struct ixgbe_rx_buf *rxbuf;
+ int i, bsize, error;
+
+ bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
+ if (!(rxr->rx_buffers =
+ (struct ixgbe_rx_buf *) malloc(bsize,
+ M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate rx_buffer memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MJUM16BYTES, /* maxsize */
+ 1, /* nsegments */
+ MJUM16BYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &rxr->ptag))) {
+ device_printf(dev, "Unable to create RX DMA tag\n");
+ goto fail;
+ }
+
+	for (i = 0; i < rxr->num_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
+ error = bus_dmamap_create(rxr->ptag,
+ BUS_DMA_NOWAIT, &rxbuf->pmap);
+ if (error) {
+ device_printf(dev, "Unable to create RX dma map\n");
+ goto fail;
+ }
+ }
+
+ return (0);
+
+fail:
+ /* Frees all, but can handle partial completion */
+ ixgbe_free_receive_structures(adapter);
+ return (error);
+}
+
+
+static void
+ixgbe_free_receive_ring(struct rx_ring *rxr)
+{
+ struct ixgbe_rx_buf *rxbuf;
+ int i;
+
+ for (i = 0; i < rxr->num_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
+ if (rxbuf->buf != NULL) {
+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+ rxbuf->buf->m_flags |= M_PKTHDR;
+ m_freem(rxbuf->buf);
+ rxbuf->buf = NULL;
+ rxbuf->flags = 0;
+ }
+ }
+}
+
+
+/*********************************************************************
+ *
+ * Initialize a receive ring and its buffers.
+ *
+ **********************************************************************/
+static int
+ixgbe_setup_receive_ring(struct rx_ring *rxr)
+{
+ struct adapter *adapter;
+ struct ifnet *ifp;
+ device_t dev;
+ struct ixgbe_rx_buf *rxbuf;
+ bus_dma_segment_t seg[1];
+ struct lro_ctrl *lro = &rxr->lro;
+ int rsize, nsegs, error = 0;
+#ifdef DEV_NETMAP
+ struct netmap_adapter *na = NA(rxr->adapter->ifp);
+ struct netmap_slot *slot;
+#endif /* DEV_NETMAP */
+
+ adapter = rxr->adapter;
+ ifp = adapter->ifp;
+ dev = adapter->dev;
+
+ /* Clear the ring contents */
+ IXGBE_RX_LOCK(rxr);
+#ifdef DEV_NETMAP
+ /* same as in ixgbe_setup_transmit_ring() */
+ slot = netmap_reset(na, NR_RX, rxr->me, 0);
+#endif /* DEV_NETMAP */
+ rsize = roundup2(adapter->num_rx_desc *
+ sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
+ bzero((void *)rxr->rx_base, rsize);
+ /* Cache the size */
+ rxr->mbuf_sz = adapter->rx_mbuf_sz;
+
+ /* Free current RX buffer structs and their mbufs */
+ ixgbe_free_receive_ring(rxr);
+
+ /* Now replenish the mbufs */
+ for (int j = 0; j != rxr->num_desc; ++j) {
+ struct mbuf *mp;
+
+ rxbuf = &rxr->rx_buffers[j];
+#ifdef DEV_NETMAP
+ /*
+ * In netmap mode, fill the map and set the buffer
+ * address in the NIC ring, considering the offset
+ * between the netmap and NIC rings (see comment in
+ * ixgbe_setup_transmit_ring() ). No need to allocate
+ * an mbuf, so end the block with a continue;
+ */
+ if (slot) {
+ int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
+ uint64_t paddr;
+ void *addr;
+
+ addr = PNMB(na, slot + sj, &paddr);
+ netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
+ /* Update descriptor and the cached value */
+ rxr->rx_base[j].read.pkt_addr = htole64(paddr);
+ rxbuf->addr = htole64(paddr);
+ continue;
+ }
+#endif /* DEV_NETMAP */
+ rxbuf->flags = 0;
+ rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA,
+ M_PKTHDR, adapter->rx_mbuf_sz);
+ if (rxbuf->buf == NULL) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ mp = rxbuf->buf;
+ mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+ rxbuf->pmap, mp, seg,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0)
+ goto fail;
+ bus_dmamap_sync(rxr->ptag,
+ rxbuf->pmap, BUS_DMASYNC_PREREAD);
+ /* Update the descriptor and the cached value */
+ rxr->rx_base[j].read.pkt_addr = htole64(seg[0].ds_addr);
+ rxbuf->addr = htole64(seg[0].ds_addr);
+ }
+
+
+ /* Setup our descriptor indices */
+ rxr->next_to_check = 0;
+ rxr->next_to_refresh = 0;
+ rxr->lro_enabled = FALSE;
+ rxr->rx_copies = 0;
+ rxr->rx_bytes = 0;
+ rxr->vtag_strip = FALSE;
+
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /*
+ ** Now set up the LRO interface:
+ */
+ if (ixgbe_rsc_enable)
+ ixgbe_setup_hw_rsc(rxr);
+ else if (ifp->if_capenable & IFCAP_LRO) {
+ int err = tcp_lro_init(lro);
+ if (err) {
+ device_printf(dev, "LRO Initialization failed!\n");
+ goto fail;
+ }
+ INIT_DEBUGOUT("RX Soft LRO Initialized\n");
+ rxr->lro_enabled = TRUE;
+ lro->ifp = adapter->ifp;
+ }
+
+ IXGBE_RX_UNLOCK(rxr);
+ return (0);
+
+fail:
+ ixgbe_free_receive_ring(rxr);
+ IXGBE_RX_UNLOCK(rxr);
+ return (error);
+}
+
+/*********************************************************************
+ *
+ * Initialize all receive rings.
+ *
+ **********************************************************************/
+int
+ixgbe_setup_receive_structures(struct adapter *adapter)
+{
+ struct rx_ring *rxr = adapter->rx_rings;
+ int j;
+
+ for (j = 0; j < adapter->num_queues; j++, rxr++)
+ if (ixgbe_setup_receive_ring(rxr))
+ goto fail;
+
+ return (0);
+fail:
+	/*
+	 * Free RX buffers allocated so far; we only handle the
+	 * rings that completed, since the failing case will have
+	 * cleaned up after itself. 'j' failed, so it is the terminus.
+	 */
+ for (int i = 0; i < j; ++i) {
+ rxr = &adapter->rx_rings[i];
+ ixgbe_free_receive_ring(rxr);
+ }
+
+ return (ENOBUFS);
+}
+
+
+/*********************************************************************
+ *
+ * Free all receive rings.
+ *
+ **********************************************************************/
+void
+ixgbe_free_receive_structures(struct adapter *adapter)
+{
+ struct rx_ring *rxr = adapter->rx_rings;
+
+ INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
+
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+ struct lro_ctrl *lro = &rxr->lro;
+ ixgbe_free_receive_buffers(rxr);
+ /* Free LRO memory */
+ tcp_lro_free(lro);
+ /* Free the ring memory as well */
+ ixgbe_dma_free(adapter, &rxr->rxdma);
+ }
+
+ free(adapter->rx_rings, M_DEVBUF);
+}
+
+
+/*********************************************************************
+ *
+ * Free receive ring data structures
+ *
+ **********************************************************************/
+void
+ixgbe_free_receive_buffers(struct rx_ring *rxr)
+{
+ struct adapter *adapter = rxr->adapter;
+ struct ixgbe_rx_buf *rxbuf;
+
+ INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
+
+ /* Cleanup any existing buffers */
+ if (rxr->rx_buffers != NULL) {
+ for (int i = 0; i < adapter->num_rx_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
+ if (rxbuf->buf != NULL) {
+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+ rxbuf->buf->m_flags |= M_PKTHDR;
+ m_freem(rxbuf->buf);
+ }
+ rxbuf->buf = NULL;
+ if (rxbuf->pmap != NULL) {
+ bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
+ rxbuf->pmap = NULL;
+ }
+ }
+ if (rxr->rx_buffers != NULL) {
+ free(rxr->rx_buffers, M_DEVBUF);
+ rxr->rx_buffers = NULL;
+ }
+ }
+
+ if (rxr->ptag != NULL) {
+ bus_dma_tag_destroy(rxr->ptag);
+ rxr->ptag = NULL;
+ }
+
+ return;
+}
+
+static __inline void
+ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
+{
+
+	/*
+	 * At the moment LRO is only for IP/TCP packets, and the TCP
+	 * checksum of the packet must have been computed by hardware.
+	 * The packet also must not carry a VLAN tag in its ethernet
+	 * header. For IPv6 we do not yet support extension headers.
+	 */
+ if (rxr->lro_enabled &&
+ (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+ ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
+ (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
+ (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
+ (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
+ (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+ (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+		/*
+		 * Send to the stack instead if:
+		 *  - LRO is not enabled, or
+		 *  - there are no LRO resources, or
+		 *  - the LRO enqueue fails
+		 */
+ if (rxr->lro.lro_cnt != 0)
+ if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
+ return;
+ }
+ IXGBE_RX_UNLOCK(rxr);
+ (*ifp->if_input)(ifp, m);
+ IXGBE_RX_LOCK(rxr);
+}
+
+static __inline void
+ixgbe_rx_discard(struct rx_ring *rxr, int i)
+{
+ struct ixgbe_rx_buf *rbuf;
+
+ rbuf = &rxr->rx_buffers[i];
+
+	/*
+	** With advanced descriptors the writeback
+	** clobbers the buffer addrs, so it is easier
+	** to just free the existing mbufs and take
+	** the normal refresh path to get new buffers
+	** and mapping.
+	*/
+
+	if (rbuf->fmp != NULL) {	/* Partial chain? */
+ rbuf->fmp->m_flags |= M_PKTHDR;
+ m_freem(rbuf->fmp);
+ rbuf->fmp = NULL;
+ rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
+ } else if (rbuf->buf) {
+ m_free(rbuf->buf);
+ rbuf->buf = NULL;
+ }
+
+ rbuf->flags = 0;
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * This routine executes in interrupt context. It replenishes
+ * the mbufs in the descriptor and sends data which has been
+ * dma'ed into host memory to upper layer.
+ *
+ * We loop at most 'count' times (the ring's process limit),
+ * or until the ring is clean.
+ *
+ * Return TRUE for more work, FALSE for all clean.
+ *********************************************************************/
+bool
+ixgbe_rxeof(struct ix_queue *que)
+{
+ struct adapter *adapter = que->adapter;
+ struct rx_ring *rxr = que->rxr;
+ struct ifnet *ifp = adapter->ifp;
+ struct lro_ctrl *lro = &rxr->lro;
+ struct lro_entry *queued;
+ int i, nextp, processed = 0;
+ u32 staterr = 0;
+ u16 count = rxr->process_limit;
+ union ixgbe_adv_rx_desc *cur;
+ struct ixgbe_rx_buf *rbuf, *nbuf;
+ u16 pkt_info;
+
+ IXGBE_RX_LOCK(rxr);
+
+#ifdef DEV_NETMAP
+ /* Same as the txeof routine: wakeup clients on intr. */
+ if (netmap_rx_irq(ifp, rxr->me, &processed)) {
+ IXGBE_RX_UNLOCK(rxr);
+ return (FALSE);
+ }
+#endif /* DEV_NETMAP */
+
+ for (i = rxr->next_to_check; count != 0;) {
+ struct mbuf *sendmp, *mp;
+ u32 rsc, ptype;
+ u16 len;
+ u16 vtag = 0;
+ bool eop;
+
+ /* Sync the ring. */
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ cur = &rxr->rx_base[i];
+ staterr = le32toh(cur->wb.upper.status_error);
+ pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
+
+ if ((staterr & IXGBE_RXD_STAT_DD) == 0)
+ break;
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+
+ count--;
+ sendmp = NULL;
+ nbuf = NULL;
+ rsc = 0;
+ cur->wb.upper.status_error = 0;
+ rbuf = &rxr->rx_buffers[i];
+ mp = rbuf->buf;
+
+ len = le16toh(cur->wb.upper.length);
+ ptype = le32toh(cur->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_PKTTYPE_MASK;
+ eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
+
+ /* Make sure bad packets are discarded */
+ if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
+#if 0 // VF-only
+#if __FreeBSD_version >= 1100036
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+#endif
+#endif
+ rxr->rx_discarded++;
+ ixgbe_rx_discard(rxr, i);
+ goto next_desc;
+ }
+
+		/*
+		** On the 82599, which supports a hardware
+		** LRO (called HW RSC), packets need not
+		** be fragmented across sequential
+		** descriptors; rather, the next descriptor
+		** is indicated in bits of this descriptor.
+		** This also means we might process more
+		** than one packet at a time, something that
+		** was never true before and required
+		** eliminating global chain pointers in
+		** favor of what we are doing here. -jfv
+		*/
+ if (!eop) {
+ /*
+ ** Figure out the next descriptor
+ ** of this frame.
+ */
+ if (rxr->hw_rsc == TRUE) {
+ rsc = ixgbe_rsc_count(cur);
+ rxr->rsc_num += (rsc - 1);
+ }
+ if (rsc) { /* Get hardware index */
+ nextp = ((staterr &
+ IXGBE_RXDADV_NEXTP_MASK) >>
+ IXGBE_RXDADV_NEXTP_SHIFT);
+ } else { /* Just sequential */
+ nextp = i + 1;
+ if (nextp == adapter->num_rx_desc)
+ nextp = 0;
+ }
+ nbuf = &rxr->rx_buffers[nextp];
+ prefetch(nbuf);
+ }
+ /*
+ ** Rather than using the fmp/lmp global pointers
+ ** we now keep the head of a packet chain in the
+ ** buffer struct and pass this along from one
+ ** descriptor to the next, until we get EOP.
+ */
+ mp->m_len = len;
+		/*
+		** See if there is a stored head for this
+		** chain; if so, this buffer continues an
+		** in-progress packet.
+		*/
+ sendmp = rbuf->fmp;
+ if (sendmp != NULL) { /* secondary frag */
+ rbuf->buf = rbuf->fmp = NULL;
+ mp->m_flags &= ~M_PKTHDR;
+ sendmp->m_pkthdr.len += mp->m_len;
+ } else {
+ /*
+ * Optimize. This might be a small packet,
+ * maybe just a TCP ACK. Do a fast copy that
+ * is cache aligned into a new mbuf, and
+ * leave the old mbuf+cluster for re-use.
+ */
+ if (eop && len <= IXGBE_RX_COPY_LEN) {
+ sendmp = m_gethdr(M_NOWAIT, MT_DATA);
+ if (sendmp != NULL) {
+ sendmp->m_data +=
+ IXGBE_RX_COPY_ALIGN;
+ ixgbe_bcopy(mp->m_data,
+ sendmp->m_data, len);
+ sendmp->m_len = len;
+ rxr->rx_copies++;
+ rbuf->flags |= IXGBE_RX_COPY;
+ }
+ }
+ if (sendmp == NULL) {
+ rbuf->buf = rbuf->fmp = NULL;
+ sendmp = mp;
+ }
+
+ /* first desc of a non-ps chain */
+ sendmp->m_flags |= M_PKTHDR;
+ sendmp->m_pkthdr.len = mp->m_len;
+ }
+ ++processed;
+
+ /* Pass the head pointer on */
+ if (eop == 0) {
+ nbuf->fmp = sendmp;
+ sendmp = NULL;
+ mp->m_next = nbuf->buf;
+ } else { /* Sending this frame */
+ sendmp->m_pkthdr.rcvif = ifp;
+ rxr->rx_packets++;
+ /* capture data for AIM */
+ rxr->bytes += sendmp->m_pkthdr.len;
+ rxr->rx_bytes += sendmp->m_pkthdr.len;
+ /* Process vlan info */
+ if ((rxr->vtag_strip) &&
+ (staterr & IXGBE_RXD_STAT_VP))
+ vtag = le16toh(cur->wb.upper.vlan);
+ if (vtag) {
+ sendmp->m_pkthdr.ether_vtag = vtag;
+ sendmp->m_flags |= M_VLANTAG;
+ }
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ ixgbe_rx_checksum(staterr, sendmp, ptype);
+#if __FreeBSD_version >= 800000
+#ifdef RSS
+ sendmp->m_pkthdr.flowid =
+ le32toh(cur->wb.lower.hi_dword.rss);
+ switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
+ case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
+ M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV4);
+ break;
+ case IXGBE_RXDADV_RSSTYPE_IPV4:
+ M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV4);
+ break;
+ case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
+ M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV6);
+ break;
+ case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
+ M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV6_EX);
+ break;
+ case IXGBE_RXDADV_RSSTYPE_IPV6:
+ M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_IPV6);
+ break;
+ case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
+ M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV6_EX);
+ break;
+ case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
+ M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV4);
+ break;
+ case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
+ M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV6);
+ break;
+ case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
+ M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_UDP_IPV6_EX);
+ break;
+ default:
+ M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
+ }
+#else /* RSS */
+ sendmp->m_pkthdr.flowid = que->msix;
+ M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
+#endif /* RSS */
+#endif /* FreeBSD_version */
+ }
+next_desc:
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /* Advance our pointers to the next descriptor. */
+ if (++i == rxr->num_desc)
+ i = 0;
+
+ /* Now send to the stack or do LRO */
+ if (sendmp != NULL) {
+ rxr->next_to_check = i;
+ ixgbe_rx_input(rxr, ifp, sendmp, ptype);
+ i = rxr->next_to_check;
+ }
+
+ /* Every 8 descriptors we go to refresh mbufs */
+ if (processed == 8) {
+ ixgbe_refresh_mbufs(rxr, i);
+ processed = 0;
+ }
+ }
+
+ /* Refresh any remaining buf structs */
+ if (ixgbe_rx_unrefreshed(rxr))
+ ixgbe_refresh_mbufs(rxr, i);
+
+ rxr->next_to_check = i;
+
+ /*
+ * Flush any outstanding LRO work
+ */
+ while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+ SLIST_REMOVE_HEAD(&lro->lro_active, next);
+ tcp_lro_flush(lro, queued);
+ }
+
+ IXGBE_RX_UNLOCK(rxr);
+
+ /*
+ ** Still have cleaning to do?
+ */
+ if ((staterr & IXGBE_RXD_STAT_DD) != 0)
+ return (TRUE);
+ else
+ return (FALSE);
+}
+
+
+/*********************************************************************
+ *
+ * Verify that the hardware indicated that the checksum is valid.
+ * Inform the stack about the status of the checksum so that it
+ * does not spend time verifying it again.
+ *
+ *********************************************************************/
+static void
+ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
+{
+ u16 status = (u16) staterr;
+ u8 errors = (u8) (staterr >> 24);
+ bool sctp = FALSE;
+
+ if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+ (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
+ sctp = TRUE;
+
+ if (status & IXGBE_RXD_STAT_IPCS) {
+ if (!(errors & IXGBE_RXD_ERR_IPE)) {
+ /* IP Checksum Good */
+ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+ mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ } else
+ mp->m_pkthdr.csum_flags = 0;
+ }
+ if (status & IXGBE_RXD_STAT_L4CS) {
+ u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+#if __FreeBSD_version >= 800000
+ if (sctp)
+ type = CSUM_SCTP_VALID;
+#endif
+ if (!(errors & IXGBE_RXD_ERR_TCPE)) {
+ mp->m_pkthdr.csum_flags |= type;
+ if (!sctp)
+ mp->m_pkthdr.csum_data = htons(0xffff);
+ }
+ }
+ return;
+}
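A stand-alone sketch of how the staterr word is unpacked above: the low 16 bits are status, the top 8 bits are errors, and a checksum is only reported good when the status bit is set and the matching error bit is clear. Bit values below are illustrative stand-ins for the IXGBE_RXD_* defines.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RXD_STAT_IPCS	0x0040	/* stand-in: IP checksum done */
#define RXD_STAT_L4CS	0x0020	/* stand-in: L4 checksum done */
#define RXD_ERR_IPE	0x80	/* stand-in, as seen after >> 24 */
#define RXD_ERR_TCPE	0x40	/* stand-in, as seen after >> 24 */

int main(void)
{
	uint32_t staterr = RXD_STAT_IPCS | RXD_STAT_L4CS; /* both good */
	uint16_t status = (uint16_t)staterr;		/* low 16: status */
	uint8_t errors = (uint8_t)(staterr >> 24);	/* high 8: errors */

	bool ip_ok = (status & RXD_STAT_IPCS) && !(errors & RXD_ERR_IPE);
	bool l4_ok = (status & RXD_STAT_L4CS) && !(errors & RXD_ERR_TCPE);
	printf("ip_ok=%d l4_ok=%d\n", ip_ok, l4_ok);
	return (0);
}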
+
+/********************************************************************
+ * Manage DMA'able memory.
+ *******************************************************************/
+static void
+ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
+{
+ if (error)
+ return;
+ *(bus_addr_t *) arg = segs->ds_addr;
+ return;
+}
+
+int
+ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
+ struct ixgbe_dma_alloc *dma, int mapflags)
+{
+ device_t dev = adapter->dev;
+ int r;
+
+ r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
+ DBA_ALIGN, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ size, /* maxsize */
+ 1, /* nsegments */
+ size, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &dma->dma_tag);
+ if (r != 0) {
+ device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
+ "error %u\n", r);
+ goto fail_0;
+ }
+ r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
+ BUS_DMA_NOWAIT, &dma->dma_map);
+ if (r != 0) {
+ device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
+ "error %u\n", r);
+ goto fail_1;
+ }
+ r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
+ size,
+ ixgbe_dmamap_cb,
+ &dma->dma_paddr,
+ mapflags | BUS_DMA_NOWAIT);
+ if (r != 0) {
+ device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
+ "error %u\n", r);
+ goto fail_2;
+ }
+ dma->dma_size = size;
+ return (0);
+fail_2:
+ bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+fail_1:
+ bus_dma_tag_destroy(dma->dma_tag);
+fail_0:
+ dma->dma_tag = NULL;
+ return (r);
+}
+
+void
+ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
+{
+ bus_dmamap_sync(dma->dma_tag, dma->dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+ bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+ bus_dma_tag_destroy(dma->dma_tag);
+}
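A hypothetical caller (kernel context, error handling trimmed) showing the contract of the pair above: ixgbe_dma_malloc() yields dma_vaddr for the CPU and dma_paddr for the device, and ixgbe_dma_free() reverses all three busdma steps.

	struct ixgbe_dma_alloc dma;

	if (ixgbe_dma_malloc(adapter, 4096, &dma, BUS_DMA_NOWAIT) == 0) {
		/* Write descriptors at dma.dma_vaddr; program the
		 * hardware with dma.dma_paddr. */
		ixgbe_dma_free(adapter, &dma);
	}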
+
+
+/*********************************************************************
+ *
+ * Allocate memory for the transmit and receive rings, and then
+ * the descriptors associated with each, called only once at attach.
+ *
+ **********************************************************************/
+int
+ixgbe_allocate_queues(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ struct ix_queue *que;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ int rsize, tsize, error = IXGBE_SUCCESS;
+ int txconf = 0, rxconf = 0;
+
+ /* First allocate the top level queue structs */
+ if (!(adapter->queues =
+ (struct ix_queue *) malloc(sizeof(struct ix_queue) *
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate queue memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ /* Next allocate the TX ring struct memory */
+ if (!(adapter->tx_rings =
+ (struct tx_ring *) malloc(sizeof(struct tx_ring) *
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate TX ring memory\n");
+ error = ENOMEM;
+ goto tx_fail;
+ }
+
+ /* Next allocate the RX ring struct memory */
+ if (!(adapter->rx_rings =
+ (struct rx_ring *) malloc(sizeof(struct rx_ring) *
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate RX ring memory\n");
+ error = ENOMEM;
+ goto rx_fail;
+ }
+
+ /* For the ring itself */
+ tsize = roundup2(adapter->num_tx_desc *
+ sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
+
+ /*
+ * Now set up the TX queues, txconf is needed to handle the
+ * possibility that things fail midcourse and we need to
+ * undo memory gracefully
+ */
+ for (int i = 0; i < adapter->num_queues; i++, txconf++) {
+ /* Set up some basics */
+ txr = &adapter->tx_rings[i];
+ txr->adapter = adapter;
+ txr->me = i;
+ txr->num_desc = adapter->num_tx_desc;
+
+ /* Initialize the TX side lock */
+ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+ device_get_nameunit(dev), txr->me);
+ mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
+
+ if (ixgbe_dma_malloc(adapter, tsize,
+ &txr->txdma, BUS_DMA_NOWAIT)) {
+ device_printf(dev,
+ "Unable to allocate TX Descriptor memory\n");
+ error = ENOMEM;
+ goto err_tx_desc;
+ }
+ txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
+ bzero((void *)txr->tx_base, tsize);
+
+ /* Now allocate transmit buffers for the ring */
+ if (ixgbe_allocate_transmit_buffers(txr)) {
+ device_printf(dev,
+ "Critical Failure setting up transmit buffers\n");
+ error = ENOMEM;
+ goto err_tx_desc;
+ }
+#ifndef IXGBE_LEGACY_TX
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
+ M_WAITOK, &txr->tx_mtx);
+ if (txr->br == NULL) {
+ device_printf(dev,
+ "Critical Failure setting up buf ring\n");
+ error = ENOMEM;
+ goto err_tx_desc;
+ }
+#endif
+ }
+
+ /*
+ * Next the RX queues...
+ */
+ rsize = roundup2(adapter->num_rx_desc *
+ sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
+ for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
+ rxr = &adapter->rx_rings[i];
+ /* Set up some basics */
+ rxr->adapter = adapter;
+ rxr->me = i;
+ rxr->num_desc = adapter->num_rx_desc;
+
+ /* Initialize the RX side lock */
+ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+ device_get_nameunit(dev), rxr->me);
+ mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+ if (ixgbe_dma_malloc(adapter, rsize,
+ &rxr->rxdma, BUS_DMA_NOWAIT)) {
+ device_printf(dev,
+ "Unable to allocate RxDescriptor memory\n");
+ error = ENOMEM;
+ goto err_rx_desc;
+ }
+ rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
+ bzero((void *)rxr->rx_base, rsize);
+
+ /* Allocate receive buffers for the ring */
+ if (ixgbe_allocate_receive_buffers(rxr)) {
+ device_printf(dev,
+ "Critical Failure setting up receive buffers\n");
+ error = ENOMEM;
+ goto err_rx_desc;
+ }
+ }
+
+ /*
+ ** Finally set up the queue holding structs
+ */
+ for (int i = 0; i < adapter->num_queues; i++) {
+ que = &adapter->queues[i];
+ que->adapter = adapter;
+ que->me = i;
+ que->txr = &adapter->tx_rings[i];
+ que->rxr = &adapter->rx_rings[i];
+ }
+
+ return (0);
+
+err_rx_desc:
+ for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
+ ixgbe_dma_free(adapter, &rxr->rxdma);
+err_tx_desc:
+ for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
+ ixgbe_dma_free(adapter, &txr->txdma);
+ free(adapter->rx_rings, M_DEVBUF);
+rx_fail:
+ free(adapter->tx_rings, M_DEVBUF);
+tx_fail:
+ free(adapter->queues, M_DEVBUF);
+fail:
+ return (error);
+}
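The txconf/rxconf counters above implement the classic count-and-unwind idiom: remember how many rings were fully set up and, on failure, tear down only those, in reverse. A self-contained user-space sketch of the same shape (malloc() stands in for ixgbe_dma_malloc()):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *ring[4] = { NULL, NULL, NULL, NULL };
	int conf;

	for (conf = 0; conf < 4; conf++) {
		ring[conf] = malloc(128);	/* stands in for ixgbe_dma_malloc() */
		if (ring[conf] == NULL)
			goto err;		/* exactly 'conf' rings are live */
	}
	printf("all rings up\n");
err:
	while (conf-- > 0)			/* mirrors the err_rx_desc loops */
		free(ring[conf]);
	return (0);
}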
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index b69274a..e0680f1 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,7 @@
#include <machine/smp.h>
#include "ixgbe_api.h"
+#include "ixgbe_vf.h"
/* Tunables */
@@ -197,6 +198,10 @@
#define IXGBE_VFTA_SIZE 128
#define IXGBE_BR_SIZE 4096
#define IXGBE_QUEUE_MIN_FREE 32
+#define IXGBE_MAX_TX_BUSY 10
+#define IXGBE_QUEUE_HUNG 0x80000000
+
+#define IXV_EITR_DEFAULT 128
/* Offload bits in mbuf flag */
#if __FreeBSD_version >= 800000
@@ -205,6 +210,15 @@
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif
+/* Backward compatibility items for very old versions */
+#ifndef pci_find_cap
+#define pci_find_cap pci_find_extcap
+#endif
+
+#ifndef DEVMETHOD_END
+#define DEVMETHOD_END { NULL, NULL }
+#endif
+
/*
* Interrupt Moderation parameters
*/
@@ -213,7 +227,6 @@
#define IXGBE_BULK_LATENCY 1200
#define IXGBE_LINK_ITR 2000
-
/*
*****************************************************************************
* vendor_info_array
@@ -268,8 +281,10 @@ struct ix_queue {
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
u32 eitr_setting;
+ u32 me;
struct resource *res;
void *tag;
+ int busy;
struct tx_ring *txr;
struct rx_ring *rxr;
struct task que_task;
@@ -284,7 +299,8 @@ struct tx_ring {
struct adapter *adapter;
struct mtx tx_mtx;
u32 me;
- int watchdog_time;
+ u32 tail;
+ int busy;
union ixgbe_adv_tx_desc *tx_base;
struct ixgbe_tx_buf *tx_buffers;
struct ixgbe_dma_alloc txdma;
@@ -293,11 +309,6 @@ struct tx_ring {
u16 next_to_clean;
u16 process_limit;
u16 num_desc;
- enum {
- IXGBE_QUEUE_IDLE,
- IXGBE_QUEUE_WORKING,
- IXGBE_QUEUE_HUNG,
- } queue_status;
u32 txd_cmd;
bus_dma_tag_t txtag;
char mtx_name[16];
@@ -312,6 +323,7 @@ struct tx_ring {
u32 bytes; /* used for AIM */
u32 packets;
/* Soft Stats */
+ unsigned long tx_bytes;
unsigned long tso_tx;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
@@ -327,6 +339,7 @@ struct rx_ring {
struct adapter *adapter;
struct mtx rx_mtx;
u32 me;
+ u32 tail;
union ixgbe_adv_rx_desc *rx_base;
struct ixgbe_dma_alloc rxdma;
struct lro_ctrl lro;
@@ -406,7 +419,7 @@ struct adapter {
u16 num_segs;
u32 link_speed;
bool link_up;
- u32 linkvec;
+ u32 vector;
/* Mbuf cluster size */
u32 rx_mbuf_sz;
@@ -442,7 +455,7 @@ struct adapter {
* Allocated at run time, an array of rings.
*/
struct rx_ring *rx_rings;
- u64 que_mask;
+ u64 active_queues;
u32 num_rx_desc;
/* Multicast array memory */
@@ -455,9 +468,24 @@ struct adapter {
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long watchdog_events;
- unsigned long link_irq;
-
- struct ixgbe_hw_stats stats;
+ unsigned long vector_irq;
+ union {
+ struct ixgbe_hw_stats pf;
+ struct ixgbevf_hw_stats vf;
+ } stats;
+#if __FreeBSD_version >= 1100036
+ /* counter(9) stats */
+ u64 ipackets;
+ u64 ierrors;
+ u64 opackets;
+ u64 oerrors;
+ u64 ibytes;
+ u64 obytes;
+ u64 imcasts;
+ u64 omcasts;
+ u64 iqdrops;
+ u64 noproto;
+#endif
};
@@ -488,6 +516,45 @@ struct adapter {
#define PCIER_LINK_STA PCIR_EXPRESS_LINK_STA
#endif
+/* Stats macros */
+#if __FreeBSD_version >= 1100036
+#define IXGBE_SET_IPACKETS(sc, count) (sc)->ipackets = (count)
+#define IXGBE_SET_IERRORS(sc, count) (sc)->ierrors = (count)
+#define IXGBE_SET_OPACKETS(sc, count) (sc)->opackets = (count)
+#define IXGBE_SET_OERRORS(sc, count) (sc)->oerrors = (count)
+#define IXGBE_SET_COLLISIONS(sc, count)
+#define IXGBE_SET_IBYTES(sc, count) (sc)->ibytes = (count)
+#define IXGBE_SET_OBYTES(sc, count) (sc)->obytes = (count)
+#define IXGBE_SET_IMCASTS(sc, count) (sc)->imcasts = (count)
+#define IXGBE_SET_OMCASTS(sc, count) (sc)->omcasts = (count)
+#define IXGBE_SET_IQDROPS(sc, count) (sc)->iqdrops = (count)
+#else
+#define IXGBE_SET_IPACKETS(sc, count) (sc)->ifp->if_ipackets = (count)
+#define IXGBE_SET_IERRORS(sc, count) (sc)->ifp->if_ierrors = (count)
+#define IXGBE_SET_OPACKETS(sc, count) (sc)->ifp->if_opackets = (count)
+#define IXGBE_SET_OERRORS(sc, count) (sc)->ifp->if_oerrors = (count)
+#define IXGBE_SET_COLLISIONS(sc, count) (sc)->ifp->if_collisions = (count)
+#define IXGBE_SET_IBYTES(sc, count) (sc)->ifp->if_ibytes = (count)
+#define IXGBE_SET_OBYTES(sc, count) (sc)->ifp->if_obytes = (count)
+#define IXGBE_SET_IMCASTS(sc, count) (sc)->ifp->if_imcasts = (count)
+#define IXGBE_SET_OMCASTS(sc, count) (sc)->ifp->if_omcasts = (count)
+#define IXGBE_SET_IQDROPS(sc, count) (sc)->ifp->if_iqdrops = (count)
+#endif
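Callers never touch the backing field directly; a hypothetical call site ('total' and 'dropped' being sums the driver computes from hardware counters) compiles unchanged on both stacks, with the macro selecting the counter(9) field or the legacy ifnet field:

	IXGBE_SET_IPACKETS(adapter, total);
	IXGBE_SET_IQDROPS(adapter, dropped);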
+
+/* Sysctl help messages; displayed with sysctl -d */
+#define IXGBE_SYSCTL_DESC_ADV_SPEED \
+ "\nControl advertised link speed using these flags:\n" \
+ "\t0x1 - advertise 100M\n" \
+ "\t0x2 - advertise 1G\n" \
+ "\t0x4 - advertise 10G"
+
+#define IXGBE_SYSCTL_DESC_SET_FC \
+ "\nSet flow control mode using these values:\n" \
+ "\t0 - off\n" \
+ "\t1 - rx pause\n" \
+ "\t2 - tx pause\n" \
+ "\t3 - tx and rx pause"
+
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
@@ -498,6 +565,10 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_sfp_unknown:
case ixgbe_phy_sfp_passive_tyco:
case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_qsfp_passive_unknown:
+ case ixgbe_phy_qsfp_active_unknown:
+ case ixgbe_phy_qsfp_intel:
+ case ixgbe_phy_qsfp_unknown:
return TRUE;
default:
return FALSE;
@@ -530,4 +601,44 @@ ixgbe_rx_unrefreshed(struct rx_ring *rxr)
rxr->next_to_refresh - 1);
}
+/*
+** This checks for a zero MAC addr, which is likely
+** unless the admin on the host has assigned one.
+*/
+static inline bool
+ixv_check_ether_addr(u8 *addr)
+{
+ bool status = TRUE;
+
+ if ((addr[0] == 0 && addr[1] == 0 && addr[2] == 0 &&
+ addr[3] == 0 && addr[4] == 0 && addr[5] == 0))
+ status = FALSE;
+ return (status);
+}
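The same predicate rewritten as a stand-alone user-space test (loop form instead of the unrolled compare; behavior is identical for a 6-byte address):

#include <stdbool.h>
#include <stdio.h>

static bool
check_ether_addr(unsigned char *addr)
{
	for (int i = 0; i < 6; i++)
		if (addr[i] != 0)
			return (true);
	return (false);		/* all-zero: host never assigned one */
}

int main(void)
{
	unsigned char zero[6] = { 0 };
	unsigned char set[6] = { 0x00, 0x1b, 0x21, 0x01, 0x02, 0x03 };

	printf("zero=%d set=%d\n",
	    check_ether_addr(zero), check_ether_addr(set));
	return (0);
}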
+
+/* Shared Prototypes */
+
+#ifdef IXGBE_LEGACY_TX
+void ixgbe_start(struct ifnet *);
+void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
+#else /* ! IXGBE_LEGACY_TX */
+int ixgbe_mq_start(struct ifnet *, struct mbuf *);
+int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
+void ixgbe_qflush(struct ifnet *);
+void ixgbe_deferred_mq_start(void *, int);
+#endif /* IXGBE_LEGACY_TX */
+
+int ixgbe_allocate_queues(struct adapter *);
+int ixgbe_allocate_transmit_buffers(struct tx_ring *);
+int ixgbe_setup_transmit_structures(struct adapter *);
+void ixgbe_free_transmit_structures(struct adapter *);
+int ixgbe_allocate_receive_buffers(struct rx_ring *);
+int ixgbe_setup_receive_structures(struct adapter *);
+void ixgbe_free_receive_structures(struct adapter *);
+void ixgbe_txeof(struct tx_ring *);
+bool ixgbe_rxeof(struct ix_queue *);
+
+int ixgbe_dma_malloc(struct adapter *,
+ bus_size_t, struct ixgbe_dma_alloc *, int);
+void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
#endif /* _IXGBE_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_82598.c b/sys/dev/ixgbe/ixgbe_82598.c
index e32f270..d9b8985 100644
--- a/sys/dev/ixgbe/ixgbe_82598.c
+++ b/sys/dev/ixgbe/ixgbe_82598.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -38,6 +38,13 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
+#define IXGBE_82598_MAX_TX_QUEUES 32
+#define IXGBE_82598_MAX_RX_QUEUES 64
+#define IXGBE_82598_RAR_ENTRIES 16
+#define IXGBE_82598_MC_TBL_SIZE 128
+#define IXGBE_82598_VFT_TBL_SIZE 128
+#define IXGBE_82598_RX_PB_SIZE 512
+
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
@@ -121,47 +128,48 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
ret_val = ixgbe_init_ops_generic(hw);
/* PHY */
- phy->ops.init = &ixgbe_init_phy_ops_82598;
+ phy->ops.init = ixgbe_init_phy_ops_82598;
/* MAC */
- mac->ops.start_hw = &ixgbe_start_hw_82598;
- mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
- mac->ops.reset_hw = &ixgbe_reset_hw_82598;
- mac->ops.get_media_type = &ixgbe_get_media_type_82598;
+ mac->ops.start_hw = ixgbe_start_hw_82598;
+ mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
+ mac->ops.reset_hw = ixgbe_reset_hw_82598;
+ mac->ops.get_media_type = ixgbe_get_media_type_82598;
mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_82598;
- mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
- mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
- mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
+ ixgbe_get_supported_physical_layer_82598;
+ mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
+ mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
+ mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
+ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
/* RAR, Multicast, VLAN */
- mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
- mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
- mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+ mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
+ mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
+ mac->ops.set_vfta = ixgbe_set_vfta_82598;
mac->ops.set_vlvf = NULL;
- mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
+ mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
/* Flow Control */
- mac->ops.fc_enable = &ixgbe_fc_enable_82598;
-
- mac->mcft_size = 128;
- mac->vft_size = 128;
- mac->num_rar_entries = 16;
- mac->rx_pb_size = 512;
- mac->max_tx_queues = 32;
- mac->max_rx_queues = 64;
+ mac->ops.fc_enable = ixgbe_fc_enable_82598;
+
+ mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
+ mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/* SFP+ Module */
- phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
- phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;
+ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
+ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
/* Link */
- mac->ops.check_link = &ixgbe_check_mac_link_82598;
- mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+ mac->ops.check_link = ixgbe_check_mac_link_82598;
+ mac->ops.setup_link = ixgbe_setup_mac_link_82598;
mac->ops.flap_tx_laser = NULL;
- mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
- mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
+ mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
/* Manageability interface */
mac->ops.set_fw_drv_ver = NULL;
@@ -194,20 +202,20 @@ s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
/* Overwrite the link function pointers if copper PHY */
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
- mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+ mac->ops.setup_link = ixgbe_setup_copper_link_82598;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_generic;
+ ixgbe_get_copper_link_capabilities_generic;
}
switch (hw->phy.type) {
case ixgbe_phy_tn:
- phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
+ phy->ops.check_link = ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ ixgbe_get_phy_firmware_version_tnx;
break;
case ixgbe_phy_nl:
- phy->ops.reset = &ixgbe_reset_phy_nl;
+ phy->ops.reset = ixgbe_reset_phy_nl;
/* Call SFP+ identify routine to get the SFP+ module type */
ret_val = phy->ops.identify_sfp(hw);
@@ -1409,6 +1417,20 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
/* Setup Tx packet buffer sizes */
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
+}
+
+/**
+ * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit
+ **/
+s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
+{
+ DEBUGFUNC("ixgbe_enable_rx_dma_82598");
- return;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+
+ return IXGBE_SUCCESS;
}
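Most of the churn in this file is the dropped '&' on the ops assignments. That change is behavior-neutral: in C a function designator decays to a function pointer, so both spellings store the same value. A minimal stand-alone demonstration:

#include <stdio.h>

static int twice(int x) { return (2 * x); }

int main(void)
{
	int (*op_a)(int) = &twice;	/* old style */
	int (*op_b)(int) = twice;	/* new style, identical pointer */

	printf("%d %d same=%d\n", op_a(3), op_b(3), op_a == op_b);
	return (0);
}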
diff --git a/sys/dev/ixgbe/ixgbe_82598.h b/sys/dev/ixgbe/ixgbe_82598.h
index a195b15..621be41 100644
--- a/sys/dev/ixgbe/ixgbe_82598.h
+++ b/sys/dev/ixgbe/ixgbe_82598.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -49,4 +49,5 @@ u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval);
#endif /* _IXGBE_82598_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_82599.c b/sys/dev/ixgbe/ixgbe_82599.c
index 3cc8cd7..d49d851 100644
--- a/sys/dev/ixgbe/ixgbe_82599.c
+++ b/sys/dev/ixgbe/ixgbe_82599.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -38,6 +38,13 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
+#define IXGBE_82599_MAX_TX_QUEUES 128
+#define IXGBE_82599_MAX_RX_QUEUES 128
+#define IXGBE_82599_RAR_ENTRIES 128
+#define IXGBE_82599_MC_TBL_SIZE 128
+#define IXGBE_82599_VFT_TBL_SIZE 128
+#define IXGBE_82599_RX_PB_SIZE 512
+
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
@@ -46,25 +53,10 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
u16 offset, u16 *data);
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-
-static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
-{
- u32 fwsm, manc, factps;
-
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
- if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
- return FALSE;
-
- manc = IXGBE_READ_REG(hw, IXGBE_MANC);
- if (!(manc & IXGBE_MANC_RCV_TCO_EN))
- return FALSE;
-
- factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
- if (factps & IXGBE_FACTPS_MNGCG)
- return FALSE;
-
- return TRUE;
-}
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
@@ -77,12 +69,12 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
* and MNG not enabled
*/
if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- !hw->mng_fw_enabled) {
+ !ixgbe_mng_enabled(hw)) {
mac->ops.disable_tx_laser =
- &ixgbe_disable_tx_laser_multispeed_fiber;
+ ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
- &ixgbe_enable_tx_laser_multispeed_fiber;
- mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
+ ixgbe_enable_tx_laser_multispeed_fiber;
+ mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
} else {
mac->ops.disable_tx_laser = NULL;
@@ -92,15 +84,21 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if (hw->phy.multispeed_fiber) {
/* Set up dual speed SFP+ support */
- mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_hard_rate_select_speed;
+ if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_soft_rate_select_speed;
} else {
if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
(hw->phy.smart_speed == ixgbe_smart_speed_auto ||
hw->phy.smart_speed == ixgbe_smart_speed_on) &&
!ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
+ mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
} else {
- mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+ mac->ops.setup_link = ixgbe_setup_mac_link_82599;
}
}
}
@@ -119,9 +117,27 @@ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
s32 ret_val = IXGBE_SUCCESS;
+ u32 esdp;
DEBUGFUNC("ixgbe_init_phy_ops_82599");
+ if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
+ /* Store flag indicating I2C bus access control unit. */
+ hw->phy.qsfp_shared_i2c_bus = TRUE;
+
+ /* Initialize access to QSFP+ I2C bus */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0_DIR;
+ esdp &= ~IXGBE_ESDP_SDP1_DIR;
+ esdp &= ~IXGBE_ESDP_SDP0;
+ esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
+ esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
+ phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
+ }
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
@@ -134,18 +150,18 @@ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
/* If copper media, overwrite with copper function pointers */
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
- mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
+ mac->ops.setup_link = ixgbe_setup_copper_link_82599;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_generic;
+ ixgbe_get_copper_link_capabilities_generic;
}
- /* Set necessary function pointers based on phy type */
+ /* Set necessary function pointers based on PHY type */
switch (hw->phy.type) {
case ixgbe_phy_tn:
- phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
+ phy->ops.check_link = ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ ixgbe_get_phy_firmware_version_tnx;
break;
default:
break;
@@ -158,7 +174,6 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
u16 list_offset, data_offset, data_value;
- bool got_lock = FALSE;
DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
@@ -191,35 +206,15 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
/* Release the semaphore */
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
- /* Delay obtaining semaphore again to allow FW access */
- msec_delay(hw->eeprom.semaphore_delay);
-
- /* Need SW/FW semaphore around AUTOC writes if LESM on,
- * likewise reset_pipeline requires lock as it also writes
- * AUTOC.
+ /* Delay obtaining semaphore again to allow FW access;
+ * prot_autoc_write uses the semaphore too.
*/
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val != IXGBE_SUCCESS) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto setup_sfp_out;
- }
-
- got_lock = TRUE;
- }
+ msec_delay(hw->eeprom.semaphore_delay);
/* Restart DSP and set SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
- IXGBE_AUTOC_LMS_10G_SERIAL));
- hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- ret_val = ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock) {
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- got_lock = FALSE;
- }
+ ret_val = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
+ FALSE);
if (ret_val) {
DEBUGOUT("sfp module setup not complete\n");
@@ -243,6 +238,79 @@ setup_sfp_err:
}
/**
+ * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: Returns TRUE if we locked for this read.
+ * @reg_val: Value we read from AUTOC
+ *
+ * For this part (82599) we need to wrap read-modify-writes with a possible
+ * FW/SW lock. It is assumed this lock will be freed with the next
+ * prot_autoc_write_82599().
+ */
+s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+ s32 ret_val;
+
+ *locked = FALSE;
+ /* If LESM is on then we need to hold the SW/FW semaphore. */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ *locked = TRUE;
+ }
+
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous prot_autoc_read_82599.
+ *
+ * This part (82599) may need to hold the SW/FW lock around all writes to
+ * AUTOC. Likewise after a write we need to do a pipeline reset.
+ */
+s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
+ /* We only need to get the lock if:
+ * - We didn't do it already (in the read part of a read-modify-write)
+ * - LESM is enabled.
+ */
+ if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ locked = TRUE;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+out:
+ /* Free the SW/FW semaphore as we either grabbed it here or
+ * already had it when this function was called.
+ */
+ if (locked)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+ return ret_val;
+}
+
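A hypothetical read-modify-write call site (kernel context, error handling trimmed) showing the contract the two helpers share: the 'locked' flag that prot_autoc_read_82599() returns is handed straight to prot_autoc_write_82599(), which releases the semaphore either way.

	bool locked;
	u32 autoc;

	prot_autoc_read_82599(hw, &locked, &autoc);	/* may take SW/FW lock */
	autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;		/* modify */
	prot_autoc_write_82599(hw, autoc, locked);	/* write + unlock */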
+/**
* ixgbe_init_ops_82599 - Inits func ptrs and MAC type
* @hw: pointer to hardware structure
*
@@ -263,53 +331,55 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
ret_val = ixgbe_init_ops_generic(hw);
/* PHY */
- phy->ops.identify = &ixgbe_identify_phy_82599;
- phy->ops.init = &ixgbe_init_phy_ops_82599;
+ phy->ops.identify = ixgbe_identify_phy_82599;
+ phy->ops.init = ixgbe_init_phy_ops_82599;
/* MAC */
- mac->ops.reset_hw = &ixgbe_reset_hw_82599;
- mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
- mac->ops.get_media_type = &ixgbe_get_media_type_82599;
+ mac->ops.reset_hw = ixgbe_reset_hw_82599;
+ mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
+ mac->ops.get_media_type = ixgbe_get_media_type_82599;
mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_82599;
- mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
- mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
- mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
- mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
- mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
- mac->ops.start_hw = &ixgbe_start_hw_82599;
- mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
- mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
- mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
- mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
- mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+ ixgbe_get_supported_physical_layer_82599;
+ mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
+ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
+ mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
+ mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
+ mac->ops.start_hw = ixgbe_start_hw_82599;
+ mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
+ mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
+ mac->ops.prot_autoc_read = prot_autoc_read_82599;
+ mac->ops.prot_autoc_write = prot_autoc_write_82599;
/* RAR, Multicast, VLAN */
- mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
- mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
- mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
- mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+ mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
+ mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
+ mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
- mac->ops.set_vfta = &ixgbe_set_vfta_generic;
- mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
- mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
- mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
- mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
- mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
- mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+ mac->ops.set_vfta = ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
+ mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
+ mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
+ mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
+ mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
/* Link */
- mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
- mac->ops.check_link = &ixgbe_check_mac_link_generic;
- mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
+ mac->ops.check_link = ixgbe_check_mac_link_generic;
+ mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
ixgbe_init_mac_link_ops_82599(hw);
- mac->mcft_size = 128;
- mac->vft_size = 128;
- mac->num_rar_entries = 128;
- mac->rx_pb_size = 512;
- mac->max_tx_queues = 128;
- mac->max_rx_queues = 128;
+ mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
+ mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
@@ -318,17 +388,14 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
/* EEPROM */
- eeprom->ops.read = &ixgbe_read_eeprom_82599;
- eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
+ eeprom->ops.read = ixgbe_read_eeprom_82599;
+ eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;
/* Manageability interface */
- mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
-
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
- mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
- /* Cache if MNG FW is up */
- hw->mng_fw_enabled = ixgbe_mng_enabled(hw);
+ mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
return ret_val;
}
@@ -429,7 +496,14 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
if (hw->phy.multispeed_fiber) {
*speed |= IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
- *autoneg = TRUE;
+
+ /* QSFP must not enable full auto-negotiation
+ * Limited autoneg is enabled at 1G
+ */
+ if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
+ *autoneg = FALSE;
+ else
+ *autoneg = TRUE;
}
out:
@@ -482,6 +556,9 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
media_type = ixgbe_media_type_copper;
break;
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ media_type = ixgbe_media_type_fiber_qsfp;
+ break;
case IXGBE_DEV_ID_82599_BYPASS:
media_type = ixgbe_media_type_fiber_fixed;
hw->phy.multispeed_fiber = TRUE;
@@ -509,8 +586,8 @@ void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
- if (!hw->mng_fw_enabled && !hw->wol_enabled &&
- ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
+ if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
+ ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
@@ -597,7 +674,11 @@ void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
- /* Disable tx laser; allow 100us to go dark per spec */
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
+ /* Disable Tx laser; allow 100us to go dark per spec */
esdp_reg |= IXGBE_ESDP_SDP3;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
IXGBE_WRITE_FLUSH(hw);
@@ -616,7 +697,7 @@ void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
- /* Enable tx laser; allow 100ms to light up */
+ /* Enable Tx laser; allow 100ms to light up */
esdp_reg &= ~IXGBE_ESDP_SDP3;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
IXGBE_WRITE_FLUSH(hw);
@@ -630,7 +711,7 @@ void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
* When the driver changes the link speeds that it can support,
* it sets autotry_restart to TRUE to indicate that we need to
* initiate a new autotry session with the link partner. To do
- * so, we set the speed then disable and re-enable the tx laser, to
+ * so, we set the speed then disable and re-enable the Tx laser, to
* alert the link partner that it also needs to restart autotry on its
* end. This is consistent with TRUE clause 37 autoneg, which also
* involves a loss of signal.
@@ -639,6 +720,10 @@ void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
if (hw->mac.autotry_restart) {
ixgbe_disable_tx_laser_multispeed_fiber(hw);
ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -647,229 +732,32 @@ void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
/**
- * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ * ixgbe_set_hard_rate_select_speed - Set module link speed
* @hw: pointer to hardware structure
* @speed: link speed to set
*
- * We set the module speed differently for fixed fiber. For other
- * multi-speed devices we don't have an error value so here if we
- * detect an error we just log it and exit.
+ * Set module link speed via RS0/RS1 rate select pins.
*/
-static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
+void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
- s32 status;
- u8 rs, eeprom_data;
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
switch (speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
- /* one bit mask same as setting on */
- rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
break;
case IXGBE_LINK_SPEED_1GB_FULL:
- rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
break;
default:
DEBUGOUT("Invalid fixed module speed\n");
return;
}
- /* Set RS0 */
- status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- &eeprom_data);
- if (status) {
- DEBUGOUT("Failed to read Rx Rate Select RS0\n");
- goto out;
- }
-
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
-
- status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- eeprom_data);
- if (status) {
- DEBUGOUT("Failed to write Rx Rate Select RS0\n");
- goto out;
- }
-
- /* Set RS1 */
- status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- &eeprom_data);
- if (status) {
- DEBUGOUT("Failed to read Rx Rate Select RS1\n");
- goto out;
- }
-
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
-
- status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- eeprom_data);
- if (status) {
- DEBUGOUT("Failed to write Rx Rate Select RS1\n");
- goto out;
- }
-out:
- return;
-}
-
-/**
- * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
- *
- * Set the link speed in the AUTOC register and restarts link.
- **/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
-{
- s32 status = IXGBE_SUCCESS;
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- u32 speedcnt = 0;
- u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
- u32 i = 0;
- bool autoneg, link_up = FALSE;
-
- DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
-
- /* Mask off requested but non-supported speeds */
- status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- speed &= link_speed;
-
- /*
- * Try each speed one by one, highest priority first. We do this in
- * software because 10gb fiber doesn't support speed autonegotiation.
- */
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- speedcnt++;
- highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
- goto out;
-
- /* Set the module link speed */
- if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
- ixgbe_set_fiber_fixed_speed(hw,
- IXGBE_LINK_SPEED_10GB_FULL);
- } else {
- esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- }
-
- /* Allow module to change analog characteristics (1G->10G) */
- msec_delay(40);
-
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_10GB_FULL,
- autoneg_wait_to_complete);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Flap the tx laser if it has not already been done */
- ixgbe_flap_tx_laser(hw);
-
- /*
- * Wait for the controller to acquire link. Per IEEE 802.3ap,
- * Section 73.10.2, we may have to wait up to 500ms if KR is
- * attempted. 82599 uses the same timing for 10g SFI.
- */
- for (i = 0; i < 5; i++) {
- /* Wait for the link partner to also set speed */
- msec_delay(100);
-
- /* If we have link, just jump out */
- status = ixgbe_check_link(hw, &link_speed,
- &link_up, FALSE);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if (link_up)
- goto out;
- }
- }
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- speedcnt++;
- if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
- highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
- goto out;
-
- /* Set the module link speed */
- if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
- ixgbe_set_fiber_fixed_speed(hw,
- IXGBE_LINK_SPEED_1GB_FULL);
- } else {
- esdp_reg &= ~IXGBE_ESDP_SDP5;
- esdp_reg |= IXGBE_ESDP_SDP5_DIR;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- }
-
- /* Allow module to change analog characteristics (10G->1G) */
- msec_delay(40);
-
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_1GB_FULL,
- autoneg_wait_to_complete);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* Flap the tx laser if it has not already been done */
- ixgbe_flap_tx_laser(hw);
-
- /* Wait for the link partner to also set speed */
- msec_delay(100);
-
- /* If we have link, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if (link_up)
- goto out;
- }
-
- /*
- * We didn't get link. Configure back to the highest speed we tried,
- * (if there was more than one). We call ourselves back with just the
- * single highest speed that the user requested.
- */
- if (speedcnt > 1)
- status = ixgbe_setup_mac_link_multispeed_fiber(hw,
- highest_link_speed, autoneg_wait_to_complete);
-
-out:
- /* Set autoneg_advertised value based on input link speed */
- hw->phy.autoneg_advertised = 0;
-
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
- return status;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
}
/**
@@ -998,14 +886,15 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
{
bool autoneg = FALSE;
s32 status = IXGBE_SUCCESS;
- u32 autoc, pma_pmd_1g, link_mode, start_autoc;
+ u32 pma_pmd_1g, link_mode;
+ u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* AUTOC as read right now */
+ u32 orig_autoc = 0; /* cached original AUTOC value */
+ u32 autoc = current_autoc; /* temporary used for comparison */
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 orig_autoc = 0;
u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
u32 links_reg;
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
- bool got_lock = FALSE;
DEBUGFUNC("ixgbe_setup_mac_link_82599");
@@ -1023,12 +912,10 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
if (hw->mac.orig_link_settings_stored)
- autoc = hw->mac.orig_autoc;
+ orig_autoc = hw->mac.orig_autoc;
else
- autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ orig_autoc = autoc;
- orig_autoc = autoc;
- start_autoc = hw->mac.cached_autoc;
link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
@@ -1061,39 +948,18 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
(pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
autoc &= ~IXGBE_AUTOC_LMS_MASK;
- if (autoneg)
+ if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
autoc |= IXGBE_AUTOC_LMS_1G_AN;
else
autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
}
}
- if (autoc != start_autoc) {
- /* Need SW/FW semaphore around AUTOC writes if LESM is on,
- * likewise reset_pipeline requires us to hold this lock as
- * it also writes to AUTOC.
- */
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- status = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (status != IXGBE_SUCCESS) {
- status = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
-
- got_lock = TRUE;
- }
-
+ if (autoc != current_autoc) {
/* Restart link */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
- hw->mac.cached_autoc = autoc;
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock) {
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- got_lock = FALSE;
- }
+ status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE);
+ if (status != IXGBE_SUCCESS)
+ goto out;
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
@@ -1161,7 +1027,8 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
s32 status;
- u32 ctrl, i, autoc2;
+ u32 ctrl = 0;
+ u32 i, autoc, autoc2;
u32 curr_lms;
bool link_up = FALSE;
@@ -1197,11 +1064,7 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->phy.ops.reset(hw);
/* remember AUTOC from before we reset */
- if (hw->mac.cached_autoc)
- curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
- else
- curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
- IXGBE_AUTOC_LMS_MASK;
+ curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
mac_reset_top:
/*
@@ -1221,7 +1084,7 @@ mac_reset_top:
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
- /* Poll for reset bit to self-clear indicating reset is complete */
+ /* Poll for reset bit to self-clear meaning reset is complete */
for (i = 0; i < 10; i++) {
usec_delay(1);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
@@ -1238,8 +1101,8 @@ mac_reset_top:
/*
* Double resets are required for recovery from certain error
- * conditions. Between resets, it is necessary to stall to allow time
- * for any pending HW events to complete.
+ * conditions. Between resets, it is necessary to stall to
+ * allow time for any pending HW events to complete.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
@@ -1251,7 +1114,7 @@ mac_reset_top:
* stored off yet. Otherwise restore the stored original
* values since the reset operation sets back to defaults.
*/
- hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
/* Enable link if disabled in NVM */
@@ -1262,7 +1125,7 @@ mac_reset_top:
}
if (hw->mac.orig_link_settings_stored == FALSE) {
- hw->mac.orig_autoc = hw->mac.cached_autoc;
+ hw->mac.orig_autoc = autoc;
hw->mac.orig_autoc2 = autoc2;
hw->mac.orig_link_settings_stored = TRUE;
} else {
@@ -1273,36 +1136,18 @@ mac_reset_top:
* Likewise if we support WoL we don't want change the
* LMS state.
*/
- if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
+ if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
hw->wol_enabled)
hw->mac.orig_autoc =
(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
curr_lms;
- if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
- /* Need SW/FW semaphore around AUTOC writes if LESM is
- * on, likewise reset_pipeline requires us to hold
- * this lock as it also writes to AUTOC.
- */
- bool got_lock = FALSE;
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- status = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (status != IXGBE_SUCCESS) {
- status = IXGBE_ERR_SWFW_SYNC;
- goto reset_hw_out;
- }
-
- got_lock = TRUE;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
- hw->mac.cached_autoc = hw->mac.orig_autoc;
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ if (autoc != hw->mac.orig_autoc) {
+ status = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc,
+ FALSE);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
}
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
@@ -1349,13 +1194,34 @@ reset_hw_out:
}
/**
+ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: current value of FDIRCMD register
+ */
+static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+ *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ return IXGBE_SUCCESS;
+ usec_delay(10);
+ }
+
+ return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
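The helper factors out a poll-until-clear loop that the old code open-coded in two places. A runnable sketch of the same shape, with a fake register that stays busy for three reads:

#include <stdint.h>
#include <stdio.h>

#define CMD_MASK	0x3	/* stand-in for IXGBE_FDIRCMD_CMD_MASK */
#define POLL_LIMIT	10	/* stand-in for IXGBE_FDIRCMD_CMD_POLL */

static uint32_t
read_reg(void)			/* fake FDIRCMD: busy for 3 reads */
{
	static int reads;
	return ((reads++ < 3) ? 1 : 0);
}

int main(void)
{
	for (int i = 0; i < POLL_LIMIT; i++) {
		if (!(read_reg() & CMD_MASK)) {
			printf("complete after %d polls\n", i + 1);
			return (0);
		}
		/* usec_delay(10) sits here in the driver */
	}
	printf("timed out\n");
	return (1);
}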
+
+/**
* ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
* @hw: pointer to hardware structure
**/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
+ s32 err;
int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ u32 fdircmd;
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
@@ -1364,16 +1230,10 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
* Before starting reinitialization process,
* FDIRCMD.CMD must be zero.
*/
- for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
- if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
- IXGBE_FDIRCMD_CMD_MASK))
- break;
- usec_delay(10);
- }
- if (i >= IXGBE_FDIRCMD_CMD_POLL) {
- DEBUGOUT("Flow Director previous command isn't complete, "
- "aborting table re-initialization.\n");
- return IXGBE_ERR_FDIR_REINIT_FAILED;
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
+ return err;
}
IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
@@ -1497,8 +1357,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
* @hw: pointer to hardware structure
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
+ * @cloud_mode: TRUE - cloud mode, FALSE - other mode
**/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
+ bool cloud_mode)
{
DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
@@ -1518,6 +1380,10 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
(0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
(4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+ if (cloud_mode)
+ fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
+ IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
+
/* write hashes and fdirctrl register, poll for completion */
ixgbe_fdir_enable_82599(hw, fdirctrl);
@@ -1546,14 +1412,14 @@ do { \
bucket_hash ^= hi_hash_dword >> n; \
else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
sig_hash ^= hi_hash_dword << (16 - n); \
-} while (0);
+} while (0)
/**
* ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
* @stream: input bitstream to compute the hash on
*
* This function is almost identical to the function above but contains
- * several optomizations such as unwinding all of the loops, letting the
+ * several optimizations such as unwinding all of the loops, letting the
* compiler work out all of the conditional ifs since the keys are static
* defines, and computing two keys at once since the hashed dword stream
* will be the same for both keys.
@@ -1582,7 +1448,7 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
/*
* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
* delay this because bit 0 of the stream should not be processed
- * so we do not add the vlan until after bit 0 was processed
+ * so we do not add the VLAN until after bit 0 was processed
*/
lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
@@ -1620,22 +1486,32 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* @input: unique input dword
* @common: compressed common input dword
* @queue: queue index to direct traffic to
+ *
+ * Note that the tunnel bit in input must not be set when the hardware
+ * tunneling support does not exist.
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue)
{
- u64 fdirhashcmd;
- u32 fdircmd;
+ u64 fdirhashcmd;
+ u8 flow_type;
+ bool tunnel;
+ u32 fdircmd;
+ s32 err;
DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
/*
* Get the flow_type in order to program FDIRCMD properly
* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+ * fifth is FDIRCMD.TUNNEL_FILTER
*/
- switch (input.formatted.flow_type) {
+ tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
+ flow_type = input.formatted.flow_type &
+ (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
+ switch (flow_type) {
case IXGBE_ATR_FLOW_TYPE_TCPV4:
case IXGBE_ATR_FLOW_TYPE_UDPV4:
case IXGBE_ATR_FLOW_TYPE_SCTPV4:
@@ -1651,8 +1527,10 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
- fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ if (tunnel)
+ fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
/*
* The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
@@ -1662,6 +1540,12 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director command did not complete!\n");
+ return err;
+ }
+
DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
return IXGBE_SUCCESS;
@@ -1674,14 +1558,14 @@ do { \
bucket_hash ^= lo_hash_dword >> n; \
if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
bucket_hash ^= hi_hash_dword >> n; \
-} while (0);
+} while (0)
/**
* ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
* @atr_input: input bitstream to compute the hash on
* @input_mask: mask for the input bitstream
*
- * This function serves two main purposes. First it applys the input_mask
+ * This function serves two main purposes. First it applies the input_mask
* to the atr_input resulting in a cleaned up atr_input data stream.
* Secondly it computes the hash and stores it in the bkt_hash field at
* the end of the input byte stream. This way it will be available for
@@ -1693,34 +1577,20 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
u32 bucket_hash = 0;
+ u32 hi_dword = 0;
+ u32 i = 0;
/* Apply masks to input data */
- input->dword_stream[0] &= input_mask->dword_stream[0];
- input->dword_stream[1] &= input_mask->dword_stream[1];
- input->dword_stream[2] &= input_mask->dword_stream[2];
- input->dword_stream[3] &= input_mask->dword_stream[3];
- input->dword_stream[4] &= input_mask->dword_stream[4];
- input->dword_stream[5] &= input_mask->dword_stream[5];
- input->dword_stream[6] &= input_mask->dword_stream[6];
- input->dword_stream[7] &= input_mask->dword_stream[7];
- input->dword_stream[8] &= input_mask->dword_stream[8];
- input->dword_stream[9] &= input_mask->dword_stream[9];
- input->dword_stream[10] &= input_mask->dword_stream[10];
+ for (i = 0; i < 14; i++)
+ input->dword_stream[i] &= input_mask->dword_stream[i];
/* record the flow_vm_vlan bits as they are a key part to the hash */
flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
/* generate common hash dword */
- hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
- input->dword_stream[2] ^
- input->dword_stream[3] ^
- input->dword_stream[4] ^
- input->dword_stream[5] ^
- input->dword_stream[6] ^
- input->dword_stream[7] ^
- input->dword_stream[8] ^
- input->dword_stream[9] ^
- input->dword_stream[10]);
+ for (i = 1; i <= 13; i++)
+ hi_dword ^= input->dword_stream[i];
+ hi_hash_dword = IXGBE_NTOHL(hi_dword);
/* low dword is word swapped version of common */
lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
@@ -1734,26 +1604,13 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
/*
* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
* delay this because bit 0 of the stream should not be processed
- * so we do not add the vlan until after bit 0 was processed
+ * so we do not add the VLAN until after bit 0 was processed
*/
lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
/* Process remaining 30 bits of the key */
- IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+ for (i = 1; i <= 15; i++)
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
/*
* Limit hash to 13 bits since max bucket count is 8K.
@@ -1763,7 +1620,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
}
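The hunk above ends just before the masking step itself; for clarity, a one-line
sketch of what the 13-bit limit presumably amounts to (8K buckets = 2^13):

	input->formatted.bkt_hash = bucket_hash & 0x1FFF;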
/**
- * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
+ * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
* @input_mask: mask to be bit swapped
*
* The source and destination port masks for flow director are bit swapped
@@ -1800,12 +1657,12 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input_mask)
+ union ixgbe_atr_input *input_mask, bool cloud_mode)
{
/* mask IPv6 since it is currently not supported */
u32 fdirm = IXGBE_FDIRM_DIPv6;
u32 fdirtcpm;
-
+ u32 fdirip6m;
DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
/*
@@ -1878,59 +1735,147 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
return IXGBE_ERR_CONFIG;
}
- /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+ if (cloud_mode) {
+ fdirm |= IXGBE_FDIRM_L3P;
+ fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
+ fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
+
+ switch (input_mask->formatted.inner_mac[0] & 0xFF) {
+ case 0x00:
+ /* Mask inner MAC, fall through */
+ fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
+ case 0xFF:
+ break;
+ default:
+ DEBUGOUT(" Error on inner_mac byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
+ case 0x0:
+ /* Mask vxlan id */
+ fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
+ break;
+ case 0x00FFFFFF:
+ fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
+ break;
+ case 0xFFFFFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on TNI/VNI byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.tunnel_type & 0xFFFF) {
+ case 0x0:
+ /* Mask tunnel type, fall through */
+ fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
+ case 0xFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on tunnel type byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
- /* store the TCP/UDP port masks, bit reversed from port layout */
- fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
+ /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSIP4M and
+ * FDIRDIP4M in cloud mode to allow L3/L3 packets to
+ * tunnel.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
+ }
- /* write both the same so that UDP and TCP use the same mask */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
- /* store source and destination IP masks (big-enian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
- ~input_mask->formatted.src_ip[0]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
- ~input_mask->formatted.dst_ip[0]);
+ if (!cloud_mode) {
+ /* store the TCP/UDP port masks, bit reversed from port
+ * layout */
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+ /* also use it for SCTP */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+ break;
+ default:
+ break;
+ }
+ /* store source and destination IP masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ ~input_mask->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ ~input_mask->formatted.dst_ip[0]);
+ }
return IXGBE_SUCCESS;
}
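Note the convention in the non-cloud path: the hardware mask registers hold the
complement of the software mask, so a set register bit means "ignore this bit". A
hedged sketch of a caller expressing "match the source port exactly, ignore the
destination port":

	input_mask->formatted.src_port = 0xFFFF;	/* compare all 16 bits */
	input_mask->formatted.dst_port = 0;		/* wildcard dst port */
	/* ixgbe_fdir_set_input_mask_82599() bit-swaps and complements these
	 * before writing FDIRTCPM/FDIRUDPM. */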
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
- u16 soft_id, u8 queue)
+ u16 soft_id, u8 queue, bool cloud_mode)
{
u32 fdirport, fdirvlan, fdirhash, fdircmd;
+ u32 addr_low, addr_high;
+ u32 cloud_type = 0;
+ s32 err;
DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
+ if (!cloud_mode) {
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+ input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+ input->formatted.src_ip[1]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.src_ip[2]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
+ input->formatted.src_ip[0]);
+
+ /* record the first 32 bits of the destination address
+ * (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
+ input->formatted.dst_ip[0]);
+
+ /* record source and destination port (little-endian)*/
+ fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+ }
- /* currently IPv6 is not supported, must be programmed with 0 */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
- input->formatted.src_ip[0]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
- input->formatted.src_ip[1]);
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
- input->formatted.src_ip[2]);
-
- /* record the source address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
-
- /* record the first 32 bits of the destination address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
-
- /* record source and destination port (little-endian)*/
- fdirport = IXGBE_NTOHS(input->formatted.dst_port);
- fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
- fdirport |= IXGBE_NTOHS(input->formatted.src_port);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
-
- /* record vlan (little-endian) and flex_bytes(big-endian) */
+ /* record VLAN (little-endian) and flex_bytes(big-endian) */
fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+ if (cloud_mode) {
+ if (input->formatted.tunnel_type != 0)
+ cloud_type = 0x80000000;
+
+ addr_low = ((u32)input->formatted.inner_mac[0] |
+ ((u32)input->formatted.inner_mac[1] << 8) |
+ ((u32)input->formatted.inner_mac[2] << 16) |
+ ((u32)input->formatted.inner_mac[3] << 24));
+ addr_high = ((u32)input->formatted.inner_mac[4] |
+ ((u32)input->formatted.inner_mac[5] << 8));
+ cloud_type |= addr_high;
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
+ }
+
/* configure FDIRHASH register */
fdirhash = input->formatted.bkt_hash;
fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
@@ -1947,11 +1892,18 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
if (queue == IXGBE_FDIR_DROP_QUEUE)
fdircmd |= IXGBE_FDIRCMD_DROP;
+ if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
+ fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director command did not complete!\n");
+ return err;
+ }
return IXGBE_SUCCESS;
}
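In cloud mode the FDIRSIPv6 registers are repurposed: bytes 0-3 of the inner MAC
land in word 0, bytes 4-5 share word 1 with the tunnel-type flag, and the TNI/VNI
takes word 2. A worked example, assuming a hypothetical inner MAC of
00:1b:21:aa:bb:cc:

	/* inner_mac[] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc } yields:
	 * addr_low   = 0xaa211b00   (bytes 0-3, byte 0 in the LSB)
	 * addr_high  = 0x0000ccbb   (bytes 4-5)
	 * cloud_type = 0x8000ccbb   (bit 31 set when tunnel_type != 0)
	 */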
@@ -1961,9 +1913,8 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id)
{
u32 fdirhash;
- u32 fdircmd = 0;
- u32 retry_count;
- s32 err = IXGBE_SUCCESS;
+ u32 fdircmd;
+ s32 err;
/* configure FDIRHASH register */
fdirhash = input->formatted.bkt_hash;
@@ -1976,18 +1927,12 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
/* Query if filter is present */
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
- for (retry_count = 10; retry_count; retry_count--) {
- /* allow 10us for query to process */
- usec_delay(10);
- /* verify query completed successfully */
- fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
- if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
- break;
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director command did not complete!\n");
+ return err;
}
- if (!retry_count)
- err = IXGBE_ERR_FDIR_REINIT_FAILED;
-
/* if filter exists in hardware then remove it */
if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
@@ -1996,7 +1941,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
}
- return err;
+ return IXGBE_SUCCESS;
}
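The new ixgbe_fdir_check_cmd_complete() helper, which replaces the three
open-coded polling loops, is defined outside this hunk; from the call sites it is
assumed to poll FDIRCMD until the command field clears, roughly as below (the
poll count and the error constant are assumptions):

	static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw,
						 u32 *fdircmd)
	{
		int i;

		for (i = 0; i < 10; i++) {
			*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
			if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
				return IXGBE_SUCCESS;
			usec_delay(10);
		}
		return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
	}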
/**
@@ -2013,7 +1958,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
union ixgbe_atr_input *input_mask,
- u16 soft_id, u8 queue)
+ u16 soft_id, u8 queue, bool cloud_mode)
{
s32 err = IXGBE_ERR_CONFIG;
@@ -2025,6 +1970,7 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
*/
switch (input->formatted.flow_type) {
case IXGBE_ATR_FLOW_TYPE_IPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
if (input->formatted.dst_port || input->formatted.src_port) {
DEBUGOUT(" Error on src/dst port\n");
@@ -2032,12 +1978,15 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
}
break;
case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
if (input->formatted.dst_port || input->formatted.src_port) {
DEBUGOUT(" Error on src/dst port\n");
return IXGBE_ERR_CONFIG;
}
case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
IXGBE_ATR_L4TYPE_MASK;
break;
@@ -2047,7 +1996,7 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
}
/* program input mask into the HW */
- err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
+ err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
if (err)
return err;
@@ -2056,7 +2005,7 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
/* program filters to filter memory */
return ixgbe_fdir_write_perfect_filter_82599(hw, input,
- soft_id, queue);
+ soft_id, queue, cloud_mode);
}
/**
@@ -2146,7 +2095,7 @@ out:
**/
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
- s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ s32 status;
DEBUGFUNC("ixgbe_identify_phy_82599");
@@ -2155,7 +2104,7 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
if (status != IXGBE_SUCCESS) {
/* 82599 10GBASE-T requires an external PHY */
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
- goto out;
+ return status;
else
status = ixgbe_identify_module_generic(hw);
}
@@ -2163,14 +2112,13 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
/* Set PHY type none if no PHY detected */
if (hw->phy.type == ixgbe_phy_unknown) {
hw->phy.type = ixgbe_phy_none;
- status = IXGBE_SUCCESS;
+ return IXGBE_SUCCESS;
}
/* Return error if SFP module has been detected but is not supported */
if (hw->phy.type == ixgbe_phy_sfp_unsupported)
- status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
-out:
return status;
}
@@ -2189,8 +2137,6 @@ u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
u16 ext_ability = 0;
- u8 comp_codes_10g = 0;
- u8 comp_codes_1g = 0;
DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
@@ -2258,40 +2204,7 @@ sfp_check:
/* SFP check must be done last since DA modules are sometimes used to
* test KR mode - we need to id KR mode correctly before SFP module.
* Call identify_sfp because the pluggable module may have changed */
- hw->phy.ops.identify_sfp(hw);
- if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
- goto out;
-
- switch (hw->phy.type) {
- case ixgbe_phy_sfp_passive_tyco:
- case ixgbe_phy_sfp_passive_unknown:
- physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
- break;
- case ixgbe_phy_sfp_ftl_active:
- case ixgbe_phy_sfp_active_unknown:
- physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
- break;
- case ixgbe_phy_sfp_avago:
- case ixgbe_phy_sfp_ftl:
- case ixgbe_phy_sfp_intel:
- case ixgbe_phy_sfp_unknown:
- hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
- hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
- if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
- else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
- else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
- physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
- else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
- physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
- break;
- default:
- break;
- }
-
+ physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
out:
return physical_layer;
}
@@ -2317,7 +2230,10 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
hw->mac.ops.disable_sec_rx_path(hw);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+ if (regval & IXGBE_RXCTRL_RXEN)
+ ixgbe_enable_rx(hw);
+ else
+ ixgbe_disable_rx(hw);
hw->mac.ops.enable_sec_rx_path(hw);
@@ -2325,7 +2241,7 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
}
/**
- * ixgbe_verify_fw_version_82599 - verify fw version for 82599
+ * ixgbe_verify_fw_version_82599 - verify FW version for 82599
* @hw: pointer to hardware structure
*
* Verifies that installed the firmware version is 0.6 or higher
@@ -2419,7 +2335,7 @@ bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
(fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
goto out;
- /* get the lesm state word */
+ /* get the LESM state word */
status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
IXGBE_FW_LESM_STATE_1),
&fw_lesm_state);
@@ -2504,7 +2420,7 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
* @hw: pointer to hardware structure
*
* Reset pipeline by asserting Restart_AN together with LMS change to ensure
- * full pipeline reset
+ * full pipeline reset. This function assumes the SW/FW lock is held.
**/
s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
@@ -2520,10 +2436,11 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
}
- autoc_reg = hw->mac.cached_autoc;
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
+ autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
/* Wait for AN to leave state 0 */
for (i = 0; i < 10; i++) {
msec_delay(4);
@@ -2549,4 +2466,116 @@ reset_pipeline_out:
}
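The pipeline reset now toggles LMS[2] explicitly rather than XOR-ing the 1G_AN
encoding; assuming the LMS field occupies AUTOC[15:13] as IXGBE_AUTOC_LMS_SHIFT
suggests, the arithmetic is:

	/* XOR-ing (0x4 << 13) flips only AUTOC bit 15 (LMS[2]), which
	 * guarantees the link-mode value changes while AN_RESTART kicks
	 * autonegotiation; the original AUTOC value is restored afterwards.
	 */
	u32 toggled = autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT);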
+/**
+ * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: I2C bus address to read from
+ * @data: value read
+ *
+ * Performs byte read operation from SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+ DEBUGFUNC("ixgbe_read_i2c_byte_82599");
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ msec_delay(5);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Driver can't access resource,"
+ " acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: I2C bus address to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ DEBUGFUNC("ixgbe_write_i2c_byte_82599");
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ msec_delay(5);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Driver can't access resource,"
+ " acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
diff --git a/sys/dev/ixgbe/ixgbe_82599.h b/sys/dev/ixgbe/ixgbe_82599.h
index a77d153..8c973ac 100644
--- a/sys/dev/ixgbe/ixgbe_82599.h
+++ b/sys/dev/ixgbe/ixgbe_82599.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -41,9 +41,8 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed);
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
@@ -61,4 +60,6 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
+s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
+s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked);
#endif /* _IXGBE_82599_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index 925866b..76d7414 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -78,13 +78,23 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
case ixgbe_mac_82599EB:
status = ixgbe_init_ops_82599(hw);
break;
+ case ixgbe_mac_X540:
+ status = ixgbe_init_ops_X540(hw);
+ break;
+ case ixgbe_mac_X550:
+ status = ixgbe_init_ops_X550(hw);
+ break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ status = ixgbe_init_ops_X550EM(hw);
+ break;
case ixgbe_mac_82599_vf:
case ixgbe_mac_X540_vf:
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
status = ixgbe_init_ops_vf(hw);
break;
- case ixgbe_mac_X540:
- status = ixgbe_init_ops_X540(hw);
- break;
default:
status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
@@ -138,6 +148,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_BYPASS:
@@ -153,9 +164,35 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
hw->mac.type = ixgbe_mac_X540_vf;
break;
case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
case IXGBE_DEV_ID_X540_BYPASS:
hw->mac.type = ixgbe_mac_X540;
break;
+ case IXGBE_DEV_ID_X550T:
+ hw->mac.type = ixgbe_mac_X550;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ hw->mac.type = ixgbe_mac_X550EM_x;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ hw->mac.type = ixgbe_mac_X550EM_a;
+ break;
+ case IXGBE_DEV_ID_X550_VF:
+ case IXGBE_DEV_ID_X550_VF_HV:
+ hw->mac.type = ixgbe_mac_X550_vf;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_VF:
+ case IXGBE_DEV_ID_X550EM_X_VF_HV:
+ hw->mac.type = ixgbe_mac_X550EM_x_vf;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_VF:
+ case IXGBE_DEV_ID_X550EM_A_VF_HV:
+ hw->mac.type = ixgbe_mac_X550EM_a_vf;
+ break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
@@ -512,6 +549,20 @@ s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_setup_internal_phy - Configure integrated PHY
+ * @hw: pointer to hardware structure
+ *
+ * Reconfigure the integrated PHY to enable communication with the external PHY.
+ * Returns success if not implemented, since nothing needs to be done in this
+ * case.
+ */
+s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw),
+ IXGBE_SUCCESS);
+}
+
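All of these API wrappers funnel through ixgbe_call_func(); its definition lives
in ixgbe_api.h rather than this hunk, but from the usage it is assumed to be a
null-check-and-dispatch macro along these lines:

	/* hedged sketch of the dispatch macro the wrappers rely on */
	#define ixgbe_call_func(hw, func, params, error) \
		((func) != NULL ? (func) params : (error))

This is why ixgbe_setup_internal_phy() can document "success if not implemented":
it simply passes IXGBE_SUCCESS as the fallback.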
+/**
* ixgbe_check_phy_link - Determine link and speed status
* @hw: pointer to hardware structure
*
@@ -541,6 +592,17 @@ s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
}
/**
+ * ixgbe_set_phy_power - Control the phy power state
+ * @hw: pointer to hardware structure
+ * @on: TRUE for on, FALSE for off
+ */
+s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_check_link - Get link and speed status
* @hw: pointer to hardware structure
*
@@ -609,6 +671,22 @@ s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
}
/**
+ * ixgbe_setup_mac_link - Set link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ *
+ * Configures link settings. Restarts the link.
+ * Performs autonegotiation if needed.
+ **/
+s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed,
+ autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_get_link_capabilities - Returns link capabilities
* @hw: pointer to hardware structure
*
@@ -1002,6 +1080,18 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_setup_fc - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
* @hw: pointer to hardware structure
* @maj: driver major number to be sent to firmware
@@ -1018,6 +1108,177 @@ s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+/**
+ * ixgbe_dmac_config - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing. If enabling dmac, dmac is activated.
+ * When disabling dmac, the dmac enable bit is cleared.
+ **/
+s32 ixgbe_dmac_config(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_update_tcs - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Disables dmac, updates per TC settings, and then re-enables dmac.
+ **/
+s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_config_tcs - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing threshold per TC and set high priority bit for
+ * FCOE TC. The dmac enable bit must be cleared before configuring.
+ **/
+s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_eee - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on the enable_eee flag.
+ * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
+ * are modified.
+ *
+ **/
+s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_source_address_pruning - Enable/Disable source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool - Rx pool to toggle source address pruning
+ **/
+void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
+ unsigned int pool)
+{
+ if (hw->mac.ops.set_source_address_pruning)
+ hw->mac.ops.set_source_address_pruning(hw, enable, pool);
+}
+
+/**
+ * ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for Ethertype anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ *
+ **/
+void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+ if (hw->mac.ops.set_ethertype_anti_spoofing)
+ hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf);
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg - Read 32 bit PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: type of device you want to communicate with
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *phy_data)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: type of device you want to communicate with
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 phy_data)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_mdd - Disable malicious driver detection
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_disable_mdd(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.disable_mdd)
+ hw->mac.ops.disable_mdd(hw);
+}
+
+/**
+ * ixgbe_enable_mdd - Enable malicious driver detection
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_enable_mdd(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_mdd)
+ hw->mac.ops.enable_mdd(hw);
+}
+
+/**
+ * ixgbe_mdd_event - Handle malicious driver detection event
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: bitmap of malicious VFs
+ *
+ **/
+void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap)
+{
+ if (hw->mac.ops.mdd_event)
+ hw->mac.ops.mdd_event(hw, vf_bitmap);
+}
+
+/**
+ * ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver
+ * detection event
+ * @hw: pointer to hardware structure
+ * @vf: vf index
+ *
+ **/
+void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf)
+{
+ if (hw->mac.ops.restore_mdd_vf)
+ hw->mac.ops.restore_mdd_vf(hw, vf);
+}
+
+/**
+ * ixgbe_enter_lplu - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0).
+ **/
+s32 ixgbe_enter_lplu(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
/**
* ixgbe_read_analog_reg8 - Reads 8 bit analog register
@@ -1064,6 +1325,7 @@ s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
* ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: I2C bus address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface.
@@ -1076,9 +1338,25 @@ s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
}
/**
+ * ixgbe_read_i2c_combined - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_combined, (hw, addr,
+ reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_write_i2c_byte - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: I2C bus address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface
@@ -1092,6 +1370,21 @@ s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
}
/**
+ * ixgbe_write_i2c_combined - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_combined, (hw, addr,
+ reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: EEPROM byte offset to write
@@ -1179,7 +1472,7 @@ s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
{
return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
(hw, mask), IXGBE_NOT_IMPLEMENTED);
@@ -1193,9 +1486,34 @@ s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
* Releases the SWFW semaphore through SW_FW_SYNC register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
{
if (hw->mac.ops.release_swfw_sync)
hw->mac.ops.release_swfw_sync(hw, mask);
}
+
+void ixgbe_disable_rx(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.disable_rx)
+ hw->mac.ops.disable_rx(hw);
+}
+
+void ixgbe_enable_rx(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_rx)
+ hw->mac.ops.enable_rx(hw);
+}
+
+/**
+ * ixgbe_set_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via the rate select.
+ */
+void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
+{
+ if (hw->mac.ops.set_rate_select_speed)
+ hw->mac.ops.set_rate_select_speed(hw, speed);
+}
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index 91023ae..650ae67 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,8 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
@@ -69,17 +71,21 @@ s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
u16 phy_data);
s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw);
s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
+s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on);
void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
@@ -123,6 +129,7 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool *vfta_changed);
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
u8 ver);
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
@@ -139,16 +146,17 @@ s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
+ bool cloud_mode);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input_mask);
+ union ixgbe_atr_input *input_mask, bool cloud_mode);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
- u16 soft_id, u8 queue);
+ u16 soft_id, u8 queue, bool cloud_mode);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id);
@@ -156,7 +164,8 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
union ixgbe_atr_input *mask,
u16 soft_id,
- u8 queue);
+ u8 queue,
+ bool cloud_mode);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
@@ -164,16 +173,38 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 *data);
+s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val);
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 data);
+s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val);
s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
-s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
+s32 ixgbe_dmac_config(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw);
+s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee);
+void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
+ unsigned int vf);
+void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable,
+ int vf);
+s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *phy_data);
+s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 phy_data);
+void ixgbe_disable_mdd(struct ixgbe_hw *hw);
+void ixgbe_enable_mdd(struct ixgbe_hw *hw);
+void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap);
+void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf);
+s32 ixgbe_enter_lplu(struct ixgbe_hw *hw);
+void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed);
+void ixgbe_disable_rx(struct ixgbe_hw *hw);
+void ixgbe_enable_rx(struct ixgbe_hw *hw);
#endif /* _IXGBE_API_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index 1734345..57fe1b5 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -75,62 +75,67 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_ops_generic");
/* EEPROM */
- eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
if (eec & IXGBE_EEC_PRES) {
- eeprom->ops.read = &ixgbe_read_eerd_generic;
- eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
+ eeprom->ops.read = ixgbe_read_eerd_generic;
+ eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
} else {
- eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+ eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
eeprom->ops.read_buffer =
- &ixgbe_read_eeprom_buffer_bit_bang_generic;
+ ixgbe_read_eeprom_buffer_bit_bang_generic;
}
- eeprom->ops.write = &ixgbe_write_eeprom_generic;
- eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
+ eeprom->ops.write = ixgbe_write_eeprom_generic;
+ eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
eeprom->ops.validate_checksum =
- &ixgbe_validate_eeprom_checksum_generic;
- eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
- eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
+ ixgbe_validate_eeprom_checksum_generic;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
/* MAC */
- mac->ops.init_hw = &ixgbe_init_hw_generic;
+ mac->ops.init_hw = ixgbe_init_hw_generic;
mac->ops.reset_hw = NULL;
- mac->ops.start_hw = &ixgbe_start_hw_generic;
- mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
+ mac->ops.start_hw = ixgbe_start_hw_generic;
+ mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
mac->ops.get_media_type = NULL;
mac->ops.get_supported_physical_layer = NULL;
- mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
- mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
- mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
- mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
- mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
- mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
- mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
+ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
+ mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
+ mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
+ mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
+ mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
+ mac->ops.prot_autoc_read = prot_autoc_read_generic;
+ mac->ops.prot_autoc_write = prot_autoc_write_generic;
/* LEDs */
- mac->ops.led_on = &ixgbe_led_on_generic;
- mac->ops.led_off = &ixgbe_led_off_generic;
- mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
- mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
+ mac->ops.led_on = ixgbe_led_on_generic;
+ mac->ops.led_off = ixgbe_led_off_generic;
+ mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
+ mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
/* RAR, Multicast, VLAN */
- mac->ops.set_rar = &ixgbe_set_rar_generic;
- mac->ops.clear_rar = &ixgbe_clear_rar_generic;
+ mac->ops.set_rar = ixgbe_set_rar_generic;
+ mac->ops.clear_rar = ixgbe_clear_rar_generic;
mac->ops.insert_mac_addr = NULL;
mac->ops.set_vmdq = NULL;
mac->ops.clear_vmdq = NULL;
- mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
- mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
- mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
- mac->ops.enable_mc = &ixgbe_enable_mc_generic;
- mac->ops.disable_mc = &ixgbe_disable_mc_generic;
+ mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
+ mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
+ mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
+ mac->ops.enable_mc = ixgbe_enable_mc_generic;
+ mac->ops.disable_mc = ixgbe_disable_mc_generic;
mac->ops.clear_vfta = NULL;
mac->ops.set_vfta = NULL;
mac->ops.set_vlvf = NULL;
mac->ops.init_uta_tables = NULL;
+ mac->ops.enable_rx = ixgbe_enable_rx_generic;
+ mac->ops.disable_rx = ixgbe_disable_rx_generic;
/* Flow Control */
- mac->ops.fc_enable = &ixgbe_fc_enable_generic;
+ mac->ops.fc_enable = ixgbe_fc_enable_generic;
+ mac->ops.setup_fc = ixgbe_setup_fc_generic;
/* Link */
mac->ops.get_link_capabilities = NULL;
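The large &ixgbe_foo to ixgbe_foo churn in this hunk is purely cosmetic: a
function designator in C decays to a pointer, so both initializer spellings
store the same value.

	/* identical assignments */
	eeprom->ops.read = &ixgbe_read_eerd_generic;
	eeprom->ops.read = ixgbe_read_eerd_generic;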
@@ -162,6 +167,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
/* if link is down, assume supported */
@@ -179,7 +185,9 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_T3_LOM:
case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
case IXGBE_DEV_ID_X540_BYPASS:
+ case IXGBE_DEV_ID_X550T:
supported = TRUE;
break;
default:
@@ -196,24 +204,21 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_fc - Set up flow control
+ * ixgbe_setup_fc_generic - Set up flow control
* @hw: pointer to hardware structure
*
* Called at init time to set up flow control.
**/
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
u32 reg = 0, reg_bp = 0;
u16 reg_cu = 0;
- bool got_lock = FALSE;
+ bool locked = FALSE;
- DEBUGFUNC("ixgbe_setup_fc");
+ DEBUGFUNC("ixgbe_setup_fc_generic");
- /*
- * Validate the requested mode. Strict IEEE mode does not allow
- * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
- */
+ /* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
"ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
@@ -234,11 +239,18 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* we link at 10G, the 1G advertisement is harmless and vice versa.
*/
switch (hw->phy.media_type) {
+ case ixgbe_media_type_backplane:
+ /* some MAC's need RMW protection on AUTOC */
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ /* only backplane uses autoc, so fall through */
case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
- case ixgbe_media_type_backplane:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
break;
case ixgbe_media_type_copper:
hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
@@ -310,7 +322,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
break;
}
- if (hw->mac.type != ixgbe_mac_X540) {
+ if (hw->mac.type < ixgbe_mac_X540) {
/*
* Enable auto-negotiation between the MAC & PHY;
* the MAC will advertise clause 37 flow control.
@@ -333,35 +345,16 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
*/
if (hw->phy.media_type == ixgbe_media_type_backplane) {
reg_bp |= IXGBE_AUTOC_AN_RESTART;
- /* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on, likewise reset_pipeline requries the lock as
- * it also writes AUTOC.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val != IXGBE_SUCCESS) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
- got_lock = TRUE;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
+ if (ret_val)
+ goto out;
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
(ixgbe_device_supports_autoneg_fc(hw))) {
hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
}
- DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+ DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
return ret_val;
}
@@ -575,7 +568,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
if (hw->phy.id == 0)
ixgbe_identify_phy(hw);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
@@ -793,7 +786,7 @@ s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
return ret_val;
} else {
if (eeprom_buf_size > (u32)(pba->word[1] +
- pba->pba_block[0])) {
+ pba_block_size)) {
memcpy(pba->pba_block,
&eeprom_buf[pba->word[1]],
pba_block_size * sizeof(u16));
@@ -965,7 +958,8 @@ void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
{
struct ixgbe_mac_info *mac = &hw->mac;
- hw->bus.type = ixgbe_bus_type_pci_express;
+ if (hw->bus.type == ixgbe_bus_type_unknown)
+ hw->bus.type = ixgbe_bus_type_pci_express;
switch (link_status & IXGBE_PCI_LINK_WIDTH) {
case IXGBE_PCI_LINK_WIDTH_1:
@@ -1071,7 +1065,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
hw->adapter_stopped = TRUE;
/* Disable the receive unit */
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
+ ixgbe_disable_rx(hw);
/* Clear interrupt mask to stop interrupts from being generated */
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
@@ -2107,8 +2101,10 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
/**
* ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
* @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
**/
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -2121,33 +2117,44 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
/* Include 0x0-0x3F in the checksum */
for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
- if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
+ if (hw->eeprom.ops.read(hw, i, &word)) {
DEBUGOUT("EEPROM read failed\n");
- break;
+ return IXGBE_ERR_EEPROM;
}
checksum += word;
}
/* Include all data from pointers except for the fw pointer */
for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
- hw->eeprom.ops.read(hw, i, &pointer);
+ if (hw->eeprom.ops.read(hw, i, &pointer)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+
+ /* If the pointer seems invalid */
+ if (pointer == 0xFFFF || pointer == 0)
+ continue;
+
+ if (hw->eeprom.ops.read(hw, pointer, &length)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
- /* Make sure the pointer seems valid */
- if (pointer != 0xFFFF && pointer != 0) {
- hw->eeprom.ops.read(hw, pointer, &length);
+ if (length == 0xFFFF || length == 0)
+ continue;
- if (length != 0xFFFF && length != 0) {
- for (j = pointer+1; j <= pointer+length; j++) {
- hw->eeprom.ops.read(hw, j, &word);
- checksum += word;
- }
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (hw->eeprom.ops.read(hw, j, &word)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
}
+ checksum += word;
}
}
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return checksum;
+ return (s32)checksum;
}
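Widening calc_checksum from u16 to s32 lets one return value carry both outcomes:
the 16-bit checksum always fits in the non-negative range, while error codes stay
negative. The caller-side decode, as used by the validate/update paths below:

	s32 status = hw->eeprom.ops.calc_checksum(hw);

	if (status < 0)
		return status;			/* negative: EEPROM error */
	checksum = (u16)(status & 0xffff);	/* otherwise: the checksum */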
/**
@@ -2167,32 +2174,38 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
- /*
- * Read the first word from the EEPROM. If this times out or fails, do
+ /* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
- if (status == IXGBE_SUCCESS) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
- hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+ checksum = (u16)(status & 0xffff);
- /*
- * Verify read checksum from EEPROM is the same as
- * calculated checksum
- */
- if (read_checksum != checksum)
- status = IXGBE_ERR_EEPROM_CHECKSUM;
-
- /* If the user cares, return the calculated checksum */
- if (checksum_val)
- *checksum_val = checksum;
- } else {
+ status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+ if (status) {
DEBUGOUT("EEPROM read failed\n");
+ return status;
}
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
return status;
}
@@ -2207,21 +2220,24 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
- /*
- * Read the first word from the EEPROM. If this times out or fails, do
+ /* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
-
- if (status == IXGBE_SUCCESS) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
- status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
- checksum);
- } else {
+ if (status) {
DEBUGOUT("EEPROM read failed\n");
+ return status;
}
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+
return status;
}
@@ -2793,10 +2809,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
/*
* In order to prevent Tx hangs when the internal Tx
* switch is enabled we must set the high water mark
- * to the maximum FCRTH value. This allows the Tx
- * switch to function even under heavy Rx workloads.
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
*/
- fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
}
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
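A worked example of the new high-water mark, assuming a hypothetical 384 KB Rx
packet buffer (0x60000 bytes) read back from RXPBSIZE:

	/* old: fcrth = 0x60000 - 32    = 0x5ffe0 (32 bytes below the top)
	 * new: fcrth = 0x60000 - 24576 = 0x5a000 (24 KB of headroom)
	 */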
@@ -2888,8 +2905,7 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
(!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
- ERROR_REPORT1(IXGBE_ERROR_POLLING,
- "Auto-Negotiation did not complete or timed out");
+ DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
goto out;
}
@@ -2924,16 +2940,14 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
*/
links = IXGBE_READ_REG(hw, IXGBE_LINKS);
if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
- ERROR_REPORT1(IXGBE_ERROR_POLLING,
- "Auto-Negotiation did not complete");
+ DEBUGOUT("Auto-Negotiation did not complete\n");
goto out;
}
if (hw->mac.type == ixgbe_mac_82599EB) {
links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
- ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
- "Link partner is not AN enabled");
+ DEBUGOUT("Link partner is not AN enabled\n");
goto out;
}
}
@@ -3012,6 +3026,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -3101,6 +3116,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
u32 i, poll;
+ u16 value;
DEBUGFUNC("ixgbe_disable_pcie_master");
@@ -3108,7 +3124,8 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
/* Exit if master requests are blocked */
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
+ IXGBE_REMOVED(hw->hw_addr))
goto out;
/* Poll for master request bit to clear */
@@ -3136,8 +3153,10 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
poll = ixgbe_pcie_timeout_poll(hw);
for (i = 0; i < poll; i++) {
usec_delay(100);
- if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
- IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto out;
+ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
goto out;
}
@@ -3157,7 +3176,7 @@ out:
* Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr = 0;
u32 swmask = mask;
@@ -3204,7 +3223,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
* Releases the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr;
u32 swmask = mask;
@@ -3258,6 +3277,37 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
}
/**
+ * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: return value, TRUE if the SW/FW semaphore was taken
+ * @reg_val: value we read from AUTOC
+ *
+ * The default case requires no protection, so just do the register read.
+ */
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+ *locked = FALSE;
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous read.
+ *
+ * The default case requires no protection, so just do the register write.
+ */
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+{
+ UNREFERENCED_1PARAMETER(locked);
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_enable_sec_rx_path_generic - Enables the receive data path
* @hw: pointer to hardware structure
*
@@ -3288,7 +3338,10 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
DEBUGFUNC("ixgbe_enable_rx_dma_generic");
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+ if (regval & IXGBE_RXCTRL_RXEN)
+ ixgbe_enable_rx(hw);
+ else
+ ixgbe_disable_rx(hw);
return IXGBE_SUCCESS;
}
@@ -3302,9 +3355,10 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
ixgbe_link_speed speed = 0;
bool link_up = 0;
- u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc_reg = 0;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = IXGBE_SUCCESS;
+ bool locked = FALSE;
DEBUGFUNC("ixgbe_blink_led_start_generic");
@@ -3315,29 +3369,18 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
if (!link_up) {
- /* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on.
- */
- bool got_lock = FALSE;
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val != IXGBE_SUCCESS) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
- got_lock = TRUE;
- }
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
- IXGBE_WRITE_FLUSH(hw);
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ IXGBE_WRITE_FLUSH(hw);
msec_delay(10);
}
@@ -3357,36 +3400,23 @@ out:
**/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
- u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc_reg = 0;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = IXGBE_SUCCESS;
- bool got_lock = FALSE;
+ bool locked = FALSE;
DEBUGFUNC("ixgbe_blink_led_stop_generic");
- /* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val != IXGBE_SUCCESS) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
- got_lock = TRUE;
- }
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_reset_pipeline_82599(hw);
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
@@ -3541,6 +3571,9 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
@@ -3550,6 +3583,8 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ msix_count = 0;
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
/* MSI-X count is zero-based in HW */
@@ -3653,6 +3688,9 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto done;
+
if (!mpsar_lo && !mpsar_hi)
goto done;
@@ -4048,17 +4086,27 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*link_up = FALSE;
}
- if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_10G_82599)
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_1G_82599)
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_100_82599)
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- else
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ default:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
return IXGBE_SUCCESS;
}
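
On X550 and later parts the NON_STD bit overloads two of the standard speed
codes, which is what the new switch decodes. A compact summary of the mapping
implemented above:

	/*
	 * LINKS speed field   NON_STD   reported speed
	 * 10G code            0         10 Gb/s
	 * 10G code            1         2.5 Gb/s
	 * 1G code             -         1 Gb/s
	 * 100M code           0         100 Mb/s
	 * 100M code           1         5 Gb/s
	 */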
@@ -4216,7 +4264,7 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
* ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for VLAN anti-spoofing
- * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
*
**/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
@@ -4310,41 +4358,51 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* @buffer: contains the command to write and where the return status will
* be placed
* @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on the command. Decoding
+ * is not easily understood without a table of commands, so we
+ * leave it to the caller to read back the data in these cases.
*
* Communicates with the manageability block. On success return IXGBE_SUCCESS
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length)
+ u32 length, u32 timeout, bool return_data)
{
- u32 hicr, i, bi;
+ u32 hicr, i, bi, fwsts;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
- u8 buf_len, dword_len;
-
- s32 ret_val = IXGBE_SUCCESS;
+ u16 buf_len;
+ u16 dword_len;
DEBUGFUNC("ixgbe_host_interface_command");
- if (length == 0 || length & 0x3 ||
- length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
- DEBUGOUT("Buffer length failure.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
+ if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Set bit 9 of FWSTS clearing FW reset indication */
+ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
+ IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
/* Check that the host interface is enabled. */
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if ((hicr & IXGBE_HICR_EN) == 0) {
DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if ((length % (sizeof(u32))) != 0) {
+ DEBUGOUT("Buffer length failure, not aligned to dword");
+ return IXGBE_ERR_INVALID_ARGUMENT;
}
- /* Calculate length in DWORDs */
dword_len = length >> 2;
- /*
- * The device driver writes the relevant command block
+ /* The device driver writes the relevant command block
* into the ram area.
*/
for (i = 0; i < dword_len; i++)
@@ -4354,21 +4412,24 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
- for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+ for (i = 0; i < timeout; i++) {
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if (!(hicr & IXGBE_HICR_C))
break;
msec_delay(1);
}
- /* Check command successful completion. */
- if (i == IXGBE_HI_COMMAND_TIMEOUT ||
- (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
- DEBUGOUT("Command has failed with no status valid.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
+ /* Check command completion */
+ if ((timeout != 0 && i == timeout) ||
+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
+ ERROR_REPORT1(IXGBE_ERROR_CAUTION,
+ "Command has failed with no status valid.\n");
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ if (!return_data)
+ return 0;
+
/* Calculate length in DWORDs */
dword_len = hdr_size >> 2;
@@ -4381,25 +4442,23 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* If there is any thing in data position pull it in */
buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
if (buf_len == 0)
- goto out;
+ return 0;
- if (length < (buf_len + hdr_size)) {
+ if (length < buf_len + hdr_size) {
DEBUGOUT("Buffer not large enough for reply message.\n");
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
- goto out;
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
/* Calculate length in DWORDs, add 3 for odd lengths */
dword_len = (buf_len + 3) >> 2;
- /* Pull in the rest of the buffer (bi is where we left off)*/
+ /* Pull in the rest of the buffer (bi is where we left off) */
for (; bi <= dword_len; bi++) {
buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
IXGBE_LE32_TO_CPUS(&buffer[bi]);
}
-out:
- return ret_val;
+ return 0;
}
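
Callers now choose both a timeout and whether the reply payload should be
copied back; the driver-version command below passes return_data = TRUE. A
sketch of the fire-and-forget variant, using a placeholder command id:

	/* Sketch: header-only command, reply payload left in the FLEX_MNG ram. */
	struct ixgbe_hic_hdr cmd = { 0 };

	cmd.cmd = 0;		/* hypothetical command id, illustration only */
	cmd.buf_len = 0;	/* no payload beyond the header */
	status = ixgbe_host_interface_command(hw, (u32 *)&cmd, sizeof(cmd),
					      IXGBE_HI_COMMAND_TIMEOUT, FALSE);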
/**
@@ -4446,7 +4505,9 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
- sizeof(fw_cmd));
+ sizeof(fw_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ TRUE);
if (ret_val != IXGBE_SUCCESS)
continue;
@@ -4533,7 +4594,8 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
**/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
- u32 gcr_ext, hlreg0;
+ u32 gcr_ext, hlreg0, i, poll;
+ u16 value;
/*
* If double reset is not requested then all transactions should
@@ -4550,6 +4612,25 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+ /* Wait for a last completion before clearing buffers */
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(3);
+
+ /*
+ * Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+ poll = ixgbe_pcie_timeout_poll(hw);
+ for (i = 0; i < poll; i++) {
+ usec_delay(100);
+ value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto out;
+ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ goto out;
+ }
+
+out:
/* initiate cleaning flow for buffers in the PCIe transaction layer */
gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
@@ -4582,3 +4663,316 @@ void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
return;
}
+
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
+{
+ u32 pfdtxgswc;
+ u32 rxctrl;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = TRUE;
+ } else {
+ hw->mac.set_lben = FALSE;
+ }
+ }
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+}
+
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
+{
+ u32 pfdtxgswc;
+ u32 rxctrl;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ if (hw->mac.set_lben) {
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = FALSE;
+ }
+ }
+}
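
The set_lben flag lets a disable/enable pair preserve the internal Tx switch
loopback setting across the period Rx is off. The round trip, as a sketch:

	/*
	 * Assuming PFDTXGSWC.LBEN was set before Rx was stopped:
	 *   ixgbe_disable_rx_generic(): LBEN 1 -> 0, hw->mac.set_lben = TRUE
	 *   ixgbe_enable_rx_generic():  LBEN 0 -> 1, hw->mac.set_lben = FALSE
	 */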
+
+/**
+ * ixgbe_mng_present - returns TRUE when management capability is present
+ * @hw: pointer to hardware structure
+ */
+bool ixgbe_mng_present(struct ixgbe_hw *hw)
+{
+ u32 fwsm;
+
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return FALSE;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm &= IXGBE_FWSM_MODE_MASK;
+ return fwsm == IXGBE_FWSM_FW_MODE_PT;
+}
+
+/**
+ * ixgbe_mng_enabled - Is the manageability engine enabled?
+ * @hw: pointer to hardware structure
+ *
+ * Returns TRUE if the manageability engine is enabled.
+ **/
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+{
+ u32 fwsm, manc, factps;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
+ return FALSE;
+
+ manc = IXGBE_READ_REG(hw, IXGBE_MANC);
+ if (!(manc & IXGBE_MANC_RCV_TCO_EN))
+ return FALSE;
+
+ if (hw->mac.type <= ixgbe_mac_X540) {
+ factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ if (factps & IXGBE_FACTPS_MNGCG)
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/**
+ * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ * Set the link speed in the MAC and/or PHY register and restarts link.
+ **/
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ s32 status = IXGBE_SUCCESS;
+ u32 speedcnt = 0;
+ u32 i = 0;
+ bool autoneg, link_up = FALSE;
+
+ DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
+
+ /* Mask off requested but non-supported speeds */
+ status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ speed &= link_speed;
+
+ /* Try each speed one by one, highest priority first. We do this in
+ * software because 10Gb fiber doesn't support speed autonegotiation.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ speedcnt++;
+ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber:
+ ixgbe_set_rate_select_speed(hw,
+ IXGBE_LINK_SPEED_10GB_FULL);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ DEBUGOUT("Unexpected media type.\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (1G->10G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /* Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed,
+ &link_up, FALSE);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ speedcnt++;
+ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber:
+ ixgbe_set_rate_select_speed(hw,
+ IXGBE_LINK_SPEED_1GB_FULL);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects link speed */
+ break;
+ default:
+ DEBUGOUT("Unexpected media type.\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (10G->1G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Configure back to the highest speed we tried
+ * (if there was more than one). We call ourselves back with just the
+ * single highest speed that the user requested.
+ */
+ if (speedcnt > 1)
+ status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+ highest_link_speed,
+ autoneg_wait_to_complete);
+
+out:
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ return status;
+}
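
A caller ORs together every speed it is willing to run and the routine walks
down from the fastest, recursing once at the end to settle on the highest
speed that was attempted. Minimal usage sketch:

	/* Sketch: try 10G first, fall back to 1G if the partner never links. */
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_10GB_FULL |
				 IXGBE_LINK_SPEED_1GB_FULL;

	status = ixgbe_setup_mac_link_multispeed_fiber(hw, speed, TRUE);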
+
+/**
+ * ixgbe_set_soft_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via the soft rate select.
+ */
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ /* one-bit mask, same as setting the bit on */
+ rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ DEBUGOUT("Invalid fixed module speed\n");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to read Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to write Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ /* Set RS1 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to read Rx Rate Select RS1\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to write Rx Rate Select RS1\n");
+ goto out;
+ }
+out:
+ return;
+}
diff --git a/sys/dev/ixgbe/ixgbe_common.h b/sys/dev/ixgbe/ixgbe_common.h
index 7d1ee82..94c7f97 100644
--- a/sys/dev/ixgbe/ixgbe_common.h
+++ b/sys/dev/ixgbe/ixgbe_common.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -41,6 +41,7 @@
IXGBE_WRITE_REG(hw, reg, (u32) value); \
IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
} while (0)
+#define IXGBE_REMOVED(a) (0)
#if !defined(NO_READ_PBA_RAW) || !defined(NO_WRITE_PBA_RAW)
struct ixgbe_pba {
u16 word[2];
@@ -89,7 +90,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
@@ -113,12 +114,16 @@ s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
@@ -155,10 +160,20 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length);
+ u32 length, u32 timeout, bool return_data);
+
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
-
+bool ixgbe_mng_present(struct ixgbe_hw *hw);
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
+
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed);
#endif /* IXGBE_COMMON */
diff --git a/sys/dev/ixgbe/ixgbe_dcb.c b/sys/dev/ixgbe/ixgbe_dcb.c
index 149aad1..6f848e7 100644
--- a/sys/dev/ixgbe/ixgbe_dcb.c
+++ b/sys/dev/ixgbe/ixgbe_dcb.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -394,6 +394,9 @@ s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
break;
@@ -422,6 +425,9 @@ s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
break;
@@ -461,6 +467,9 @@ s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid,
tsa, map);
@@ -500,6 +509,9 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
bwgid, tsa);
@@ -541,6 +553,9 @@ s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
bwgid, tsa,
@@ -576,6 +591,9 @@ s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
break;
@@ -602,6 +620,9 @@ s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL);
break;
@@ -647,6 +668,9 @@ s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ixgbe_dcb_config_82599(hw, dcb_config);
ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed,
@@ -679,6 +703,9 @@ s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
break;
@@ -702,6 +729,9 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
tsa, map);
diff --git a/sys/dev/ixgbe/ixgbe_dcb.h b/sys/dev/ixgbe/ixgbe_dcb.h
index 05e548e..878bbf8 100644
--- a/sys/dev/ixgbe/ixgbe_dcb.h
+++ b/sys/dev/ixgbe/ixgbe_dcb.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -35,7 +35,6 @@
#ifndef _IXGBE_DCB_H_
#define _IXGBE_DCB_H_
-
#include "ixgbe_type.h"
/* DCB defines */
diff --git a/sys/dev/ixgbe/ixgbe_dcb_82598.c b/sys/dev/ixgbe/ixgbe_dcb_82598.c
index aee808f..a5a090d 100644
--- a/sys/dev/ixgbe/ixgbe_dcb_82598.c
+++ b/sys/dev/ixgbe/ixgbe_dcb_82598.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -347,6 +347,8 @@ s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *tsa)
{
+ UNREFERENCED_1PARAMETER(link_speed);
+
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
tsa);
diff --git a/sys/dev/ixgbe/ixgbe_dcb_82598.h b/sys/dev/ixgbe/ixgbe_dcb_82598.h
index 9b634a3..47f19f6 100644
--- a/sys/dev/ixgbe/ixgbe_dcb_82598.h
+++ b/sys/dev/ixgbe/ixgbe_dcb_82598.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixgbe/ixgbe_dcb_82599.c b/sys/dev/ixgbe/ixgbe_dcb_82599.c
index 085ca0c..0232d3c 100644
--- a/sys/dev/ixgbe/ixgbe_dcb_82599.c
+++ b/sys/dev/ixgbe/ixgbe_dcb_82599.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -299,7 +299,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
*/
reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
- if (hw->mac.type == ixgbe_mac_X540)
+ if (hw->mac.type >= ixgbe_mac_X540)
reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
if (pfc_en)
@@ -329,7 +329,14 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
} else {
- reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
}
@@ -573,6 +580,7 @@ s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
u8 *map)
{
+ UNREFERENCED_1PARAMETER(link_speed);
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
map);
diff --git a/sys/dev/ixgbe/ixgbe_dcb_82599.h b/sys/dev/ixgbe/ixgbe_dcb_82599.h
index 91428c0..7702dc9 100644
--- a/sys/dev/ixgbe/ixgbe_dcb_82599.h
+++ b/sys/dev/ixgbe/ixgbe_dcb_82599.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixgbe/ixgbe_mbx.c b/sys/dev/ixgbe/ixgbe_mbx.c
index f9c1efa..067bba0 100644
--- a/sys/dev/ixgbe/ixgbe_mbx.c
+++ b/sys/dev/ixgbe/ixgbe_mbx.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2012, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -77,10 +77,11 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
DEBUGFUNC("ixgbe_write_mbx");
- if (size > mbx->size)
+ if (size > mbx->size) {
ret_val = IXGBE_ERR_MBX;
-
- else if (mbx->ops.write)
+ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+ "Invalid mailbox message size %d", size);
+ } else if (mbx->ops.write)
ret_val = mbx->ops.write(hw, msg, size, mbx_id);
return ret_val;
@@ -170,6 +171,10 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
usec_delay(mbx->usec_delay);
}
+ if (countdown == 0)
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Polling for VF%d mailbox message timedout", mbx_id);
+
out:
return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
@@ -198,6 +203,10 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
usec_delay(mbx->usec_delay);
}
+ if (countdown == 0)
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Polling for VF%d mailbox ack timedout", mbx_id);
+
out:
return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
@@ -596,6 +605,9 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_82599EB:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
break;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
case ixgbe_mac_X540:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
@@ -633,6 +645,10 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
ret_val = IXGBE_SUCCESS;
+ else
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to obtain mailbox lock for VF%d", vf_number);
+
return ret_val;
}
@@ -727,6 +743,9 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a &&
hw->mac.type != ixgbe_mac_X540)
return;
diff --git a/sys/dev/ixgbe/ixgbe_mbx.h b/sys/dev/ixgbe/ixgbe_mbx.h
index adcba9e..2cffb8a 100644
--- a/sys/dev/ixgbe/ixgbe_mbx.h
+++ b/sys/dev/ixgbe/ixgbe_mbx.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c
index cb237fd..5ac719c 100644
--- a/sys/dev/ixgbe/ixgbe_phy.c
+++ b/sys/dev/ixgbe/ixgbe_phy.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -46,11 +46,193 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
-static bool ixgbe_get_i2c_data(u32 *i2cctl);
+static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
/**
+ * ixgbe_out_i2c_byte_ack - Send I2C byte with ack
+ * @hw: pointer to the hardware structure
+ * @byte: byte to send
+ *
+ * Returns an error code on error.
+ */
+static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+{
+ s32 status;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte);
+ if (status)
+ return status;
+ return ixgbe_get_i2c_ack(hw);
+}
+
+/**
+ * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
+ * @hw: pointer to the hardware structure
+ * @byte: pointer to a u8 to receive the byte
+ *
+ * Returns an error code on error.
+ */
+static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+{
+ s32 status;
+
+ status = ixgbe_clock_in_i2c_byte(hw, byte);
+ if (status)
+ return status;
+ /* ACK */
+ return ixgbe_clock_out_i2c_bit(hw, FALSE);
+}
+
+/**
+ * ixgbe_ones_comp_byte_add - Perform one's complement addition
+ * @add1: addend 1
+ * @add2: addend 2
+ *
+ * Returns one's complement 8-bit sum.
+ */
+static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
+{
+ u16 sum = add1 + add2;
+
+ sum = (sum & 0xFF) + (sum >> 8);
+ return sum & 0xFF;
+}
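
One's complement addition folds the carry back into the low byte; the combined
I2C routines below accumulate the frame bytes this way and then invert the
result (csum = ~csum) before putting it on the wire. Worked example:

	/*
	 * ixgbe_ones_comp_byte_add(0xFF, 0x02):
	 *   0xFF + 0x02                    = 0x101
	 *   (0x101 & 0xFF) + (0x101 >> 8)  = 0x01 + 0x01 = 0x02
	 */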
+
+/**
+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int max_retry = 10;
+ int retry = 0;
+ u8 csum_byte;
+ u8 high_bits;
+ u8 low_bits;
+ u8 reg_high;
+ u8 csum;
+
+ reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
+ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+ csum = ~csum;
+ do {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+ goto fail;
+ /* Write bits 14:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+ goto fail;
+ /* Write bits 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+ goto fail;
+ /* Write csum */
+ if (ixgbe_out_i2c_byte_ack(hw, csum))
+ goto fail;
+ /* Re-start condition */
+ ixgbe_i2c_start(hw);
+ /* Device Address and read indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
+ goto fail;
+ /* Get upper bits */
+ if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
+ goto fail;
+ /* Get low bits */
+ if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
+ goto fail;
+ /* Get csum */
+ if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
+ goto fail;
+ /* NACK */
+ if (ixgbe_clock_out_i2c_bit(hw, FALSE))
+ goto fail;
+ ixgbe_i2c_stop(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ *val = (high_bits << 8) | low_bits;
+ return 0;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte read combined error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte read combined error.\n");
+ } while (retry < max_retry);
+
+ return IXGBE_ERR_I2C;
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ */
+static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ int max_retry = 1;
+ int retry = 0;
+ u8 reg_high;
+ u8 csum;
+
+ reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */
+ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+ csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
+ csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
+ csum = ~csum;
+ do {
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+ goto fail;
+ /* Write bits 14:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+ goto fail;
+ /* Write bits 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+ goto fail;
+ /* Write data 15:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
+ goto fail;
+ /* Write data 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
+ goto fail;
+ /* Write csum */
+ if (ixgbe_out_i2c_byte_ack(hw, csum))
+ goto fail;
+ ixgbe_i2c_stop(hw);
+ return 0;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte write combined error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte write combined error.\n");
+ } while (retry < max_retry);
+
+ return IXGBE_ERR_I2C;
+}
+
+/**
* ixgbe_init_phy_ops_generic - Inits PHY function ptrs
* @hw: pointer to the hardware structure
*
@@ -63,25 +245,27 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_phy_ops_generic");
/* PHY */
- phy->ops.identify = &ixgbe_identify_phy_generic;
- phy->ops.reset = &ixgbe_reset_phy_generic;
- phy->ops.read_reg = &ixgbe_read_phy_reg_generic;
- phy->ops.write_reg = &ixgbe_write_phy_reg_generic;
- phy->ops.read_reg_mdi = &ixgbe_read_phy_reg_mdi;
- phy->ops.write_reg_mdi = &ixgbe_write_phy_reg_mdi;
- phy->ops.setup_link = &ixgbe_setup_phy_link_generic;
- phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
+ phy->ops.identify = ixgbe_identify_phy_generic;
+ phy->ops.reset = ixgbe_reset_phy_generic;
+ phy->ops.read_reg = ixgbe_read_phy_reg_generic;
+ phy->ops.write_reg = ixgbe_write_phy_reg_generic;
+ phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi;
+ phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi;
+ phy->ops.setup_link = ixgbe_setup_phy_link_generic;
+ phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic;
phy->ops.check_link = NULL;
phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
- phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
- phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
- phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic;
- phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
- phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
- phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
- phy->ops.identify_sfp = &ixgbe_identify_module_generic;
+ phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic;
+ phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic;
+ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic;
+ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic;
+ phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic;
+ phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear;
+ phy->ops.identify_sfp = ixgbe_identify_module_generic;
phy->sfp_type = ixgbe_sfp_type_unknown;
- phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
+ phy->ops.read_i2c_combined = ixgbe_read_i2c_combined_generic;
+ phy->ops.write_i2c_combined = ixgbe_write_i2c_combined_generic;
+ phy->ops.check_overtemp = ixgbe_tn_check_overtemp;
return IXGBE_SUCCESS;
}
@@ -99,6 +283,13 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_identify_phy_generic");
+ if (!hw->phy.phy_semaphore_mask) {
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+ }
+
if (hw->phy.type == ixgbe_phy_unknown) {
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
if (ixgbe_validate_phy_addr(hw, phy_addr)) {
@@ -126,11 +317,13 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
break;
}
}
- /* clear value if nothing found */
+
+ /* Certain media types do not have a PHY, so an address will not
+ * be found and the code will take this path. The caller has to
+ * decide if this is an error or not.
+ */
if (status != IXGBE_SUCCESS) {
hw->phy.addr = 0;
- ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
- "Could not identify valid PHY address");
}
} else {
status = IXGBE_SUCCESS;
@@ -140,6 +333,35 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability. For MAC's that don't
+ * have this bit just return faluse since the link can not be blocked
+ * via this method.
+ **/
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
+{
+ u32 mmngc;
+
+ DEBUGFUNC("ixgbe_check_reset_blocked");
+
+ /* If we don't have this bit, it can't be blocking */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return FALSE;
+
+ mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
+ if (mmngc & IXGBE_MMNGC_MNG_VETO) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
+ "MNG_VETO bit detected.\n");
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/**
* ixgbe_validate_phy_addr - Determines phy address is valid
* @hw: pointer to hardware structure
*
@@ -204,6 +426,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
+ case X550_PHY_ID:
case X540_PHY_ID:
phy_type = ixgbe_phy_aq;
break;
@@ -213,6 +436,9 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case ATH_PHY_ID:
phy_type = ixgbe_phy_nl;
break;
+ case X557_PHY_ID:
+ phy_type = ixgbe_phy_x550em_ext_t;
+ break;
default:
phy_type = ixgbe_phy_unknown;
break;
@@ -245,6 +471,10 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
(IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
goto out;
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
/*
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
@@ -368,15 +598,10 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
s32 status;
- u16 gssr;
+ u32 gssr = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_read_phy_reg_generic");
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- gssr = IXGBE_GSSR_PHY1_SM;
- else
- gssr = IXGBE_GSSR_PHY0_SM;
-
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
phy_data);
@@ -474,15 +699,10 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
s32 status;
- u16 gssr;
+ u32 gssr = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_write_phy_reg_generic");
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- gssr = IXGBE_GSSR_PHY1_SM;
- else
- gssr = IXGBE_GSSR_PHY0_SM;
-
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
phy_data);
@@ -495,16 +715,14 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
}
/**
- * ixgbe_setup_phy_link_generic - Set and restart autoneg
+ * ixgbe_setup_phy_link_generic - Set and restart auto-neg
* @hw: pointer to hardware structure
*
- * Restart autonegotiation and PHY and waits for completion.
+ * Restart auto-negotiation and PHY and waits for completion.
**/
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
- u32 time_out;
- u32 max_time_out = 10;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = FALSE;
ixgbe_link_speed speed;
@@ -528,6 +746,44 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
autoneg_reg);
}
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (speed & IXGBE_LINK_SPEED_5GB_FULL) {
+ /* Set or unset auto-negotiation 5G advertisement */
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_5GB_FULL)
+ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ /* Set or unset auto-negotiation 2.5G advertisement */
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_2_5GB_FULL)
+ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+ }
+
if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
/* Set or unset auto-negotiation 1G advertisement */
hw->phy.ops.read_reg(hw,
@@ -561,7 +817,11 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
autoneg_reg);
}
- /* Restart PHY autonegotiation and wait for completion */
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
+ /* Restart PHY auto-negotiation. */
hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
@@ -570,25 +830,6 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
- /* Wait for autonegotiation to finish */
- for (time_out = 0; time_out < max_time_out; time_out++) {
- usec_delay(10);
- /* Restart PHY autonegotiation and wait for completion */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
- if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
- break;
- }
-
- if (time_out == max_time_out) {
- status = IXGBE_ERR_LINK_SETUP;
- ERROR_REPORT1(IXGBE_ERROR_POLLING,
- "PHY autonegotiation time out");
- }
-
return status;
}
@@ -614,6 +855,12 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed & IXGBE_LINK_SPEED_5GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
@@ -632,13 +879,14 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* @speed: pointer to link speed
* @autoneg: boolean auto-negotiation value
*
- * Determines the link capabilities by reading the AUTOC register.
+ * Determines the supported link capabilities by reading the PHY auto
+ * negotiation register.
**/
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
- s32 status = IXGBE_ERR_LINK_SETUP;
+ s32 status;
u16 speed_ability;
DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic");
@@ -659,6 +907,15 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_100_FULL;
}
+ /* Internal PHY does not support 100 Mbps */
+ if (hw->mac.type == ixgbe_mac_X550EM_x)
+ *speed &= ~IXGBE_LINK_SPEED_100_FULL;
+
+ if (hw->mac.type == ixgbe_mac_X550) {
+ *speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ *speed |= IXGBE_LINK_SPEED_5GB_FULL;
+ }
+
return status;
}
@@ -712,16 +969,14 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
}
/**
- * ixgbe_setup_phy_link_tnx - Set and restart autoneg
+ * ixgbe_setup_phy_link_tnx - Set and restart auto-neg
* @hw: pointer to hardware structure
*
- * Restart autonegotiation and PHY and waits for completion.
+ * Restart auto-negotiation and PHY and waits for completion.
**/
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
- u32 time_out;
- u32 max_time_out = 10;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = FALSE;
ixgbe_link_speed speed;
@@ -775,7 +1030,11 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
autoneg_reg);
}
- /* Restart PHY autonegotiation and wait for completion */
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
+ /* Restart PHY auto-negotiation. */
hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
@@ -784,24 +1043,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
- /* Wait for autonegotiation to finish */
- for (time_out = 0; time_out < max_time_out; time_out++) {
- usec_delay(10);
- /* Restart PHY autonegotiation and wait for completion */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
- if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
- break;
- }
-
- if (time_out == max_time_out) {
- status = IXGBE_ERR_LINK_SETUP;
- DEBUGOUT("ixgbe_setup_phy_link_tnx: time out");
- }
-
return status;
}
@@ -813,7 +1054,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version)
{
- s32 status = IXGBE_SUCCESS;
+ s32 status;
DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx");
@@ -832,7 +1073,7 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version)
{
- s32 status = IXGBE_SUCCESS;
+ s32 status;
DEBUGFUNC("ixgbe_get_phy_firmware_version_generic");
@@ -858,6 +1099,10 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_reset_phy_nl");
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
@@ -972,6 +1217,9 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
status = ixgbe_identify_sfp_module_generic(hw);
break;
+ case ixgbe_media_type_fiber_qsfp:
+ status = ixgbe_identify_qsfp_module_generic(hw);
+ break;
default:
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
@@ -1009,6 +1257,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto out;
}
+ /* LAN ID is needed for I2C access */
+ hw->mac.ops.set_lan_id(hw);
+
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_IDENTIFIER,
&identifier);
@@ -1016,9 +1267,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (status != IXGBE_SUCCESS)
goto err_read_i2c_eeprom;
- /* LAN ID is needed for sfp_type determination */
- hw->mac.ops.set_lan_id(hw);
-
if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1068,7 +1316,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type = ixgbe_sfp_type_lr;
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
+ } else {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
@@ -1261,6 +1509,266 @@ err_read_i2c_eeprom:
return IXGBE_ERR_SFP_NOT_PRESENT;
}
+/**
+ * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current SFP.
+ */
+s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
+
+ DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic");
+
+ hw->phy.ops.identify_sfp(hw);
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return physical_layer;
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_qsfp_passive_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_qsfp_active_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+ break;
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+ break;
+ case ixgbe_phy_qsfp_intel:
+ case ixgbe_phy_qsfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ break;
+ }
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the QSFP module and assigns appropriate PHY type
+ **/
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u16 enforce_sfp = 0;
+ u8 connector = 0;
+ u8 cable_length = 0;
+ u8 device_tech = 0;
+ bool active_cable = FALSE;
+
+ DEBUGFUNC("ixgbe_identify_qsfp_module_generic");
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+ &identifier);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ hw->phy.id = identifier;
+
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
+ &comp_codes_10g);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
+ &comp_codes_1g);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
+ hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
+ } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
+ } else {
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
+ active_cable = TRUE;
+
+ if (!active_cable) {
+ /* check for active DA cables that pre-date
+ * SFF-8436 v3.6 */
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CONNECTOR,
+ &connector);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CABLE_LENGTH,
+ &cable_length);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_DEVICE_TECH,
+ &device_tech);
+
+ if ((connector ==
+ IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
+ (cable_length > 0) &&
+ ((device_tech >> 4) ==
+ IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
+ active_cable = TRUE;
+ }
+
+ if (active_cable) {
+ hw->phy.type = ixgbe_phy_qsfp_active_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ /* unsupported module type */
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = TRUE;
+
+ /* Determine if the QSFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = FALSE;
+ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = TRUE;
+
+ /* Determine PHY vendor for optical modules */
+ if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ vendor_oui =
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+ if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
+ hw->phy.type = ixgbe_phy_qsfp_intel;
+ else
+ hw->phy.type = ixgbe_phy_qsfp_unknown;
+
+ ixgbe_get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+ /* Make sure we're a supported PHY type */
+ if (hw->phy.type == ixgbe_phy_qsfp_intel) {
+ status = IXGBE_SUCCESS;
+ } else {
+ if (hw->allow_unsupported_sfp == TRUE) {
+ EWARN(hw, "WARNING: Intel (R) Network "
+ "Connections are quality tested "
+ "using Intel (R) Ethernet Optics."
+ " Using untested modules is not "
+ "supported and may cause unstable"
+ " operation or damage to the "
+ "module or the adapter. Intel "
+ "Corporation is not responsible "
+ "for any harm caused by using "
+ "untested modules.\n", status);
+ status = IXGBE_SUCCESS;
+ } else {
+ DEBUGOUT("QSFP module not supported\n");
+ hw->phy.type =
+ ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ status = IXGBE_SUCCESS;
+ }
+ }
+
+out:
+ return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+}
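One subtlety above: active DA cables built before SFF-8436 v3.6 do not set the active-cable compliance bit, so the code falls back to inferring "active" from three EEPROM fields. A compilable sketch of just that predicate, with illustrative constants mirroring IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE and IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative copies of the SFF values used above. */
#define QSFP_CONNECTOR_NOT_SEPARABLE 0x23
#define QSFP_TX_850NM_VCSEL          0x0

/* True when a QSFP+ EEPROM describes an active DA cable that predates
 * SFF-8436 v3.6 and therefore lacks the active-cable compliance bit. */
bool qsfp_is_legacy_active_da(uint8_t connector, uint8_t cable_length,
    uint8_t device_tech)
{
    return connector == QSFP_CONNECTOR_NOT_SEPARABLE &&
        cable_length > 0 &&
        (device_tech >> 4) == QSFP_TX_850NM_VCSEL;
}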
/**
@@ -1409,6 +1917,21 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
}
/**
+ * ixgbe_is_sfp_probe - Returns TRUE if SFP is being detected
+ * @hw: pointer to hardware structure
+ * @offset: eeprom offset to be read
+ * @addr: I2C address to be read
+ */
+static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
+{
+ if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
+ offset == IXGBE_SFF_IDENTIFIER &&
+ hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return TRUE;
+ return FALSE;
+}
+
+/**
* ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
@@ -1420,26 +1943,21 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
- s32 status = IXGBE_SUCCESS;
+ s32 status;
u32 max_retry = 10;
u32 retry = 0;
- u16 swfw_mask = 0;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
bool nack = 1;
*data = 0;
DEBUGFUNC("ixgbe_read_i2c_byte_generic");
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- swfw_mask = IXGBE_GSSR_PHY1_SM;
- else
- swfw_mask = IXGBE_GSSR_PHY0_SM;
+ if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
+ max_retry = IXGBE_SFP_DETECT_RETRIES;
do {
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
- != IXGBE_SUCCESS) {
- status = IXGBE_ERR_SWFW_SYNC;
- goto read_byte_out;
- }
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
ixgbe_i2c_start(hw);
@@ -1480,7 +1998,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
goto fail;
ixgbe_i2c_stop(hw);
- break;
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return IXGBE_SUCCESS;
fail:
ixgbe_i2c_bus_clear(hw);
@@ -1494,9 +2013,6 @@ fail:
} while (retry < max_retry);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
-read_byte_out:
return status;
}
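The rework above also changes lock discipline: the SWFW semaphore is taken per attempt, and the success path releases it and returns directly instead of breaking out to a shared exit. A self-contained sketch of that per-attempt ownership pattern, with hypothetical bus_lock/try_transfer/bus_clear helpers standing in for the driver ops:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the SWFW semaphore ops and one transfer. */
static int attempts_left = 3;
static bool bus_lock(void) { return true; }
static void bus_unlock(void) { }
static void bus_clear(void) { }
static bool try_transfer(void) { return --attempts_left == 0; }

static int read_with_retries(int max_retry)
{
    for (int retry = 0; retry < max_retry; retry++) {
        if (!bus_lock())
            return -1;              /* cf. IXGBE_ERR_SWFW_SYNC */
        if (try_transfer()) {
            bus_unlock();           /* success: release and return */
            return 0;
        }
        bus_clear();                /* recover the bus, then retry */
        bus_unlock();
    }
    return -2;                      /* retries exhausted */
}

int main(void)
{
    printf("status=%d\n", read_with_retries(10));  /* succeeds on try 3 */
    return 0;
}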
@@ -1515,15 +2031,10 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
s32 status = IXGBE_SUCCESS;
u32 max_retry = 1;
u32 retry = 0;
- u16 swfw_mask = 0;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_write_i2c_byte_generic");
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- swfw_mask = IXGBE_GSSR_PHY1_SM;
- else
- swfw_mask = IXGBE_GSSR_PHY0_SM;
-
if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
status = IXGBE_ERR_SWFW_SYNC;
goto write_byte_out;
@@ -1557,7 +2068,8 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
goto fail;
ixgbe_i2c_stop(hw);
- break;
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return IXGBE_SUCCESS;
fail:
ixgbe_i2c_bus_clear(hw);
@@ -1579,13 +2091,16 @@ write_byte_out:
* @hw: pointer to hardware structure
*
* Sets I2C start condition (High -> Low on SDA while SCL is High)
+ * Set bit-bang mode on X550 hardware.
**/
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
DEBUGFUNC("ixgbe_i2c_start");
+ i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw);
+
/* Start condition must begin with data and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 1);
ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -1610,10 +2125,15 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ * Disables bit-bang mode and negates data output enable on X550
+ * hardware.
**/
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
+ u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw);
DEBUGFUNC("ixgbe_i2c_stop");
@@ -1628,6 +2148,13 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
/* bus free time between stop and start (4.7us)*/
usec_delay(IXGBE_I2C_T_BUF);
+
+ if (bb_en_bit || data_oe_bit || clk_oe_bit) {
+ i2cctl &= ~bb_en_bit;
+ i2cctl |= data_oe_bit | clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
}
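On X550 the same I2CCTL register also carries a bit-bang enable and active-low output-enable bits, so the stop path must leave bit-bang mode and release both lines; on 82599/X540 those macros evaluate to 0 and the extra write is skipped. A small sketch of that conditional tail (i2cctl_stop is a hypothetical name):

#include <stdint.h>

/* Hypothetical helper: on X550 the bit arguments are nonzero; on
 * 82599/X540 all three are 0 and the value is returned unchanged. */
uint32_t i2cctl_stop(uint32_t i2cctl, uint32_t bb_en,
    uint32_t data_oe_n, uint32_t clk_oe_n)
{
    if (bb_en | data_oe_n | clk_oe_n) {
        i2cctl &= ~bb_en;               /* leave bit-bang mode */
        i2cctl |= data_oe_n | clk_oe_n; /* negate both output enables */
    }
    return i2cctl;                      /* caller writes this back */
}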
/**
@@ -1644,6 +2171,7 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
DEBUGFUNC("ixgbe_clock_in_i2c_byte");
+ *data = 0;
for (i = 7; i >= 0; i--) {
ixgbe_clock_in_i2c_bit(hw, &bit);
*data |= bit << i;
@@ -1664,7 +2192,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
s32 status = IXGBE_SUCCESS;
s32 i;
u32 i2cctl;
- bool bit = 0;
+ bool bit;
DEBUGFUNC("ixgbe_clock_out_i2c_byte");
@@ -1677,9 +2205,10 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
}
/* Release SDA line (set high) */
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- i2cctl |= IXGBE_I2C_DATA_OUT;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
return status;
@@ -1693,34 +2222,39 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
**/
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
s32 status = IXGBE_SUCCESS;
u32 i = 0;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
u32 timeout = 10;
bool ack = 1;
DEBUGFUNC("ixgbe_get_i2c_ack");
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
ixgbe_raise_i2c_clk(hw, &i2cctl);
-
/* Minimum high period of clock is 4us */
usec_delay(IXGBE_I2C_T_HIGH);
/* Poll for ACK. Note that ACK in I2C spec is
* transition from 1 to 0 */
for (i = 0; i < timeout; i++) {
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- ack = ixgbe_get_i2c_data(&i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ ack = ixgbe_get_i2c_data(hw, &i2cctl);
usec_delay(1);
- if (ack == 0)
+ if (!ack)
break;
}
- if (ack == 1) {
- ERROR_REPORT1(IXGBE_ERROR_POLLING,
- "I2C ack was not received.\n");
+ if (ack) {
+ DEBUGOUT("I2C ack was not received.\n");
status = IXGBE_ERR_I2C;
}
@@ -1741,17 +2275,24 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
**/
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
DEBUGFUNC("ixgbe_clock_in_i2c_bit");
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
usec_delay(IXGBE_I2C_T_HIGH);
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- *data = ixgbe_get_i2c_data(&i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ *data = ixgbe_get_i2c_data(hw, &i2cctl);
ixgbe_lower_i2c_clk(hw, &i2cctl);
@@ -1771,7 +2312,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
s32 status;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
DEBUGFUNC("ixgbe_clock_out_i2c_bit");
@@ -1796,31 +2337,39 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
return status;
}
+
/**
* ixgbe_raise_i2c_clk - Raises the I2C SCL clock
* @hw: pointer to hardware structure
* @i2cctl: Current value of I2CCTL register
*
* Raises the I2C clock line '0'->'1'
+ * Negates the I2C clock output enable on X550 hardware.
**/
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
+ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
u32 i = 0;
u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
u32 i2cctl_r = 0;
DEBUGFUNC("ixgbe_raise_i2c_clk");
+ if (clk_oe_bit) {
+ *i2cctl |= clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ }
+
for (i = 0; i < timeout; i++) {
- *i2cctl |= IXGBE_I2C_CLK_OUT;
+ *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
usec_delay(IXGBE_I2C_T_RISE);
- i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- if (i2cctl_r & IXGBE_I2C_CLK_IN)
+ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
break;
}
}
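Raising SCL has to honor clock stretching: the master drives the line high, then polls the input bit until every slave has released it. A sketch under those assumptions, with hypothetical rd/wr accessors in place of IXGBE_READ_REG/IXGBE_WRITE_REG:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical accessors standing in for the register macros. */
static uint32_t i2cctl_shadow;
static uint32_t rd(void) { return i2cctl_shadow; }
static void wr(uint32_t v) { i2cctl_shadow = v; }
static void delay_ns(unsigned ns) { (void)ns; /* busy-wait in real code */ }

/* Drive SCL high, then wait out slave clock stretching: the pin only
 * reads back high once every device on the bus has released it. */
bool raise_scl(uint32_t clk_out_bit, uint32_t clk_in_bit, unsigned timeout)
{
    for (unsigned i = 0; i < timeout; i++) {
        wr(rd() | clk_out_bit);
        delay_ns(1000);                 /* SCL rise time */
        if (rd() & clk_in_bit)
            return true;                /* slave released SCL */
    }
    return false;                       /* stretched past the timeout */
}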
@@ -1831,15 +2380,16 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* @i2cctl: Current value of I2CCTL register
*
* Lowers the I2C clock line '1'->'0'
+ * Asserts the I2C clock output enable on X550 hardware.
**/
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
-
DEBUGFUNC("ixgbe_lower_i2c_clk");
- *i2cctl &= ~IXGBE_I2C_CLK_OUT;
+ *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw));
+ *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL fall time (300ns) */
@@ -1853,27 +2403,38 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* @data: I2C data value (0 or 1) to set
*
* Sets the I2C data bit
+ * Asserts the I2C data output enable on X550 hardware.
**/
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
s32 status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_set_i2c_data");
if (data)
- *i2cctl |= IXGBE_I2C_DATA_OUT;
+ *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
else
- *i2cctl &= ~IXGBE_I2C_DATA_OUT;
+ *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw));
+ *i2cctl &= ~data_oe_bit;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
+ if (!data) /* Can't verify data in this case */
+ return IXGBE_SUCCESS;
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
/* Verify data was set correctly */
- *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- if (data != ixgbe_get_i2c_data(i2cctl)) {
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
status = IXGBE_ERR_I2C;
ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
"Error - I2C data was not set to %X.\n",
@@ -1889,14 +2450,23 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
* @i2cctl: Current value of I2CCTL register
*
* Returns the I2C data bit value
+ * Negates the I2C data output enable on X550 hardware.
**/
-static bool ixgbe_get_i2c_data(u32 *i2cctl)
+static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
bool data;
DEBUGFUNC("ixgbe_get_i2c_data");
- if (*i2cctl & IXGBE_I2C_DATA_IN)
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(IXGBE_I2C_T_FALL);
+ }
+
+ if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
data = 1;
else
data = 0;
@@ -1913,12 +2483,13 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl)
**/
void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i2cctl;
u32 i;
DEBUGFUNC("ixgbe_i2c_bus_clear");
ixgbe_i2c_start(hw);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
ixgbe_set_i2c_data(hw, &i2cctl, 1);
@@ -1968,3 +2539,33 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
out:
return status;
}
+
+/**
+ * ixgbe_set_copper_phy_power - Control power for copper phy
+ * @hw: pointer to hardware structure
+ * @on: TRUE for on, FALSE for off
+ */
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+{
+ u32 status;
+ u16 reg;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ if (on) {
+ reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ } else {
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+ reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ }
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ return status;
+}
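The power control above is a plain read-modify-write of the vendor-specific MDIO control word, with one guard: the PHY is never powered down while manageability firmware blocks resets. A minimal sketch of the same flow, with stubbed MDIO accessors (mdio_read/mdio_write/reset_blocked are hypothetical):

#include <stdbool.h>
#include <stdint.h>

#define LOW_POWER_BIT 0x0800 /* cf. IXGBE_MDIO_PHY_SET_LOW_POWER_MODE */

/* Hypothetical MDIO accessors and manageability-firmware check. */
static uint16_t phy_reg;
static uint16_t mdio_read(void) { return phy_reg; }
static void mdio_write(uint16_t v) { phy_reg = v; }
static bool reset_blocked(void) { return false; }

/* Power the copper PHY up or down via the low-power-mode bit; never
 * power down while manageability firmware holds the PHY. */
void set_phy_power(bool on)
{
    uint16_t reg = mdio_read();

    if (on) {
        reg &= ~LOW_POWER_BIT;
    } else {
        if (reset_blocked())
            return;
        reg |= LOW_POWER_BIT;
    }
    mdio_write(reg);
}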
diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h
index dd57d26..021d5f0 100644
--- a/sys/dev/ixgbe/ixgbe_phy.h
+++ b/sys/dev/ixgbe/ixgbe_phy.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -54,6 +54,15 @@
#define IXGBE_SFF_SFF_8472_COMP 0x5E
#define IXGBE_SFF_SFF_8472_OSCB 0x6E
#define IXGBE_SFF_SFF_8472_ESCB 0x76
+#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7
+#define IXGBE_SFF_QSFP_CONNECTOR 0x82
+#define IXGBE_SFF_QSFP_10GBE_COMP 0x83
+#define IXGBE_SFF_QSFP_1GBE_COMP 0x86
+#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92
+#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@@ -67,6 +76,11 @@
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
+#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
+#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -74,6 +88,11 @@
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+#define IXGBE_CS4227 0xBE /* CS4227 address */
+#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */
+#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
+#define IXGBE_CS4227_EDC_MODE_SR 0x0004
+
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
#define IXGBE_TAF_ASM_PAUSE 0x800
@@ -101,16 +120,15 @@
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
+#ifndef IXGBE_SFP_DETECT_RETRIES
+#define IXGBE_SFP_DETECT_RETRIES 10
+#endif /* IXGBE_SFP_DETECT_RETRIES */
+
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
/* SFP+ SFF-8472 Compliance */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-#define IXGBE_SFF_SFF_8472_REV_9_3 0x01
-#define IXGBE_SFF_SFF_8472_REV_9_5 0x02
-#define IXGBE_SFF_SFF_8472_REV_10_2 0x03
-#define IXGBE_SFF_SFF_8472_REV_10_4 0x04
-#define IXGBE_SFF_SFF_8472_REV_11_0 0x05
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
@@ -133,6 +151,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
@@ -145,8 +164,11 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index 49f5bc0..24ba046 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -75,6 +75,8 @@
#include "ixgbe_osdep.h"
+/* Override this by setting IOMEM in your ixgbe_osdep.h header */
+#ifndef IOMEM
+#define IOMEM
+#endif
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -100,16 +102,21 @@
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
+#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159
+#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D
+#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
+#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
@@ -121,6 +128,21 @@
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X540_VF_HV 0x1530
#define IXGBE_DEV_ID_X540_BYPASS 0x155C
+#define IXGBE_DEV_ID_X540T1 0x1560
+#define IXGBE_DEV_ID_X550T 0x1563
+/* Placeholder value, pending official value. */
+#define IXGBE_DEV_ID_X550EM_A_KR 0xABCD
+#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
+#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
+#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
+#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
+#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
+#define IXGBE_DEV_ID_X550_VF_HV 0x1564
+#define IXGBE_DEV_ID_X550_VF 0x1565
+#define IXGBE_DEV_ID_X550EM_A_VF 0x15B3
+#define IXGBE_DEV_ID_X550EM_A_VF_HV 0x15B4
+#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -128,7 +150,10 @@
#define IXGBE_CTRL_EXT 0x00018
#define IXGBE_ESDP 0x00020
#define IXGBE_EODSDP 0x00028
-#define IXGBE_I2CCTL 0x00028
+#define IXGBE_I2CCTL_82599 0x00028
+#define IXGBE_I2CCTL_X550 0x15F5C
+#define IXGBE_I2CCTL_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ IXGBE_I2CCTL_X550 : IXGBE_I2CCTL_82599))
#define IXGBE_PHY_GPIO 0x00028
#define IXGBE_MAC_GPIO 0x00030
#define IXGBE_PHYINT_STATUS0 0x00100
@@ -163,10 +188,20 @@
#define IXGBE_VPDDIAG1 0x10208
/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN 0x00000001
-#define IXGBE_I2C_CLK_OUT 0x00000002
-#define IXGBE_I2C_DATA_IN 0x00000004
-#define IXGBE_I2C_DATA_OUT 0x00000008
+#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00004000 : 0x00000001)
+#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00000200 : 0x00000002)
+#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00001000 : 0x00000004)
+#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00000400 : 0x00000008)
+#define IXGBE_I2C_BB_EN_BY_MAC(hw) ((hw)->mac.type >= ixgbe_mac_X550 ? \
+ 0x00000100 : 0)
+#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw) ((hw)->mac.type >= ixgbe_mac_X550 ? \
+ 0x00000800 : 0)
+#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw) ((hw)->mac.type >= ixgbe_mac_X550 ? \
+ 0x00002000 : 0)
#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
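This is the pattern used throughout the X550 support: both the I2CCTL offset and its bit layout changed, so every access goes through a *_BY_MAC(hw) macro keyed on mac.type, and bits that do not exist before X550 expand to 0 so guarded writes vanish on older MACs. A runnable sketch of the selection (struct hw and the enum are simplified stand-ins):

#include <stdio.h>

/* Simplified stand-ins for struct ixgbe_hw and the mac-type ordering. */
enum mac_type { MAC_82599, MAC_X540, MAC_X550 };
struct hw { enum mac_type mac_type; };

#define I2CCTL_OFF(h) ((h)->mac_type >= MAC_X550 ? 0x15F5C : 0x00028)
#define DATA_OE_N(h)  ((h)->mac_type >= MAC_X550 ? 0x00000800 : 0)

int main(void)
{
    struct hw h82599 = { MAC_82599 }, hx550 = { MAC_X550 };

    /* Pre-X550 the OE bit is 0, so code guarded by it is skipped. */
    printf("82599: reg=0x%05X oe=0x%08X\n",
        I2CCTL_OFF(&h82599), DATA_OE_N(&h82599));
    printf("X550:  reg=0x%05X oe=0x%08X\n",
        I2CCTL_OFF(&hx550), DATA_OE_N(&hx550));
    return 0;
}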
@@ -301,6 +336,8 @@
#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_PFFLPL 0x050B0
+#define IXGBE_PFFLPH 0x050B4
#define IXGBE_VT_CTL 0x051B0
#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
/* 64 Mailboxes, 16 DW each */
@@ -317,6 +354,12 @@
#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_LVMMC_RX 0x2FA8
+#define IXGBE_LVMMC_TX 0x8108
+#define IXGBE_LMVM_RX 0x2FA4
+#define IXGBE_LMVM_TX 0x8124
+#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
+#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
#define IXGBE_RXFECCERR0 0x051B8
#define IXGBE_LLITHRESH 0x0EC90
@@ -325,8 +368,16 @@
#define IXGBE_IMIRVP 0x05AC0
#define IXGBE_VMD_CTL 0x0581C
#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
+#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */
#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+/* Registers for setting up RSS on X550 with SRIOV
+ * _p - pool number (0..63)
+ * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA)
+ */
+#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4))
+#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40))
+#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40))
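The per-pool RSS registers are laid out with a 0x40-byte stride per pool, so the macros above are simple affine address computations. A quick check of that arithmetic (function names are illustrative):

#include <stdio.h>

/* Same arithmetic as IXGBE_PFVFRSSRK/IXGBE_PFVFRETA above: a 4-byte
 * word index plus a 0x40-byte stride per pool. */
static unsigned pfvfrssrk(unsigned i, unsigned p)
{
    return 0x018000 + i * 4 + p * 0x40;
}
static unsigned pfvfreta(unsigned i, unsigned p)
{
    return 0x019000 + i * 4 + p * 0x40;
}

int main(void)
{
    /* RSS key word 0 and RETA word 0 for pool 3. */
    printf("RSSRK(0,3)=0x%05X RETA(0,3)=0x%05X\n",
        pfvfrssrk(0, 3), pfvfreta(0, 3));
    return 0;
}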
/* Flow Director registers */
#define IXGBE_FDIRCTRL 0x0EE00
@@ -336,6 +387,7 @@
#define IXGBE_FDIRSIP4M 0x0EE40
#define IXGBE_FDIRTCPM 0x0EE44
#define IXGBE_FDIRUDPM 0x0EE48
+#define IXGBE_FDIRSCTPM 0x0EE78
#define IXGBE_FDIRIP6M 0x0EE74
#define IXGBE_FDIRM 0x0EE70
@@ -379,6 +431,8 @@
#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */
+#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
@@ -387,6 +441,8 @@
#define IXGBE_SPOOF_MACAS_MASK 0xFF
#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
#define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000
+#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16
#define IXGBE_PFVFSPOOF_REG_COUNT 8
/* 16 of these (0-15) */
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4))
@@ -408,16 +464,22 @@
#define IXGBE_WUPL 0x05900
#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_PROXYS 0x05F60 /* Proxying Status Register */
+#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */
+#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */
-#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */
+#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
/* Ext Flexible Host Filter Table */
-#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100))
+#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100))
+#define IXGBE_FHFT_EXT_X550(_n) (0x09600 + ((_n) * 0x100))
/* Four Flexible Filters are supported */
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
/* Six Flexible Filters are supported */
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6
+/* Eight Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8 8
#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
/* Each Flexible Filter is at most 128 (0x80) bytes in length */
@@ -450,10 +512,14 @@
#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */
+#define IXGBE_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */
/* Mask for Ext. flex filters */
#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000
#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */
#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */
+#define IXGBE_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */
#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
/* Wake Up Status */
@@ -473,6 +539,23 @@
#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
+#define IXGBE_WUS_FW_RST_WK IXGBE_WUFC_FW_RST_WK
+/* Proxy Status */
+#define IXGBE_PROXYS_EX 0x00000004 /* Exact packet received */
+#define IXGBE_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */
+#define IXGBE_PROXYS_NS 0x00000200 /* IPV6 NS received */
+#define IXGBE_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */
+#define IXGBE_PROXYS_ARP 0x00000800 /* ARP request packet received */
+#define IXGBE_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */
+
+/* Proxying Filter Control */
+#define IXGBE_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */
+#define IXGBE_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */
+#define IXGBE_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */
+#define IXGBE_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define IXGBE_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */
+#define IXGBE_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */
+#define IXGBE_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */
#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
@@ -489,6 +572,56 @@
#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+/* Power Management */
+/* DMA Coalescing configuration */
+struct ixgbe_dmac_config {
+ u16 watchdog_timer; /* usec units */
+ bool fcoe_en;
+ u32 link_speed;
+ u8 fcoe_tc;
+ u8 num_tcs;
+};
+
+/*
+ * DMA Coalescing threshold Rx PB TC[n] value, in kilobytes, by link speed.
+ * At 10Gbps, 10,000 bits/usec = 1250 bytes/usec, so a 70 usec window is
+ * 70 * 1250 = 87500 bytes [~85KB], hence DMACRXT_10G = 0x55.
+ */
+#define IXGBE_DMACRXT_10G 0x55
+#define IXGBE_DMACRXT_1G 0x09
+#define IXGBE_DMACRXT_100M 0x01
+
+/* DMA Coalescing registers */
+#define IXGBE_DMCMNGTH 0x15F20 /* Management Threshold */
+#define IXGBE_DMACR 0x02400 /* Control register */
+#define IXGBE_DMCTH(_i) (0x03300 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_DMCTLX 0x02404 /* Time to Lx request */
+/* DMA Coalescing register fields */
+#define IXGBE_DMCMNGTH_DMCMNGTH_MASK 0x000FFFF0 /* Mng Threshold mask */
+#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT 4 /* Management Threshold shift */
+#define IXGBE_DMACR_DMACWT_MASK 0x0000FFFF /* Watchdog Timer mask */
+#define IXGBE_DMACR_HIGH_PRI_TC_MASK 0x00FF0000
+#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT 16
+#define IXGBE_DMACR_EN_MNG_IND 0x10000000 /* Enable Mng Indications */
+#define IXGBE_DMACR_LX_COAL_IND 0x40000000 /* Lx Coalescing indicate */
+#define IXGBE_DMACR_DMAC_EN 0x80000000 /* DMA Coalescing Enable */
+#define IXGBE_DMCTH_DMACRXT_MASK 0x000001FF /* Receive Threshold mask */
+#define IXGBE_DMCTLX_TTLX_MASK 0x00000FFF /* Time to Lx request mask */
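The DMACRXT values follow directly from the arithmetic in the comment above: the threshold, in 1KB units, for the bytes that arrive during the coalescing window at a given link speed. A worked check for the 10G value:

#include <stdio.h>

int main(void)
{
    /* 10 Gb/s = 10,000 bits/usec = 1250 bytes/usec; a 70 usec window
     * holds 70 * 1250 = 87500 bytes, which in 1KB units truncates to
     * 85 = 0x55, matching IXGBE_DMACRXT_10G. */
    unsigned bytes = 70 * 1250;
    printf("%u bytes -> %u KB -> 0x%02X\n", bytes, bytes / 1024, bytes / 1024);
    return 0;
}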
+
+/* EEE registers */
+#define IXGBE_EEER 0x043A0 /* EEE register */
+#define IXGBE_EEE_STAT 0x04398 /* EEE Status */
+#define IXGBE_EEE_SU 0x04380 /* EEE Set up */
+#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */
+#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */
+
+/* EEE register fields */
+#define IXGBE_EEER_TX_LPI_EN 0x00010000 /* Enable EEE LPI TX path */
+#define IXGBE_EEER_RX_LPI_EN 0x00020000 /* Enable EEE LPI RX path */
+#define IXGBE_EEE_STAT_NEG 0x20000000 /* EEE support neg on link */
+#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */
+#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */
+
/* Security Control Registers */
@@ -630,6 +763,8 @@
/* FCoE DMA Context Registers */
+/* FCoE Direct DMA Context */
+#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10))
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */
#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
@@ -653,6 +788,12 @@
#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
/* FCoE Filter Context Registers */
+#define IXGBE_FCD_ID 0x05114 /* FCoE D_ID */
+#define IXGBE_FCSMAC 0x0510C /* FCoE Source MAC */
+#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT 16
+/* FCoE Direct Filter Context */
+#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10))
+#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4))
#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
@@ -683,6 +824,10 @@
#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */
#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */
+/* Higher 7 bits for the queue index */
+#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000
+#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16
/* Stats registers */
#define IXGBE_CRCERRS 0x04000
@@ -781,11 +926,6 @@
#define IXGBE_BXOFFRXC 0x041E0
#define IXGBE_BXONTXC 0x041E4
#define IXGBE_BXOFFTXC 0x041E8
-#define IXGBE_PCRC8ECL 0x0E810
-#define IXGBE_PCRC8ECH 0x0E811
-#define IXGBE_PCRC8ECH_MASK 0x1F
-#define IXGBE_LDPCECL 0x0E820
-#define IXGBE_LDPCECH 0x0E821
/* Management */
#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -807,6 +947,7 @@
#define IXGBE_BMCIP_IPADDR_VALID 0x00000002
/* Management Bit Fields and Masks */
+#define IXGBE_MANC_MPROXYE 0x40000000 /* Management Proxy Enable */
#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */
#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */
#define IXGBE_MANC_EN_BMC2OS_SHIFT 28
@@ -869,6 +1010,12 @@
#define IXGBE_PBACLR_82599 0x11068
#define IXGBE_CIAA_82599 0x11088
#define IXGBE_CIAD_82599 0x1108C
+#define IXGBE_CIAA_X550 0x11508
+#define IXGBE_CIAD_X550 0x11510
+#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
+#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
#define IXGBE_PICAUSE 0x110B0
#define IXGBE_PIENA 0x110B8
#define IXGBE_CDQ_MBR_82599 0x110B4
@@ -905,6 +1052,7 @@
#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */
#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
+#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */
#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
@@ -921,6 +1069,9 @@
#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
+#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */
+#define IXGBE_TSICR 0x08C60 /* TimeSync Interrupt Cause Register - WO */
+#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */
/* Diagnostic Registers */
#define IXGBE_RDSTATCTL 0x02C20
@@ -1077,6 +1228,7 @@
/* RDRXCTL Bit Masks */
#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */
#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad Small Packet */
#define IXGBE_RDRXCTL_MVMEN 0x00000020
#define IXGBE_RDRXCTL_RSC_PUSH_DIS 0x00000020
#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
@@ -1086,6 +1238,8 @@
#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/
#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */
#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_MBINTEN 0x10000000
+#define IXGBE_RDRXCTL_MDP_EN 0x20000000
/* RQTC Bit Masks and Shifts */
#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
@@ -1209,8 +1363,14 @@
#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */
#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
+#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT 0x8 /* AUTO NEG EEE 10GBaseT Advt */
+#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4 /* AUTO NEG EEE 1000BaseT Advt */
+#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT 0x2 /* AUTO NEG EEE 100BaseT Advt */
#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
@@ -1224,15 +1384,50 @@
#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */
+#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT Cap */
+#define IXGBE_AUTO_NEG_LP_10GBASE_CAP 0x0800 /* AUTO NEG Rx LP 10GBaseT Cap */
+#define IXGBE_AUTO_NEG_10GBASET_STAT 0x0021 /* AUTO NEG 10G BaseT Stat */
+
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
+#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
+#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */
+#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */
+
+#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */
+#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */
+#define IXGBE_PCRC8ECH_MASK 0x1F
+#define IXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */
+#define IXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */
/* MII clause 22/28 definitions */
#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
+#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register*/
+#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */
+
+#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */
+
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */
+
#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
@@ -1240,6 +1435,8 @@
#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400
+#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800
#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
#define IXGBE_MII_RESTART 0x200
@@ -1254,6 +1451,8 @@
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
+#define X550_PHY_ID 0x01540220
+#define X557_PHY_ID 0x01540240
#define AQ_FW_REV 0x20
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
@@ -1277,6 +1476,15 @@
#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */
+#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */
+#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */
+#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
+ IXGBE_SDP0_GPIEN_X540 : IXGBE_SDP0_GPIEN)
+#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
+ IXGBE_SDP1_GPIEN_X540 : IXGBE_SDP1_GPIEN)
+#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
+ IXGBE_SDP2_GPIEN_X540 : IXGBE_SDP2_GPIEN)
#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
@@ -1454,6 +1662,18 @@ enum {
#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
+#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
+ IXGBE_EICR_GPI_SDP0_X540 : \
+ IXGBE_EICR_GPI_SDP0)
+#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
+ IXGBE_EICR_GPI_SDP1_X540 : \
+ IXGBE_EICR_GPI_SDP1)
+#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
+ IXGBE_EICR_GPI_SDP2_X540 : \
+ IXGBE_EICR_GPI_SDP2)
#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
@@ -1472,6 +1692,9 @@ enum {
#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1491,6 +1714,9 @@ enum {
#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1509,6 +1735,9 @@ enum {
#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1587,6 +1816,7 @@ enum {
#define IXGBE_MAX_ETQF_FILTERS 8
#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
+#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
@@ -1607,11 +1837,15 @@ enum {
* FCoE (0x8906): Filter 2
* 1588 (0x88f7): Filter 3
* FIP (0x8914): Filter 4
+ * LLDP (0x88CC): Filter 5
+ * LACP (0x8809): Filter 6
*/
#define IXGBE_ETQF_FILTER_EAPOL 0
#define IXGBE_ETQF_FILTER_FCOE 2
#define IXGBE_ETQF_FILTER_1588 3
#define IXGBE_ETQF_FILTER_FIP 4
+#define IXGBE_ETQF_FILTER_LLDP 5
+#define IXGBE_ETQF_FILTER_LACP 6
/* VLAN Control Bit Masks */
#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@@ -1735,6 +1969,9 @@ enum {
#define IXGBE_MACC_FS 0x00040000
#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+/* Veto Bit definition */
+#define IXGBE_MMNGC_MNG_VETO 0x00000001
+
/* LINKS Bit Masks */
#define IXGBE_LINKS_KX_AN_COMP 0x80000000
#define IXGBE_LINKS_UP 0x40000000
@@ -1753,6 +1990,7 @@ enum {
#define IXGBE_LINKS_TL_FAULT 0x00001000
#define IXGBE_LINKS_SIGNAL 0x00000F00
+#define IXGBE_LINKS_SPEED_NON_STD 0x08000000
#define IXGBE_LINKS_SPEED_82599 0x30000000
#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
@@ -1795,12 +2033,16 @@ enum {
#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
/* SW_FW_SYNC/GSSR definitions */
-#define IXGBE_GSSR_EEP_SM 0x0001
-#define IXGBE_GSSR_PHY0_SM 0x0002
-#define IXGBE_GSSR_PHY1_SM 0x0004
-#define IXGBE_GSSR_MAC_CSR_SM 0x0008
-#define IXGBE_GSSR_FLASH_SM 0x0010
-#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_EEP_SM 0x0001
+#define IXGBE_GSSR_PHY0_SM 0x0002
+#define IXGBE_GSSR_PHY1_SM 0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200
+#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both PHYs and both I2Cs */
+#define IXGBE_GSSR_I2C_MASK 0x1800
+#define IXGBE_GSSR_NVM_PHY_MASK 0xF
/* FW Status register bitmask */
#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
@@ -1830,32 +2072,40 @@ enum {
#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
#define IXGBE_EEPROM_OPCODE_BITS 8
+/* FLA Register */
+#define IXGBE_FLA_LOCKED 0x00000040
+
/* Part Number String Length */
#define IXGBE_PBANUM_LENGTH 11
/* Checksum and EEPROM pointers */
-#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
-#define IXGBE_EEPROM_CHECKSUM 0x3F
-#define IXGBE_EEPROM_SUM 0xBABA
-#define IXGBE_PCIE_ANALOG_PTR 0x03
-#define IXGBE_ATLAS0_CONFIG_PTR 0x04
-#define IXGBE_PHY_PTR 0x04
-#define IXGBE_ATLAS1_CONFIG_PTR 0x05
-#define IXGBE_OPTION_ROM_PTR 0x05
-#define IXGBE_PCIE_GENERAL_PTR 0x06
-#define IXGBE_PCIE_CONFIG0_PTR 0x07
-#define IXGBE_PCIE_CONFIG1_PTR 0x08
-#define IXGBE_CORE0_PTR 0x09
-#define IXGBE_CORE1_PTR 0x0A
-#define IXGBE_MAC0_PTR 0x0B
-#define IXGBE_MAC1_PTR 0x0C
-#define IXGBE_CSR0_CONFIG_PTR 0x0D
-#define IXGBE_CSR1_CONFIG_PTR 0x0E
-#define IXGBE_FW_PTR 0x0F
-#define IXGBE_PBANUM0_PTR 0x15
-#define IXGBE_PBANUM1_PTR 0x16
-#define IXGBE_ALT_MAC_ADDR_PTR 0x37
-#define IXGBE_FREE_SPACE_PTR 0X3E
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define IXGBE_EEPROM_CHECKSUM 0x3F
+#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_PCIE_ANALOG_PTR 0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR 0x09
+#define IXGBE_CORE1_PTR 0x0A
+#define IXGBE_MAC0_PTR 0x0B
+#define IXGBE_MAC1_PTR 0x0C
+#define IXGBE_CSR0_CONFIG_PTR 0x0D
+#define IXGBE_CSR1_CONFIG_PTR 0x0E
+#define IXGBE_PCIE_ANALOG_PTR_X550 0x02
+#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000
+#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24
+#define IXGBE_PCIE_CONFIG_SIZE 0x08
+#define IXGBE_EEPROM_LAST_WORD 0x41
+#define IXGBE_FW_PTR 0x0F
+#define IXGBE_PBANUM0_PTR 0x15
+#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_ALT_MAC_ADDR_PTR 0x37
+#define IXGBE_FREE_SPACE_PTR 0x3E
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
@@ -1896,6 +2146,11 @@ enum {
#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */
#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */
+#define NVM_INIT_CTRL_3 0x38
+#define NVM_INIT_CTRL_3_LPLU 0x8
+#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40
+#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100
+
#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
@@ -2010,7 +2265,7 @@ enum {
#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
-#define IXGBE_RFCTL_RSC_DIS 0x00000010
+#define IXGBE_RFCTL_RSC_DIS 0x00000020
#define IXGBE_RFCTL_NFSW_DIS 0x00000040
#define IXGBE_RFCTL_NFSR_DIS 0x00000080
#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
@@ -2049,6 +2304,14 @@ enum {
#define IXGBE_TSAUXC_EN_CLK 0x00000004
#define IXGBE_TSAUXC_SYNCLK 0x00000008
#define IXGBE_TSAUXC_SDP0_INT 0x00000040
+#define IXGBE_TSAUXC_EN_TT0 0x00000001
+#define IXGBE_TSAUXC_EN_TT1 0x00000002
+#define IXGBE_TSAUXC_ST0 0x00000010
+#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000
+
+#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0
+#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080
+#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100
#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
@@ -2058,8 +2321,19 @@ enum {
#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00
#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02
#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08
#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */
+#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */
+#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK 0xFF000000 /* Rx Timestamp UP Mask */
+
+#define IXGBE_TSIM_SYS_WRAP 0x00000001
+#define IXGBE_TSIM_TXTS 0x00000002
+#define IXGBE_TSIM_TADJ 0x00000080
+
+#define IXGBE_TSICR_SYS_WRAP IXGBE_TSIM_SYS_WRAP
+#define IXGBE_TSICR_TXTS IXGBE_TSIM_TXTS
+#define IXGBE_TSICR_TADJ IXGBE_TSIM_TADJ
#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF
#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00
@@ -2118,10 +2392,12 @@ enum {
#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000
#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
/* Queue Drop Enable */
#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_HIDE_VLAN 0x00000002
#define IXGBE_QDE_IDX_MASK 0x00007F00
#define IXGBE_QDE_IDX_SHIFT 8
#define IXGBE_QDE_WRITE 0x00010000
@@ -2163,10 +2439,12 @@ enum {
#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */
#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */
+#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */
#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
@@ -2180,8 +2458,9 @@ enum {
#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */
#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */
-#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
+#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCEOFe/IPE */
#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
@@ -2212,6 +2491,7 @@ enum {
#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */
+#define IXGBE_RXDADV_STAT_TSIP 0x00008000 /* Time Stamp in packet buffer */
/* PSRTYPE bit definitions */
#define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -2222,6 +2502,10 @@ enum {
/* SRRCTL bit definitions */
#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* 64-byte resolution (>> 6)
+ * placed at bit 8 (<< 8)
+ * = net shift (<< 2)
+ */
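In other words, software computes the header-size field with a single net shift. A worked check of the equivalence described in the comment:

#include <stdio.h>

int main(void)
{
    /* Header size is programmed in 64-byte units (>> 6) into a field
     * at bit 8 (<< 8); the two shifts collapse into the net << 2 that
     * IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT encodes. */
    unsigned hdr_bytes = 256;
    unsigned long_form = (hdr_bytes >> 6) << 8;
    unsigned net_shift = hdr_bytes << 2;
    printf("0x%X == 0x%X\n", long_form, net_shift);  /* 0x400 == 0x400 */
    return 0;
}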
#define IXGBE_SRRCTL_RDMTS_SHIFT 22
#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
#define IXGBE_SRRCTL_DROP_EN 0x10000000
@@ -2269,6 +2553,8 @@ enum {
#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */
+#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */
#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
@@ -2319,6 +2605,68 @@ enum {
#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P)))
+#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P)))
+#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P)))
+#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P)))
+#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P)))
+#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P)))
+#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P)))
+#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P)))
+#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P)))
+#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P)))
+#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P)))
+#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P)))
+#define IXGBE_PVTEITR(P) (((P) < 24) ? (0x00820 + ((P) * 4)) : \
+ (0x012300 + (((P) - 24) * 4)))
+#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P)))
+#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P)))
+#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P)))
+#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P)))
+#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \
+ : (0x0D000 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \
+ : (0x0D004 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \
+ : (0x0D008 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \
+ : (0x0D010 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \
+ : (0x0D018 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \
+ : (0x0D028 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \
+ : (0x0D014 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P)))
+#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P)))
+#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P)))
+#define IXGBE_PVFTTDLEN(P) (0x06008 + (0x40 * (P)))
+#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
+#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? (0x0100C + (0x40 * (P))) \
+ : (0x0D00C + (0x40 * ((P) - 64))))
+#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P)))
+#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x)))
+#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x)))
+#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x)))
+#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x)))
+#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x)))
+#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x)))
+#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
+
+#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index)))
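The *n() wrappers above compose (queues per pool, VF number, VF-relative queue index) into the absolute queue number consumed by the flat per-queue macros. An illustrative expansion with hypothetical values (not code from this commit):

	/* With 4 Tx queues per pool, VF 3's queue 1 is absolute queue
	 * 4 * 3 + 1 = 13, so this reads TDH at 0x06010 + 0x40 * 13. */
	u32 tdh = IXGBE_READ_REG(hw, IXGBE_PVFTDHn(4, 3, 1));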
/* Little Endian defines */
#ifndef __le16
@@ -2355,7 +2703,11 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21
+#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */
+#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */
#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
+#define IXGBE_FDIRCTRL_FILTERMODE_MASK 0x00E00000
#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
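The new FILTERMODE field occupies FDIRCTRL bits 23:21, so the mode values must be shifted into place; a hedged sketch of selecting cloud-filter mode (illustrative setup code, not part of this commit):

	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);

	fdirctrl &= ~IXGBE_FDIRCTRL_FILTERMODE_MASK;
	fdirctrl |= IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
	    IXGBE_FDIRCTRL_FILTERMODE_SHIFT;	/* 010b -> 0x00400000 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);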
@@ -2369,6 +2721,13 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRM_L4P 0x00000008
#define IXGBE_FDIRM_FLEX 0x00000010
#define IXGBE_FDIRM_DIPv6 0x00000020
+#define IXGBE_FDIRM_L3P 0x00000040
+
+#define IXGBE_FDIRIP6M_INNER_MAC 0x03F0 /* bit 9:4 */
+#define IXGBE_FDIRIP6M_TUNNEL_TYPE 0x0800 /* bit 11 */
+#define IXGBE_FDIRIP6M_TNI_VNI 0xF000 /* bit 15:12 */
+#define IXGBE_FDIRIP6M_TNI_VNI_24 0x1000 /* bit 12 */
+#define IXGBE_FDIRIP6M_ALWAYS_MASK 0x040F /* bit 10, 3:0 */
#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
#define IXGBE_FDIRFREE_FREE_SHIFT 0
@@ -2410,26 +2769,21 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
+#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT 23
#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
#define IXGBE_FDIR_INIT_DONE_POLL 10
#define IXGBE_FDIRCMD_CMD_POLL 10
-
+#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000
#define IXGBE_FDIR_DROP_QUEUE 127
-#define IXGBE_STATUS_OVERHEATING_BIT 20 /* STATUS overtemp bit num */
-/* iTS sensor related defines*/
-#define IXGBE_TEMP_STATUS_ADDR_X540 0xC830
-#define IXGBE_TEMP_VALUE_ADDR_X540 0xC820
-#define IXGBE_TEMP_PROV_2_ADDR_X540 0xC421
-#define IXGBE_TEMP_PROV_4_ADDR_X540 0xC423
-#define IXGBE_TEMP_STATUS_PAGE_X540 0x1E
-#define IXGBE_TEMP_HIGH_FAILURE_BIT_X540 0xE
-#define IXGBE_TEMP_HIGH_WARNING_BIT_X540 0xC
/* Manageability Host Interface defines */
#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */
+#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */
+#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */
/* CEM Support */
#define FW_CEM_HDR_LEN 0x4
@@ -2439,7 +2793,17 @@ enum ixgbe_fdir_pballoc_type {
#define FW_CEM_UNUSED_VER 0x0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
-
+#define FW_READ_SHADOW_RAM_CMD 0x31
+#define FW_READ_SHADOW_RAM_LEN 0x6
+#define FW_WRITE_SHADOW_RAM_CMD 0x33
+#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */
+#define FW_SHADOW_RAM_DUMP_CMD 0x36
+#define FW_SHADOW_RAM_DUMP_LEN 0
+#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET 3
+#define FW_MAX_READ_BUFFER_SIZE 1024
+#define FW_DISABLE_RXEN_CMD 0xDE
+#define FW_DISABLE_RXEN_LEN 0x1
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@@ -2452,6 +2816,25 @@ struct ixgbe_hic_hdr {
u8 checksum;
};
+struct ixgbe_hic_hdr2_req {
+ u8 cmd;
+ u8 buf_lenh;
+ u8 buf_lenl;
+ u8 checksum;
+};
+
+struct ixgbe_hic_hdr2_rsp {
+ u8 cmd;
+ u8 buf_lenl;
+ u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
+ u8 checksum;
+};
+
+union ixgbe_hic_hdr2 {
+ struct ixgbe_hic_hdr2_req req;
+ struct ixgbe_hic_hdr2_rsp rsp;
+};
+
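Per the buf_lenh_status comment, a hdr2 response packs the high bits of the buffer length (bits 7:5) with the status code (bits 4:0). Decoding it would look roughly like this sketch, assuming the high bits represent length bits 10:8:

	struct ixgbe_hic_hdr2_rsp *rsp = &hdr.rsp; /* hdr: a received union ixgbe_hic_hdr2 */
	u16 buf_len = ((u16)(rsp->buf_lenh_status & 0xE0) << 3) | rsp->buf_lenl;
	u8 status = rsp->buf_lenh_status & 0x1F;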
struct ixgbe_hic_drv_info {
struct ixgbe_hic_hdr hdr;
u8 port_num;
@@ -2463,6 +2846,33 @@ struct ixgbe_hic_drv_info {
u16 pad2; /* end spacing to ensure length is mult. of dword2 */
};
+/* These need to be dword aligned */
+struct ixgbe_hic_read_shadow_ram {
+ union ixgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct ixgbe_hic_write_shadow_ram {
+ union ixgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct ixgbe_hic_disable_rxen {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad2;
+ u16 pad3;
+};
+
+
/* Transmit Descriptor - Legacy */
struct ixgbe_legacy_tx_desc {
u64 buffer_addr; /* Address of the descriptor's data buffer */
@@ -2604,6 +3014,12 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+#define IXGBE_ADVTXD_OUTER_IPLEN 16 /* Adv ctxt OUTERIPLEN shift */
+#define IXGBE_ADVTXD_TUNNEL_LEN 24 /* Adv ctxt TUNNELLEN shift */
+#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */
+#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */
+#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */
+
/* Autonegotiation advertised speeds */
typedef u32 ixgbe_autoneg_advertised;
/* Link speed */
@@ -2611,6 +3027,8 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_UNKNOWN 0
#define IXGBE_LINK_SPEED_100_FULL 0x0008
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
+#define IXGBE_LINK_SPEED_5GB_FULL 0x0800
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
@@ -2710,6 +3128,7 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_ATR_L4TYPE_TCP 0x2
#define IXGBE_ATR_L4TYPE_SCTP 0x3
#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10
enum ixgbe_atr_flow_type {
IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
@@ -2719,6 +3138,14 @@ enum ixgbe_atr_flow_type {
IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
};
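The tunneled variants are simply the base flow types with IXGBE_ATR_L4TYPE_TUNNEL_MASK (0x10) ORed in, so one can be derived from the other:

	/* 0x2 | 0x10 == 0x12 == IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 */
	u8 flow = IXGBE_ATR_FLOW_TYPE_TCPV4 | IXGBE_ATR_L4TYPE_TUNNEL_MASK;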
/* Flow Director ATR input struct. */
@@ -2730,6 +3157,9 @@ union ixgbe_atr_input {
* flow_type - 1 byte
* vlan_id - 2 bytes
* src_ip - 16 bytes
+ * inner_mac - 6 bytes
+ * cloud_mode - 2 bytes
+ * tni_vni - 4 bytes
* dst_ip - 16 bytes
* src_port - 2 bytes
* dst_port - 2 bytes
@@ -2742,12 +3172,15 @@ union ixgbe_atr_input {
__be16 vlan_id;
__be32 dst_ip[4];
__be32 src_ip[4];
+ u8 inner_mac[6];
+ __be16 tunnel_type;
+ __be32 tni_vni;
__be16 src_port;
__be16 dst_port;
__be16 flex_bytes;
__be16 bkt_hash;
} formatted;
- __be32 dword_stream[11];
+ __be32 dword_stream[14];
};
/* Flow Director compressed ATR hash input struct */
@@ -2792,6 +3225,17 @@ enum ixgbe_mac_type {
ixgbe_mac_82599_vf,
ixgbe_mac_X540,
ixgbe_mac_X540_vf,
+ /*
+ * X550EM MAC type decoder:
+ * ixgbe_mac_X550EM_x: "x" = Xeon
+ * ixgbe_mac_X550EM_a: "a" = Atom
+ */
+ ixgbe_mac_X550,
+ ixgbe_mac_X550EM_x,
+ ixgbe_mac_X550EM_a,
+ ixgbe_mac_X550_vf,
+ ixgbe_mac_X550EM_x_vf,
+ ixgbe_mac_X550EM_a_vf,
ixgbe_num_macs
};
@@ -2800,6 +3244,9 @@ enum ixgbe_phy_type {
ixgbe_phy_none,
ixgbe_phy_tn,
ixgbe_phy_aq,
+ ixgbe_phy_x550em_kr,
+ ixgbe_phy_x550em_kx4,
+ ixgbe_phy_x550em_ext_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -2812,6 +3259,10 @@ enum ixgbe_phy_type {
ixgbe_phy_sfp_ftl_active,
ixgbe_phy_sfp_unknown,
ixgbe_phy_sfp_intel,
+ ixgbe_phy_qsfp_passive_unknown,
+ ixgbe_phy_qsfp_active_unknown,
+ ixgbe_phy_qsfp_intel,
+ ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
ixgbe_phy_generic
};
@@ -2851,6 +3302,7 @@ enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
ixgbe_media_type_fiber_fixed,
+ ixgbe_media_type_fiber_qsfp,
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
@@ -2880,6 +3332,7 @@ enum ixgbe_bus_type {
ixgbe_bus_type_pci,
ixgbe_bus_type_pcix,
ixgbe_bus_type_pci_express,
+ ixgbe_bus_type_internal,
ixgbe_bus_type_reserved
};
@@ -3036,7 +3489,7 @@ struct ixgbe_eeprom_operations {
s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
- u16 (*calc_checksum)(struct ixgbe_hw *);
+ s32 (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
@@ -3062,17 +3515,21 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*disable_sec_rx_path)(struct ixgbe_hw *);
s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
- s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
- void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+ s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*release_swfw_sync)(struct ixgbe_hw *, u32);
+ s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
+ void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
/* Packet Buffer manipulation */
void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int);
@@ -3107,13 +3564,26 @@ struct ixgbe_mac_operations {
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *);
+ s32 (*setup_fc)(struct ixgbe_hw *);
/* Manageability interface */
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
- s32 (*dmac_config)(struct ixgbe_hw *hw);
+ void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map);
+ void (*disable_rx)(struct ixgbe_hw *hw);
+ void (*enable_rx)(struct ixgbe_hw *hw);
+ void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
+ unsigned int);
+ void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
- void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map);
+ s32 (*dmac_config)(struct ixgbe_hw *hw);
+ s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee);
+ s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+ void (*disable_mdd)(struct ixgbe_hw *hw);
+ void (*enable_mdd)(struct ixgbe_hw *hw);
+ void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
+ void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
};
struct ixgbe_phy_operations {
@@ -3126,6 +3596,7 @@ struct ixgbe_phy_operations {
s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
+ s32 (*setup_internal_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
@@ -3135,7 +3606,11 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
void (*i2c_bus_clear)(struct ixgbe_hw *);
+ s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+ s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
s32 (*check_overtemp)(struct ixgbe_hw *);
+ s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+ s32 (*enter_lplu)(struct ixgbe_hw *);
};
struct ixgbe_eeprom_info {
@@ -3145,6 +3620,7 @@ struct ixgbe_eeprom_info {
u16 word_size;
u16 address_bits;
u16 word_page_size;
+ u16 ctrl_word_3;
};
#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
@@ -3169,7 +3645,6 @@ struct ixgbe_mac_info {
u32 max_tx_queues;
u32 max_rx_queues;
u32 orig_autoc;
- u32 cached_autoc;
u8 san_mac_rar_index;
bool get_link_status;
u32 orig_autoc2;
@@ -3178,6 +3653,8 @@ struct ixgbe_mac_info {
bool orig_link_settings_stored;
bool autotry_restart;
u8 flags;
+ struct ixgbe_dmac_config dmac_config;
+ bool set_lben;
};
struct ixgbe_phy_info {
@@ -3189,12 +3666,14 @@ struct ixgbe_phy_info {
bool sfp_setup_needed;
u32 revision;
enum ixgbe_media_type media_type;
+ u32 phy_semaphore_mask;
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
bool reset_if_overtemp;
+ bool qsfp_shared_i2c_bus;
};
#include "ixgbe_mbx.h"
@@ -3229,7 +3708,7 @@ struct ixgbe_mbx_info {
};
struct ixgbe_hw {
- u8 *hw_addr;
+ u8 IOMEM *hw_addr;
void *back;
struct ixgbe_mac_info mac;
struct ixgbe_addr_filter_info addr_ctrl;
@@ -3247,7 +3726,6 @@ struct ixgbe_hw {
int api_version;
bool force_full_reset;
bool allow_unsupported_sfp;
- bool mng_fw_enabled;
bool wol_enabled;
};
@@ -3292,7 +3770,80 @@ struct ixgbe_hw {
#define IXGBE_ERR_OUT_OF_MEM -34
#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36
#define IXGBE_ERR_EEPROM_PROTECTED_REGION -37
+#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
+#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C))
+#define IXGBE_KRM_AN_CNTL_1(P) ((P == 0) ? (0x422C) : (0x822C))
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00))
+#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00))
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520))
+#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00))
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
+
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
+
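These KRM registers are reached via the read/write_iosf_sb_reg mac ops introduced later in this patch; a hedged sketch of restarting KR autonegotiation (illustrative only, the real sequence lives in the X550 PHY code):

	u32 reg;

	hw->mac.ops.read_iosf_sb_reg(hw,
	    IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
	    IXGBE_SB_IOSF_TARGET_KR_PHY, &reg);
	reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
	    IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE |
	    IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
	hw->mac.ops.write_iosf_sb_reg(hw,
	    IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
	    IXGBE_SB_IOSF_TARGET_KR_PHY, reg);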
+#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28)
+#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29)
+
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
+
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
+
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
+
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
+
+#define IXGBE_KX4_LINK_CNTL_1 0x4C
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
+
+#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
+#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
+
+#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0
+#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \
+ (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \
+ (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
+#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
+#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
+#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
+#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
+#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
+
#endif /* _IXGBE_TYPE_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c
index 2774f9b..964514c 100644
--- a/sys/dev/ixgbe/ixgbe_vf.c
+++ b/sys/dev/ixgbe/ixgbe_vf.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -89,6 +89,49 @@ s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
return IXGBE_SUCCESS;
}
+/* ixgbe_virt_clr_reg - Set registers to their default (power on) state.
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw)
+{
+ int i;
+ u32 vfsrrctl;
+ u32 vfdca_rxctrl;
+ u32 vfdca_txctrl;
+
+ /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
+ vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+ /* DCA_RXCTRL default value */
+ vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+
+ /* DCA_TXCTRL default value */
+ vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_TXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_TXCTRL_DATA_RRO_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+ for (i = 0; i < 7; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl);
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
/**
* ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
@@ -134,7 +177,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 timeout = IXGBE_VF_INIT_TIMEOUT;
s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
- u32 ctrl, msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
u8 *addr = (u8 *)(&msgbuf[1]);
DEBUGFUNC("ixgbevf_reset_hw_vf");
@@ -145,8 +188,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
DEBUGOUT("Issuing a function level reset to MAC\n");
- ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST;
- IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, ctrl);
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
IXGBE_WRITE_FLUSH(hw);
msec_delay(50);
@@ -160,6 +202,9 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
if (!timeout)
return IXGBE_ERR_RESET_FAILED;
+ /* Reset VF registers to initial values */
+ ixgbe_virt_clr_reg(hw);
+
/* mailbox timeout can now become active */
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
@@ -224,6 +269,8 @@ s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
reg_val &= ~IXGBE_RXDCTL_ENABLE;
IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
}
+ /* Clear packet split and pool config */
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
/* flush all queues disables */
IXGBE_WRITE_FLUSH(hw);
@@ -512,6 +559,21 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
if (!(links_reg & IXGBE_LINKS_UP))
goto out;
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ usec_delay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
diff --git a/sys/dev/ixgbe/ixgbe_vf.h b/sys/dev/ixgbe/ixgbe_vf.h
index 8500768..8f4d46e 100644
--- a/sys/dev/ixgbe/ixgbe_vf.h
+++ b/sys/dev/ixgbe/ixgbe_vf.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -84,6 +84,9 @@
#define IXGBE_VFGOTC_LSB 0x02020
#define IXGBE_VFGOTC_MSB 0x02024
#define IXGBE_VFMPRC 0x01034
+#define IXGBE_VFMRQC 0x3000
+#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4))
+#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4))
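These add VF-local RSS registers (hash key and redirection table). A hypothetical sketch of programming them, assuming the PF-style layout of a 40-byte key and four packed one-byte RETA entries per 32-bit register (whether IXGBE_VFMRQC takes the same RSSEN bit as the PF's MRQC is also an assumption):

	int i;

	for (i = 0; i < 10; i++)	/* 40-byte key in rss_key[] */
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
	for (i = 0; i < 16; i++)	/* 64 RETA entries in reta[] */
		IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i), reta[i]);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, IXGBE_MRQC_RSSEN);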
struct ixgbevf_hw_stats {
diff --git a/sys/dev/ixgbe/ixgbe_x540.c b/sys/dev/ixgbe/ixgbe_x540.c
index 9622f0e..93c6ca4 100644
--- a/sys/dev/ixgbe/ixgbe_x540.c
+++ b/sys/dev/ixgbe/ixgbe_x540.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -38,6 +38,13 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES 128
+#define IXGBE_X540_MC_TBL_SIZE 128
+#define IXGBE_X540_VFT_TBL_SIZE 128
+#define IXGBE_X540_RX_PB_SIZE 384
+
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
@@ -63,65 +70,67 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
/* EEPROM */
- eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
- eeprom->ops.read = &ixgbe_read_eerd_X540;
- eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
- eeprom->ops.write = &ixgbe_write_eewr_X540;
- eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
- eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
- eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
- eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
+ eeprom->ops.read = ixgbe_read_eerd_X540;
+ eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540;
+ eeprom->ops.write = ixgbe_write_eewr_X540;
+ eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540;
/* PHY */
- phy->ops.init = &ixgbe_init_phy_ops_generic;
+ phy->ops.init = ixgbe_init_phy_ops_generic;
phy->ops.reset = NULL;
+ if (!ixgbe_mng_present(hw))
+ phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
/* MAC */
- mac->ops.reset_hw = &ixgbe_reset_hw_X540;
- mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
- mac->ops.get_media_type = &ixgbe_get_media_type_X540;
+ mac->ops.reset_hw = ixgbe_reset_hw_X540;
+ mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
+ mac->ops.get_media_type = ixgbe_get_media_type_X540;
mac->ops.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_X540;
+ ixgbe_get_supported_physical_layer_X540;
mac->ops.read_analog_reg8 = NULL;
mac->ops.write_analog_reg8 = NULL;
- mac->ops.start_hw = &ixgbe_start_hw_X540;
- mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
- mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
- mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
- mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
- mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
- mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
- mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
- mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
- mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
+ mac->ops.start_hw = ixgbe_start_hw_X540;
+ mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
+ mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
+ mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
/* RAR, Multicast, VLAN */
- mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
- mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
- mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
- mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+ mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
+ mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
+ mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
- mac->ops.set_vfta = &ixgbe_set_vfta_generic;
- mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
- mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
- mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
- mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
- mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+ mac->ops.set_vfta = ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
+ mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
+ mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
+ mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
/* Link */
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_generic;
- mac->ops.setup_link = &ixgbe_setup_mac_link_X540;
- mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
- mac->ops.check_link = &ixgbe_check_mac_link_generic;
-
-
- mac->mcft_size = 128;
- mac->vft_size = 128;
- mac->num_rar_entries = 128;
- mac->rx_pb_size = 384;
- mac->max_tx_queues = 128;
- mac->max_rx_queues = 128;
+ ixgbe_get_copper_link_capabilities_generic;
+ mac->ops.setup_link = ixgbe_setup_mac_link_X540;
+ mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
+ mac->ops.check_link = ixgbe_check_mac_link_generic;
+
+
+ mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
+ mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/*
@@ -139,9 +148,9 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
/* Manageability interface */
- mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
- mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
+ mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
return ret_val;
}
@@ -469,18 +478,20 @@ s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
* be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
*
* @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
**/
-u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
- u16 i;
- u16 j;
+ u16 i, j;
u16 checksum = 0;
u16 length = 0;
u16 pointer = 0;
u16 word = 0;
+ u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
+ u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
- /*
- * Do not use hw->eeprom.ops.read because we do not want to take
+ /* Do not use hw->eeprom.ops.read because we do not want to take
* the synchronization semaphores here. Instead use
* ixgbe_read_eerd_generic
*/
@@ -488,25 +499,25 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
/* Include 0x0-0x3F in the checksum */
- for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
- if (ixgbe_read_eerd_generic(hw, i, &word) != IXGBE_SUCCESS) {
+ for (i = 0; i <= checksum_last_word; i++) {
+ if (ixgbe_read_eerd_generic(hw, i, &word)) {
DEBUGOUT("EEPROM read failed\n");
- break;
+ return IXGBE_ERR_EEPROM;
}
- checksum += word;
+ if (i != IXGBE_EEPROM_CHECKSUM)
+ checksum += word;
}
- /*
- * Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ /* Include all data from pointers 0x3, 0x6-0xE. This excludes the
* FW, PHY module, and PCIe Expansion/Option ROM pointers.
*/
- for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
continue;
- if (ixgbe_read_eerd_generic(hw, i, &pointer) != IXGBE_SUCCESS) {
+ if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
DEBUGOUT("EEPROM read failed\n");
- break;
+ return IXGBE_ERR_EEPROM;
}
/* Skip pointer section if the pointer is invalid. */
@@ -514,10 +525,9 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
pointer >= hw->eeprom.word_size)
continue;
- if (ixgbe_read_eerd_generic(hw, pointer, &length) !=
- IXGBE_SUCCESS) {
+ if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
DEBUGOUT("EEPROM read failed\n");
- break;
+ return IXGBE_ERR_EEPROM;
}
/* Skip pointer section if length is invalid. */
@@ -525,11 +535,10 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
(pointer + length) >= hw->eeprom.word_size)
continue;
- for (j = pointer+1; j <= pointer+length; j++) {
- if (ixgbe_read_eerd_generic(hw, j, &word) !=
- IXGBE_SUCCESS) {
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (ixgbe_read_eerd_generic(hw, j, &word)) {
DEBUGOUT("EEPROM read failed\n");
- break;
+ return IXGBE_ERR_EEPROM;
}
checksum += word;
}
@@ -537,7 +546,7 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return checksum;
+ return (s32)checksum;
}
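With the return type widened to s32, a negative value now signals an EEPROM read failure while success carries the 16-bit sum in the low bits; callers follow the pattern the validate/update paths below adopt:

	s32 ret = hw->eeprom.ops.calc_checksum(hw);

	if (ret < 0)
		return ret;			/* EEPROM read failed */
	checksum = (u16)(ret & 0xffff);		/* low 16 bits hold the sum */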
/**
@@ -557,48 +566,49 @@ s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540");
- /*
- * Read the first word from the EEPROM. If this times out or fails, do
+ /* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
-
- if (status != IXGBE_SUCCESS) {
+ if (status) {
DEBUGOUT("EEPROM read failed\n");
- goto out;
+ return status;
}
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
- IXGBE_SUCCESS) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+ return IXGBE_ERR_SWFW_SYNC;
- /*
- * Do not use hw->eeprom.ops.read because we do not want to take
- * the synchronization semaphores twice here.
- */
- ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
- &read_checksum);
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ goto out;
- /*
- * Verify read checksum from EEPROM is the same as
- * calculated checksum
- */
- if (read_checksum != checksum) {
- status = IXGBE_ERR_EEPROM_CHECKSUM;
- ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
- "Invalid EEPROM checksum");
- }
+ checksum = (u16)(status & 0xffff);
- /* If the user cares, return the calculated checksum */
- if (checksum_val)
- *checksum_val = checksum;
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- } else {
- status = IXGBE_ERR_SWFW_SYNC;
+ /* Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+ if (status)
+ goto out;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid EEPROM checksum");
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
}
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
return status;
}
@@ -617,33 +627,36 @@ s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_update_eeprom_checksum_X540");
- /*
- * Read the first word from the EEPROM. If this times out or fails, do
+ /* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
-
- if (status != IXGBE_SUCCESS)
+ if (status) {
DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
- IXGBE_SUCCESS) {
- checksum = hw->eeprom.ops.calc_checksum(hw);
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+ return IXGBE_ERR_SWFW_SYNC;
- /*
- * Do not use hw->eeprom.ops.write because we do not want to
- * take the synchronization semaphores twice here.
- */
- status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
- checksum);
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ goto out;
- if (status == IXGBE_SUCCESS)
- status = ixgbe_update_flash_X540(hw);
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- } else {
- status = IXGBE_ERR_SWFW_SYNC;
- }
+ checksum = (u16)(status & 0xffff);
+
+ /* Do not use hw->eeprom.ops.write because we do not want to
+ * take the synchronization semaphores twice here.
+ */
+ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+ if (status)
+ goto out;
+
+ status = ixgbe_update_flash_X540(hw);
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
@@ -658,7 +671,7 @@ s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
u32 flup;
- s32 status = IXGBE_ERR_EEPROM;
+ s32 status;
DEBUGFUNC("ixgbe_update_flash_X540");
@@ -716,7 +729,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
break;
}
- usec_delay(5);
+ msec_delay(5);
}
if (i == IXGBE_FLUDONE_ATTEMPTS)
@@ -734,58 +747,55 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
- u32 swfw_sync;
- u32 swmask = mask;
- u32 fwmask = mask << 5;
- u32 hwmask = 0;
+ u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
+ u32 fwmask = swmask << 5;
+ u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
u32 timeout = 200;
+ u32 hwmask = 0;
+ u32 swfw_sync;
u32 i;
- s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");
- if (swmask == IXGBE_GSSR_EEP_SM)
- hwmask = IXGBE_GSSR_FLASH_SM;
+ if (swmask & IXGBE_GSSR_EEP_SM)
+ hwmask |= IXGBE_GSSR_FLASH_SM;
/* SW only mask doesn't have FW bit pair */
- if (swmask == IXGBE_GSSR_SW_MNG_SM)
- fwmask = 0;
+ if (mask & IXGBE_GSSR_SW_MNG_SM)
+ swmask |= IXGBE_GSSR_SW_MNG_SM;
+ swmask |= swi2c_mask;
+ fwmask |= swi2c_mask << 2;
for (i = 0; i < timeout; i++) {
- /*
- * SW NVM semaphore bit is used for access to all
+ /* SW NVM semaphore bit is used for access to all
* SW_FW_SYNC bits (not just NVM)
*/
- if (ixgbe_get_swfw_sync_semaphore(hw)) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
swfw_sync |= swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
- goto out;
- } else {
- /*
- * Firmware currently using resource (fwmask), hardware
- * currently using resource (hwmask), or other software
- * thread currently using resource (swmask)
- */
- ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
+ return IXGBE_SUCCESS;
}
+ /* Firmware currently using resource (fwmask), hardware
+ * currently using resource (hwmask), or other software
+ * thread currently using resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
}
/* Failed to get SW only semaphore */
if (swmask == IXGBE_GSSR_SW_MNG_SM) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
ERROR_REPORT1(IXGBE_ERROR_POLLING,
"Failed to get SW only semaphore");
- goto out;
+ return IXGBE_ERR_SWFW_SYNC;
}
/* If the resource is not released by the FW/HW the SW can assume that
@@ -793,32 +803,34 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
* of the requested resource(s) while ignoring the corresponding FW/HW
* bits in the SW_FW_SYNC register.
*/
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
if (swfw_sync & (fwmask | hwmask)) {
- if (ixgbe_get_swfw_sync_semaphore(hw)) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
-
swfw_sync |= swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
+ return IXGBE_SUCCESS;
}
/* If the resource is not released by other SW the SW can assume that
* the other SW malfunctions. In that case the SW should clear all SW
* flags that it does not own and then repeat the whole process once
* again.
*/
- else if (swfw_sync & swmask) {
- ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM |
- IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_PHY1_SM |
- IXGBE_GSSR_MAC_CSR_SM);
- ret_val = IXGBE_ERR_SWFW_SYNC;
+ if (swfw_sync & swmask) {
+ u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
+
+ if (swi2c_mask)
+ rmask |= IXGBE_GSSR_I2C_MASK;
+ ixgbe_release_swfw_sync_X540(hw, rmask);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ return IXGBE_ERR_SWFW_SYNC;
}
+ ixgbe_release_swfw_sync_semaphore(hw);
-out:
- return ret_val;
+ return IXGBE_ERR_SWFW_SYNC;
}
/**
@@ -829,13 +841,15 @@ out:
* Releases the SWFW semaphore through the SW_FW_SYNC register
* for the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
-void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
+ u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
u32 swfw_sync;
- u32 swmask = mask;
DEBUGFUNC("ixgbe_release_swfw_sync_X540");
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ swmask |= mask & IXGBE_GSSR_I2C_MASK;
ixgbe_get_swfw_sync_semaphore(hw);
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
@@ -843,10 +857,11 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
}
/**
- * ixgbe_get_nvm_semaphore - Get hardware semaphore
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
* @hw: pointer to hardware structure
*
* Sets the hardware semaphores so SW/FW can gain control of shared resources
@@ -904,7 +919,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
}
/**
- * ixgbe_release_nvm_semaphore - Release hardware semaphore
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
* @hw: pointer to hardware structure
*
* This function clears hardware semaphore bits.
diff --git a/sys/dev/ixgbe/ixgbe_x540.h b/sys/dev/ixgbe/ixgbe_x540.h
index c86048b..12da827 100644
--- a/sys/dev/ixgbe/ixgbe_x540.h
+++ b/sys/dev/ixgbe/ixgbe_x540.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2013, Intel Corporation
+ Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -55,11 +55,11 @@ s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
u16 *data);
s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
-u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
diff --git a/sys/dev/ixgbe/ixv.c b/sys/dev/ixgbe/ixv.c
deleted file mode 100644
index edbdfca..0000000
--- a/sys/dev/ixgbe/ixv.c
+++ /dev/null
@@ -1,4006 +0,0 @@
-/******************************************************************************
-
- Copyright (c) 2001-2013, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#include "opt_inet.h"
-#include "opt_inet6.h"
-#include "ixv.h"
-
-/*********************************************************************
- * Driver version
- *********************************************************************/
-char ixv_driver_version[] = "1.1.4";
-
-/*********************************************************************
- * PCI Device ID Table
- *
- * Used by probe to select devices to load on
- * Last field stores an index into ixv_strings
- * Last entry must be all 0s
- *
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
- *********************************************************************/
-
-static ixv_vendor_info_t ixv_vendor_info_array[] =
-{
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
- {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
- /* required last entry */
- {0, 0, 0, 0, 0}
-};
-
-/*********************************************************************
- * Table of branding strings
- *********************************************************************/
-
-static char *ixv_strings[] = {
- "Intel(R) PRO/10GbE Virtual Function Network Driver"
-};
-
-/*********************************************************************
- * Function prototypes
- *********************************************************************/
-static int ixv_probe(device_t);
-static int ixv_attach(device_t);
-static int ixv_detach(device_t);
-static int ixv_shutdown(device_t);
-#if __FreeBSD_version < 800000
-static void ixv_start(struct ifnet *);
-static void ixv_start_locked(struct tx_ring *, struct ifnet *);
-#else
-static int ixv_mq_start(struct ifnet *, struct mbuf *);
-static int ixv_mq_start_locked(struct ifnet *,
- struct tx_ring *, struct mbuf *);
-static void ixv_qflush(struct ifnet *);
-#endif
-static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
-static void ixv_init(void *);
-static void ixv_init_locked(struct adapter *);
-static void ixv_stop(void *);
-static void ixv_media_status(struct ifnet *, struct ifmediareq *);
-static int ixv_media_change(struct ifnet *);
-static void ixv_identify_hardware(struct adapter *);
-static int ixv_allocate_pci_resources(struct adapter *);
-static int ixv_allocate_msix(struct adapter *);
-static int ixv_allocate_queues(struct adapter *);
-static int ixv_setup_msix(struct adapter *);
-static void ixv_free_pci_resources(struct adapter *);
-static void ixv_local_timer(void *);
-static void ixv_setup_interface(device_t, struct adapter *);
-static void ixv_config_link(struct adapter *);
-
-static int ixv_allocate_transmit_buffers(struct tx_ring *);
-static int ixv_setup_transmit_structures(struct adapter *);
-static void ixv_setup_transmit_ring(struct tx_ring *);
-static void ixv_initialize_transmit_units(struct adapter *);
-static void ixv_free_transmit_structures(struct adapter *);
-static void ixv_free_transmit_buffers(struct tx_ring *);
-
-static int ixv_allocate_receive_buffers(struct rx_ring *);
-static int ixv_setup_receive_structures(struct adapter *);
-static int ixv_setup_receive_ring(struct rx_ring *);
-static void ixv_initialize_receive_units(struct adapter *);
-static void ixv_free_receive_structures(struct adapter *);
-static void ixv_free_receive_buffers(struct rx_ring *);
-
-static void ixv_enable_intr(struct adapter *);
-static void ixv_disable_intr(struct adapter *);
-static bool ixv_txeof(struct tx_ring *);
-static bool ixv_rxeof(struct ix_queue *, int);
-static void ixv_rx_checksum(u32, struct mbuf *, u32);
-static void ixv_set_multi(struct adapter *);
-static void ixv_update_link_status(struct adapter *);
-static void ixv_refresh_mbufs(struct rx_ring *, int);
-static int ixv_xmit(struct tx_ring *, struct mbuf **);
-static int ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
-static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
-static int ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
-static int ixv_dma_malloc(struct adapter *, bus_size_t,
- struct ixv_dma_alloc *, int);
-static void ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
-static void ixv_add_rx_process_limit(struct adapter *, const char *,
- const char *, int *, int);
-static bool ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
-static bool ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
-static void ixv_set_ivar(struct adapter *, u8, u8, s8);
-static void ixv_configure_ivars(struct adapter *);
-static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
-
-static void ixv_setup_vlan_support(struct adapter *);
-static void ixv_register_vlan(void *, struct ifnet *, u16);
-static void ixv_unregister_vlan(void *, struct ifnet *, u16);
-
-static void ixv_save_stats(struct adapter *);
-static void ixv_init_stats(struct adapter *);
-static void ixv_update_stats(struct adapter *);
-
-static __inline void ixv_rx_discard(struct rx_ring *, int);
-static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
- struct mbuf *, u32);
-
-/* The MSI/X Interrupt handlers */
-static void ixv_msix_que(void *);
-static void ixv_msix_mbx(void *);
-
-/* Deferred interrupt tasklets */
-static void ixv_handle_que(void *, int);
-static void ixv_handle_mbx(void *, int);
-
-/*********************************************************************
- * FreeBSD Device Interface Entry Points
- *********************************************************************/
-
-static device_method_t ixv_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, ixv_probe),
- DEVMETHOD(device_attach, ixv_attach),
- DEVMETHOD(device_detach, ixv_detach),
- DEVMETHOD(device_shutdown, ixv_shutdown),
- DEVMETHOD_END
-};
-
-static driver_t ixv_driver = {
- "ix", ixv_methods, sizeof(struct adapter),
-};
-
-extern devclass_t ixgbe_devclass;
-DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
-MODULE_DEPEND(ixv, pci, 1, 1, 1);
-MODULE_DEPEND(ixv, ether, 1, 1, 1);
-
-/*
-** TUNEABLE PARAMETERS:
-*/
-
-/*
-** AIM: Adaptive Interrupt Moderation
-** which means that the interrupt rate
-** is varied over time based on the
-** traffic for that interrupt vector
-*/
-static int ixv_enable_aim = FALSE;
-TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
-
-/* How many packets rxeof tries to clean at a time */
-static int ixv_rx_process_limit = 128;
-TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
-
-/* Flow control setting, default to full */
-static int ixv_flow_control = ixgbe_fc_full;
-TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
-
-/*
- * Header split: this causes the hardware to DMA
- * the header into a separate mbuf from the payload,
- * it can be a performance win in some workloads, but
- * in others it actually hurts; it's off by default.
- */
-static int ixv_header_split = FALSE;
-TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
-
-/*
-** Number of TX descriptors per ring,
-** setting higher than RX as this seems
-** the better performing choice.
-*/
-static int ixv_txd = DEFAULT_TXD;
-TUNABLE_INT("hw.ixv.txd", &ixv_txd);
-
-/* Number of RX descriptors per ring */
-static int ixv_rxd = DEFAULT_RXD;
-TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
-
-/*
-** Shadow VFTA table, this is needed because
-** the real filter table gets cleared during
-** a soft reset and we need to repopulate it.
-*/
-static u32 ixv_shadow_vfta[VFTA_SIZE];
-
-/*********************************************************************
- * Device identification routine
- *
- * ixv_probe determines if the driver should be loaded on
- * adapter based on PCI vendor/device id of the adapter.
- *
- * return BUS_PROBE_DEFAULT on success, positive on failure
- *********************************************************************/
-
-static int
-ixv_probe(device_t dev)
-{
- ixv_vendor_info_t *ent;
-
- u16 pci_vendor_id = 0;
- u16 pci_device_id = 0;
- u16 pci_subvendor_id = 0;
- u16 pci_subdevice_id = 0;
- char adapter_name[256];
-
-
- pci_vendor_id = pci_get_vendor(dev);
- if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
- return (ENXIO);
-
- pci_device_id = pci_get_device(dev);
- pci_subvendor_id = pci_get_subvendor(dev);
- pci_subdevice_id = pci_get_subdevice(dev);
-
- ent = ixv_vendor_info_array;
- while (ent->vendor_id != 0) {
- if ((pci_vendor_id == ent->vendor_id) &&
- (pci_device_id == ent->device_id) &&
-
- ((pci_subvendor_id == ent->subvendor_id) ||
- (ent->subvendor_id == 0)) &&
-
- ((pci_subdevice_id == ent->subdevice_id) ||
- (ent->subdevice_id == 0))) {
- sprintf(adapter_name, "%s, Version - %s",
- ixv_strings[ent->index],
- ixv_driver_version);
- device_set_desc_copy(dev, adapter_name);
- return (BUS_PROBE_DEFAULT);
- }
- ent++;
- }
- return (ENXIO);
-}
-
-/*********************************************************************
- * Device initialization routine
- *
- * The attach entry point is called when the driver is being loaded.
- * This routine identifies the type of hardware, allocates all resources
- * and initializes the hardware.
- *
- * return 0 on success, positive on failure
- *********************************************************************/
-
-static int
-ixv_attach(device_t dev)
-{
- struct adapter *adapter;
- struct ixgbe_hw *hw;
- int error = 0;
-
- INIT_DEBUGOUT("ixv_attach: begin");
-
- /* Allocate, clear, and link in our adapter structure */
- adapter = device_get_softc(dev);
- adapter->dev = adapter->osdep.dev = dev;
- hw = &adapter->hw;
-
- /* Core Lock Init*/
- IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
-
- /* SYSCTL APIs */
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixv_sysctl_stats, "I", "Statistics");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
- adapter, 0, ixv_set_flowcntl, "I", "Flow Control");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "enable_aim", CTLFLAG_RW,
- &ixv_enable_aim, 1, "Interrupt Moderation");
-
- /* Set up the timer callout */
- callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
-
- /* Determine hardware revision */
- ixv_identify_hardware(adapter);
-
- /* Do base PCI setup - map BAR0 */
- if (ixv_allocate_pci_resources(adapter)) {
- device_printf(dev, "Allocation of PCI resources failed\n");
- error = ENXIO;
- goto err_out;
- }
-
- /* Do descriptor calc and sanity checks */
- if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
- ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
- device_printf(dev, "TXD config issue, using default!\n");
- adapter->num_tx_desc = DEFAULT_TXD;
- } else
- adapter->num_tx_desc = ixv_txd;
-
- if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
- ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
- device_printf(dev, "RXD config issue, using default!\n");
- adapter->num_rx_desc = DEFAULT_RXD;
- } else
- adapter->num_rx_desc = ixv_rxd;
-
- /* Allocate our TX/RX Queues */
- if (ixv_allocate_queues(adapter)) {
- error = ENOMEM;
- goto err_out;
- }
-
- /*
- ** Initialize the shared code: its
- ** at this point the mac type is set.
- */
- error = ixgbe_init_shared_code(hw);
- if (error) {
- device_printf(dev,"Shared Code Initialization Failure\n");
- error = EIO;
- goto err_late;
- }
-
- /* Setup the mailbox */
- ixgbe_init_mbx_params_vf(hw);
-
- ixgbe_reset_hw(hw);
-
- /* Get Hardware Flow Control setting */
- hw->fc.requested_mode = ixgbe_fc_full;
- hw->fc.pause_time = IXV_FC_PAUSE;
- hw->fc.low_water[0] = IXV_FC_LO;
- hw->fc.high_water[0] = IXV_FC_HI;
- hw->fc.send_xon = TRUE;
-
- error = ixgbe_init_hw(hw);
- if (error) {
- device_printf(dev,"Hardware Initialization Failure\n");
- error = EIO;
- goto err_late;
- }
-
- error = ixv_allocate_msix(adapter);
- if (error)
- goto err_late;
-
- /* Setup OS specific network interface */
- ixv_setup_interface(dev, adapter);
-
- /* Sysctl for limiting the amount of work done in the taskqueue */
- ixv_add_rx_process_limit(adapter, "rx_processing_limit",
- "max number of rx packets to process", &adapter->rx_process_limit,
- ixv_rx_process_limit);
-
- /* Do the stats setup */
- ixv_save_stats(adapter);
- ixv_init_stats(adapter);
-
- /* Register for VLAN events */
- adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
- ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
- adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
- ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
-
- INIT_DEBUGOUT("ixv_attach: end");
- return (0);
-
-err_late:
- ixv_free_transmit_structures(adapter);
- ixv_free_receive_structures(adapter);
-err_out:
- ixv_free_pci_resources(adapter);
- return (error);
-
-}
-
-/*********************************************************************
- * Device removal routine
- *
- * The detach entry point is called when the driver is being removed.
- * This routine stops the adapter and deallocates all the resources
- * that were allocated for driver operation.
- *
- * return 0 on success, positive on failure
- *********************************************************************/
-
-static int
-ixv_detach(device_t dev)
-{
- struct adapter *adapter = device_get_softc(dev);
- struct ix_queue *que = adapter->queues;
-
- INIT_DEBUGOUT("ixv_detach: begin");
-
-	/* Make sure VLANs are not using the driver */
-	if (adapter->ifp->if_vlantrunk != NULL) {
-		device_printf(dev, "VLANs in use, detach first\n");
- return (EBUSY);
- }
-
- IXV_CORE_LOCK(adapter);
- ixv_stop(adapter);
- IXV_CORE_UNLOCK(adapter);
-
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- if (que->tq) {
- taskqueue_drain(que->tq, &que->que_task);
- taskqueue_free(que->tq);
- }
- }
-
- /* Drain the Link queue */
- if (adapter->tq) {
- taskqueue_drain(adapter->tq, &adapter->mbx_task);
- taskqueue_free(adapter->tq);
- }
-
- /* Unregister VLAN events */
- if (adapter->vlan_attach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
- if (adapter->vlan_detach != NULL)
- EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
-
- ether_ifdetach(adapter->ifp);
- callout_drain(&adapter->timer);
- ixv_free_pci_resources(adapter);
- bus_generic_detach(dev);
- if_free(adapter->ifp);
-
- ixv_free_transmit_structures(adapter);
- ixv_free_receive_structures(adapter);
-
- IXV_CORE_LOCK_DESTROY(adapter);
- return (0);
-}
-
-/*********************************************************************
- *
- * Shutdown entry point
- *
- **********************************************************************/
-static int
-ixv_shutdown(device_t dev)
-{
- struct adapter *adapter = device_get_softc(dev);
- IXV_CORE_LOCK(adapter);
- ixv_stop(adapter);
- IXV_CORE_UNLOCK(adapter);
- return (0);
-}
-
-#if __FreeBSD_version < 800000
-/*********************************************************************
- * Transmit entry point
- *
- * ixv_start is called by the stack to initiate a transmit.
- * The driver will remain in this routine as long as there are
- * packets to transmit and transmit resources are available.
- * In case resources are not available, the stack is notified and
- * the packet is requeued.
- **********************************************************************/
-static void
-ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
-{
- struct mbuf *m_head;
- struct adapter *adapter = txr->adapter;
-
- IXV_TX_LOCK_ASSERT(txr);
-
- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING)
- return;
- if (!adapter->link_active)
- return;
-
- while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
-
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
- if (m_head == NULL)
- break;
-
- if (ixv_xmit(txr, &m_head)) {
- if (m_head == NULL)
- break;
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
- break;
- }
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, m_head);
-
- /* Set watchdog on */
- txr->watchdog_check = TRUE;
- txr->watchdog_time = ticks;
-
- }
- return;
-}
-
-/*
- * Legacy TX start - called by the stack; this
- * always uses the first tx ring and should
- * not be used with multiqueue tx enabled.
- */
-static void
-ixv_start(struct ifnet *ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr = adapter->tx_rings;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXV_TX_LOCK(txr);
- ixv_start_locked(txr, ifp);
- IXV_TX_UNLOCK(txr);
- }
- return;
-}
-
-#else
-
-/*
-** Multiqueue Transmit driver
-**
-*/
-static int
-ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ix_queue *que;
- struct tx_ring *txr;
- int i = 0, err = 0;
-
- /* Which queue to use */
- if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
- i = m->m_pkthdr.flowid % adapter->num_queues;
-
- txr = &adapter->tx_rings[i];
- que = &adapter->queues[i];
-
- if (IXV_TX_TRYLOCK(txr)) {
- err = ixv_mq_start_locked(ifp, txr, m);
- IXV_TX_UNLOCK(txr);
- } else {
- err = drbr_enqueue(ifp, txr->br, m);
- taskqueue_enqueue(que->tq, &que->que_task);
- }
-
- return (err);
-}
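-
-/*
-** Example (hypothetical hash value): with the flowid selection above,
-** a hash of 0x2657 over 4 queues would map to ring 0x2657 % 4 = 3;
-** since this VF driver sets num_queues to 1, every flow in practice
-** lands on ring 0.
-*/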
-
-static int
-ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
-{
- struct adapter *adapter = txr->adapter;
- struct mbuf *next;
- int enqueued, err = 0;
-
- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING || adapter->link_active == 0) {
- if (m != NULL)
- err = drbr_enqueue(ifp, txr->br, m);
- return (err);
- }
-
- /* Do a clean if descriptors are low */
- if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
- ixv_txeof(txr);
-
- enqueued = 0;
- if (m != NULL) {
- err = drbr_enqueue(ifp, txr->br, m);
- if (err) {
- return (err);
- }
- }
- /* Process the queue */
- while ((next = drbr_peek(ifp, txr->br)) != NULL) {
- if ((err = ixv_xmit(txr, &next)) != 0) {
- if (next == NULL) {
- drbr_advance(ifp, txr->br);
- } else {
- drbr_putback(ifp, txr->br, next);
- }
- break;
- }
- drbr_advance(ifp, txr->br);
- enqueued++;
- if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
- if (next->m_flags & M_MCAST)
- if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
- /* Send a copy of the frame to the BPF listener */
- ETHER_BPF_MTAP(ifp, next);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
- if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- break;
- }
- }
-
- if (enqueued > 0) {
- /* Set watchdog on */
- txr->watchdog_check = TRUE;
- txr->watchdog_time = ticks;
- }
-
- return (err);
-}
-
-/*
-** Flush all ring buffers
-*/
-static void
-ixv_qflush(struct ifnet *ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr = adapter->tx_rings;
- struct mbuf *m;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IXV_TX_LOCK(txr);
- while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
- m_freem(m);
- IXV_TX_UNLOCK(txr);
- }
- if_qflush(ifp);
-}
-
-#endif
-
-/*********************************************************************
- * Ioctl entry point
- *
- * ixv_ioctl is called when the user wants to configure the
- * interface.
- *
- * return 0 on success, positive on failure
- **********************************************************************/
-
-static int
-ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ifreq *ifr = (struct ifreq *) data;
-#if defined(INET) || defined(INET6)
- struct ifaddr *ifa = (struct ifaddr *) data;
- bool avoid_reset = FALSE;
-#endif
- int error = 0;
-
- switch (command) {
-
- case SIOCSIFADDR:
-#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET)
- avoid_reset = TRUE;
-#endif
-#ifdef INET6
- if (ifa->ifa_addr->sa_family == AF_INET6)
- avoid_reset = TRUE;
-#endif
-#if defined(INET) || defined(INET6)
- /*
- ** Calling init results in link renegotiation,
- ** so we avoid doing it when possible.
- */
- if (avoid_reset) {
- ifp->if_flags |= IFF_UP;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- ixv_init(adapter);
- if (!(ifp->if_flags & IFF_NOARP))
- arp_ifinit(ifp, ifa);
- } else
- error = ether_ioctl(ifp, command, data);
- break;
-#endif
- case SIOCSIFMTU:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
- if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
- error = EINVAL;
- } else {
- IXV_CORE_LOCK(adapter);
- ifp->if_mtu = ifr->ifr_mtu;
- adapter->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
- ixv_init_locked(adapter);
- IXV_CORE_UNLOCK(adapter);
- }
- break;
- case SIOCSIFFLAGS:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
- IXV_CORE_LOCK(adapter);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- ixv_init_locked(adapter);
- } else
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixv_stop(adapter);
- adapter->if_flags = ifp->if_flags;
- IXV_CORE_UNLOCK(adapter);
- break;
- case SIOCADDMULTI:
- case SIOCDELMULTI:
- IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXV_CORE_LOCK(adapter);
- ixv_disable_intr(adapter);
- ixv_set_multi(adapter);
- ixv_enable_intr(adapter);
- IXV_CORE_UNLOCK(adapter);
- }
- break;
- case SIOCSIFMEDIA:
- case SIOCGIFMEDIA:
- IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
- error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
- break;
- case SIOCSIFCAP:
- {
- int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
- IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
- if (mask & IFCAP_HWCSUM)
- ifp->if_capenable ^= IFCAP_HWCSUM;
- if (mask & IFCAP_TSO4)
- ifp->if_capenable ^= IFCAP_TSO4;
- if (mask & IFCAP_LRO)
- ifp->if_capenable ^= IFCAP_LRO;
- if (mask & IFCAP_VLAN_HWTAGGING)
- ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXV_CORE_LOCK(adapter);
- ixv_init_locked(adapter);
- IXV_CORE_UNLOCK(adapter);
- }
- VLAN_CAPABILITIES(ifp);
- break;
- }
-
- default:
- IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
- error = ether_ioctl(ifp, command, data);
- break;
- }
-
- return (error);
-}
-
-/*********************************************************************
- * Init entry point
- *
- * This routine is used in two ways. It is used by the stack as
- * the init entry point in the network interface structure. It is also used
- * by the driver as a hw/sw initialization routine to get to a
- * consistent state.
- *
- * return 0 on success, positive on failure
- **********************************************************************/
-#define IXGBE_MHADD_MFS_SHIFT 16
-
-static void
-ixv_init_locked(struct adapter *adapter)
-{
- struct ifnet *ifp = adapter->ifp;
- device_t dev = adapter->dev;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 mhadd, gpie;
-
- INIT_DEBUGOUT("ixv_init: begin");
- mtx_assert(&adapter->core_mtx, MA_OWNED);
- hw->adapter_stopped = FALSE;
- ixgbe_stop_adapter(hw);
- callout_stop(&adapter->timer);
-
- /* reprogram the RAR[0] in case user changed it. */
- ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
- /* Get the latest mac address, User can use a LAA */
- bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
- IXGBE_ETH_LENGTH_OF_ADDRESS);
- ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
- hw->addr_ctrl.rar_used_count = 1;
-
- /* Prepare transmit descriptors and buffers */
- if (ixv_setup_transmit_structures(adapter)) {
- device_printf(dev,"Could not setup transmit structures\n");
- ixv_stop(adapter);
- return;
- }
-
- ixgbe_reset_hw(hw);
- ixv_initialize_transmit_units(adapter);
-
- /* Setup Multicast table */
- ixv_set_multi(adapter);
-
- /*
- ** Determine the correct mbuf pool
- ** for doing jumbo/headersplit
- */
- if (ifp->if_mtu > ETHERMTU)
- adapter->rx_mbuf_sz = MJUMPAGESIZE;
- else
- adapter->rx_mbuf_sz = MCLBYTES;
-
- /* Prepare receive descriptors and buffers */
- if (ixv_setup_receive_structures(adapter)) {
- device_printf(dev,"Could not setup receive structures\n");
- ixv_stop(adapter);
- return;
- }
-
- /* Configure RX settings */
- ixv_initialize_receive_units(adapter);
-
- /* Enable Enhanced MSIX mode */
- gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
- gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
- gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
- /* Set the various hardware offload abilities */
- ifp->if_hwassist = 0;
- if (ifp->if_capenable & IFCAP_TSO4)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM) {
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
-#if __FreeBSD_version >= 800000
- ifp->if_hwassist |= CSUM_SCTP;
-#endif
- }
-
- /* Set MTU size */
- if (ifp->if_mtu > ETHERMTU) {
- mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
- mhadd &= ~IXGBE_MHADD_MFS_MASK;
- mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
- IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
- }
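-	/*
-	** Example (hypothetical MTU): a 9000-byte MTU yields a
-	** max_frame_size of 9018 (MTU + 14-byte Ethernet header +
-	** 4-byte CRC), which the code above places into MHADD
-	** bits 31:16 via the 16-bit MFS shift.
-	*/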
-
- /* Set up VLAN offload and filter */
- ixv_setup_vlan_support(adapter);
-
- callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
-
- /* Set up MSI/X routing */
- ixv_configure_ivars(adapter);
-
- /* Set up auto-mask */
- IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
-
- /* Set moderation on the Link interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
-
- /* Stats init */
- ixv_init_stats(adapter);
-
- /* Config/Enable Link */
- ixv_config_link(adapter);
-
- /* And now turn on interrupts */
- ixv_enable_intr(adapter);
-
- /* Now inform the stack we're ready */
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
- return;
-}
-
-static void
-ixv_init(void *arg)
-{
- struct adapter *adapter = arg;
-
- IXV_CORE_LOCK(adapter);
- ixv_init_locked(adapter);
- IXV_CORE_UNLOCK(adapter);
- return;
-}
-
-
-/*
-**
-** MSIX Interrupt Handlers and Tasklets
-**
-*/
-
-static inline void
-ixv_enable_queue(struct adapter *adapter, u32 vector)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 queue = 1 << vector;
- u32 mask;
-
- mask = (IXGBE_EIMS_RTX_QUEUE & queue);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-}
-
-static inline void
-ixv_disable_queue(struct adapter *adapter, u32 vector)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u64 queue = (u64)(1 << vector);
- u32 mask;
-
- mask = (IXGBE_EIMS_RTX_QUEUE & queue);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
-}
-
-static inline void
-ixv_rearm_queues(struct adapter *adapter, u64 queues)
-{
- u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
-}
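-
-/*
-** Example (hypothetical vector number): for MSIX vector 3 the helpers
-** above form the bit 1 << 3 = 0x8, mask it against
-** IXGBE_EIMS_RTX_QUEUE, and write the result to VTEIMS, VTEIMC or
-** VTEICS to enable, disable or rearm that queue's interrupt.
-*/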
-
-
-static void
-ixv_handle_que(void *context, int pending)
-{
- struct ix_queue *que = context;
- struct adapter *adapter = que->adapter;
- struct tx_ring *txr = que->txr;
- struct ifnet *ifp = adapter->ifp;
- bool more;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = ixv_rxeof(que, adapter->rx_process_limit);
- IXV_TX_LOCK(txr);
- ixv_txeof(txr);
-#if __FreeBSD_version >= 800000
- if (!drbr_empty(ifp, txr->br))
- ixv_mq_start_locked(ifp, txr, NULL);
-#else
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- ixv_start_locked(txr, ifp);
-#endif
- IXV_TX_UNLOCK(txr);
- if (more) {
- taskqueue_enqueue(que->tq, &que->que_task);
- return;
- }
- }
-
- /* Reenable this interrupt */
- ixv_enable_queue(adapter, que->msix);
- return;
-}
-
-/*********************************************************************
- *
- *  MSIX Queue Interrupt Service routine
- *
- **********************************************************************/
-void
-ixv_msix_que(void *arg)
-{
- struct ix_queue *que = arg;
- struct adapter *adapter = que->adapter;
- struct tx_ring *txr = que->txr;
- struct rx_ring *rxr = que->rxr;
- bool more_tx, more_rx;
- u32 newitr = 0;
-
- ixv_disable_queue(adapter, que->msix);
- ++que->irqs;
-
- more_rx = ixv_rxeof(que, adapter->rx_process_limit);
-
- IXV_TX_LOCK(txr);
- more_tx = ixv_txeof(txr);
- /*
- ** Make certain that if the stack
- ** has anything queued the task gets
- ** scheduled to handle it.
- */
-#if __FreeBSD_version < 800000
- if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
-#else
- if (!drbr_empty(adapter->ifp, txr->br))
-#endif
- more_tx = 1;
- IXV_TX_UNLOCK(txr);
-
- /* Do AIM now? */
-
- if (ixv_enable_aim == FALSE)
- goto no_calc;
- /*
- ** Do Adaptive Interrupt Moderation:
- ** - Write out last calculated setting
- ** - Calculate based on average size over
- ** the last interval.
- */
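-	/*
-	** Worked example (hypothetical traffic): 96000 bytes over 64
-	** packets averages 1500 bytes; +24 for framing gives 1524,
-	** which is above the 1200 mid-range cutoff, so the final
-	** setting is 1524 / 2 = 762, mirrored into the upper 16 bits.
-	*/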
- if (que->eitr_setting)
- IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_VTEITR(que->msix),
- que->eitr_setting);
-
- que->eitr_setting = 0;
-
- /* Idle, do nothing */
- if ((txr->bytes == 0) && (rxr->bytes == 0))
- goto no_calc;
-
- if ((txr->bytes) && (txr->packets))
- newitr = txr->bytes/txr->packets;
- if ((rxr->bytes) && (rxr->packets))
- newitr = max(newitr,
- (rxr->bytes / rxr->packets));
- newitr += 24; /* account for hardware frame, crc */
-
- /* set an upper boundary */
- newitr = min(newitr, 3000);
-
- /* Be nice to the mid range */
- if ((newitr > 300) && (newitr < 1200))
- newitr = (newitr / 3);
- else
- newitr = (newitr / 2);
-
- newitr |= newitr << 16;
-
- /* save for next interrupt */
- que->eitr_setting = newitr;
-
- /* Reset state */
- txr->bytes = 0;
- txr->packets = 0;
- rxr->bytes = 0;
- rxr->packets = 0;
-
-no_calc:
- if (more_tx || more_rx)
- taskqueue_enqueue(que->tq, &que->que_task);
- else /* Reenable this interrupt */
- ixv_enable_queue(adapter, que->msix);
- return;
-}
-
-static void
-ixv_msix_mbx(void *arg)
-{
- struct adapter *adapter = arg;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 reg;
-
- ++adapter->mbx_irq;
-
- /* First get the cause */
- reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
- /* Clear interrupt with write */
- IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
-
- /* Link status change */
- if (reg & IXGBE_EICR_LSC)
- taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
-
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
- return;
-}
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called whenever the user queries the status of
- * the interface using ifconfig.
- *
- **********************************************************************/
-static void
-ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
-{
- struct adapter *adapter = ifp->if_softc;
-
- INIT_DEBUGOUT("ixv_media_status: begin");
- IXV_CORE_LOCK(adapter);
- ixv_update_link_status(adapter);
-
- ifmr->ifm_status = IFM_AVALID;
- ifmr->ifm_active = IFM_ETHER;
-
- if (!adapter->link_active) {
- IXV_CORE_UNLOCK(adapter);
- return;
- }
-
- ifmr->ifm_status |= IFM_ACTIVE;
-
- switch (adapter->link_speed) {
- case IXGBE_LINK_SPEED_1GB_FULL:
- ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
- break;
- case IXGBE_LINK_SPEED_10GB_FULL:
- ifmr->ifm_active |= IFM_FDX;
- break;
- }
-
- IXV_CORE_UNLOCK(adapter);
-
- return;
-}
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called when the user changes speed/duplex using
- * the media/mediaopt options with ifconfig.
- *
- **********************************************************************/
-static int
-ixv_media_change(struct ifnet * ifp)
-{
- struct adapter *adapter = ifp->if_softc;
- struct ifmedia *ifm = &adapter->media;
-
- INIT_DEBUGOUT("ixv_media_change: begin");
-
- if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
- return (EINVAL);
-
- switch (IFM_SUBTYPE(ifm->ifm_media)) {
- case IFM_AUTO:
- break;
- default:
- device_printf(adapter->dev, "Only auto media type\n");
- return (EINVAL);
- }
-
- return (0);
-}
-
-/*********************************************************************
- *
- * This routine maps the mbufs to tx descriptors, allowing the
- * TX engine to transmit the packets.
- * - return 0 on success, positive on failure
- *
- **********************************************************************/
-
-static int
-ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
-{
- struct adapter *adapter = txr->adapter;
- u32 olinfo_status = 0, cmd_type_len;
- u32 paylen = 0;
- int i, j, error, nsegs;
- int first, last = 0;
- struct mbuf *m_head;
- bus_dma_segment_t segs[32];
- bus_dmamap_t map;
- struct ixv_tx_buf *txbuf, *txbuf_mapped;
- union ixgbe_adv_tx_desc *txd = NULL;
-
- m_head = *m_headp;
-
- /* Basic descriptor defines */
- cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
-
- if (m_head->m_flags & M_VLANTAG)
- cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
-
- /*
- * Important to capture the first descriptor
- * used because it will contain the index of
- * the one we tell the hardware to report back
- */
- first = txr->next_avail_desc;
- txbuf = &txr->tx_buffers[first];
- txbuf_mapped = txbuf;
- map = txbuf->map;
-
- /*
- * Map the packet for DMA.
- */
- error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
- *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
- if (error == EFBIG) {
- struct mbuf *m;
-
- m = m_defrag(*m_headp, M_NOWAIT);
- if (m == NULL) {
- adapter->mbuf_defrag_failed++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (ENOBUFS);
- }
- *m_headp = m;
-
- /* Try it again */
- error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
- *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
- if (error == ENOMEM) {
- adapter->no_tx_dma_setup++;
- return (error);
- } else if (error != 0) {
- adapter->no_tx_dma_setup++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
- } else if (error == ENOMEM) {
- adapter->no_tx_dma_setup++;
- return (error);
- } else if (error != 0) {
- adapter->no_tx_dma_setup++;
- m_freem(*m_headp);
- *m_headp = NULL;
- return (error);
- }
-
- /* Make certain there are enough descriptors */
- if (nsegs > txr->tx_avail - 2) {
- txr->no_desc_avail++;
- error = ENOBUFS;
- goto xmit_fail;
- }
- m_head = *m_headp;
-
- /*
-	** Set up the appropriate offload context;
-	** this becomes the first descriptor of
-	** the packet.
- */
- if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
- if (ixv_tso_setup(txr, m_head, &paylen)) {
- cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
- olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
- olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
- olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
- ++adapter->tso_tx;
- } else
- return (ENXIO);
- } else if (ixv_tx_ctx_setup(txr, m_head))
- olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
-
- /* Record payload length */
- if (paylen == 0)
- olinfo_status |= m_head->m_pkthdr.len <<
- IXGBE_ADVTXD_PAYLEN_SHIFT;
-
- i = txr->next_avail_desc;
- for (j = 0; j < nsegs; j++) {
- bus_size_t seglen;
- bus_addr_t segaddr;
-
- txbuf = &txr->tx_buffers[i];
- txd = &txr->tx_base[i];
- seglen = segs[j].ds_len;
- segaddr = htole64(segs[j].ds_addr);
-
- txd->read.buffer_addr = segaddr;
- txd->read.cmd_type_len = htole32(txr->txd_cmd |
-	    cmd_type_len | seglen);
- txd->read.olinfo_status = htole32(olinfo_status);
- last = i; /* descriptor that will get completion IRQ */
-
- if (++i == adapter->num_tx_desc)
- i = 0;
-
- txbuf->m_head = NULL;
- txbuf->eop_index = -1;
- }
-
- txd->read.cmd_type_len |=
- htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
- txr->tx_avail -= nsegs;
- txr->next_avail_desc = i;
-
- txbuf->m_head = m_head;
- txr->tx_buffers[first].map = txbuf->map;
- txbuf->map = map;
- bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
-
- /* Set the index of the descriptor that will be marked done */
- txbuf = &txr->tx_buffers[first];
- txbuf->eop_index = last;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- /*
-	 * Advance the Transmit Descriptor Tail (TDT); this tells the
- * hardware that this frame is available to transmit.
- */
- ++txr->total_packets;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
-
- return (0);
-
-xmit_fail:
- bus_dmamap_unload(txr->txtag, txbuf->map);
- return (error);
-
-}
-
-
-/*********************************************************************
- * Multicast Update
- *
- * This routine is called whenever multicast address list is updated.
- *
- **********************************************************************/
-#define IXGBE_RAR_ENTRIES 16
-
-static void
-ixv_set_multi(struct adapter *adapter)
-{
- u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 *update_ptr;
- struct ifmultiaddr *ifma;
- int mcnt = 0;
- struct ifnet *ifp = adapter->ifp;
-
- IOCTL_DEBUGOUT("ixv_set_multi: begin");
-
-#if __FreeBSD_version < 800000
- IF_ADDR_LOCK(ifp);
-#else
- if_maddr_rlock(ifp);
-#endif
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
- &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
- IXGBE_ETH_LENGTH_OF_ADDRESS);
- mcnt++;
- }
-#if __FreeBSD_version < 800000
- IF_ADDR_UNLOCK(ifp);
-#else
- if_maddr_runlock(ifp);
-#endif
-
- update_ptr = mta;
-
- ixgbe_update_mc_addr_list(&adapter->hw,
- update_ptr, mcnt, ixv_mc_array_itr, TRUE);
-
- return;
-}
-
-/*
- * This is an iterator function needed by the multicast
- * shared code. It simply feeds the shared code routine the
- * addresses from the array built in ixv_set_multi(), one by one.
- */
-static u8 *
-ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
-{
- u8 *addr = *update_ptr;
- u8 *newptr;
- *vmdq = 0;
-
- newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
- *update_ptr = newptr;
- return addr;
-}
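-
-/*
-** Usage sketch (shared-code side assumed): ixgbe_update_mc_addr_list()
-** calls this iterator repeatedly; each call returns the current 6-byte
-** address and steps *update_ptr forward, until the mcnt addresses
-** gathered in ixv_set_multi() are consumed.
-*/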
-
-/*********************************************************************
- * Timer routine
- *
- * This routine checks for link status, updates statistics,
- * and runs the watchdog check.
- *
- **********************************************************************/
-
-static void
-ixv_local_timer(void *arg)
-{
- struct adapter *adapter = arg;
- device_t dev = adapter->dev;
- struct tx_ring *txr = adapter->tx_rings;
- int i;
-
- mtx_assert(&adapter->core_mtx, MA_OWNED);
-
- ixv_update_link_status(adapter);
-
- /* Stats Update */
- ixv_update_stats(adapter);
-
- /*
- * If the interface has been paused
- * then don't do the watchdog check
- */
- if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
- goto out;
- /*
- ** Check for time since any descriptor was cleaned
- */
- for (i = 0; i < adapter->num_queues; i++, txr++) {
- IXV_TX_LOCK(txr);
- if (txr->watchdog_check == FALSE) {
- IXV_TX_UNLOCK(txr);
- continue;
- }
- if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
- goto hung;
- IXV_TX_UNLOCK(txr);
- }
-out:
- ixv_rearm_queues(adapter, adapter->que_mask);
- callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
- return;
-
-hung:
- device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
- device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
- IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
- IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
-	device_printf(dev, "TX(%d) desc avail = %d, "
-	    "Next TX to Clean = %d\n",
- txr->me, txr->tx_avail, txr->next_to_clean);
- adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- adapter->watchdog_events++;
- IXV_TX_UNLOCK(txr);
- ixv_init_locked(adapter);
-}
-
-/*
-** Note: this routine updates the OS on the link state;
-** the real check of the hardware only happens with
-** a link interrupt.
-*/
-static void
-ixv_update_link_status(struct adapter *adapter)
-{
- struct ifnet *ifp = adapter->ifp;
- struct tx_ring *txr = adapter->tx_rings;
- device_t dev = adapter->dev;
-
-
- if (adapter->link_up){
- if (adapter->link_active == FALSE) {
- if (bootverbose)
- device_printf(dev,"Link is up %d Gbps %s \n",
- ((adapter->link_speed == 128)? 10:1),
- "Full Duplex");
- adapter->link_active = TRUE;
- if_link_state_change(ifp, LINK_STATE_UP);
- }
- } else { /* Link down */
- if (adapter->link_active == TRUE) {
- if (bootverbose)
- device_printf(dev,"Link is Down\n");
- if_link_state_change(ifp, LINK_STATE_DOWN);
- adapter->link_active = FALSE;
- for (int i = 0; i < adapter->num_queues;
- i++, txr++)
- txr->watchdog_check = FALSE;
- }
- }
-
- return;
-}
-
-
-/*********************************************************************
- *
- * This routine disables all traffic on the adapter by issuing a
- * global reset on the MAC and deallocates TX/RX buffers.
- *
- **********************************************************************/
-
-static void
-ixv_stop(void *arg)
-{
- struct ifnet *ifp;
- struct adapter *adapter = arg;
- struct ixgbe_hw *hw = &adapter->hw;
- ifp = adapter->ifp;
-
- mtx_assert(&adapter->core_mtx, MA_OWNED);
-
- INIT_DEBUGOUT("ixv_stop: begin\n");
- ixv_disable_intr(adapter);
-
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-
- ixgbe_reset_hw(hw);
- adapter->hw.adapter_stopped = FALSE;
- ixgbe_stop_adapter(hw);
- callout_stop(&adapter->timer);
-
- /* reprogram the RAR[0] in case user changed it. */
- ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
- return;
-}
-
-
-/*********************************************************************
- *
- * Determine hardware revision.
- *
- **********************************************************************/
-static void
-ixv_identify_hardware(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- u16 pci_cmd_word;
-
- /*
-	** Make sure BUSMASTER is set; on a VM under
-	** KVM it may not be, and that will break things.
- */
- pci_enable_busmaster(dev);
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
-
- /* Save off the information about this board */
- adapter->hw.vendor_id = pci_get_vendor(dev);
- adapter->hw.device_id = pci_get_device(dev);
- adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
- adapter->hw.subsystem_vendor_id =
- pci_read_config(dev, PCIR_SUBVEND_0, 2);
- adapter->hw.subsystem_device_id =
- pci_read_config(dev, PCIR_SUBDEV_0, 2);
-
- return;
-}
-
-/*********************************************************************
- *
- * Setup MSIX Interrupt resources and handlers
- *
- **********************************************************************/
-static int
-ixv_allocate_msix(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que = adapter->queues;
- int error, rid, vector = 0;
-
- for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
- rid = vector + 1;
- que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (que->res == NULL) {
- device_printf(dev,"Unable to allocate"
- " bus resource: que interrupt [%d]\n", vector);
- return (ENXIO);
- }
- /* Set the handler function */
- error = bus_setup_intr(dev, que->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixv_msix_que, que, &que->tag);
- if (error) {
- que->res = NULL;
- device_printf(dev, "Failed to register QUE handler");
- return (error);
- }
-#if __FreeBSD_version >= 800504
- bus_describe_intr(dev, que->res, que->tag, "que %d", i);
-#endif
- que->msix = vector;
- adapter->que_mask |= (u64)(1 << que->msix);
- /*
- ** Bind the msix vector, and thus the
- ** ring to the corresponding cpu.
- */
- if (adapter->num_queues > 1)
- bus_bind_intr(dev, que->res, i);
-
- TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
- que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
- device_get_nameunit(adapter->dev));
- }
-
- /* and Mailbox */
- rid = vector + 1;
- adapter->res = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
- if (!adapter->res) {
- device_printf(dev,"Unable to allocate"
- " bus resource: MBX interrupt [%d]\n", rid);
- return (ENXIO);
- }
- /* Set the mbx handler function */
- error = bus_setup_intr(dev, adapter->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixv_msix_mbx, adapter, &adapter->tag);
- if (error) {
- adapter->res = NULL;
- device_printf(dev, "Failed to register LINK handler");
- return (error);
- }
-#if __FreeBSD_version >= 800504
- bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
-#endif
- adapter->mbxvec = vector;
- /* Tasklets for Mailbox */
- TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
- adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
- taskqueue_thread_enqueue, &adapter->tq);
- taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
- device_get_nameunit(adapter->dev));
- /*
-	** Due to a broken design, QEMU will fail to properly
- ** enable the guest for MSIX unless the vectors in
- ** the table are all set up, so we must rewrite the
- ** ENABLE in the MSIX control register again at this
- ** point to cause it to successfully initialize us.
- */
- if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
- int msix_ctrl;
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
-
- return (0);
-}
-
-/*
- * Set up MSIX resources; note that the VF
- * device MUST use MSIX; there is no fallback.
- */
-static int
-ixv_setup_msix(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- int rid, want;
-
-
- /* First try MSI/X */
- rid = PCIR_BAR(3);
- adapter->msix_mem = bus_alloc_resource_any(dev,
- SYS_RES_MEMORY, &rid, RF_ACTIVE);
- if (adapter->msix_mem == NULL) {
- device_printf(adapter->dev,
- "Unable to map MSIX table \n");
- goto out;
- }
-
- /*
- ** Want two vectors: one for a queue,
-	** plus an additional one for the mailbox.
- */
- want = 2;
- if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) {
- device_printf(adapter->dev,
- "Using MSIX interrupts with %d vectors\n", want);
- return (want);
- }
- /* Release in case alloc was insufficient */
- pci_release_msi(dev);
-out:
- if (adapter->msix_mem != NULL) {
- bus_release_resource(dev, SYS_RES_MEMORY,
- rid, adapter->msix_mem);
- adapter->msix_mem = NULL;
- }
- device_printf(adapter->dev,"MSIX config error\n");
- return (ENXIO);
-}
-
-
-static int
-ixv_allocate_pci_resources(struct adapter *adapter)
-{
- int rid;
- device_t dev = adapter->dev;
-
- rid = PCIR_BAR(0);
- adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &rid, RF_ACTIVE);
-
- if (!(adapter->pci_mem)) {
- device_printf(dev,"Unable to allocate bus resource: memory\n");
- return (ENXIO);
- }
-
- adapter->osdep.mem_bus_space_tag =
- rman_get_bustag(adapter->pci_mem);
- adapter->osdep.mem_bus_space_handle =
- rman_get_bushandle(adapter->pci_mem);
- adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
-
- adapter->num_queues = 1;
- adapter->hw.back = &adapter->osdep;
-
- /*
-	** Now set up MSI/X; this should
-	** return the number of
-	** configured vectors.
- */
- adapter->msix = ixv_setup_msix(adapter);
- if (adapter->msix == ENXIO)
- return (ENXIO);
- else
- return (0);
-}
-
-static void
-ixv_free_pci_resources(struct adapter * adapter)
-{
- struct ix_queue *que = adapter->queues;
- device_t dev = adapter->dev;
- int rid, memrid;
-
- memrid = PCIR_BAR(MSIX_BAR);
-
- /*
- ** There is a slight possibility of a failure mode
- ** in attach that will result in entering this function
- ** before interrupt resources have been initialized, and
-	** in that case we do not want to execute the loops below.
-	** We can detect this reliably by the state of the adapter's
-	** res pointer.
- */
- if (adapter->res == NULL)
- goto mem;
-
- /*
- ** Release all msix queue resources:
- */
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- rid = que->msix + 1;
- if (que->tag != NULL) {
- bus_teardown_intr(dev, que->res, que->tag);
- que->tag = NULL;
- }
- if (que->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- }
-
-
- /* Clean the Legacy or Link interrupt last */
- if (adapter->mbxvec) /* we are doing MSIX */
- rid = adapter->mbxvec + 1;
- else
-		rid = (adapter->msix != 0) ? 1 : 0;
-
- if (adapter->tag != NULL) {
- bus_teardown_intr(dev, adapter->res, adapter->tag);
- adapter->tag = NULL;
- }
- if (adapter->res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
-
-mem:
- if (adapter->msix)
- pci_release_msi(dev);
-
- if (adapter->msix_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- memrid, adapter->msix_mem);
-
- if (adapter->pci_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(0), adapter->pci_mem);
-
- return;
-}
-
-/*********************************************************************
- *
- * Setup networking device structure and register an interface.
- *
- **********************************************************************/
-static void
-ixv_setup_interface(device_t dev, struct adapter *adapter)
-{
- struct ifnet *ifp;
-
- INIT_DEBUGOUT("ixv_setup_interface: begin");
-
- ifp = adapter->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
-		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_baudrate = 1000000000;
- ifp->if_init = ixv_init;
- ifp->if_softc = adapter;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ixv_ioctl;
-#if __FreeBSD_version >= 800000
- ifp->if_transmit = ixv_mq_start;
- ifp->if_qflush = ixv_qflush;
-#else
- ifp->if_start = ixv_start;
-#endif
- ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
-
- ether_ifattach(ifp, adapter->hw.mac.addr);
-
- adapter->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
-
- /*
- * Tell the upper layer(s) we support long frames.
- */
- ifp->if_hdrlen = sizeof(struct ether_vlan_header);
-
- ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU;
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
- | IFCAP_VLAN_HWTSO
- | IFCAP_VLAN_MTU;
- ifp->if_capenable = ifp->if_capabilities;
-
- /* Don't enable LRO by default */
- ifp->if_capabilities |= IFCAP_LRO;
-
- /*
- * Specify the media types supported by this adapter and register
- * callbacks to update media and link information
- */
- ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
- ixv_media_status);
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
-
- return;
-}
-
-static void
-ixv_config_link(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 autoneg, err = 0;
-
- if (hw->mac.ops.check_link)
- err = hw->mac.ops.check_link(hw, &autoneg,
- &adapter->link_up, FALSE);
- if (err)
- goto out;
-
- if (hw->mac.ops.setup_link)
- err = hw->mac.ops.setup_link(hw,
- autoneg, adapter->link_up);
-out:
- return;
-}
-
-/********************************************************************
- * Manage DMA'able memory.
- *******************************************************************/
-static void
-ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
-{
- if (error)
- return;
- *(bus_addr_t *) arg = segs->ds_addr;
- return;
-}
-
-static int
-ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
- struct ixv_dma_alloc *dma, int mapflags)
-{
- device_t dev = adapter->dev;
- int r;
-
- r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
- DBA_ALIGN, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- size, /* maxsize */
- 1, /* nsegments */
- size, /* maxsegsize */
- BUS_DMA_ALLOCNOW, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &dma->dma_tag);
- if (r != 0) {
- device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
- "error %u\n", r);
- goto fail_0;
- }
- r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
- BUS_DMA_NOWAIT, &dma->dma_map);
- if (r != 0) {
- device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
- "error %u\n", r);
- goto fail_1;
- }
- r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
- size,
- ixv_dmamap_cb,
- &dma->dma_paddr,
- mapflags | BUS_DMA_NOWAIT);
- if (r != 0) {
- device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
- "error %u\n", r);
- goto fail_2;
- }
- dma->dma_size = size;
- return (0);
-fail_2:
- bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
-fail_1:
- bus_dma_tag_destroy(dma->dma_tag);
-fail_0:
- dma->dma_tag = NULL;
- return (r);
-}
-
-static void
-ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
-{
- bus_dmamap_sync(dma->dma_tag, dma->dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(dma->dma_tag, dma->dma_map);
- bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
- bus_dma_tag_destroy(dma->dma_tag);
-}
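-
-/*
-** Typical usage, mirroring the calls made elsewhere in this file:
-** a descriptor ring is obtained with
-**	ixv_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT);
-** and released on teardown with
-**	ixv_dma_free(adapter, &txr->txdma);
-*/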
-
-
-/*********************************************************************
- *
- * Allocate memory for the transmit and receive rings, and then
- * the descriptors associated with each, called only once at attach.
- *
- **********************************************************************/
-static int
-ixv_allocate_queues(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ix_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize, error = 0;
- int txconf = 0, rxconf = 0;
-
- /* First allocate the top level queue structs */
- if (!(adapter->queues =
- (struct ix_queue *) malloc(sizeof(struct ix_queue) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto fail;
- }
-
-	/* Next allocate the TX ring struct memory */
- if (!(adapter->tx_rings =
- (struct tx_ring *) malloc(sizeof(struct tx_ring) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate TX ring memory\n");
- error = ENOMEM;
- goto tx_fail;
- }
-
- /* Next allocate the RX */
- if (!(adapter->rx_rings =
- (struct rx_ring *) malloc(sizeof(struct rx_ring) *
- adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate RX ring memory\n");
- error = ENOMEM;
- goto rx_fail;
- }
-
- /* For the ring itself */
- tsize = roundup2(adapter->num_tx_desc *
- sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
-
- /*
-	 * Now set up the TX queues; txconf is needed to handle the
-	 * possibility that things fail midcourse and we need to
-	 * undo the memory allocations gracefully.
- */
- for (int i = 0; i < adapter->num_queues; i++, txconf++) {
- /* Set up some basics */
- txr = &adapter->tx_rings[i];
- txr->adapter = adapter;
- txr->me = i;
-
- /* Initialize the TX side lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), txr->me);
- mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
-
- if (ixv_dma_malloc(adapter, tsize,
- &txr->txdma, BUS_DMA_NOWAIT)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
- txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
- bzero((void *)txr->tx_base, tsize);
-
- /* Now allocate transmit buffers for the ring */
- if (ixv_allocate_transmit_buffers(txr)) {
- device_printf(dev,
- "Critical Failure setting up transmit buffers\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
-#if __FreeBSD_version >= 800000
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
- M_WAITOK, &txr->tx_mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up buf ring\n");
- error = ENOMEM;
- goto err_tx_desc;
- }
-#endif
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
- for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
- rxr = &adapter->rx_rings[i];
- /* Set up some basics */
- rxr->adapter = adapter;
- rxr->me = i;
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), rxr->me);
- mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (ixv_dma_malloc(adapter, rsize,
- &rxr->rxdma, BUS_DMA_NOWAIT)) {
- device_printf(dev,
- "Unable to allocate RxDescriptor memory\n");
- error = ENOMEM;
- goto err_rx_desc;
- }
- rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
- bzero((void *)rxr->rx_base, rsize);
-
- /* Allocate receive buffers for the ring*/
- if (ixv_allocate_receive_buffers(rxr)) {
- device_printf(dev,
- "Critical Failure setting up receive buffers\n");
- error = ENOMEM;
- goto err_rx_desc;
- }
- }
-
- /*
- ** Finally set up the queue holding structs
- */
- for (int i = 0; i < adapter->num_queues; i++) {
- que = &adapter->queues[i];
- que->adapter = adapter;
- que->txr = &adapter->tx_rings[i];
- que->rxr = &adapter->rx_rings[i];
- }
-
- return (0);
-
-err_rx_desc:
- for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
- ixv_dma_free(adapter, &rxr->rxdma);
-err_tx_desc:
- for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
- ixv_dma_free(adapter, &txr->txdma);
- free(adapter->rx_rings, M_DEVBUF);
-rx_fail:
- free(adapter->tx_rings, M_DEVBUF);
-tx_fail:
- free(adapter->queues, M_DEVBUF);
-fail:
- return (error);
-}
-
-
-/*********************************************************************
- *
- * Allocate memory for tx_buffer structures. The tx_buffer stores all
- * the information needed to transmit a packet on the wire. This is
- *  called only once at attach; setup is done on every reset.
- *
- **********************************************************************/
-static int
-ixv_allocate_transmit_buffers(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- device_t dev = adapter->dev;
- struct ixv_tx_buf *txbuf;
- int error, i;
-
- /*
- * Setup DMA descriptor areas.
- */
- if ((error = bus_dma_tag_create(
- bus_get_dma_tag(adapter->dev), /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- IXV_TSO_SIZE, /* maxsize */
- 32, /* nsegments */
- PAGE_SIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &txr->txtag))) {
- device_printf(dev,"Unable to allocate TX DMA tag\n");
- goto fail;
- }
-
- if (!(txr->tx_buffers =
- (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
- adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate tx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- /* Create the descriptor buffer dma maps */
- txbuf = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
- error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
- if (error != 0) {
- device_printf(dev, "Unable to create TX DMA map\n");
- goto fail;
- }
- }
-
- return 0;
-fail:
-	/* Free everything; this handles the case where we failed partway */
- ixv_free_transmit_structures(adapter);
- return (error);
-}
-
-/*********************************************************************
- *
- * Initialize a transmit ring.
- *
- **********************************************************************/
-static void
-ixv_setup_transmit_ring(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ixv_tx_buf *txbuf;
- int i;
-
- /* Clear the old ring contents */
- IXV_TX_LOCK(txr);
- bzero((void *)txr->tx_base,
- (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
- /* Reset indices */
- txr->next_avail_desc = 0;
- txr->next_to_clean = 0;
-
- /* Free any existing tx buffers. */
- txbuf = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
- if (txbuf->m_head != NULL) {
- bus_dmamap_sync(txr->txtag, txbuf->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag, txbuf->map);
- m_freem(txbuf->m_head);
- txbuf->m_head = NULL;
- }
- /* Clear the EOP index */
- txbuf->eop_index = -1;
- }
-
- /* Set number of descriptors available */
- txr->tx_avail = adapter->num_tx_desc;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- IXV_TX_UNLOCK(txr);
-}
-
-/*********************************************************************
- *
- * Initialize all transmit rings.
- *
- **********************************************************************/
-static int
-ixv_setup_transmit_structures(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++)
- ixv_setup_transmit_ring(txr);
-
- return (0);
-}
-
-/*********************************************************************
- *
- * Enable transmit unit.
- *
- **********************************************************************/
-static void
-ixv_initialize_transmit_units(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
- struct ixgbe_hw *hw = &adapter->hw;
-
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- u64 tdba = txr->txdma.dma_paddr;
- u32 txctrl, txdctl;
-
- /* Set WTHRESH to 8, burst writeback */
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
- txdctl |= (8 << 16);
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
- /* Now enable */
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
-
- /* Set the HW Tx Head and Tail indices */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
-
- /* Setup Transmit Descriptor Cmd Settings */
- txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
- txr->watchdog_check = FALSE;
-
- /* Set Ring parameters */
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
- (tdba & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
- adapter->num_tx_desc *
- sizeof(struct ixgbe_legacy_tx_desc));
- txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
- txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
- break;
- }
-
- return;
-}
-
-/*********************************************************************
- *
- * Free all transmit rings.
- *
- **********************************************************************/
-static void
-ixv_free_transmit_structures(struct adapter *adapter)
-{
- struct tx_ring *txr = adapter->tx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- IXV_TX_LOCK(txr);
- ixv_free_transmit_buffers(txr);
- ixv_dma_free(adapter, &txr->txdma);
- IXV_TX_UNLOCK(txr);
- IXV_TX_LOCK_DESTROY(txr);
- }
- free(adapter->tx_rings, M_DEVBUF);
-}
-
-/*********************************************************************
- *
- * Free transmit ring related data structures.
- *
- **********************************************************************/
-static void
-ixv_free_transmit_buffers(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ixv_tx_buf *tx_buffer;
- int i;
-
- INIT_DEBUGOUT("free_transmit_ring: begin");
-
- if (txr->tx_buffers == NULL)
- return;
-
- tx_buffer = txr->tx_buffers;
- for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
- if (tx_buffer->m_head != NULL) {
- bus_dmamap_sync(txr->txtag, tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag,
- tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- if (tx_buffer->map != NULL) {
- bus_dmamap_destroy(txr->txtag,
- tx_buffer->map);
- tx_buffer->map = NULL;
- }
- } else if (tx_buffer->map != NULL) {
- bus_dmamap_unload(txr->txtag,
- tx_buffer->map);
- bus_dmamap_destroy(txr->txtag,
- tx_buffer->map);
- tx_buffer->map = NULL;
- }
- }
-#if __FreeBSD_version >= 800000
- if (txr->br != NULL)
- buf_ring_free(txr->br, M_DEVBUF);
-#endif
- if (txr->tx_buffers != NULL) {
- free(txr->tx_buffers, M_DEVBUF);
- txr->tx_buffers = NULL;
- }
- if (txr->txtag != NULL) {
- bus_dma_tag_destroy(txr->txtag);
- txr->txtag = NULL;
- }
- return;
-}
-
-/*********************************************************************
- *
- * Advanced Context Descriptor setup for VLAN or CSUM
- *
- **********************************************************************/
-
-static bool
-ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_adv_tx_context_desc *TXD;
- struct ixv_tx_buf *tx_buffer;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- struct ether_vlan_header *eh;
- struct ip *ip;
- struct ip6_hdr *ip6;
- int ehdrlen, ip_hlen = 0;
- u16 etype;
- u8 ipproto = 0;
- bool offload = TRUE;
- int ctxd = txr->next_avail_desc;
- u16 vtag = 0;
-
-
- if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
- offload = FALSE;
-
-
- tx_buffer = &txr->tx_buffers[ctxd];
- TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
-
- /*
- ** In advanced descriptors the vlan tag must
- ** be placed into the descriptor itself.
- */
- if (mp->m_flags & M_VLANTAG) {
- vtag = htole16(mp->m_pkthdr.ether_vtag);
- vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
- } else if (offload == FALSE)
- return FALSE;
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present,
- * helpful for QinQ too.
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- etype = ntohs(eh->evl_proto);
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- } else {
- etype = ntohs(eh->evl_encap_proto);
- ehdrlen = ETHER_HDR_LEN;
- }
-
- /* Set the ether header length */
- vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
-
- switch (etype) {
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + ehdrlen);
- ip_hlen = ip->ip_hl << 2;
- if (mp->m_len < ehdrlen + ip_hlen)
- return (FALSE);
- ipproto = ip->ip_p;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- break;
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- ip_hlen = sizeof(struct ip6_hdr);
- if (mp->m_len < ehdrlen + ip_hlen)
- return (FALSE);
- ipproto = ip6->ip6_nxt;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
- break;
- default:
- offload = FALSE;
- break;
- }
-
- vlan_macip_lens |= ip_hlen;
- type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
-
- switch (ipproto) {
- case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags & CSUM_TCP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
-
- case IPPROTO_UDP:
- if (mp->m_pkthdr.csum_flags & CSUM_UDP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
- break;
-
-#if __FreeBSD_version >= 800000
- case IPPROTO_SCTP:
- if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- break;
-#endif
- default:
- offload = FALSE;
- break;
- }
-
- /* Now copy bits into descriptor */
- TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
- TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
- TXD->seqnum_seed = htole32(0);
- TXD->mss_l4len_idx = htole32(0);
-
- tx_buffer->m_head = NULL;
- tx_buffer->eop_index = -1;
-
- /* We've consumed the first desc, adjust counters */
- if (++ctxd == adapter->num_tx_desc)
- ctxd = 0;
- txr->next_avail_desc = ctxd;
- --txr->tx_avail;
-
- return (offload);
-}
-
-/**********************************************************************
- *
- * Setup work for hardware segmentation offload (TSO) on
- * adapters using advanced tx descriptors
- *
- **********************************************************************/
-static bool
-ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
-{
- struct adapter *adapter = txr->adapter;
- struct ixgbe_adv_tx_context_desc *TXD;
- struct ixv_tx_buf *tx_buffer;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- u32 mss_l4len_idx = 0;
- u16 vtag = 0;
- int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
- struct ether_vlan_header *eh;
- struct ip *ip;
- struct tcphdr *th;
-
-
- /*
- * Determine where frame payload starts.
- * Jump over vlan headers if already present
- */
- eh = mtod(mp, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
- ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- else
- ehdrlen = ETHER_HDR_LEN;
-
- /* Ensure we have at least the IP+TCP header in the first mbuf. */
- if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
- return FALSE;
-
- ctxd = txr->next_avail_desc;
- tx_buffer = &txr->tx_buffers[ctxd];
- TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
-
- ip = (struct ip *)(mp->m_data + ehdrlen);
- if (ip->ip_p != IPPROTO_TCP)
-		return FALSE;
- ip->ip_sum = 0;
- ip_hlen = ip->ip_hl << 2;
- th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
- th->th_sum = in_pseudo(ip->ip_src.s_addr,
- ip->ip_dst.s_addr, htons(IPPROTO_TCP));
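-	/*
-	** The pseudo-header sum seeded here (addresses + protocol,
-	** deliberately without the length) is what the hardware is
-	** assumed to expect; it completes the checksum per segment.
-	*/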
- tcp_hlen = th->th_off << 2;
- hdrlen = ehdrlen + ip_hlen + tcp_hlen;
-
- /* This is used in the transmit desc in encap */
- *paylen = mp->m_pkthdr.len - hdrlen;
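-	/*
-	** Example (hypothetical frame): ehdrlen 14 + ip_hlen 20 +
-	** tcp_hlen 20 = 54 header bytes, so a 7254-byte TSO packet
-	** reports a payload length of 7200 here.
-	*/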
-
- /* VLAN MACLEN IPLEN */
- if (mp->m_flags & M_VLANTAG) {
- vtag = htole16(mp->m_pkthdr.ether_vtag);
- vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
- }
-
- vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= ip_hlen;
- TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
-
- /* ADV DTYPE TUCMD */
- type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
-
-
- /* MSS L4LEN IDX */
- mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
- TXD->mss_l4len_idx = htole32(mss_l4len_idx);
-
- TXD->seqnum_seed = htole32(0);
- tx_buffer->m_head = NULL;
- tx_buffer->eop_index = -1;
-
- if (++ctxd == adapter->num_tx_desc)
- ctxd = 0;
-
- txr->tx_avail--;
- txr->next_avail_desc = ctxd;
- return TRUE;
-}
-
-
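ixv_tso_setup above stashes the payload length, i.e. the total frame length minus the L2/L3/L4 headers, for use by the encap path. A small sketch of that arithmetic for an untagged IPv4/TCP frame; the header lengths are sample values:

	#include <stdio.h>

	int
	main(void)
	{
		int ehdrlen = 14;		/* Ethernet header, no VLAN tag */
		int ip_hlen = 5 << 2;		/* ip_hl = 5 words -> 20 bytes */
		int tcp_hlen = 8 << 2;		/* th_off = 8 words -> 32 bytes */
		int pkt_len = 65535;		/* mbuf pkthdr length */

		int hdrlen = ehdrlen + ip_hlen + tcp_hlen;
		int paylen = pkt_len - hdrlen;	/* value handed to the descriptor */

		printf("hdrlen=%d paylen=%d\n", hdrlen, paylen);
		return (0);
	}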
-/**********************************************************************
- *
- * Examine each tx_buffer in the used queue. If the hardware is done
- * processing the packet then free associated resources. The
- * tx_buffer is put back on the free queue.
- *
- **********************************************************************/
-static bool
-ixv_txeof(struct tx_ring *txr)
-{
- struct adapter *adapter = txr->adapter;
- struct ifnet *ifp = adapter->ifp;
- u32 first, last, done;
- struct ixv_tx_buf *tx_buffer;
- struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
-
- mtx_assert(&txr->tx_mtx, MA_OWNED);
-
- if (txr->tx_avail == adapter->num_tx_desc)
- return FALSE;
-
- first = txr->next_to_clean;
- tx_buffer = &txr->tx_buffers[first];
- /* For cleanup we just use legacy struct */
- tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
- last = tx_buffer->eop_index;
- if (last == -1)
- return FALSE;
- eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
-
- /*
- ** Get the index of the first descriptor
- ** BEYOND the EOP and call that 'done'.
-	** This is done so the comparison in the
-	** inner while loop below can stay simple.
- */
- if (++last == adapter->num_tx_desc) last = 0;
- done = last;
-
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_POSTREAD);
- /*
- ** Only the EOP descriptor of a packet now has the DD
- ** bit set, this is what we look for...
- */
- while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
- /* We clean the range of the packet */
- while (first != done) {
- tx_desc->upper.data = 0;
- tx_desc->lower.data = 0;
- tx_desc->buffer_addr = 0;
- ++txr->tx_avail;
-
- if (tx_buffer->m_head) {
- bus_dmamap_sync(txr->txtag,
- tx_buffer->map,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txtag,
- tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- tx_buffer->map = NULL;
- }
- tx_buffer->eop_index = -1;
- txr->watchdog_time = ticks;
-
- if (++first == adapter->num_tx_desc)
- first = 0;
-
- tx_buffer = &txr->tx_buffers[first];
- tx_desc =
- (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
- }
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- /* See if there is more work now */
- last = tx_buffer->eop_index;
- if (last != -1) {
- eop_desc =
- (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
- /* Get next done point */
- if (++last == adapter->num_tx_desc) last = 0;
- done = last;
- } else
- break;
- }
- bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- txr->next_to_clean = first;
-
- /*
- * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
- * it is OK to send packets. If there are no pending descriptors,
- * clear the timeout. Otherwise, if some descriptors have been freed,
- * restart the timeout.
- */
- if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- if (txr->tx_avail == adapter->num_tx_desc) {
- txr->watchdog_check = FALSE;
- return FALSE;
- }
- }
-
- return TRUE;
-}
-
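ixv_txeof above marks 'done' as one slot past the EOP descriptor so its inner loop only needs a simple inequality test. A sketch of that wraparound arithmetic, assuming a ring of num_tx_desc slots:

	#include <stdio.h>

	/* One beyond the EOP index, modulo the ring size */
	static int
	next_done(int eop_index, int num_tx_desc)
	{
		return ((eop_index + 1) % num_tx_desc);
	}

	int
	main(void)
	{
		printf("%d\n", next_done(10, 1024));	/* 11 */
		printf("%d\n", next_done(1023, 1024));	/* wraps to 0 */
		return (0);
	}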
-/*********************************************************************
- *
- * Refresh mbuf buffers for RX descriptor rings
- * - now keeps its own state so discards due to resource
- * exhaustion are unnecessary, if an mbuf cannot be obtained
- * it just returns, keeping its placeholder, thus it can simply
- * be recalled to try again.
- *
- **********************************************************************/
-static void
-ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
-{
- struct adapter *adapter = rxr->adapter;
- bus_dma_segment_t hseg[1];
- bus_dma_segment_t pseg[1];
- struct ixv_rx_buf *rxbuf;
- struct mbuf *mh, *mp;
- int i, j, nsegs, error;
- bool refreshed = FALSE;
-
- i = j = rxr->next_to_refresh;
- /* Get the control variable, one beyond refresh point */
- if (++j == adapter->num_rx_desc)
- j = 0;
- while (j != limit) {
- rxbuf = &rxr->rx_buffers[i];
- if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
- mh = m_gethdr(M_NOWAIT, MT_DATA);
- if (mh == NULL)
- goto update;
- mh->m_pkthdr.len = mh->m_len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- m_adj(mh, ETHER_ALIGN);
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->htag,
- rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("GET BUF: dmamap load"
- " failure - %d\n", error);
- m_free(mh);
- goto update;
- }
- rxbuf->m_head = mh;
- bus_dmamap_sync(rxr->htag, rxbuf->hmap,
- BUS_DMASYNC_PREREAD);
- rxr->rx_base[i].read.hdr_addr =
- htole64(hseg[0].ds_addr);
- }
-
- if (rxbuf->m_pack == NULL) {
- mp = m_getjcl(M_NOWAIT, MT_DATA,
- M_PKTHDR, adapter->rx_mbuf_sz);
- if (mp == NULL)
- goto update;
- } else
- mp = rxbuf->m_pack;
-
- mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) {
- printf("GET BUF: dmamap load"
- " failure - %d\n", error);
- m_free(mp);
- rxbuf->m_pack = NULL;
- goto update;
- }
- rxbuf->m_pack = mp;
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_PREREAD);
- rxr->rx_base[i].read.pkt_addr =
- htole64(pseg[0].ds_addr);
-
- refreshed = TRUE;
- rxr->next_to_refresh = i = j;
- /* Calculate next index */
- if (++j == adapter->num_rx_desc)
- j = 0;
- }
-update:
- if (refreshed) /* update tail index */
- IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
- return;
-}
-
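The retry-friendly design of ixv_refresh_mbufs, where a failed allocation simply leaves the cursor in place for a later call, can be reduced to the following sketch; alloc_buf and map_buf are hypothetical stand-ins for m_getjcl() and the busdma map load:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct ring { int next_to_refresh; int num_desc; };

	/* Stand-ins for m_getjcl() and bus_dmamap_load_mbuf_sg() */
	static void *alloc_buf(void) { return malloc(2048); }
	static bool map_buf(void *buf) { free(buf); return true; }

	static void
	refresh(struct ring *r, int limit)
	{
		int i = r->next_to_refresh;
		int j = (i + 1) % r->num_desc;	/* control var, one past cursor */

		while (j != limit) {
			void *buf = alloc_buf();
			if (buf == NULL)
				return;		/* keep placeholder; retry on next call */
			if (!map_buf(buf))
				return;
			r->next_to_refresh = i = j;	/* commit progress */
			j = (j + 1) % r->num_desc;
		}
	}

	int
	main(void)
	{
		struct ring r = { 1020, 1024 };
		refresh(&r, 4);			/* refresh up to slot 3, wrapping */
		printf("next_to_refresh=%d\n", r.next_to_refresh);
		return (0);
	}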
-/*********************************************************************
- *
- * Allocate memory for rx_buffer structures. Since we use one
- * rx_buffer per received packet, the maximum number of rx_buffer's
- * that we'll need is equal to the number of receive descriptors
- * that we've allocated.
- *
- **********************************************************************/
-static int
-ixv_allocate_receive_buffers(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- device_t dev = adapter->dev;
- struct ixv_rx_buf *rxbuf;
- int i, bsize, error;
-
- bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
- if (!(rxr->rx_buffers =
- (struct ixv_rx_buf *) malloc(bsize,
- M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate rx_buffer memory\n");
- error = ENOMEM;
- goto fail;
- }
-
- if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MSIZE, /* maxsize */
- 1, /* nsegments */
- MSIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->htag))) {
- device_printf(dev, "Unable to create RX DMA tag\n");
- goto fail;
- }
-
- if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
- 1, 0, /* alignment, bounds */
- BUS_SPACE_MAXADDR, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
- MJUMPAGESIZE, /* maxsize */
- 1, /* nsegments */
- MJUMPAGESIZE, /* maxsegsize */
- 0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockfuncarg */
- &rxr->ptag))) {
- device_printf(dev, "Unable to create RX DMA tag\n");
- goto fail;
- }
-
- for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
- rxbuf = &rxr->rx_buffers[i];
- error = bus_dmamap_create(rxr->htag,
- BUS_DMA_NOWAIT, &rxbuf->hmap);
- if (error) {
- device_printf(dev, "Unable to create RX head map\n");
- goto fail;
- }
- error = bus_dmamap_create(rxr->ptag,
- BUS_DMA_NOWAIT, &rxbuf->pmap);
- if (error) {
- device_printf(dev, "Unable to create RX pkt map\n");
- goto fail;
- }
- }
-
- return (0);
-
-fail:
- /* Frees all, but can handle partial completion */
- ixv_free_receive_structures(adapter);
- return (error);
-}
-
-static void
-ixv_free_receive_ring(struct rx_ring *rxr)
-{
- struct adapter *adapter;
- struct ixv_rx_buf *rxbuf;
- int i;
-
- adapter = rxr->adapter;
- for (i = 0; i < adapter->num_rx_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->htag, rxbuf->hmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->htag, rxbuf->hmap);
- rxbuf->m_head->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_head);
- }
- if (rxbuf->m_pack != NULL) {
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
- rxbuf->m_pack->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_pack);
- }
- rxbuf->m_head = NULL;
- rxbuf->m_pack = NULL;
- }
-}
-
-
-/*********************************************************************
- *
- * Initialize a receive ring and its buffers.
- *
- **********************************************************************/
-static int
-ixv_setup_receive_ring(struct rx_ring *rxr)
-{
- struct adapter *adapter;
- struct ifnet *ifp;
- device_t dev;
- struct ixv_rx_buf *rxbuf;
- bus_dma_segment_t pseg[1], hseg[1];
- struct lro_ctrl *lro = &rxr->lro;
- int rsize, nsegs, error = 0;
-
- adapter = rxr->adapter;
- ifp = adapter->ifp;
- dev = adapter->dev;
-
- /* Clear the ring contents */
- IXV_RX_LOCK(rxr);
- rsize = roundup2(adapter->num_rx_desc *
- sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
- bzero((void *)rxr->rx_base, rsize);
-
- /* Free current RX buffer structs and their mbufs */
- ixv_free_receive_ring(rxr);
-
- /* Configure header split? */
- if (ixv_header_split)
- rxr->hdr_split = TRUE;
-
- /* Now replenish the mbufs */
- for (int j = 0; j != adapter->num_rx_desc; ++j) {
- struct mbuf *mh, *mp;
-
- rxbuf = &rxr->rx_buffers[j];
- /*
- ** Don't allocate mbufs if not
- ** doing header split; it's wasteful
- */
- if (rxr->hdr_split == FALSE)
- goto skip_head;
-
- /* First the header */
- rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
- if (rxbuf->m_head == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- m_adj(rxbuf->m_head, ETHER_ALIGN);
- mh = rxbuf->m_head;
- mh->m_len = mh->m_pkthdr.len = MHLEN;
- mh->m_flags |= M_PKTHDR;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->htag,
- rxbuf->hmap, rxbuf->m_head, hseg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0) /* Nothing elegant to do here */
- goto fail;
- bus_dmamap_sync(rxr->htag,
- rxbuf->hmap, BUS_DMASYNC_PREREAD);
- /* Update descriptor */
- rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
-
-skip_head:
- /* Now the payload cluster */
- rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
- M_PKTHDR, adapter->rx_mbuf_sz);
- if (rxbuf->m_pack == NULL) {
- error = ENOBUFS;
- goto fail;
- }
- mp = rxbuf->m_pack;
- mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
- /* Get the memory mapping */
- error = bus_dmamap_load_mbuf_sg(rxr->ptag,
- rxbuf->pmap, mp, pseg,
- &nsegs, BUS_DMA_NOWAIT);
- if (error != 0)
- goto fail;
- bus_dmamap_sync(rxr->ptag,
- rxbuf->pmap, BUS_DMASYNC_PREREAD);
- /* Update descriptor */
- rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
- }
-
-
- /* Setup our descriptor indices */
- rxr->next_to_check = 0;
- rxr->next_to_refresh = 0;
- rxr->lro_enabled = FALSE;
- rxr->rx_split_packets = 0;
- rxr->rx_bytes = 0;
- rxr->discard = FALSE;
-
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /*
- ** Now set up the LRO interface:
- */
- if (ifp->if_capenable & IFCAP_LRO) {
- int err = tcp_lro_init(lro);
- if (err) {
- device_printf(dev, "LRO Initialization failed!\n");
- goto fail;
- }
- INIT_DEBUGOUT("RX Soft LRO Initialized\n");
- rxr->lro_enabled = TRUE;
- lro->ifp = adapter->ifp;
- }
-
- IXV_RX_UNLOCK(rxr);
- return (0);
-
-fail:
- ixv_free_receive_ring(rxr);
- IXV_RX_UNLOCK(rxr);
- return (error);
-}
-
-/*********************************************************************
- *
- * Initialize all receive rings.
- *
- **********************************************************************/
-static int
-ixv_setup_receive_structures(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
- int j;
-
- for (j = 0; j < adapter->num_queues; j++, rxr++)
- if (ixv_setup_receive_ring(rxr))
- goto fail;
-
- return (0);
-fail:
- /*
- * Free RX buffers allocated so far; we only handle
- * the rings that completed, since the failing case will have
- * cleaned up after itself. 'j' failed, so it is the terminus.
- */
- for (int i = 0; i < j; ++i) {
- rxr = &adapter->rx_rings[i];
- ixv_free_receive_ring(rxr);
- }
-
- return (ENOBUFS);
-}
-
-/*********************************************************************
- *
- * Setup receive registers and features.
- *
- **********************************************************************/
-#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
-
-static void
-ixv_initialize_receive_units(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ifnet *ifp = adapter->ifp;
- u32 bufsz, fctrl, rxcsum, hlreg;
-
-
- /* Enable broadcasts */
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_BAM;
- fctrl |= IXGBE_FCTRL_DPF;
- fctrl |= IXGBE_FCTRL_PMCF;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-
- /* Set for Jumbo Frames? */
- hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (ifp->if_mtu > ETHERMTU) {
- hlreg |= IXGBE_HLREG0_JUMBOEN;
- bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- } else {
- hlreg &= ~IXGBE_HLREG0_JUMBOEN;
- bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- }
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
-
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- u64 rdba = rxr->rxdma.dma_paddr;
- u32 reg, rxdctl;
-
- /* Do the queue enabling first */
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
- rxdctl |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
- for (int k = 0; k < 10; k++) {
- if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
- IXGBE_RXDCTL_ENABLE)
- break;
- else
- msec_delay(1);
- }
- wmb();
-
- /* Setup the Base and Length of the Rx Descriptor Ring */
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
- (rdba & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
- (rdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
- adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
-
- /* Set up the SRRCTL register */
- reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
- reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
- reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
- reg |= bufsz;
- if (rxr->hdr_split) {
- /* Use a standard mbuf for the header */
- reg |= ((IXV_RX_HDR <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
- & IXGBE_SRRCTL_BSIZEHDR_MASK);
- reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else
- reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
-
- /* Setup the HW Rx Head and Tail Descriptor Pointers */
- IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
- adapter->num_rx_desc - 1);
- }
-
- rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
-
- if (ifp->if_capenable & IFCAP_RXCSUM)
- rxcsum |= IXGBE_RXCSUM_PCSD;
-
- if (!(rxcsum & IXGBE_RXCSUM_PCSD))
- rxcsum |= IXGBE_RXCSUM_IPPCSE;
-
- IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
-
- return;
-}
-
-/*********************************************************************
- *
- * Free all receive rings.
- *
- **********************************************************************/
-static void
-ixv_free_receive_structures(struct adapter *adapter)
-{
- struct rx_ring *rxr = adapter->rx_rings;
-
- for (int i = 0; i < adapter->num_queues; i++, rxr++) {
- struct lro_ctrl *lro = &rxr->lro;
- ixv_free_receive_buffers(rxr);
- /* Free LRO memory */
- tcp_lro_free(lro);
- /* Free the ring memory as well */
- ixv_dma_free(adapter, &rxr->rxdma);
- }
-
- free(adapter->rx_rings, M_DEVBUF);
-}
-
-
-/*********************************************************************
- *
- * Free receive ring data structures
- *
- **********************************************************************/
-static void
-ixv_free_receive_buffers(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
- struct ixv_rx_buf *rxbuf;
-
- INIT_DEBUGOUT("free_receive_structures: begin");
-
- /* Cleanup any existing buffers */
- if (rxr->rx_buffers != NULL) {
- for (int i = 0; i < adapter->num_rx_desc; i++) {
- rxbuf = &rxr->rx_buffers[i];
- if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->htag, rxbuf->hmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->htag, rxbuf->hmap);
- rxbuf->m_head->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_head);
- }
- if (rxbuf->m_pack != NULL) {
- bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
- rxbuf->m_pack->m_flags |= M_PKTHDR;
- m_freem(rxbuf->m_pack);
- }
- rxbuf->m_head = NULL;
- rxbuf->m_pack = NULL;
- if (rxbuf->hmap != NULL) {
- bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
- rxbuf->hmap = NULL;
- }
- if (rxbuf->pmap != NULL) {
- bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
- rxbuf->pmap = NULL;
- }
- }
- if (rxr->rx_buffers != NULL) {
- free(rxr->rx_buffers, M_DEVBUF);
- rxr->rx_buffers = NULL;
- }
- }
-
- if (rxr->htag != NULL) {
- bus_dma_tag_destroy(rxr->htag);
- rxr->htag = NULL;
- }
- if (rxr->ptag != NULL) {
- bus_dma_tag_destroy(rxr->ptag);
- rxr->ptag = NULL;
- }
-
- return;
-}
-
-static __inline void
-ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
-{
-
- /*
- * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
- * should be computed by hardware. Also it should not have VLAN tag in
- * ethernet header.
- */
- if (rxr->lro_enabled &&
- (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
- (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
- (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
- (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
- (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
- (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
- /*
- ** Send to the stack if:
- ** - LRO not enabled, or
- ** - no LRO resources, or
- ** - lro enqueue fails
- */
- if (rxr->lro.lro_cnt != 0)
- if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
- return;
- }
- IXV_RX_UNLOCK(rxr);
- (*ifp->if_input)(ifp, m);
- IXV_RX_LOCK(rxr);
-}
-
-static __inline void
-ixv_rx_discard(struct rx_ring *rxr, int i)
-{
- struct ixv_rx_buf *rbuf;
-
- rbuf = &rxr->rx_buffers[i];
-
- if (rbuf->fmp != NULL) {/* Partial chain ? */
- rbuf->fmp->m_flags |= M_PKTHDR;
- m_freem(rbuf->fmp);
- rbuf->fmp = NULL;
- }
-
- /*
- ** With advanced descriptors the writeback
- ** clobbers the buffer addresses, so it's easier
- ** to just free the existing mbufs and take
- ** the normal refresh path to get new buffers
- ** and mapping.
- */
- if (rbuf->m_head) {
- m_free(rbuf->m_head);
- rbuf->m_head = NULL;
- }
-
- if (rbuf->m_pack) {
- m_free(rbuf->m_pack);
- rbuf->m_pack = NULL;
- }
-
- return;
-}
-
-
-/*********************************************************************
- *
- * This routine executes in interrupt context. It replenishes
- * the mbufs in the descriptor ring and sends data which has been
- * DMA'd into host memory to the upper layer.
- *
- * We loop at most count times if count is > 0, or until done if
- * count < 0.
- *
- * Return TRUE for more work, FALSE for all clean.
- *********************************************************************/
-static bool
-ixv_rxeof(struct ix_queue *que, int count)
-{
- struct adapter *adapter = que->adapter;
- struct rx_ring *rxr = que->rxr;
- struct ifnet *ifp = adapter->ifp;
- struct lro_ctrl *lro = &rxr->lro;
- struct lro_entry *queued;
- int i, nextp, processed = 0;
- u32 staterr = 0;
- union ixgbe_adv_rx_desc *cur;
- struct ixv_rx_buf *rbuf, *nbuf;
-
- IXV_RX_LOCK(rxr);
-
- for (i = rxr->next_to_check; count != 0;) {
- struct mbuf *sendmp, *mh, *mp;
- u32 rsc, ptype;
- u16 hlen, plen, hdr, vtag;
- bool eop;
-
- /* Sync the ring. */
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- cur = &rxr->rx_base[i];
- staterr = le32toh(cur->wb.upper.status_error);
-
- if ((staterr & IXGBE_RXD_STAT_DD) == 0)
- break;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- break;
-
- count--;
- sendmp = NULL;
- nbuf = NULL;
- rsc = 0;
- cur->wb.upper.status_error = 0;
- rbuf = &rxr->rx_buffers[i];
- mh = rbuf->m_head;
- mp = rbuf->m_pack;
-
- plen = le16toh(cur->wb.upper.length);
- ptype = le32toh(cur->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_PKTTYPE_MASK;
- hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
- vtag = le16toh(cur->wb.upper.vlan);
- eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
-
- /* Make sure all parts of a bad packet are discarded */
- if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
- (rxr->discard)) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
- rxr->rx_discarded++;
- if (!eop)
- rxr->discard = TRUE;
- else
- rxr->discard = FALSE;
- ixv_rx_discard(rxr, i);
- goto next_desc;
- }
-
- if (!eop) {
- nextp = i + 1;
- if (nextp == adapter->num_rx_desc)
- nextp = 0;
- nbuf = &rxr->rx_buffers[nextp];
- prefetch(nbuf);
- }
- /*
- ** The header mbuf is ONLY used when header
- ** split is enabled, otherwise we get normal
- ** behavior, i.e., both header and payload
- ** are DMA'd into the payload buffer.
- **
- ** Rather than using the fmp/lmp global pointers
- ** we now keep the head of a packet chain in the
- ** buffer struct and pass this along from one
- ** descriptor to the next, until we get EOP.
- */
- if (rxr->hdr_split && (rbuf->fmp == NULL)) {
- /* This must be an initial descriptor */
- hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > IXV_RX_HDR)
- hlen = IXV_RX_HDR;
- mh->m_len = hlen;
- mh->m_flags |= M_PKTHDR;
- mh->m_next = NULL;
- mh->m_pkthdr.len = mh->m_len;
- /* Null buf pointer so it is refreshed */
- rbuf->m_head = NULL;
- /*
- ** Check the payload length, this
- ** could be zero if it's a small
- ** packet.
- */
- if (plen > 0) {
- mp->m_len = plen;
- mp->m_next = NULL;
- mp->m_flags &= ~M_PKTHDR;
- mh->m_next = mp;
- mh->m_pkthdr.len += mp->m_len;
- /* Null buf pointer so it is refreshed */
- rbuf->m_pack = NULL;
- rxr->rx_split_packets++;
- }
- /*
- ** Now create the forward
- ** chain so when complete
- ** we won't have to.
- */
- if (eop == 0) {
- /* stash the chain head */
- nbuf->fmp = mh;
- /* Make forward chain */
- if (plen)
- mp->m_next = nbuf->m_pack;
- else
- mh->m_next = nbuf->m_pack;
- } else {
- /* Singlet, prepare to send */
- sendmp = mh;
- if ((adapter->num_vlans) &&
- (staterr & IXGBE_RXD_STAT_VP)) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- }
- } else {
- /*
- ** Either no header split, or a
- ** secondary piece of a fragmented
- ** split packet.
- */
- mp->m_len = plen;
- /*
- ** See if there is a stored head
- ** that determines what we are
- */
- sendmp = rbuf->fmp;
- rbuf->m_pack = rbuf->fmp = NULL;
-
- if (sendmp != NULL) /* secondary frag */
- sendmp->m_pkthdr.len += mp->m_len;
- else {
- /* first desc of a non-ps chain */
- sendmp = mp;
- sendmp->m_flags |= M_PKTHDR;
- sendmp->m_pkthdr.len = mp->m_len;
- if (staterr & IXGBE_RXD_STAT_VP) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
- }
- /* Pass the head pointer on */
- if (eop == 0) {
- nbuf->fmp = sendmp;
- sendmp = NULL;
- mp->m_next = nbuf->m_pack;
- }
- }
- ++processed;
- /* Sending this frame? */
- if (eop) {
- sendmp->m_pkthdr.rcvif = ifp;
- if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
- rxr->rx_packets++;
- /* capture data for AIM */
- rxr->bytes += sendmp->m_pkthdr.len;
- rxr->rx_bytes += sendmp->m_pkthdr.len;
- if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
- ixv_rx_checksum(staterr, sendmp, ptype);
-#if __FreeBSD_version >= 800000
- sendmp->m_pkthdr.flowid = que->msix;
- M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
-#endif
- }
-next_desc:
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /* Advance our pointers to the next descriptor. */
- if (++i == adapter->num_rx_desc)
- i = 0;
-
- /* Now send to the stack or do LRO */
- if (sendmp != NULL)
- ixv_rx_input(rxr, ifp, sendmp, ptype);
-
- /* Every 8 descriptors we go to refresh mbufs */
- if (processed == 8) {
- ixv_refresh_mbufs(rxr, i);
- processed = 0;
- }
- }
-
- /* Refresh any remaining buf structs */
- if (ixv_rx_unrefreshed(rxr))
- ixv_refresh_mbufs(rxr, i);
-
- rxr->next_to_check = i;
-
- /*
- * Flush any outstanding LRO work
- */
- while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
- SLIST_REMOVE_HEAD(&lro->lro_active, next);
- tcp_lro_flush(lro, queued);
- }
-
- IXV_RX_UNLOCK(rxr);
-
- /*
- ** We still have cleaning to do?
- ** Schedule another interrupt if so.
- */
- if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
- ixv_rearm_queues(adapter, (u64)(1 << que->msix));
- return (TRUE);
- }
-
- return (FALSE);
-}
-
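As the header comment for ixv_rxeof notes, the loop bounds on count != 0 rather than count > 0: a positive count acts as a budget, while a negative count never reaches zero and so the loop runs until no descriptor has the DD bit set. A toy illustration of that contract (process and its arguments are made up for the example):

	#include <stdio.h>

	static int
	process(int count, int ready)	/* 'ready' mimics descriptors with DD set */
	{
		int done = 0;

		for (; count != 0; count--) {
			if (ready == 0)
				break;		/* nothing ready: stop early */
			ready--;
			done++;
		}
		return (done);
	}

	int
	main(void)
	{
		printf("%d\n", process(8, 100));	/* budget of 8 -> 8 */
		printf("%d\n", process(-1, 100));	/* unbounded -> 100 */
		return (0);
	}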
-
-/*********************************************************************
- *
- * Verify that the hardware indicated that the checksum is valid.
- * Inform the stack about the status of the checksum so that the
- * stack doesn't spend time verifying it.
- *
- *********************************************************************/
-static void
-ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
-{
- u16 status = (u16) staterr;
- u8 errors = (u8) (staterr >> 24);
- bool sctp = FALSE;
-
- if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
- (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
- sctp = TRUE;
-
- if (status & IXGBE_RXD_STAT_IPCS) {
- if (!(errors & IXGBE_RXD_ERR_IPE)) {
- /* IP Checksum Good */
- mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
- mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
-
- } else
- mp->m_pkthdr.csum_flags = 0;
- }
- if (status & IXGBE_RXD_STAT_L4CS) {
- u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
-#if __FreeBSD_version >= 800000
- if (sctp)
- type = CSUM_SCTP_VALID;
-#endif
- if (!(errors & IXGBE_RXD_ERR_TCPE)) {
- mp->m_pkthdr.csum_flags |= type;
- if (!sctp)
- mp->m_pkthdr.csum_data = htons(0xffff);
- }
- }
- return;
-}
-
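ixv_rx_checksum above carves the 32-bit staterr writeback word into a 16-bit status field and an 8-bit error field in the top byte. A minimal sketch of that extraction; the sample value is arbitrary:

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint32_t staterr = 0x01000021;		/* arbitrary writeback value */
		uint16_t status = (uint16_t)staterr;	/* low 16 status bits */
		uint8_t errors = (uint8_t)(staterr >> 24); /* top 8 error bits */

		printf("status=0x%04x errors=0x%02x\n", status, errors);
		return (0);
	}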
-static void
-ixv_setup_vlan_support(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 ctrl, vid, vfta, retry;
-
-
- /*
-	** We get here through init_locked, meaning
-	** a soft reset; this has already cleared
-	** the VFTA and other state, so if no
-	** VLANs have been registered, do nothing.
- */
- if (adapter->num_vlans == 0)
- return;
-
- /* Enable the queues */
- for (int i = 0; i < adapter->num_queues; i++) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
- }
-
- /*
- ** A soft reset zero's out the VFTA, so
- ** we need to repopulate it now.
- */
- for (int i = 0; i < VFTA_SIZE; i++) {
- if (ixv_shadow_vfta[i] == 0)
- continue;
- vfta = ixv_shadow_vfta[i];
- /*
-		** Reconstruct the VLAN IDs
-		** based on the bits set in each
-		** of the array words.
- */
- for (int j = 0; j < 32; j++) {
- retry = 0;
- if ((vfta & (1 << j)) == 0)
- continue;
- vid = (i * 32) + j;
- /* Call the shared code mailbox routine */
- while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
- if (++retry > 5)
- break;
- }
- }
- }
-}
-
-/*
-** This routine is run via a vlan config EVENT;
-** it enables us to use the HW filter table since
-** we can get the vlan id. This just creates the
-** entry in the soft version of the VFTA; init will
-** repopulate the real table.
-*/
-static void
-ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct adapter *adapter = ifp->if_softc;
- u16 index, bit;
-
- if (ifp->if_softc != arg) /* Not our event */
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXV_CORE_LOCK(adapter);
- index = (vtag >> 5) & 0x7F;
- bit = vtag & 0x1F;
- ixv_shadow_vfta[index] |= (1 << bit);
- ++adapter->num_vlans;
- /* Re-init to load the changes */
- ixv_init_locked(adapter);
- IXV_CORE_UNLOCK(adapter);
-}
-
-/*
-** This routine is run via a vlan
-** unconfig EVENT; it removes our entry
-** from the soft VFTA.
-*/
-static void
-ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct adapter *adapter = ifp->if_softc;
- u16 index, bit;
-
- if (ifp->if_softc != arg)
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXV_CORE_LOCK(adapter);
- index = (vtag >> 5) & 0x7F;
- bit = vtag & 0x1F;
- ixv_shadow_vfta[index] &= ~(1 << bit);
- --adapter->num_vlans;
- /* Re-init to load the changes */
- ixv_init_locked(adapter);
- IXV_CORE_UNLOCK(adapter);
-}
-
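The two VLAN event handlers above fold a 12-bit VLAN id into a word index and bit position within the 128-entry shadow VFTA, and ixv_setup_vlan_support reverses the mapping. A standalone round trip of that arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint16_t vtag = 1234;

		/* Decompose: 32 VLANs per 32-bit VFTA word */
		uint16_t index = (vtag >> 5) & 0x7F;	/* word 0..127 */
		uint16_t bit = vtag & 0x1F;		/* bit 0..31 */

		/* Recompose, as ixv_setup_vlan_support does */
		uint16_t vid = (index * 32) + bit;

		printf("index=%u bit=%u vid=%u\n", index, bit, vid);
		return (0);
	}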
-static void
-ixv_enable_intr(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ix_queue *que = adapter->queues;
- u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
-
-
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-
- mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
-
- for (int i = 0; i < adapter->num_queues; i++, que++)
- ixv_enable_queue(adapter, que->msix);
-
- IXGBE_WRITE_FLUSH(hw);
-
- return;
-}
-
-static void
-ixv_disable_intr(struct adapter *adapter)
-{
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
- IXGBE_WRITE_FLUSH(&adapter->hw);
- return;
-}
-
-/*
-** Setup the correct IVAR register for a particular MSIX interrupt
-** - entry is the register array entry
-** - vector is the MSIX vector for this queue
-** - type is RX/TX/MISC
-*/
-static void
-ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 ivar, index;
-
- vector |= IXGBE_IVAR_ALLOC_VAL;
-
- if (type == -1) { /* MISC IVAR */
- ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
- ivar &= ~0xFF;
- ivar |= vector;
- IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
- } else { /* RX/TX IVARS */
- index = (16 * (entry & 1)) + (8 * type);
- ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
- ivar &= ~(0xFF << index);
- ivar |= (vector << index);
- IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
- }
-}
-
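In ixv_set_ivar, each 32-bit VTIVAR register carries four 8-bit vector entries, selected by the queue entry's low bit and the RX/TX type (0 for RX and 1 for TX, as passed by ixv_configure_ivars). A worked sketch of the bit-offset computation:

	#include <stdio.h>

	/* Bit position of a queue's vector field inside VTIVAR(entry >> 1) */
	static int
	ivar_index(int entry, int type)
	{
		return ((16 * (entry & 1)) + (8 * type));
	}

	int
	main(void)
	{
		/* Queue 0: RX at bits 7:0, TX at bits 15:8 */
		printf("q0 rx=%d tx=%d\n", ivar_index(0, 0), ivar_index(0, 1));
		/* Queue 1: RX at bits 23:16, TX at bits 31:24 */
		printf("q1 rx=%d tx=%d\n", ivar_index(1, 0), ivar_index(1, 1));
		return (0);
	}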
-static void
-ixv_configure_ivars(struct adapter *adapter)
-{
- struct ix_queue *que = adapter->queues;
-
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- /* First the RX queue entry */
- ixv_set_ivar(adapter, i, que->msix, 0);
- /* ... and the TX */
- ixv_set_ivar(adapter, i, que->msix, 1);
- /* Set an initial value in EITR */
- IXGBE_WRITE_REG(&adapter->hw,
- IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
- }
-
- /* For the Link interrupt */
- ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
-}
-
-
-/*
-** Tasklet handler for MSIX MBX interrupts
-** - done outside the interrupt since it might sleep
-*/
-static void
-ixv_handle_mbx(void *context, int pending)
-{
- struct adapter *adapter = context;
-
- ixgbe_check_link(&adapter->hw,
- &adapter->link_speed, &adapter->link_up, 0);
- ixv_update_link_status(adapter);
-}
-
-/*
-** The VF stats registers never have a truly virgin
-** starting point, so this routine tries to make an
-** artificial one, marking ground zero on attach, as
-** it were.
-*/
-static void
-ixv_save_stats(struct adapter *adapter)
-{
- if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
- adapter->stats.saved_reset_vfgprc +=
- adapter->stats.vfgprc - adapter->stats.base_vfgprc;
- adapter->stats.saved_reset_vfgptc +=
- adapter->stats.vfgptc - adapter->stats.base_vfgptc;
- adapter->stats.saved_reset_vfgorc +=
- adapter->stats.vfgorc - adapter->stats.base_vfgorc;
- adapter->stats.saved_reset_vfgotc +=
- adapter->stats.vfgotc - adapter->stats.base_vfgotc;
- adapter->stats.saved_reset_vfmprc +=
- adapter->stats.vfmprc - adapter->stats.base_vfmprc;
- }
-}
-
-static void
-ixv_init_stats(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
- adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
- adapter->stats.last_vfgorc |=
- (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
-
- adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
- adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
- adapter->stats.last_vfgotc |=
- (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
-
- adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
-
- adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
- adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
- adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
- adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
- adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
-}
-
-#define UPDATE_STAT_32(reg, last, count) \
-{ \
- u32 current = IXGBE_READ_REG(hw, reg); \
- if (current < last) \
- count += 0x100000000LL; \
- last = current; \
- count &= 0xFFFFFFFF00000000LL; \
- count |= current; \
-}
-
-#define UPDATE_STAT_36(lsb, msb, last, count) \
-{ \
- u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
- u64 cur_msb = IXGBE_READ_REG(hw, msb); \
- u64 current = ((cur_msb << 32) | cur_lsb); \
- if (current < last) \
- count += 0x1000000000LL; \
- last = current; \
- count &= 0xFFFFFFF000000000LL; \
- count |= current; \
-}
-
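UPDATE_STAT_32 above extends a wrapping 32-bit hardware counter into a monotonic 64-bit soft count by adding 2^32 whenever the raw reading moves backwards. The same logic as a plain function, exercised across a wrap:

	#include <stdint.h>
	#include <stdio.h>

	static void
	update_stat_32(uint32_t current, uint32_t *last, uint64_t *count)
	{
		if (current < *last)			/* 32-bit counter wrapped */
			*count += 0x100000000ULL;
		*last = current;
		*count = (*count & 0xFFFFFFFF00000000ULL) | current;
	}

	int
	main(void)
	{
		uint32_t last = 0;
		uint64_t count = 0;

		update_stat_32(0xFFFFFFF0, &last, &count);	/* near the top */
		update_stat_32(0x00000010, &last, &count);	/* wrapped around */
		printf("count=0x%llx\n", (unsigned long long)count);
		return (0);
	}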
-/*
-** ixv_update_stats - Update the board statistics counters.
-*/
-void
-ixv_update_stats(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
- adapter->stats.vfgprc);
- UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
- adapter->stats.vfgptc);
- UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
- adapter->stats.last_vfgorc, adapter->stats.vfgorc);
- UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
- adapter->stats.last_vfgotc, adapter->stats.vfgotc);
- UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
- adapter->stats.vfmprc);
-}
-
-/**********************************************************************
- *
- * This routine is called from the driver's stats sysctl handler.
- * This routine provides a way to take a look at important statistics
- * maintained by the driver and hardware.
- *
- **********************************************************************/
-static void
-ixv_print_hw_stats(struct adapter * adapter)
-{
- device_t dev = adapter->dev;
-
- device_printf(dev,"Std Mbuf Failed = %lu\n",
- adapter->mbuf_defrag_failed);
- device_printf(dev,"Driver dropped packets = %lu\n",
- adapter->dropped_pkts);
- device_printf(dev, "watchdog timeouts = %ld\n",
- adapter->watchdog_events);
-
- device_printf(dev,"Good Packets Rcvd = %llu\n",
- (long long)adapter->stats.vfgprc);
- device_printf(dev,"Good Packets Xmtd = %llu\n",
- (long long)adapter->stats.vfgptc);
- device_printf(dev,"TSO Transmissions = %lu\n",
- adapter->tso_tx);
-
-}
-
-/**********************************************************************
- *
- * This routine is called from the driver's debug sysctl handler.
- * This routine provides a way to take a look at important statistics
- * maintained by the driver and hardware.
- *
- **********************************************************************/
-static void
-ixv_print_debug_info(struct adapter *adapter)
-{
- device_t dev = adapter->dev;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ix_queue *que = adapter->queues;
- struct rx_ring *rxr;
- struct tx_ring *txr;
- struct lro_ctrl *lro;
-
- device_printf(dev,"Error Byte Count = %u \n",
- IXGBE_READ_REG(hw, IXGBE_ERRBC));
-
- for (int i = 0; i < adapter->num_queues; i++, que++) {
- txr = que->txr;
- rxr = que->rxr;
- lro = &rxr->lro;
- device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
- que->msix, (long)que->irqs);
- device_printf(dev,"RX(%d) Packets Received: %lld\n",
- rxr->me, (long long)rxr->rx_packets);
- device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
- rxr->me, (long long)rxr->rx_split_packets);
- device_printf(dev,"RX(%d) Bytes Received: %lu\n",
- rxr->me, (long)rxr->rx_bytes);
- device_printf(dev,"RX(%d) LRO Queued= %d\n",
- rxr->me, lro->lro_queued);
- device_printf(dev,"RX(%d) LRO Flushed= %d\n",
- rxr->me, lro->lro_flushed);
- device_printf(dev,"TX(%d) Packets Sent: %lu\n",
- txr->me, (long)txr->total_packets);
- device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
- txr->me, (long)txr->no_desc_avail);
- }
-
- device_printf(dev,"MBX IRQ Handled: %lu\n",
- (long)adapter->mbx_irq);
- return;
-}
-
-static int
-ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
-{
- int error;
- int result;
- struct adapter *adapter;
-
- result = -1;
- error = sysctl_handle_int(oidp, &result, 0, req);
-
- if (error || !req->newptr)
- return (error);
-
- if (result == 1) {
- adapter = (struct adapter *) arg1;
- ixv_print_hw_stats(adapter);
- }
- return error;
-}
-
-static int
-ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
-{
- int error, result;
- struct adapter *adapter;
-
- result = -1;
- error = sysctl_handle_int(oidp, &result, 0, req);
-
- if (error || !req->newptr)
- return (error);
-
- if (result == 1) {
- adapter = (struct adapter *) arg1;
- ixv_print_debug_info(adapter);
- }
- return error;
-}
-
-/*
-** Set flow control using sysctl:
-** Flow control values:
-** 0 - off
-** 1 - rx pause
-** 2 - tx pause
-** 3 - full
-*/
-static int
-ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
-{
- int error;
- struct adapter *adapter;
-
- error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
-
- if (error)
- return (error);
-
- adapter = (struct adapter *) arg1;
- switch (ixv_flow_control) {
- case ixgbe_fc_rx_pause:
- case ixgbe_fc_tx_pause:
- case ixgbe_fc_full:
- adapter->hw.fc.requested_mode = ixv_flow_control;
- break;
- case ixgbe_fc_none:
- default:
- adapter->hw.fc.requested_mode = ixgbe_fc_none;
- }
-
- ixgbe_fc_enable(&adapter->hw);
- return error;
-}
-
-static void
-ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
- const char *description, int *limit, int value)
-{
- *limit = value;
- SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
- OID_AUTO, name, CTLFLAG_RW, limit, value, description);
-}
-
diff --git a/sys/dev/ixgbe/ixv.h b/sys/dev/ixgbe/ixv.h
deleted file mode 100644
index 79d84b6..0000000
--- a/sys/dev/ixgbe/ixv.h
+++ /dev/null
@@ -1,438 +0,0 @@
-/******************************************************************************
-
- Copyright (c) 2001-2012, Intel Corporation
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-
-#ifndef _IXV_H_
-#define _IXV_H_
-
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/mbuf.h>
-#include <sys/protosw.h>
-#include <sys/socket.h>
-#include <sys/malloc.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/sockio.h>
-#include <sys/eventhandler.h>
-
-#include <net/if.h>
-#include <net/if_var.h>
-#include <net/if_arp.h>
-#include <net/bpf.h>
-#include <net/ethernet.h>
-#include <net/if_dl.h>
-#include <net/if_media.h>
-
-#include <net/bpf.h>
-#include <net/if_types.h>
-#include <net/if_vlan_var.h>
-
-#include <netinet/in_systm.h>
-#include <netinet/in.h>
-#include <netinet/if_ether.h>
-#include <netinet/ip.h>
-#include <netinet/ip6.h>
-#include <netinet/tcp.h>
-#include <netinet/tcp_lro.h>
-#include <netinet/udp.h>
-
-#include <machine/in_cksum.h>
-
-#include <sys/bus.h>
-#include <machine/bus.h>
-#include <sys/rman.h>
-#include <machine/resource.h>
-#include <vm/vm.h>
-#include <vm/pmap.h>
-#include <machine/clock.h>
-#include <dev/pci/pcivar.h>
-#include <dev/pci/pcireg.h>
-#include <sys/proc.h>
-#include <sys/sysctl.h>
-#include <sys/endian.h>
-#include <sys/taskqueue.h>
-#include <sys/pcpu.h>
-#include <sys/smp.h>
-#include <machine/smp.h>
-
-#include "ixgbe_api.h"
-#include "ixgbe_vf.h"
-
-/* Tunables */
-
-/*
- * TxDescriptors Valid Range: 64-4096 Default Value: 1024. This value is the
- * number of transmit descriptors allocated by the driver. Increasing this
- * value allows the driver to queue more transmits. Each descriptor is 16
- * bytes. Performance tests have shown the 2K value to be optimal for top
- * performance.
- */
-#define DEFAULT_TXD 1024
-#define PERFORM_TXD 2048
-#define MAX_TXD 4096
-#define MIN_TXD 64
-
-/*
- * RxDescriptors Valid Range: 64-4096 Default Value: 1024. This value is the
- * number of receive descriptors allocated for each RX queue. Increasing this
- * value allows the driver to buffer more incoming packets. Each descriptor
- * is 16 bytes. A receive buffer is also allocated for each descriptor.
- *
- * Note: with 8 rings and a dual port card, it is possible to bump up
- * against the system mbuf pool limit; you can tune nmbclusters
- * to adjust for this.
- */
-#define DEFAULT_RXD 1024
-#define PERFORM_RXD 2048
-#define MAX_RXD 4096
-#define MIN_RXD 64
-
-/* Alignment for rings */
-#define DBA_ALIGN 128
-
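These descriptor counts translate directly into ring memory: each advanced descriptor is 16 bytes and the allocation is rounded up to DBA_ALIGN, as in the roundup2() call in ixv_setup_receive_ring. A quick check of the resulting sizes:

	#include <stdio.h>

	#define DESC_SIZE	16	/* sizeof(union ixgbe_adv_rx_desc) */
	#define DBA_ALIGN	128

	/* Same rounding as the kernel's roundup2() for power-of-two alignment */
	#define ROUNDUP2(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

	int
	main(void)
	{
		printf("1024 desc -> %d bytes\n", ROUNDUP2(1024 * DESC_SIZE, DBA_ALIGN));
		printf("4096 desc -> %d bytes\n", ROUNDUP2(4096 * DESC_SIZE, DBA_ALIGN));
		return (0);
	}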
-/*
- * This parameter controls the maximum number of times the driver will loop
- * in the ISR. Minimum Value = 1.
- */
-#define MAX_LOOP 10
-
-/*
- * This is the max watchdog interval, i.e. the time that can
- * pass between any two TX clean operations; these only happen
- * when the TX hardware is functioning.
- */
-#define IXV_WATCHDOG (10 * hz)
-
-/*
- * These parameters control when the driver calls the routine to reclaim
- * transmit descriptors.
- */
-#define IXV_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
-#define IXV_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
-
-#define IXV_MAX_FRAME_SIZE 0x3F00
-
-/* Flow control constants */
-#define IXV_FC_PAUSE 0xFFFF
-#define IXV_FC_HI 0x20000
-#define IXV_FC_LO 0x10000
-
-/* Defines for printing debug information */
-#define DEBUG_INIT 0
-#define DEBUG_IOCTL 0
-#define DEBUG_HW 0
-
-#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
-#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
-#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
-#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
-#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
-#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
-#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
-#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
-#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
-
-#define MAX_NUM_MULTICAST_ADDRESSES 128
-#define IXV_EITR_DEFAULT 128
-#define IXV_SCATTER 32
-#define IXV_RX_HDR 128
-#define MSIX_BAR 3
-#define IXV_TSO_SIZE 65535
-#define IXV_BR_SIZE 4096
-#define IXV_LINK_ITR 2000
-#define TX_BUFFER_SIZE ((u32) 1514)
-#define VFTA_SIZE 128
-
-/* Offload bits in mbuf flag */
-#if __FreeBSD_version >= 800000
-#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
-#else
-#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
-#endif
-
-/*
- *****************************************************************************
- * vendor_info_array
- *
- * This array contains the list of Subvendor/Subdevice IDs on which the driver
- * should load.
- *
- *****************************************************************************
- */
-typedef struct _ixv_vendor_info_t {
- unsigned int vendor_id;
- unsigned int device_id;
- unsigned int subvendor_id;
- unsigned int subdevice_id;
- unsigned int index;
-} ixv_vendor_info_t;
-
-
-struct ixv_tx_buf {
- u32 eop_index;
- struct mbuf *m_head;
- bus_dmamap_t map;
-};
-
-struct ixv_rx_buf {
- struct mbuf *m_head;
- struct mbuf *m_pack;
- struct mbuf *fmp;
- bus_dmamap_t hmap;
- bus_dmamap_t pmap;
-};
-
-/*
- * Bus dma allocation structure used by ixv_dma_malloc and ixv_dma_free.
- */
-struct ixv_dma_alloc {
- bus_addr_t dma_paddr;
- caddr_t dma_vaddr;
- bus_dma_tag_t dma_tag;
- bus_dmamap_t dma_map;
- bus_dma_segment_t dma_seg;
- bus_size_t dma_size;
- int dma_nseg;
-};
-
-/*
-** Driver queue struct: this is the interrupt container
-** for the associated tx and rx ring.
-*/
-struct ix_queue {
- struct adapter *adapter;
- u32 msix; /* This queue's MSIX vector */
- u32 eims; /* This queue's EIMS bit */
- u32 eitr_setting;
- u32 eitr; /* cached reg */
- struct resource *res;
- void *tag;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- struct task que_task;
- struct taskqueue *tq;
- u64 irqs;
-};
-
-/*
- * The transmit ring, one per queue
- */
-struct tx_ring {
- struct adapter *adapter;
- struct mtx tx_mtx;
- u32 me;
- bool watchdog_check;
- int watchdog_time;
- union ixgbe_adv_tx_desc *tx_base;
- struct ixv_dma_alloc txdma;
- u32 next_avail_desc;
- u32 next_to_clean;
- struct ixv_tx_buf *tx_buffers;
- volatile u16 tx_avail;
- u32 txd_cmd;
- bus_dma_tag_t txtag;
- char mtx_name[16];
- struct buf_ring *br;
- /* Soft Stats */
- u32 bytes;
- u32 packets;
- u64 no_desc_avail;
- u64 total_packets;
-};
-
-
-/*
- * The Receive ring, one per rx queue
- */
-struct rx_ring {
- struct adapter *adapter;
- struct mtx rx_mtx;
- u32 me;
- union ixgbe_adv_rx_desc *rx_base;
- struct ixv_dma_alloc rxdma;
- struct lro_ctrl lro;
- bool lro_enabled;
- bool hdr_split;
- bool discard;
- u32 next_to_refresh;
- u32 next_to_check;
- char mtx_name[16];
- struct ixv_rx_buf *rx_buffers;
- bus_dma_tag_t htag;
- bus_dma_tag_t ptag;
-
- u32 bytes; /* Used for AIM calc */
- u32 packets;
-
- /* Soft stats */
- u64 rx_irq;
- u64 rx_split_packets;
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_discarded;
-};
-
-/* Our adapter structure */
-struct adapter {
- struct ifnet *ifp;
- struct ixgbe_hw hw;
-
- struct ixgbe_osdep osdep;
- struct device *dev;
-
- struct resource *pci_mem;
- struct resource *msix_mem;
-
- /*
- * Interrupt resources: this set is
- * either used for legacy, or for Link
- * when doing MSIX
- */
- void *tag;
- struct resource *res;
-
- struct ifmedia media;
- struct callout timer;
- int msix;
- int if_flags;
-
- struct mtx core_mtx;
-
- eventhandler_tag vlan_attach;
- eventhandler_tag vlan_detach;
-
- u16 num_vlans;
- u16 num_queues;
-
- /* Info about the board itself */
- bool link_active;
- u16 max_frame_size;
- u32 link_speed;
- bool link_up;
- u32 mbxvec;
-
- /* Mbuf cluster size */
- u32 rx_mbuf_sz;
-
- /* Mailbox servicing */
- struct task mbx_task; /* Mailbox tasklet */
- struct taskqueue *tq;
-
- /*
- ** Queues:
-	** This is the IRQ holder; it has
-	** an RX/TX pair of rings associated
-	** with it.
- */
- struct ix_queue *queues;
-
- /*
- * Transmit rings:
- * Allocated at run time, an array of rings.
- */
- struct tx_ring *tx_rings;
- int num_tx_desc;
-
- /*
- * Receive rings:
- * Allocated at run time, an array of rings.
- */
- struct rx_ring *rx_rings;
- int num_rx_desc;
- u64 que_mask;
- u32 rx_process_limit;
-
- /* Misc stats maintained by the driver */
- unsigned long dropped_pkts;
- unsigned long mbuf_defrag_failed;
- unsigned long mbuf_header_failed;
- unsigned long mbuf_packet_failed;
- unsigned long no_tx_map_avail;
- unsigned long no_tx_dma_setup;
- unsigned long watchdog_events;
- unsigned long tso_tx;
- unsigned long mbx_irq;
-
- struct ixgbevf_hw_stats stats;
-};
-
-
-#define IXV_CORE_LOCK_INIT(_sc, _name) \
- mtx_init(&(_sc)->core_mtx, _name, "IXV Core Lock", MTX_DEF)
-#define IXV_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
-#define IXV_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
-#define IXV_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
-#define IXV_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
-#define IXV_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
-#define IXV_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
-#define IXV_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
-#define IXV_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
-#define IXV_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
-#define IXV_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
-#define IXV_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
-#define IXV_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
-
-/* Workaround to make 8.0 buildable */
-#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
-static __inline int
-drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
-{
-#ifdef ALTQ
- if (ALTQ_IS_ENABLED(&ifp->if_snd))
- return (1);
-#endif
- return (!buf_ring_empty(br));
-}
-#endif
-
-/*
-** Find the number of unrefreshed RX descriptors
-*/
-static inline u16
-ixv_rx_unrefreshed(struct rx_ring *rxr)
-{
- struct adapter *adapter = rxr->adapter;
-
- if (rxr->next_to_check > rxr->next_to_refresh)
- return (rxr->next_to_check - rxr->next_to_refresh - 1);
- else
- return ((adapter->num_rx_desc + rxr->next_to_check) -
- rxr->next_to_refresh - 1);
-}
-
-#endif /* _IXV_H_ */
diff --git a/sys/modules/ixgbe/Makefile b/sys/modules/ix/Makefile
index 923e217..5a5485d 100644
--- a/sys/modules/ixgbe/Makefile
+++ b/sys/modules/ix/Makefile
@@ -2,10 +2,10 @@
.PATH: ${.CURDIR}/../../dev/ixgbe
-KMOD = if_ixgbe
+KMOD = if_ix
SRCS = device_if.h bus_if.h pci_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
-SRCS += ixgbe.c ixv.c
+SRCS += if_ix.c ix_txrx.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c
diff --git a/sys/modules/ixv/Makefile b/sys/modules/ixv/Makefile
new file mode 100644
index 0000000..20ecaf1
--- /dev/null
+++ b/sys/modules/ixv/Makefile
@@ -0,0 +1,15 @@
+#$FreeBSD$
+
+.PATH: ${.CURDIR}/../../dev/ixgbe
+
+KMOD = if_ixv
+SRCS = device_if.h bus_if.h pci_if.h
+SRCS += opt_inet.h opt_inet6.h opt_rss.h
+SRCS += if_ixv.c ix_txrx.c
+# Shared source
+SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
+SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c
+SRCS += ixgbe_82599.c ixgbe_82598.c ixgbe_x540.c
+CFLAGS+= -I${.CURDIR}/../../dev/ixgbe -DSMP
+
+.include <bsd.kmod.mk>