author    jfv <jfv@FreeBSD.org>    2009-06-24 18:27:07 +0000
committer jfv <jfv@FreeBSD.org>    2009-06-24 18:27:07 +0000
commit    e13ea48de648aaa38679919abe3a877c7d86001c (patch)
tree      26341f299db67177e35924410a0d36fa6e562e7f /sys/dev/ixgbe
parent    660ff14c445ffa5e88e7c9eb70a51111dd2a2f7e (diff)
Update for the Intel 10G driver: adds support for the newest hardware, adds a
multiqueue TX interface, and cleans up the infrastructure to allow up to 32
MSI-X vectors on newer Nehalem systems. Bug fixes, etc.
Diffstat (limited to 'sys/dev/ixgbe')
-rw-r--r--  sys/dev/ixgbe/ixgbe.c         1301
-rw-r--r--  sys/dev/ixgbe/ixgbe.h           81
-rw-r--r--  sys/dev/ixgbe/ixgbe_82598.c     54
-rw-r--r--  sys/dev/ixgbe/ixgbe_82599.c    309
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c       30
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h        3
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c    60
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.h      2
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.c       59
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.h        4
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h      60
11 files changed, 1279 insertions(+), 684 deletions(-)
diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/ixgbe.c
index e2827f4..45a55a4 100644
--- a/sys/dev/ixgbe/ixgbe.c
+++ b/sys/dev/ixgbe/ixgbe.c
@@ -46,7 +46,7 @@ int ixgbe_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
-char ixgbe_driver_version[] = "1.7.4";
+char ixgbe_driver_version[] = "1.8.7";
/*********************************************************************
* PCI Device ID Table
@@ -73,6 +73,7 @@ static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -94,6 +95,12 @@ static int ixgbe_detach(device_t);
static int ixgbe_shutdown(device_t);
static void ixgbe_start(struct ifnet *);
static void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
+#if __FreeBSD_version >= 800000
+static int ixgbe_mq_start(struct ifnet *, struct mbuf *);
+static int ixgbe_mq_start_locked(struct ifnet *,
+ struct tx_ring *, struct mbuf *);
+static void ixgbe_qflush(struct ifnet *);
+#endif
static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void ixgbe_watchdog(struct adapter *);
static void ixgbe_init(void *);
@@ -106,9 +113,7 @@ static int ixgbe_allocate_pci_resources(struct adapter *);
static int ixgbe_allocate_msix(struct adapter *);
static int ixgbe_allocate_legacy(struct adapter *);
static int ixgbe_allocate_queues(struct adapter *);
-#if __FreeBSD_version >= 602105
static int ixgbe_setup_msix(struct adapter *);
-#endif
static void ixgbe_free_pci_resources(struct adapter *);
static void ixgbe_local_timer(void *);
static int ixgbe_hardware_init(struct adapter *);
@@ -151,22 +156,15 @@ static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
static void ixgbe_add_rx_process_limit(struct adapter *, const char *,
const char *, int *, int);
-static int ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
-static boolean_t ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
-static void ixgbe_set_ivar(struct adapter *, u16, u8, s8);
+static bool ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
+static bool ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
+static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void ixgbe_configure_ivars(struct adapter *);
static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
-#ifdef IXGBE_HW_VLAN_SUPPORT
+static void ixgbe_setup_vlan_hw_support(struct adapter *);
static void ixgbe_register_vlan(void *, struct ifnet *, u16);
static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
-#endif
-
-#ifdef IXGBE_TIMESYNC
-/* Precision Time sync support */
-static int ixgbe_tsync_init(struct adapter *);
-static void ixgbe_tsync_disable(struct adapter *);
-#endif
static void ixgbe_update_aim(struct rx_ring *);
@@ -176,12 +174,10 @@ static bool ixgbe_sfp_probe(struct adapter *);
/* Legacy (single vector interrupt handler */
static void ixgbe_legacy_irq(void *);
-#if __FreeBSD_version >= 602105
/* The MSI/X Interrupt handlers */
static void ixgbe_msix_tx(void *);
static void ixgbe_msix_rx(void *);
static void ixgbe_msix_link(void *);
-#endif
/* Deferred interrupt tasklets */
static void ixgbe_handle_tx(void *, int);
@@ -238,45 +234,25 @@ TUNABLE_INT("hw.ixgbe.bulk_latency", &ixgbe_bulk_latency);
static int ixgbe_rx_process_limit = 100;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
-/* Flow control setting, default to off */
+/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
/*
- * Should the driver do LRO on the RX end
- * this can be toggled on the fly, but the
- * interface must be reset (down/up) for it
- * to take effect.
- */
-static int ixgbe_enable_lro = 1;
-TUNABLE_INT("hw.ixgbe.enable_lro", &ixgbe_enable_lro);
-
-/*
* MSIX should be the default for best performance,
* but this allows it to be forced off for testing.
*/
-#if __FreeBSD_version >= 602105
static int ixgbe_enable_msix = 1;
-#else
-static int ixgbe_enable_msix = 0;
-#endif
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
/*
- * Enable RX Header Split
- * WARNING: disable this if bridging or forwarding!!
+ * Number of Queues, should normally
+ * be left at 0, it then autoconfigures to
+ * the number of cpus. Each queue is a pair
+ * of RX and TX rings with a dedicated interrupt
*/
-static int ixgbe_rx_hdr_split = 1;
-TUNABLE_INT("hw.ixgbe.rx_hdr_split", &ixgbe_rx_hdr_split);
-
-/*
- * Number of TX/RX Queues, with 0 setting
- * it autoconfigures to the number of cpus.
- */
-static int ixgbe_tx_queues = 1;
-TUNABLE_INT("hw.ixgbe.tx_queues", &ixgbe_tx_queues);
-static int ixgbe_rx_queues = 1;
-TUNABLE_INT("hw.ixgbe.rx_queues", &ixgbe_rx_queues);
+static int ixgbe_num_queues = 0;
+TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
/* Number of TX descriptors per ring */
static int ixgbe_txd = DEFAULT_TXD;
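The tunables touched in this hunk are loader-time knobs (TUNABLE_INT), so in practice they are set in /boot/loader.conf before the driver attaches. A minimal sketch, with purely illustrative values:
	# /boot/loader.conf (illustrative values only)
	hw.ixgbe.num_queues=4          # 0 = autoconfigure to the number of CPUs
	hw.ixgbe.flow_control=3        # ixgbe_fc_full, the new default
	hw.ixgbe.enable_msix=1         # set to 0 only to force MSI/legacy for testing
	hw.ixgbe.rx_process_limit=100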
@@ -290,6 +266,13 @@ TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
static int ixgbe_total_ports;
/*
+** Shadow VFTA table, this is needed because
+** the real filter table gets cleared during
+** a soft reset and we need to repopulate it.
+*/
+static u32 ixgbe_shadow_vfta[IXGBE_VFTA_SIZE];
+
+/*
** The number of scatter-gather segments
** differs for 82598 and 82599, default to
** the former.
@@ -400,6 +383,9 @@ ixgbe_attach(device_t dev)
case IXGBE_DEV_ID_82599_KX4 :
adapter->optics = IFM_10G_CX4;
ixgbe_num_segs = IXGBE_82599_SCATTER;
+ break;
+ case IXGBE_DEV_ID_82599_XAUI_LOM :
+ ixgbe_num_segs = IXGBE_82599_SCATTER;
default:
break;
}
@@ -422,11 +408,6 @@ ixgbe_attach(device_t dev)
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
- &ixgbe_enable_lro, 1, "Large Receive Offload");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
&ixgbe_enable_aim, 1, "Interrupt Moderation");
@@ -445,11 +426,6 @@ ixgbe_attach(device_t dev)
OID_AUTO, "bulk_latency", CTLTYPE_INT|CTLFLAG_RW,
&ixgbe_bulk_latency, 1, "Bulk Latency");
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "hdr_split", CTLTYPE_INT|CTLFLAG_RW,
- &ixgbe_rx_hdr_split, 1, "RX Header Split");
-
/* Set up the timer callout */
callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
@@ -476,10 +452,10 @@ ixgbe_attach(device_t dev)
** system mbuf allocation. Tuning nmbclusters
** can alleviate this.
*/
- if ((adapter->num_rx_queues > 1) && (nmbclusters > 0 )){
+ if ((adapter->num_queues > 1) && (nmbclusters > 0 )){
int s;
/* Calculate the total RX mbuf needs */
- s = (ixgbe_rxd * adapter->num_rx_queues) * ixgbe_total_ports;
+ s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
if (s > nmbclusters) {
device_printf(dev, "RX Descriptors exceed "
"system mbuf max, using default instead!\n");
@@ -537,6 +513,22 @@ ixgbe_attach(device_t dev)
/* Setup OS specific network interface */
ixgbe_setup_interface(dev, adapter);
+#ifdef IXGBE_IEEE1588
+ /*
+ ** Setup the timer: IEEE 1588 support
+ */
+ adapter->cycles.read = ixgbe_read_clock;
+ adapter->cycles.mask = (u64)-1;
+ adapter->cycles.mult = 1;
+ adapter->cycles.shift = IXGBE_TSYNC_SHIFT;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TIMINCA, (1<<24) |
+ IXGBE_TSYNC_CYCLE_TIME * IXGBE_TSYNC_SHIFT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYSTIML, 0x00000000);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYSTIMH, 0xFF800000);
+
+ // JFV - this is not complete yet
+#endif
+
/* Sysctl for limiting the amount of work done in the taskqueue */
ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
"max number of rx packets to process", &adapter->rx_process_limit,
@@ -545,13 +537,11 @@ ixgbe_attach(device_t dev)
/* Initialize statistics */
ixgbe_update_stats_counters(adapter);
-#ifdef IXGBE_HW_VLAN_SUPPORT
/* Register for VLAN events */
adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
ixgbe_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
ixgbe_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
-#endif
/* let hardware know driver is loaded */
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
@@ -590,11 +580,7 @@ ixgbe_detach(device_t dev)
INIT_DEBUGOUT("ixgbe_detach: begin");
/* Make sure VLANS are not using driver */
-#if __FreeBSD_version >= 700000
if (adapter->ifp->if_vlantrunk != NULL) {
-#else
- if (adapter->ifp->if_nvlans != 0) {
-#endif
device_printf(dev,"Vlan in use, detach first\n");
return (EBUSY);
}
@@ -603,14 +589,14 @@ ixgbe_detach(device_t dev)
ixgbe_stop(adapter);
IXGBE_CORE_UNLOCK(adapter);
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
if (txr->tq) {
taskqueue_drain(txr->tq, &txr->tx_task);
taskqueue_free(txr->tq);
}
}
- for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
if (rxr->tq) {
taskqueue_drain(rxr->tq, &rxr->rx_task);
taskqueue_free(rxr->tq);
@@ -630,13 +616,11 @@ ixgbe_detach(device_t dev)
ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
-#ifdef IXGBE_HW_VLAN_SUPPORT
/* Unregister VLAN events */
if (adapter->vlan_attach != NULL)
EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
if (adapter->vlan_detach != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
-#endif
ether_ifdetach(adapter->ifp);
callout_drain(&adapter->timer);
@@ -697,19 +681,6 @@ ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
if (m_head == NULL)
break;
- /*
- * Force a cleanup if number of TX descriptors
- * available is below the threshold. If it fails
- * to get above, then abort transmit.
- */
- if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
- ixgbe_txeof(txr);
- /* Make sure things have improved */
- if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
- txr->no_tx_desc_avail++;
- break;
- }
- }
if (ixgbe_xmit(txr, &m_head)) {
if (m_head == NULL)
@@ -728,35 +699,127 @@ ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
return;
}
-
+/*
+ * Legacy TX start - called by the stack, this
+ * always uses the first tx ring, and should
+ * not be used with multiqueue tx enabled.
+ */
static void
ixgbe_start(struct ifnet *ifp)
{
struct adapter *adapter = ifp->if_softc;
struct tx_ring *txr = adapter->tx_rings;
- u32 queue = 0;
-
- /*
- ** This is really just here for testing
- ** TX multiqueue, ultimately what is
- ** needed is the flow support in the stack
- ** and appropriate logic here to deal with
- ** it. -jfv
- */
- if (adapter->num_tx_queues > 1)
- queue = (curcpu % adapter->num_tx_queues);
-
- txr = &adapter->tx_rings[queue];
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- if (IXGBE_TX_TRYLOCK(txr) == 0)
- return;
+ IXGBE_TX_LOCK(txr);
ixgbe_start_locked(txr, ifp);
IXGBE_TX_UNLOCK(txr);
}
return;
}
+#if __FreeBSD_version >= 800000
+/*
+** Multiqueue Transmit driver
+**
+*/
+static int
+ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr;
+ int i = 0, err = 0;
+
+ /* Which queue to use */
+ if ((m->m_flags & M_FLOWID) != 0)
+ i = m->m_pkthdr.flowid % adapter->num_queues;
+ txr = &adapter->tx_rings[i];
+
+ if (IXGBE_TX_TRYLOCK(txr)) {
+ err = ixgbe_mq_start_locked(ifp, txr, m);
+ IXGBE_TX_UNLOCK(txr);
+ } else
+ err = drbr_enqueue(ifp, txr->br, m);
+
+ return (err);
+}
+
+static int
+ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
+{
+ struct adapter *adapter = txr->adapter;
+ struct mbuf *next;
+ int err = 0;
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ err = drbr_enqueue(ifp, txr->br, m);
+ return (err);
+ }
+
+ if (m == NULL) /* Called by tasklet */
+ goto process;
+
+ /* If nothing queued go right to xmit */
+ if (drbr_empty(ifp, txr->br)) {
+ if (ixgbe_xmit(txr, &m)) {
+ if (m && (err = drbr_enqueue(ifp, txr->br, m)) != 0)
+ return (err);
+ } else {
+ /* Success, update stats */
+ drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
+ /* Send a copy of the frame to the BPF listener */
+ ETHER_BPF_MTAP(ifp, m);
+ /* Set the watchdog */
+ txr->watchdog_timer = IXGBE_TX_TIMEOUT;
+ }
+
+ } else if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
+ return (err);
+
+process:
+ if (drbr_empty(ifp, txr->br))
+ return (err);
+
+ /* Process the queue */
+ while (TRUE) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+ next = drbr_dequeue(ifp, txr->br);
+ if (next == NULL)
+ break;
+ if (ixgbe_xmit(txr, &next))
+ break;
+ ETHER_BPF_MTAP(ifp, next);
+ /* Set the watchdog */
+ txr->watchdog_timer = IXGBE_TX_TIMEOUT;
+ }
+
+ if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD)
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+
+ return (err);
+}
+
+/*
+** Flush all ring buffers
+*/
+static void
+ixgbe_qflush(struct ifnet *ifp)
+{
+ struct adapter *adapter = ifp->if_softc;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct mbuf *m;
+
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ IXGBE_TX_LOCK(txr);
+ while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+ m_freem(m);
+ IXGBE_TX_UNLOCK(txr);
+ }
+ if_qflush(ifp);
+}
+#endif /* __FreeBSD_version >= 800000 */
+
/*********************************************************************
* Ioctl entry point
*
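For context on the multiqueue path just added, a sketch of the caller side (not part of the change, helper name hypothetical): on FreeBSD 8 the stack no longer drains if_snd for this driver, it hands each outbound packet to the method installed as ifp->if_transmit, which ixgbe_setup_interface() further below points at ixgbe_mq_start(); ifp->if_qflush likewise dispatches to ixgbe_qflush().
	/*
	 * Sketch only: how the stack reaches the new multiqueue entry point
	 * on __FreeBSD_version >= 800000.
	 */
	static int
	example_transmit_one(struct ifnet *ifp, struct mbuf *m)
	{
		return ((*ifp->if_transmit)(ifp, m));	/* -> ixgbe_mq_start() */
	}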
@@ -769,13 +832,16 @@ ixgbe_start(struct ifnet *ifp)
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
- int error = 0;
+ struct adapter *adapter = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *) data;
+#ifdef INET
struct ifaddr *ifa = (struct ifaddr *) data;
- struct adapter *adapter = ifp->if_softc;
+#endif
+ int error = 0;
switch (command) {
case SIOCSIFADDR:
+#ifdef INET
IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
if (ifa->ifa_addr->sa_family == AF_INET) {
ifp->if_flags |= IFF_UP;
@@ -786,6 +852,7 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
}
arp_ifinit(ifp, ifa);
} else
+#endif
ether_ioctl(ifp, command, data);
break;
case SIOCSIFMTU:
@@ -843,75 +910,24 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_HWCSUM;
if (mask & IFCAP_TSO4)
ifp->if_capenable ^= IFCAP_TSO4;
+ if (mask & IFCAP_LRO)
+ ifp->if_capenable ^= IFCAP_LRO;
if (mask & IFCAP_VLAN_HWTAGGING)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
ixgbe_init(adapter);
-#if __FreeBSD_version >= 700000
VLAN_CAPABILITIES(ifp);
-#endif
break;
}
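With LRO now exposed as an interface capability instead of a loader tunable, it can presumably be toggled at runtime with the stock ifconfig lro/-lro flags; the SIOCSIFCAP path above re-runs ixgbe_init() when the interface is up, so the change takes effect immediately. The interface name below is illustrative:
	ifconfig ix0 -lro    # disable LRO (and, per the RX ring setup below, header split)
	ifconfig ix0 lro     # re-enable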
-#ifdef IXGBE_TIMESYNC
+
+#ifdef IXGBE_IEEE1588
/*
** IOCTL support for Precision Time (IEEE 1588) Support
*/
- case IXGBE_TIMESYNC_READTS:
- {
- u32 rx_ctl, tx_ctl;
- struct ixgbe_tsync_read *tdata;
-
- tdata = (struct ixgbe_tsync_read *) ifr->ifr_data;
-
- if (tdata->read_current_time) {
- getnanotime(&tdata->system_time);
- tdata->network_time = IXGBE_READ_REG(&adapter->hw,
- IXGBE_SYSTIML);
- tdata->network_time |=
- (u64)IXGBE_READ_REG(&adapter->hw,
- IXGBE_SYSTIMH ) << 32;
- }
-
- rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
- tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
-
- if (rx_ctl & 0x1) {
- u32 tmp;
- unsigned char *tmp_cp;
-
- tdata->rx_valid = 1;
- tdata->rx_stamp = IXGBE_READ_REG(&adapter->hw,
- IXGBE_RXSTMPL);
- tdata->rx_stamp |= (u64)IXGBE_READ_REG(&adapter->hw,
- IXGBE_RXSTMPH) << 32;
-
- tmp = IXGBE_READ_REG(&adapter->hw, IXGBE_RXSATRL);
- tmp_cp = (unsigned char *) &tmp;
- tdata->srcid[0] = tmp_cp[0];
- tdata->srcid[1] = tmp_cp[1];
- tdata->srcid[2] = tmp_cp[2];
- tdata->srcid[3] = tmp_cp[3];
- tmp = IXGBE_READ_REG(&adapter->hw, IXGBE_RXSATRH);
- tmp_cp = (unsigned char *) &tmp;
- tdata->srcid[4] = tmp_cp[0];
- tdata->srcid[5] = tmp_cp[1];
- tdata->seqid = tmp >> 16;
- tdata->seqid = htons(tdata->seqid);
- } else
- tdata->rx_valid = 0;
-
- if (tx_ctl & 0x1) {
- tdata->tx_valid = 1;
- tdata->tx_stamp = IXGBE_READ_REG(&adapter->hw,
- IXGBE_TXSTMPL);
- tdata->tx_stamp |= (u64) IXGBE_READ_REG(&adapter->hw,
- IXGBE_TXSTMPH) << 32;
- } else
- tdata->tx_valid = 0;
-
- return (0);
- }
-#endif /* IXGBE_TIMESYNC */
+ case SIOCSHWTSTAMP:
+ error = ixgbe_hwtstamp_ioctl(adapter, ifp);
+ break;
+#endif
default:
IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
@@ -947,7 +963,7 @@ ixgbe_watchdog(struct adapter *adapter)
* Finally, anytime all descriptors are clean the timer is
* set to 0.
*/
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
u32 head, tail;
IXGBE_TX_LOCK(txr);
@@ -976,7 +992,7 @@ ixgbe_watchdog(struct adapter *adapter)
*/
if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
txr = adapter->tx_rings; /* reset pointer */
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
IXGBE_TX_LOCK(txr);
txr->watchdog_timer = IXGBE_TX_TIMEOUT;
IXGBE_TX_UNLOCK(txr);
@@ -986,7 +1002,7 @@ ixgbe_watchdog(struct adapter *adapter)
device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
IXGBE_READ_REG(hw, IXGBE_TDH(i)),
IXGBE_READ_REG(hw, IXGBE_TDT(i)));
@@ -1041,16 +1057,6 @@ ixgbe_init_locked(struct adapter *adapter)
return;
}
-#ifndef IXGBE_HW_VLAN_SUPPORT
- if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
- u32 ctrl;
-
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
- ctrl |= IXGBE_VLNCTRL_VME;
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
- }
-#endif
/* Prepare transmit descriptors and buffers */
if (ixgbe_setup_transmit_structures(adapter)) {
device_printf(dev,"Could not setup transmit structures\n");
@@ -1096,7 +1102,7 @@ ixgbe_init_locked(struct adapter *adapter)
if (hw->device_id == IXGBE_DEV_ID_82598AT)
gpie |= IXGBE_SDP1_GPIEN;
- if (adapter->msix > 2) {
+ if (adapter->msix > 1) {
/* Enable Enhanced MSIX mode */
gpie |= IXGBE_GPIE_MSIX_MODE;
gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
@@ -1108,7 +1114,7 @@ ixgbe_init_locked(struct adapter *adapter)
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TSO4)
ifp->if_hwassist |= CSUM_TSO;
- else if (ifp->if_capenable & IFCAP_TXCSUM)
+ if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist = (CSUM_TCP | CSUM_UDP);
/* Set MTU size */
@@ -1121,7 +1127,7 @@ ixgbe_init_locked(struct adapter *adapter)
/* Now enable all the queues */
- for (int i = 0; i < adapter->num_tx_queues; i++) {
+ for (int i = 0; i < adapter->num_queues; i++) {
txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
txdctl |= IXGBE_TXDCTL_ENABLE;
/* Set WTHRESH to 8, burst writeback */
@@ -1129,7 +1135,7 @@ ixgbe_init_locked(struct adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
}
- for (int i = 0; i < adapter->num_rx_queues; i++) {
+ for (int i = 0; i < adapter->num_queues; i++) {
rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
/* PTHRESH set to 32 */
rxdctl |= 0x0020;
@@ -1146,6 +1152,9 @@ ixgbe_init_locked(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
}
+ /* Set up VLAN offloads and filter */
+ ixgbe_setup_vlan_hw_support(adapter);
+
/* Enable Receive engine */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
@@ -1185,13 +1194,6 @@ ixgbe_init_locked(struct adapter *adapter)
} else
taskqueue_enqueue(adapter->tq, &adapter->link_task);
-
-#ifdef IXGBE_TIMESYNC
- /* Initialize IEEE 1588 support */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- ixgbe_tsync_init(adapter);
-#endif
-
/* Now inform the stack we're ready */
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
@@ -1212,9 +1214,67 @@ ixgbe_init(void *arg)
/*
-** MSIX Interrupt Tasklets
+**
+** MSIX Interrupt Handlers and Tasklets
+**
*/
+static inline void
+ixgbe_enable_queue(struct adapter *adapter, u32 vector)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 queue = (u64)(1 << vector);
+ u32 mask;
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+ } else {
+ mask = (queue & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ mask = (queue >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ }
+}
+
+static inline void
+ixgbe_disable_queue(struct adapter *adapter, u32 vector)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 queue = (u64)(1 << vector);
+ u32 mask;
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+ } else {
+ mask = (queue & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
+ mask = (queue >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+ }
+}
+
+static inline void
+ixgbe_rearm_rx_queues(struct adapter *adapter, u64 queues)
+{
+ u32 mask;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & queues);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ } else {
+ mask = (queues & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
+ mask = (queues >> 32);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+ }
+}
+
static void
ixgbe_handle_rx(void *context, int pending)
{
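The three inline helpers above share one pattern: turn a vector number into a single bit, then write it either to the 82598's 32-bit EIMS/EIMC/EICS register or to one of the two extended (_EX) registers on the 82599, which is what lets the driver address more than 32 MSI-X vectors. A standalone sketch of the 82599 half (helper name is hypothetical; the 64-bit cast is applied before the shift so vectors above 31 stay well defined):
	/*
	 * Sketch only: route one MSIX vector's bit into the 82599's
	 * pair of 32-bit extended mask registers.
	 */
	static inline void
	example_enable_vector_82599(struct ixgbe_hw *hw, u32 vector)
	{
		u64 queue = (u64)1 << vector;
		u32 lo = (u32)(queue & 0xFFFFFFFF);	/* vectors 0..31  */
		u32 hi = (u32)(queue >> 32);		/* vectors 32..63 */

		if (lo)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), lo);
		if (hi)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), hi);
	}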
@@ -1227,7 +1287,7 @@ ixgbe_handle_rx(void *context, int pending)
more = ixgbe_rxeof(rxr, -1);
} while (loop-- && more);
/* Reenable this interrupt */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
+ ixgbe_enable_queue(adapter, rxr->msix);
}
static void
@@ -1244,13 +1304,19 @@ ixgbe_handle_tx(void *context, int pending)
more = ixgbe_txeof(txr);
} while (loop-- && more);
- if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- ixgbe_start_locked(txr, ifp);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+#if __FreeBSD_version >= 800000
+ if (!drbr_empty(ifp, txr->br))
+ ixgbe_mq_start_locked(ifp, txr, NULL);
+#else
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ ixgbe_start_locked(txr, ifp);
+#endif
+ }
IXGBE_TX_UNLOCK(txr);
-
/* Reenable this interrupt */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
+ ixgbe_enable_queue(adapter, txr->msix);
}
@@ -1315,7 +1381,6 @@ ixgbe_legacy_irq(void *arg)
}
-#if __FreeBSD_version >= 602105
/*********************************************************************
*
* MSI TX Interrupt Service routine
@@ -1328,7 +1393,7 @@ ixgbe_msix_tx(void *arg)
struct adapter *adapter = txr->adapter;
bool more;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, txr->eims);
+ ixgbe_disable_queue(adapter, txr->msix);
IXGBE_TX_LOCK(txr);
++txr->tx_irq;
@@ -1337,7 +1402,7 @@ ixgbe_msix_tx(void *arg)
if (more)
taskqueue_enqueue(txr->tq, &txr->tx_task);
else /* Reenable this interrupt */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
+ ixgbe_enable_queue(adapter, txr->msix);
return;
}
@@ -1354,7 +1419,7 @@ ixgbe_msix_rx(void *arg)
struct adapter *adapter = rxr->adapter;
bool more;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims);
+ ixgbe_disable_queue(adapter, rxr->msix);
++rxr->rx_irq;
more = ixgbe_rxeof(rxr, adapter->rx_process_limit);
@@ -1366,7 +1431,7 @@ ixgbe_msix_rx(void *arg)
if (more)
taskqueue_enqueue(rxr->tq, &rxr->rx_task);
else
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
+ ixgbe_enable_queue(adapter, rxr->msix);
return;
}
@@ -1381,9 +1446,8 @@ ixgbe_msix_link(void *arg)
++adapter->link_irq;
/* First get the cause */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, IXGBE_EIMS_OTHER);
reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
- /* Clear with write */
+ /* Clear interrupt with write */
IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
/* Link status change */
@@ -1418,7 +1482,6 @@ ixgbe_msix_link(void *arg)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
return;
}
-#endif /* FreeBSD_version >= 602105 */
/*
** Routine to do adjust the RX EITR value based on traffic,
@@ -1482,14 +1545,14 @@ ixgbe_init_moderation(struct adapter *adapter)
}
/* TX irq moderation rate is fixed */
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
IXGBE_WRITE_REG(&adapter->hw,
IXGBE_EITR(txr->msix), ixgbe_ave_latency);
txr->watchdog_timer = FALSE;
}
/* RX moderation will be adapted over time, set default */
- for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
IXGBE_WRITE_REG(&adapter->hw,
IXGBE_EITR(rxr->msix), ixgbe_low_latency);
}
@@ -1590,7 +1653,7 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
u32 olinfo_status = 0, cmd_type_len;
u32 paylen = 0;
int i, j, error, nsegs;
- int first, last = 0, offload = 0;
+ int first, last = 0;
struct mbuf *m_head;
bus_dma_segment_t segs[ixgbe_num_segs];
bus_dmamap_t map;
@@ -1606,6 +1669,14 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
if (m_head->m_flags & M_VLANTAG)
cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+ /* Do a clean if descriptors are low */
+ if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
+ ixgbe_txeof(txr);
+ /* Now do we at least have a minimal? */
+ if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD)
+ return (ENOBUFS);
+ }
+
/*
* Important to capture the first descriptor
* used because it will contain the index of
@@ -1679,14 +1750,15 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
++adapter->tso_tx;
} else
return (ENXIO);
- } else /* Offloads other than TSO */
- offload = ixgbe_tx_ctx_setup(txr, m_head);
- if (offload == TRUE)
+ } else if (ixgbe_tx_ctx_setup(txr, m_head))
olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
-#ifdef IXGBE_TIMESYNC
- if (offload == IXGBE_TIMESTAMP)
- cmd_type_len |= IXGBE_ADVTXD_TSTAMP;
+
+#ifdef IXGBE_IEEE1588
+ /* This is changing soon to an mtag detection */
+ if (we detect this mbuf has a TSTAMP mtag)
+ cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
#endif
+
/* Record payload length */
if (paylen == 0)
olinfo_status |= m_head->m_pkthdr.len <<
@@ -1881,7 +1953,7 @@ ixgbe_local_timer(void *arg)
out:
/* Trigger an RX interrupt on all queues */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, adapter->rx_mask);
+ ixgbe_rearm_rx_queues(adapter, adapter->rx_mask);
callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
}
@@ -1914,7 +1986,7 @@ ixgbe_update_link_status(struct adapter *adapter)
device_printf(dev,"Link is Down\n");
if_link_state_change(ifp, LINK_STATE_DOWN);
adapter->link_active = FALSE;
- for (int i = 0; i < adapter->num_tx_queues;
+ for (int i = 0; i < adapter->num_queues;
i++, txr++)
txr->watchdog_timer = FALSE;
}
@@ -1946,11 +2018,6 @@ ixgbe_stop(void *arg)
/* Tell the stack that the interface is no longer active */
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-#ifdef IXGBE_TIMESYNC
- /* Disable IEEE 1588 support */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- ixgbe_tsync_disable(adapter);
-#endif
ixgbe_reset_hw(&adapter->hw);
adapter->hw.adapter_stopped = FALSE;
ixgbe_stop_adapter(&adapter->hw);
@@ -1996,16 +2063,16 @@ ixgbe_allocate_legacy(struct adapter *adapter)
device_t dev = adapter->dev;
struct tx_ring *txr = adapter->tx_rings;
struct rx_ring *rxr = adapter->rx_rings;
- int error;
+ int error, rid = 0;
- /* Legacy RID at 0 */
- if (adapter->msix == 0)
- adapter->rid[0] = 0;
+ /* MSI RID at 1 */
+ if (adapter->msix == 1)
+ rid = 1;
/* We allocate a single interrupt resource */
- adapter->res[0] = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
- if (adapter->res[0] == NULL) {
+ adapter->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (adapter->res == NULL) {
device_printf(dev, "Unable to allocate bus resource: "
"interrupt\n");
return (ENXIO);
@@ -2026,13 +2093,18 @@ ixgbe_allocate_legacy(struct adapter *adapter)
taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
device_get_nameunit(adapter->dev));
- if ((error = bus_setup_intr(dev, adapter->res[0],
-#if __FreeBSD_version >= 700000
+ /* Tasklets for Link, SFP and Multispeed Fiber */
+ TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
+ TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
+ TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
+ adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
+ taskqueue_thread_enqueue, &adapter->tq);
+ taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
+ device_get_nameunit(adapter->dev));
+
+ if ((error = bus_setup_intr(dev, adapter->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
-#else
- INTR_TYPE_NET | INTR_MPSAFE, ixgbe_legacy_irq,
-#endif
- adapter, &adapter->tag[0])) != 0) {
+ adapter, &adapter->tag)) != 0) {
device_printf(dev, "Failed to register fast interrupt "
"handler: %d\n", error);
taskqueue_free(txr->tq);
@@ -2046,7 +2118,6 @@ ixgbe_allocate_legacy(struct adapter *adapter)
}
-#if __FreeBSD_version >= 602105
/*********************************************************************
*
* Setup MSIX Interrupt resources and handlers
@@ -2058,33 +2129,36 @@ ixgbe_allocate_msix(struct adapter *adapter)
device_t dev = adapter->dev;
struct tx_ring *txr = adapter->tx_rings;
struct rx_ring *rxr = adapter->rx_rings;
- int error, vector = 0;
+ int error, rid, vector = 0;
/* TX setup: the code is here for multi tx,
there are other parts of the driver not ready for it */
- for (int i = 0; i < adapter->num_tx_queues; i++, vector++, txr++) {
- adapter->res[vector] = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &adapter->rid[vector],
+ for (int i = 0; i < adapter->num_queues; i++, vector++, txr++) {
+ rid = vector + 1;
+ txr->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE);
- if (!adapter->res[vector]) {
+ if (!txr->res) {
device_printf(dev,"Unable to allocate"
" bus resource: tx interrupt [%d]\n", vector);
return (ENXIO);
}
/* Set the handler function */
- error = bus_setup_intr(dev, adapter->res[vector],
- INTR_TYPE_NET | INTR_MPSAFE,
-#if __FreeBSD_version > 700000
- NULL,
-#endif
- ixgbe_msix_tx, txr, &adapter->tag[vector]);
+ error = bus_setup_intr(dev, txr->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixgbe_msix_tx, txr, &txr->tag);
if (error) {
- adapter->res[vector] = NULL;
+ txr->res = NULL;
device_printf(dev, "Failed to register TX handler");
return (error);
}
txr->msix = vector;
- txr->eims = 1 << vector;
+#if __FreeBSD_version >= 800000
+ /*
+ ** Bind the msix vector, and thus the
+ ** ring to the corresponding cpu.
+ */
+ intr_bind(rman_get_start(txr->res), i);
+#endif
TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
taskqueue_thread_enqueue, &txr->tq);
@@ -2093,32 +2167,35 @@ ixgbe_allocate_msix(struct adapter *adapter)
}
/* RX setup */
- for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rxr++) {
- adapter->res[vector] = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &adapter->rid[vector],
- RF_SHAREABLE | RF_ACTIVE);
- if (!adapter->res[vector]) {
+ for (int i = 0; i < adapter->num_queues; i++, vector++, rxr++) {
+ rid = vector + 1;
+ rxr->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (!rxr->res) {
device_printf(dev,"Unable to allocate"
" bus resource: rx interrupt [%d],"
- "rid = %d\n", i, adapter->rid[vector]);
+ "rid = %d\n", i, rid);
return (ENXIO);
}
/* Set the handler function */
- error = bus_setup_intr(dev, adapter->res[vector],
- INTR_TYPE_NET | INTR_MPSAFE,
-#if __FreeBSD_version > 700000
- NULL,
-#endif
- ixgbe_msix_rx, rxr, &adapter->tag[vector]);
+ error = bus_setup_intr(dev, rxr->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixgbe_msix_rx, rxr, &rxr->tag);
if (error) {
- adapter->res[vector] = NULL;
+ rxr->res = NULL;
device_printf(dev, "Failed to register RX handler");
return (error);
}
rxr->msix = vector;
- rxr->eims = 1 << vector;
/* used in local timer */
- adapter->rx_mask |= rxr->eims;
+ adapter->rx_mask |= (u64)(1 << vector);
+#if __FreeBSD_version >= 800000
+ /*
+ ** Bind the msix vector, and thus the
+ ** ring to the corresponding cpu.
+ */
+ intr_bind(rman_get_start(rxr->res), i);
+#endif
TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
taskqueue_thread_enqueue, &rxr->tq);
@@ -2127,22 +2204,20 @@ ixgbe_allocate_msix(struct adapter *adapter)
}
/* Now for Link changes */
- adapter->res[vector] = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &adapter->rid[vector], RF_SHAREABLE | RF_ACTIVE);
- if (!adapter->res[vector]) {
+ rid = vector + 1;
+ adapter->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (!adapter->res) {
device_printf(dev,"Unable to allocate"
- " bus resource: Link interrupt [%d]\n", adapter->rid[vector]);
+ " bus resource: Link interrupt [%d]\n", rid);
return (ENXIO);
}
/* Set the link handler function */
- error = bus_setup_intr(dev, adapter->res[vector],
- INTR_TYPE_NET | INTR_MPSAFE,
-#if __FreeBSD_version > 700000
- NULL,
-#endif
- ixgbe_msix_link, adapter, &adapter->tag[vector]);
+ error = bus_setup_intr(dev, adapter->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixgbe_msix_link, adapter, &adapter->tag);
if (error) {
- adapter->res[vector] = NULL;
+ adapter->res = NULL;
device_printf(dev, "Failed to register LINK handler");
return (error);
}
@@ -2158,15 +2233,7 @@ ixgbe_allocate_msix(struct adapter *adapter)
return (0);
}
-#else /* Freebsd 6.1/2 */
-static int
-ixgbe_allocate_msix(struct adapter *adapter)
-{
- return (1);
-}
-#endif
-#if __FreeBSD_version >= 602105
/*
* Setup Either MSI/X or MSI
*/
@@ -2207,11 +2274,13 @@ ixgbe_setup_msix(struct adapter *adapter)
/* Figure out a reasonable auto config value */
queues = (mp_ncpus > ((msgs-1)/2)) ? (msgs-1)/2 : mp_ncpus;
- if (ixgbe_tx_queues == 0)
- ixgbe_tx_queues = queues;
- if (ixgbe_rx_queues == 0)
- ixgbe_rx_queues = queues;
- want = ixgbe_tx_queues + ixgbe_rx_queues + 1;
+ if (ixgbe_num_queues == 0)
+ ixgbe_num_queues = queues;
+ /*
+ ** Want two vectors (RX/TX) per queue
+ ** plus an additional for Link.
+ */
+ want = (ixgbe_num_queues * 2) + 1;
if (msgs >= want)
msgs = want;
else {
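To make the new vector budget concrete: with ixgbe_num_queues autoconfigured to 8, want = (8 * 2) + 1 = 17 vectors (one RX and one TX vector per queue plus the link vector); with 4 queues it is 9. If the device exposes fewer messages than that, the code now falls back to MSI/legacy instead of failing the attach.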
@@ -2219,13 +2288,12 @@ ixgbe_setup_msix(struct adapter *adapter)
"MSIX Configuration Problem, "
"%d vectors but %d queues wanted!\n",
msgs, want);
- return (ENXIO);
+ return (0); /* Will go to Legacy setup */
}
if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
device_printf(adapter->dev,
"Using MSIX interrupts with %d vectors\n", msgs);
- adapter->num_tx_queues = ixgbe_tx_queues;
- adapter->num_rx_queues = ixgbe_rx_queues;
+ adapter->num_queues = ixgbe_num_queues;
return (msgs);
}
msi:
@@ -2234,7 +2302,7 @@ msi:
device_printf(adapter->dev,"Using MSI interrupt\n");
return (msgs);
}
-#endif /* FreeBSD_version >= 602105 */
+
static int
ixgbe_allocate_pci_resources(struct adapter *adapter)
@@ -2257,71 +2325,87 @@ ixgbe_allocate_pci_resources(struct adapter *adapter)
rman_get_bushandle(adapter->pci_mem);
adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
- /*
- * Init the resource arrays
- */
- for (int i = 0; i < IXGBE_MSGS; i++) {
- adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
- adapter->tag[i] = NULL;
- adapter->res[i] = NULL;
- }
-
/* Legacy defaults */
- adapter->num_tx_queues = 1;
- adapter->num_rx_queues = 1;
+ adapter->num_queues = 1;
+ adapter->hw.back = &adapter->osdep;
-#if __FreeBSD_version >= 602105
- /* Now setup MSI or MSI/X */
+ /*
+ ** Now setup MSI or MSI/X, should
+ ** return us the number of supported
+ ** vectors. (Will be 1 for MSI)
+ */
adapter->msix = ixgbe_setup_msix(adapter);
-#endif
- adapter->hw.back = &adapter->osdep;
return (0);
}
static void
ixgbe_free_pci_resources(struct adapter * adapter)
{
+ struct tx_ring *txr = adapter->tx_rings;
+ struct rx_ring *rxr = adapter->rx_rings;
device_t dev = adapter->dev;
- int rid;
-
- /*
- * Legacy has this set to 0, but we need
- * to run this once, so reset it.
- */
- if (adapter->msix == 0)
- adapter->msix = 1;
+ int rid, memrid;
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
- rid = PCIR_BAR(MSIX_82598_BAR);
+ memrid = PCIR_BAR(MSIX_82598_BAR);
else
- rid = PCIR_BAR(MSIX_82599_BAR);
+ memrid = PCIR_BAR(MSIX_82599_BAR);
/*
- * First release all the interrupt resources:
- * notice that since these are just kept
- * in an array we can do the same logic
- * whether its MSIX or just legacy.
- */
- for (int i = 0; i < adapter->msix; i++) {
- if (adapter->tag[i] != NULL) {
- bus_teardown_intr(dev, adapter->res[i],
- adapter->tag[i]);
- adapter->tag[i] = NULL;
+ ** There is a slight possibility of a failure mode
+ ** in attach that will result in entering this function
+ ** before interrupt resources have been initialized, and
+ ** in that case we do not want to execute the loops below
+ ** We can detect this reliably by the state of the adapter
+ ** res pointer.
+ */
+ if (adapter->res == NULL)
+ goto mem;
+
+ /*
+ ** Release all the interrupt resources:
+ ** notice this is harmless for Legacy or
+ ** MSI since pointers will always be NULL
+ */
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ rid = txr->msix + 1;
+ if (txr->tag != NULL) {
+ bus_teardown_intr(dev, txr->res, txr->tag);
+ txr->tag = NULL;
}
- if (adapter->res[i] != NULL) {
- bus_release_resource(dev, SYS_RES_IRQ,
- adapter->rid[i], adapter->res[i]);
+ if (txr->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, txr->res);
+ }
+
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+ rid = rxr->msix + 1;
+ if (rxr->tag != NULL) {
+ bus_teardown_intr(dev, rxr->res, rxr->tag);
+ rxr->tag = NULL;
}
+ if (rxr->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, rxr->res);
}
-#if __FreeBSD_version >= 602105
+ /* Clean the Legacy or Link interrupt last */
+ if (adapter->linkvec) /* we are doing MSIX */
+ rid = adapter->linkvec + 1;
+ else
+ (adapter->msix != 0) ? (rid = 1):(rid = 0);
+
+ if (adapter->tag != NULL) {
+ bus_teardown_intr(dev, adapter->res, adapter->tag);
+ adapter->tag = NULL;
+ }
+ if (adapter->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
+mem:
if (adapter->msix)
pci_release_msi(dev);
if (adapter->msix_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
- rid, adapter->msix_mem);
-#endif
+ memrid, adapter->msix_mem);
if (adapter->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -2342,6 +2426,7 @@ static int
ixgbe_hardware_init(struct adapter *adapter)
{
device_t dev = adapter->dev;
+ u32 ret;
u16 csum;
csum = 0;
@@ -2362,8 +2447,18 @@ ixgbe_hardware_init(struct adapter *adapter)
adapter->hw.fc.high_water = IXGBE_FC_HI;
adapter->hw.fc.send_xon = TRUE;
- if (ixgbe_init_hw(&adapter->hw)) {
- device_printf(dev,"Hardware Initialization Failed");
+ ret = ixgbe_init_hw(&adapter->hw);
+ if (ret == IXGBE_ERR_EEPROM_VERSION) {
+ device_printf(dev, "This device is a pre-production adapter/"
+ "LOM. Please be aware there may be issues associated "
+ "with your hardware.\n If you are experiencing problems "
+ "please contact your Intel or hardware representative "
+ "who provided you with this hardware.\n");
+ } else if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ device_printf(dev,"Unsupported SFP+ Module\n");
+ return (EIO);
+ } else if (ret != 0 ) {
+ device_printf(dev,"Hardware Initialization Failure\n");
return (EIO);
}
@@ -2393,9 +2488,13 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ixgbe_ioctl;
ifp->if_start = ixgbe_start;
+#if __FreeBSD_version >= 800000
+ ifp->if_transmit = ixgbe_mq_start;
+ ifp->if_qflush = ixgbe_qflush;
+#endif
ifp->if_timer = 0;
ifp->if_watchdog = NULL;
- ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
+ ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
ether_ifattach(ifp, adapter->hw.mac.addr);
@@ -2407,9 +2506,9 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
*/
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
- ifp->if_capabilities |= (IFCAP_HWCSUM | IFCAP_TSO4);
+ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+ ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
ifp->if_capenable = ifp->if_capabilities;
@@ -2534,7 +2633,7 @@ ixgbe_allocate_queues(struct adapter *adapter)
/* First allocate the TX ring struct memory */
if (!(adapter->tx_rings =
(struct tx_ring *) malloc(sizeof(struct tx_ring) *
- adapter->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate TX ring memory\n");
error = ENOMEM;
goto fail;
@@ -2544,7 +2643,7 @@ ixgbe_allocate_queues(struct adapter *adapter)
/* Next allocate the RX */
if (!(adapter->rx_rings =
(struct rx_ring *) malloc(sizeof(struct rx_ring) *
- adapter->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate RX ring memory\n");
error = ENOMEM;
goto rx_fail;
@@ -2560,7 +2659,7 @@ ixgbe_allocate_queues(struct adapter *adapter)
* possibility that things fail midcourse and we need to
* undo memory gracefully
*/
- for (int i = 0; i < adapter->num_tx_queues; i++, txconf++) {
+ for (int i = 0; i < adapter->num_queues; i++, txconf++) {
/* Set up some basics */
txr = &adapter->tx_rings[i];
txr->adapter = adapter;
@@ -2588,7 +2687,11 @@ ixgbe_allocate_queues(struct adapter *adapter)
error = ENOMEM;
goto err_tx_desc;
}
-
+#if __FreeBSD_version >= 800000
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
+ M_WAITOK, &txr->tx_mtx);
+#endif
}
/*
@@ -2596,7 +2699,7 @@ ixgbe_allocate_queues(struct adapter *adapter)
*/
rsize = roundup2(adapter->num_rx_desc *
sizeof(union ixgbe_adv_rx_desc), 4096);
- for (int i = 0; i < adapter->num_rx_queues; i++, rxconf++) {
+ for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
rxr = &adapter->rx_rings[i];
/* Set up some basics */
rxr->adapter = adapter;
@@ -2750,7 +2853,7 @@ ixgbe_setup_transmit_structures(struct adapter *adapter)
{
struct tx_ring *txr = adapter->tx_rings;
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
+ for (int i = 0; i < adapter->num_queues; i++, txr++)
ixgbe_setup_transmit_ring(txr);
return (0);
@@ -2769,7 +2872,7 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
/* Setup the Base and Length of the Tx Descriptor Ring */
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
u64 tdba = txr->txdma.dma_paddr;
IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
@@ -2808,7 +2911,7 @@ ixgbe_free_transmit_structures(struct adapter *adapter)
{
struct tx_ring *txr = adapter->tx_rings;
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
IXGBE_TX_LOCK(txr);
ixgbe_free_transmit_buffers(txr);
ixgbe_dma_free(adapter, &txr->txdma);
@@ -2857,7 +2960,9 @@ ixgbe_free_transmit_buffers(struct tx_ring *txr)
tx_buffer->map = NULL;
}
}
-
+#if __FreeBSD_version >= 800000
+ buf_ring_free(txr->br, M_DEVBUF);
+#endif
if (txr->tx_buffers != NULL) {
free(txr->tx_buffers, M_DEVBUF);
txr->tx_buffers = NULL;
@@ -2875,7 +2980,7 @@ ixgbe_free_transmit_buffers(struct tx_ring *txr)
*
**********************************************************************/
-static int
+static boolean_t
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
struct adapter *adapter = txr->adapter;
@@ -2883,22 +2988,18 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
struct ixgbe_tx_buf *tx_buffer;
u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
struct ether_vlan_header *eh;
- struct ip *ip = NULL;
+ struct ip *ip;
struct ip6_hdr *ip6;
int ehdrlen, ip_hlen = 0;
u16 etype;
u8 ipproto = 0;
- bool offload = FALSE;
+ bool offload = TRUE;
int ctxd = txr->next_avail_tx_desc;
-#if __FreeBSD_version < 700000
- struct m_tag *mtag;
-#else
u16 vtag = 0;
-#endif
- if (mp->m_pkthdr.csum_flags & CSUM_OFFLOAD)
- offload = TRUE;
+ if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
+ offload = FALSE;
tx_buffer = &txr->tx_buffers[ctxd];
TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
@@ -2907,20 +3008,12 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
** In advanced descriptors the vlan tag must
** be placed into the descriptor itself.
*/
-#if __FreeBSD_version < 700000
- mtag = VLAN_OUTPUT_TAG(ifp, mp);
- if (mtag != NULL) {
- vlan_macip_lens |=
- htole16(VLAN_TAG_VALUE(mtag)) << IXGBE_ADVTXD_VLAN_SHIFT;
- offload = TRUE;
- }
-#else
if (mp->m_flags & M_VLANTAG) {
vtag = htole16(mp->m_pkthdr.ether_vtag);
vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
- offload = TRUE;
- }
-#endif
+ } else if (offload == FALSE)
+ return FALSE;
+
/*
* Determine where frame payload starts.
* Jump over vlan headers if already present,
@@ -2943,7 +3036,7 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
ip = (struct ip *)(mp->m_data + ehdrlen);
ip_hlen = ip->ip_hl << 2;
if (mp->m_len < ehdrlen + ip_hlen)
- return FALSE; /* failure */
+ return (FALSE);
ipproto = ip->ip_p;
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
break;
@@ -2951,16 +3044,13 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
ip_hlen = sizeof(struct ip6_hdr);
if (mp->m_len < ehdrlen + ip_hlen)
- return FALSE; /* failure */
+ return (FALSE);
ipproto = ip6->ip6_nxt;
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
break;
-#ifdef IXGBE_TIMESYNC
- case ETHERTYPE_IEEE1588:
- return (IXGBE_TIMESTAMP);
-#endif
default:
- return (FALSE);
+ offload = FALSE;
+ break;
}
vlan_macip_lens |= ip_hlen;
@@ -2968,36 +3058,20 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
switch (ipproto) {
case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
+ if (mp->m_pkthdr.csum_flags & CSUM_TCP)
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- offload = TRUE;
- }
break;
case IPPROTO_UDP:
- {
-#ifdef IXGBE_TIMESYNC
- void *hdr = (caddr_t) ip + ip_hlen;
- struct udphdr *uh = (struct udphdr *)hdr;
-
- if (uh->uh_dport == htons(TSYNC_UDP_PORT))
- return (IXGBE_TIMESTAMP);
-#endif
- if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
+ if (mp->m_pkthdr.csum_flags & CSUM_UDP)
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
- offload = TRUE;
- }
break;
- }
-
default:
- return (FALSE);
+ offload = FALSE;
+ break;
}
- if (offload != TRUE )
- return (FALSE);
-
/* Now copy bits into descriptor */
TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
@@ -3013,10 +3087,9 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
txr->next_avail_tx_desc = ctxd;
--txr->tx_avail;
- return (TRUE);
+ return (offload);
}
-#if __FreeBSD_version >= 700000
/**********************************************************************
*
* Setup work for hardware segmentation offload (TSO) on
@@ -3104,14 +3177,6 @@ ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
return TRUE;
}
-#else /* For 6.2 RELEASE */
-/* This makes it easy to keep the code common */
-static boolean_t
-ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
-{
- return (FALSE);
-}
-#endif
/**********************************************************************
*
@@ -3420,12 +3485,14 @@ static int
ixgbe_setup_receive_ring(struct rx_ring *rxr)
{
struct adapter *adapter;
+ struct ifnet *ifp;
device_t dev;
struct ixgbe_rx_buf *rxbuf;
struct lro_ctrl *lro = &rxr->lro;
int j, rsize;
adapter = rxr->adapter;
+ ifp = adapter->ifp;
dev = adapter->dev;
/* Clear the ring contents */
@@ -3465,18 +3532,27 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
/* Setup our descriptor indices */
rxr->next_to_check = 0;
rxr->last_cleaned = 0;
+ rxr->lro_enabled = FALSE;
+ rxr->hdr_split = FALSE;
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- /* Now set up the LRO interface */
- if (ixgbe_enable_lro) {
+ /*
+ ** Now set up the LRO interface, we
+ ** also only do head split when LRO
+ ** is enabled, since so often they
+ ** are undesireable in similar setups.
+ */
+ if (ifp->if_capenable & IFCAP_LRO) {
int err = tcp_lro_init(lro);
if (err) {
INIT_DEBUGOUT("LRO Initialization failed!\n");
goto fail;
}
INIT_DEBUGOUT("RX LRO Initialized\n");
+ rxr->lro_enabled = TRUE;
+ rxr->hdr_split = TRUE;
lro->ifp = adapter->ifp;
}
@@ -3511,7 +3587,7 @@ ixgbe_setup_receive_structures(struct adapter *adapter)
struct rx_ring *rxr = adapter->rx_rings;
int j;
- for (j = 0; j < adapter->num_rx_queues; j++, rxr++)
+ for (j = 0; j < adapter->num_queues; j++, rxr++)
if (ixgbe_setup_receive_ring(rxr))
goto fail;
@@ -3587,7 +3663,7 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
}
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
- if (ixgbe_rx_hdr_split) {
+ if (rxr->hdr_split) {
/* Use a standard mbuf for the header */
srrctl |= ((IXGBE_RX_HDR << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
& IXGBE_SRRCTL_BSIZEHDR_MASK);
@@ -3609,7 +3685,7 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(0), srrctl);
- for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
u64 rdba = rxr->rxdma.dma_paddr;
/* Setup the Base and Length of the Rx Descriptor Ring */
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
@@ -3626,7 +3702,7 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
/* Setup RSS */
- if (adapter->num_rx_queues > 1) {
+ if (adapter->num_queues > 1) {
int i, j;
reta = 0;
@@ -3635,7 +3711,7 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
/* Set up the redirection table */
for (i = 0, j = 0; i < 128; i++, j++) {
- if (j == adapter->num_rx_queues) j = 0;
+ if (j == adapter->num_queues) j = 0;
reta = (reta << 8) | (j * 0x11);
if ((i & 3) == 3)
IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
@@ -3673,14 +3749,6 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- rdrxctl |= IXGBE_RDRXCTL_AGGDIS;
- rdrxctl |= IXGBE_RDRXCTL_RSCLLIDIS;
- rdrxctl |= IXGBE_RDRXCTL_MVMEN;
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
- }
-
return;
}
@@ -3694,7 +3762,7 @@ ixgbe_free_receive_structures(struct adapter *adapter)
{
struct rx_ring *rxr = adapter->rx_rings;
- for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
struct lro_ctrl *lro = &rxr->lro;
ixgbe_free_receive_buffers(rxr);
/* Free LRO memory */
@@ -3786,12 +3854,12 @@ ixgbe_rxeof(struct rx_ring *rxr, int count)
while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
struct mbuf *sendmp, *mh, *mp;
- u16 hlen, plen, hdr;
+ u16 hlen, plen, hdr, vtag;
u8 dopayload, accept_frame, eop;
accept_frame = 1;
- hlen = plen = 0;
+ hlen = plen = vtag = 0;
sendmp = mh = mp = NULL;
/* Sync the buffers */
@@ -3809,7 +3877,7 @@ ixgbe_rxeof(struct rx_ring *rxr, int count)
** packet spans multiple descriptors, in that
** case only the first header is valid.
*/
- if ((ixgbe_rx_hdr_split) && (rxr->fmp == NULL)){
+ if ((rxr->hdr_split) && (rxr->fmp == NULL)){
hdr = le16toh(cur->
wb.lower.lo_dword.hs_rss.hdr_info);
hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
@@ -3857,10 +3925,39 @@ ixgbe_rxeof(struct rx_ring *rxr, int count)
} else
eop = 0;
+#ifdef IXGBE_IEEE1588
+ This code needs to be converted to work here
+ -----------------------------------------------------
+ if (unlikely(staterr & IXGBE_RXD_STAT_TS)) {
+ u64 regval;
+ u64 ns;
+// Create an mtag and set it up
+ struct skb_shared_hwtstamps *shhwtstamps =
+ skb_hwtstamps(skb);
+
+ rd32(IXGBE_TSYNCRXCTL) & IXGBE_TSYNCRXCTL_VALID),
+ "igb: no RX time stamp available for time stamped packet");
+ regval = rd32(IXGBE_RXSTMPL);
+ regval |= (u64)rd32(IXGBE_RXSTMPH) << 32;
+// Do time conversion from the register
+ ns = timecounter_cyc2time(&adapter->clock, regval);
+ clocksync_update(&adapter->sync, ns);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ shhwtstamps->syststamp =
+ clocksync_hw2sys(&adapter->sync, ns);
+ }
+#endif
+
if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
accept_frame = 0;
if (accept_frame) {
+ /*
+ ** Save the vlan id, because get_buf will
+ ** clobber the writeback descriptor...
+ */
+ vtag = le16toh(cur->wb.upper.vlan);
if (ixgbe_get_buf(rxr, i, dopayload) != 0) {
ifp->if_iqdrops++;
goto discard;
@@ -3895,16 +3992,13 @@ ixgbe_rxeof(struct rx_ring *rxr, int count)
else
rxr->fmp->m_pkthdr.csum_flags = 0;
if (staterr & IXGBE_RXD_STAT_VP) {
-#if __FreeBSD_version >= 700000
- rxr->fmp->m_pkthdr.ether_vtag =
- le16toh(cur->wb.upper.vlan);
+ rxr->fmp->m_pkthdr.ether_vtag = vtag;
rxr->fmp->m_flags |= M_VLANTAG;
-#else
- VLAN_INPUT_TAG_NEW(ifp, rxr->fmp,
- (le16toh(cur->wb.upper.vlan) &
- IXGBE_RX_DESC_SPECIAL_VLAN_MASK));
-#endif
}
+#if __FreeBSD_version >= 800000
+ rxr->fmp->m_pkthdr.flowid = curcpu;
+ rxr->fmp->m_flags |= M_FLOWID;
+#endif
sendmp = rxr->fmp;
rxr->fmp = NULL;
rxr->lmp = NULL;
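A worked example of the flowid stamping just added: a frame received while CPU 2 is running the RX path gets m_pkthdr.flowid = 2, and when the stack later transmits on a 4-queue adapter, ixgbe_mq_start() picks TX ring 2 % 4 = 2, so forwarded or echoed traffic tends to stay on the same queue pair as its receive CPU.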
@@ -3948,8 +4042,14 @@ discard:
** thru this call.
*/
if (sendmp != NULL) {
- /* Use LRO if possible */
- if ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0)))
+ /*
+ ** Send to the stack if:
+ ** - LRO not enabled, or
+ ** - no LRO resources, or
+ ** - lro enqueue fails
+ */
+ if ((!rxr->lro_enabled) ||
+ ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0))))
(*ifp->if_input)(ifp, sendmp);
}
@@ -3978,7 +4078,7 @@ discard:
** then schedule another interrupt.
*/
if (staterr & IXGBE_RXD_STAT_DD) {
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, rxr->eims);
+ ixgbe_rearm_rx_queues(adapter, (u64)(1 << rxr->msix));
return TRUE;
}
@@ -4020,57 +4120,101 @@ ixgbe_rx_checksum(u32 staterr, struct mbuf * mp)
}
-#ifdef IXGBE_HW_VLAN_SUPPORT
/*
- * This routine is run via an vlan
- * config EVENT
- */
+** This routine is run via an vlan config EVENT,
+** it enables us to use the HW Filter table since
+** we can get the vlan id. This just creates the
+** entry in the soft version of the VFTA, init will
+** repopulate the real table.
+*/
static void
ixgbe_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
struct adapter *adapter = ifp->if_softc;
- u32 ctrl, rctl, index, vfta;
+ u16 index, bit;
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
- ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
- /* Make entry in the hardware filter table */
- ixgbe_set_vfta(&adapter->hw, vtag, 0, TRUE);
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
+ ixgbe_shadow_vfta[index] |= (1 << bit);
+ ++adapter->num_vlans;
+ /* Re-init to load the changes */
+ ixgbe_init(adapter);
}
/*
- * This routine is run via an vlan
- * unconfig EVENT
- */
+** This routine is run via an vlan
+** unconfig EVENT, remove our entry
+** in the soft vfta.
+*/
static void
ixgbe_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
struct adapter *adapter = ifp->if_softc;
- u32 index, vfta;
+ u16 index, bit;
- /* Remove entry in the hardware filter table */
- ixgbe_set_vfta(&adapter->hw, vtag, 0, FALSE);
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
- /* Have all vlans unregistered? */
- if (adapter->ifp->if_vlantrunk == NULL) {
- u32 ctrl;
- /* Turn off the filter table */
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
- ctrl &= ~IXGBE_VLNCTRL_VME;
- ctrl &= ~IXGBE_VLNCTRL_VFE;
- ctrl |= IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
- }
+ index = (vtag >> 5) & 0x7F;
+ bit = vtag & 0x1F;
+ ixgbe_shadow_vfta[index] &= ~(1 << bit);
+ --adapter->num_vlans;
+ /* Re-init to load the changes */
+ ixgbe_init(adapter);
+}
+
+static void
+ixgbe_setup_vlan_hw_support(struct adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+
+ /*
+ ** We get here thru init_locked, meaning
+ ** a soft reset, which has already cleared
+ ** the VFTA and other state, so if there
+ ** have been no vlans registered, do nothing.
+ */
+ if (adapter->num_vlans == 0)
+ return;
+
+ /*
+ ** A soft reset zeroes out the VFTA, so
+ ** we need to repopulate it now.
+ */
+ for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
+ if (ixgbe_shadow_vfta[i] != 0)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
+ ixgbe_shadow_vfta[i]);
+
+ /* Enable the Filter Table */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ ctrl |= IXGBE_VLNCTRL_VFE;
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ ctrl |= IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+
+ /* On 82599 the VLAN enable is per-queue in RXDCTL */
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ for (int i = 0; i < adapter->num_queues; i++) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+ }
}
-#endif
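
As a standalone illustration of the soft-VFTA arithmetic used by the register/unregister handlers above (index = vtag >> 5, bit = vtag & 0x1F), the small program below maps a sample VLAN id into a 128-word table; it is only a sketch in plain C, not driver code.

#include <stdio.h>
#include <stdint.h>

#define VFTA_SIZE 128			/* 128 x 32 bits covers VLAN ids 0..4095 */

static uint32_t shadow_vfta[VFTA_SIZE];

static void
vfta_set(uint16_t vtag, int on)
{
	uint16_t index = (vtag >> 5) & 0x7F;	/* which 32-bit word */
	uint16_t bit = vtag & 0x1F;		/* which bit within that word */

	if (on)
		shadow_vfta[index] |= (1u << bit);
	else
		shadow_vfta[index] &= ~(1u << bit);
}

int
main(void)
{
	vfta_set(100, 1);			/* VLAN 100 -> word 3, bit 4 */
	printf("word %d = 0x%08x\n", 100 >> 5,
	    (unsigned)shadow_vfta[100 >> 5]);
	return (0);
}
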
static void
ixgbe_enable_intr(struct adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 mask = IXGBE_EIMS_ENABLE_MASK;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct rx_ring *rxr = adapter->rx_rings;
+ u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
/* Enable Fan Failure detection */
@@ -4095,6 +4239,16 @@ ixgbe_enable_intr(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
}
+ /*
+ ** Now enable all queues; this is done separately to
+ ** allow for handling the extended (beyond 32) MSIX
+ ** vectors that can be used by 82599
+ */
+ for (int i = 0; i < adapter->num_queues; i++, rxr++)
+ ixgbe_enable_queue(adapter, rxr->msix);
+ for (int i = 0; i < adapter->num_queues; i++, txr++)
+ ixgbe_enable_queue(adapter, txr->msix);
+
IXGBE_WRITE_FLUSH(hw);
return;
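
The per-queue enabling above exists because the 82599 exposes more than 32 interrupt causes, split across the EIMS_EX(0)/EIMS_EX(1) pair. As a hedged sketch only (the actual ixgbe_enable_queue is not part of this hunk), the mapping from a cause/vector number to an extended-register index and bit would look like this, assuming one enable bit per cause split 32/32 across the two registers:

#include <stdio.h>
#include <stdint.h>

/* Assumed layout: causes 0-31 in EIMS_EX(0), causes 32-63 in EIMS_EX(1). */
static void
cause_to_eims_ex(unsigned int cause, unsigned int *reg, uint32_t *mask)
{
	*reg = cause >> 5;		/* 0 for causes 0-31, 1 for 32-63 */
	*mask = 1u << (cause & 0x1F);	/* bit position within that register */
}

int
main(void)
{
	unsigned int reg;
	uint32_t mask;

	cause_to_eims_ex(40, &reg, &mask);
	printf("cause 40 -> EIMS_EX(%u), mask 0x%08x\n", reg, (unsigned)mask);
	return (0);
}
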
@@ -4105,7 +4259,13 @@ ixgbe_disable_intr(struct adapter *adapter)
{
if (adapter->msix_mem)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+ } else {
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+ }
IXGBE_WRITE_FLUSH(&adapter->hw);
return;
}
@@ -4138,7 +4298,7 @@ ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
** - type is RX/TX/MISC
*/
static void
-ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector, s8 type)
+ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 ivar, index;
@@ -4185,10 +4345,10 @@ ixgbe_configure_ivars(struct adapter *adapter)
struct tx_ring *txr = adapter->tx_rings;
struct rx_ring *rxr = adapter->rx_rings;
- for (int i = 0; i < adapter->num_rx_queues; i++, rxr++)
+ for (int i = 0; i < adapter->num_queues; i++, rxr++)
ixgbe_set_ivar(adapter, i, rxr->msix, 0);
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
+ for (int i = 0; i < adapter->num_queues; i++, txr++)
ixgbe_set_ivar(adapter, i, txr->msix, 1);
/* For the Link interrupt */
@@ -4441,7 +4601,7 @@ ixgbe_print_debug_info(struct adapter *adapter)
device_printf(dev,"Error Byte Count = %u \n",
IXGBE_READ_REG(hw, IXGBE_ERRBC));
- for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
+ for (int i = 0; i < adapter->num_queues; i++, rxr++) {
struct lro_ctrl *lro = &rxr->lro;
device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n",
i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
@@ -4460,7 +4620,7 @@ ixgbe_print_debug_info(struct adapter *adapter)
rxr->me, lro->lro_flushed);
}
- for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
IXGBE_READ_REG(hw, IXGBE_TDH(i)),
IXGBE_READ_REG(hw, IXGBE_TDT(i)));
@@ -4561,96 +4721,173 @@ ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
-#ifdef IXGBE_TIMESYNC
+#ifdef IXGBE_IEEE1588
+
/*
- * Initialize the Time Sync Feature
- */
+** ixgbe_hwtstamp_ioctl - control hardware time stamping
+**
+** Outgoing time stamping can be enabled and disabled. Play nice and
+** disable it when requested, although it shouldn't cause any overhead
+** when no packet needs it. At most one packet in the queue may be
+** marked for time stamping, otherwise it would be impossible to tell
+** for sure to which packet the hardware time stamp belongs.
+**
+** Incoming time stamping has to be configured via the hardware
+** filters. Not all combinations are supported, in particular event
+** type has to be specified. Matching the kind of event packet is
+** not supported, with the exception of "all V2 events regardless of
+** level 2 or 4".
+**
+*/
static int
-ixgbe_tsync_init(struct adapter *adapter)
+ixgbe_hwtstamp_ioctl(struct adapter *adapter, struct ifreq *ifr)
{
- device_t dev = adapter->dev;
- u32 tx_ctl, rx_ctl;
-
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TIMINCA, (1<<24) |
- 20833/PICOSECS_PER_TICK);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct hwtstamp_ctrl *config;
+ u32 tsync_tx_ctl_bit = IXGBE_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl_bit = IXGBE_TSYNCRXCTL_ENABLED;
+ u32 tsync_rx_ctl_type = 0;
+ u32 tsync_rx_cfg = 0;
+ int is_l4 = 0;
+ int is_l2 = 0;
+ u16 port = 319; /* PTP */
+ u32 regval;
+
+ config = (struct hwtstamp_ctrl *) ifr->ifr_data;
+
+ /* reserved for future extensions */
+ if (config->flags)
+ return (EINVAL);
- adapter->last_stamp = IXGBE_READ_REG(&adapter->hw, IXGBE_SYSTIML);
- adapter->last_stamp |= (u64)IXGBE_READ_REG(&adapter->hw,
- IXGBE_SYSTIMH) << 32ULL;
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl_bit = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tsync_tx_ctl_bit = IXGBE_TSYNCTXCTL_ENABLED;
+ break;
+ default:
+ return (ERANGE);
+ }
- /* Enable the TX side */
- tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
- tx_ctl |= 0x10;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCTXCTL, tx_ctl);
- IXGBE_WRITE_FLUSH(&adapter->hw);
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl_bit = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ /*
+ * register TSYNCRXCFG must be set, therefore it is not
+ * possible to time stamp both Sync and Delay_Req messages
+ * => fall back to time stamping all packets
+ */
+ tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+ is_l4 = 1;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = 1;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
+ tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
+ is_l2 = 1;
+ is_l4 = 1;
+ config->rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
+ tsync_rx_cfg = IXGBE_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = 1;
+ is_l4 = 1;
+ config->rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ tsync_rx_ctl_type = IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = 1;
+ break;
+ default:
+ return (ERANGE);
+ }
- tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
- if ((tx_ctl & 0x10) == 0) {
- device_printf(dev, "Failed to enable TX timestamping\n");
- return (ENXIO);
- }
+ /* enable/disable TX */
+ regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ regval = (regval & ~IXGBE_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval);
- /* Enable RX */
- rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
- rx_ctl |= 0x10; /* Enable the feature */
- rx_ctl |= 0x04; /* This value turns on Ver 1 and 2 */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCRXCTL, rx_ctl);
+ /* enable/disable RX, define which PTP packets are time stamped */
+ regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ regval = (regval & ~IXGBE_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
+ regval = (regval & ~0xE) | tsync_rx_ctl_type;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval);
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCFG, tsync_rx_cfg);
/*
- * Ethertype Filter Queue Filter[0][15:0] = 0x88F7 (Ethertype)
- * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable filter)
- * Ethertype Filter Queue Filter[0][31] = 0x1 (Enable Timestamping)
+ * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
+ * (Ethertype to filter on)
+ * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
+ * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
*/
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQF(0), 0xC00088f7);
-
- IXGBE_WRITE_FLUSH(&adapter->hw);
-
- rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
- if ((rx_ctl & 0x10) == 0) {
- device_printf(dev, "Failed to enable RX timestamping\n");
- return (ENXIO);
- }
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF0, is_l2 ? 0x440088f7 : 0);
+
+ /* L4 Queue Filter[0]: only filter by source and destination port */
+ IXGBE_WRITE_REG(hw, IXGBE_SPQF0, htons(port));
+ IXGBE_WRITE_REG(hw, IXGBE_IMIREXT(0), is_l4 ?
+ ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IMIR(0), is_l4 ?
+ (htons(port)
+ | (0<<16) /* immediate interrupt disabled */
+ | 0 /* (1<<17) bit cleared: do not bypass
+ destination port check */)
+ : 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF0, is_l4 ?
+ (0x11 /* UDP */
+ | (1<<15) /* VF not compared */
+ | (1<<27) /* Enable Timestamping */
+ | (7<<28) /* only source port filter enabled,
+ source/target address and protocol
+ masked */)
+ : ((1<<15) | (15<<28) /* all mask bits set = filter not
+ enabled */));
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ adapter->hwtstamp = *config;
+
+ /* clear TX/RX time stamp registers, just to be sure */
+ regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
+ regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
- device_printf(dev, "IEEE 1588 Precision Time Protocol enabled\n");
-
- return (0);
+ return (0);
}
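
A quick arithmetic check of the ETQF0 value written above: the comment lists Ethertype 0x88F7 in bits 15:0, filter enable in bit 26 and timestamp enable in bit 30, and OR-ing those parts together does reproduce the 0x440088f7 constant.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Ethertype | filter enable (bit 26) | timestamp enable (bit 30) */
	uint32_t etqf = 0x88F7 | (1u << 26) | (1u << 30);

	printf("ETQF0 = 0x%08x\n", (unsigned)etqf);	/* 0x440088f7 */
	return (0);
}
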
/*
- * Disable the Time Sync Feature
- */
-static void
-ixgbe_tsync_disable(struct adapter *adapter)
+** ixgbe_read_clock - read raw cycle counter (to be used by time counter)
+*/
+static cycle_t ixgbe_read_clock(const struct cyclecounter *tc)
{
- u32 tx_ctl, rx_ctl;
-
- tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
- tx_ctl &= ~0x10;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCTXCTL, tx_ctl);
- IXGBE_WRITE_FLUSH(&adapter->hw);
-
- /* Invalidate TX Timestamp */
- IXGBE_READ_REG(&adapter->hw, IXGBE_TXSTMPH);
-
- tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
- if (tx_ctl & 0x10)
- HW_DEBUGOUT("Failed to disable TX timestamping\n");
-
- rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
- rx_ctl &= ~0x10;
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCRXCTL, rx_ctl);
- IXGBE_WRITE_FLUSH(&adapter->hw);
-
- /* Invalidate RX Timestamp */
- IXGBE_READ_REG(&adapter->hw, IXGBE_RXSATRH);
-
- rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
- if (rx_ctl & 0x10)
- HW_DEBUGOUT("Failed to disable RX timestamping\n");
-
- return;
+ struct adapter *adapter =
+ container_of(tc, struct adapter, cycles);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 stamp;
+
+ stamp = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32ULL;
+
+ return (stamp);
}
-#endif /* IXGBE_TIMESYNC */
+#endif /* IXGBE_IEEE1588 */
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index e6bebd1..848820a 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -39,6 +39,9 @@
#include <sys/param.h>
#include <sys/systm.h>
+#if __FreeBSD_version >= 800000
+#include <sys/buf_ring.h>
+#endif
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
@@ -83,10 +86,11 @@
#include <sys/endian.h>
#include <sys/taskqueue.h>
#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <machine/smp.h>
-#ifdef IXGBE_TIMESYNC
-#include <sys/ioccom.h>
-#include <sys/time.h>
+#ifdef IXGBE_IEEE1588
+#include <sys/ieee1588.h>
#endif
#include "ixgbe_api.h"
@@ -171,11 +175,10 @@
#define IXGBE_TSO_SIZE 65535
#define IXGBE_TX_BUFFER_SIZE ((u32) 1514)
#define IXGBE_RX_HDR 128
+#define IXGBE_VFTA_SIZE 128
+#define IXGBE_BR_SIZE 4096
#define CSUM_OFFLOAD 7 /* Bits in csum flags */
-/* The number of MSIX messages the 82598 supports */
-#define IXGBE_MSGS 18
-
/* For 6.X code compatibility */
#if !defined(ETHER_BPF_MTAP)
#define ETHER_BPF_MTAP BPF_MTAP
@@ -199,9 +202,6 @@
#define IXGBE_CLEAN_PKT 2
#define IXGBE_CLEAN_ALL 3
-/* Used for auto RX queue configuration */
-extern int mp_ncpus;
-
/*
*****************************************************************************
* vendor_info_array
@@ -253,7 +253,6 @@ struct tx_ring {
struct mtx tx_mtx;
u32 me;
u32 msix;
- u32 eims;
u32 watchdog_timer;
union ixgbe_adv_tx_desc *tx_base;
volatile u32 tx_hwb;
@@ -267,6 +266,13 @@ struct tx_ring {
u32 txd_cmd;
bus_dma_tag_t txtag;
char mtx_name[16];
+#if __FreeBSD_version >= 800000
+ struct buf_ring *br;
+#endif
+ /* Interrupt resources */
+ void *tag;
+ struct resource *res;
+
/* Soft Stats */
u32 no_tx_desc_avail;
u32 no_tx_desc_late;
@@ -283,13 +289,14 @@ struct rx_ring {
struct mtx rx_mtx;
u32 me;
u32 msix;
- u32 eims;
u32 payload;
struct task rx_task;
struct taskqueue *tq;
union ixgbe_adv_rx_desc *rx_base;
struct ixgbe_dma_alloc rxdma;
struct lro_ctrl lro;
+ bool lro_enabled;
+ bool hdr_split;
unsigned int last_cleaned;
unsigned int next_to_check;
struct ixgbe_rx_buf *rx_buffers;
@@ -302,6 +309,10 @@ struct rx_ring {
u32 bytes; /* Used for AIM calc */
u32 eitr_setting;
+ /* Interrupt resources */
+ void *tag;
+ struct resource *res;
+
/* Soft stats */
u64 rx_irq;
u64 rx_split_packets;
@@ -321,11 +332,12 @@ struct adapter {
struct resource *msix_mem;
/*
- * Interrupt resources:
+ * Interrupt resources: this set is
+ * either used for legacy, or for Link
+ * when doing MSIX
*/
- void *tag[IXGBE_MSGS];
- struct resource *res[IXGBE_MSGS];
- int rid[IXGBE_MSGS];
+ void *tag;
+ struct resource *res;
struct ifmedia media;
struct callout timer;
@@ -334,6 +346,12 @@ struct adapter {
struct mtx core_mtx;
+ eventhandler_tag vlan_attach;
+ eventhandler_tag vlan_detach;
+
+ u32 num_vlans;
+ u16 num_queues;
+
/* Info about the board itself */
u32 part_num;
u32 optics;
@@ -363,7 +381,6 @@ struct adapter {
*/
struct tx_ring *tx_rings;
int num_tx_desc;
- int num_tx_queues;
/*
* Receive rings:
@@ -371,14 +388,15 @@ struct adapter {
*/
struct rx_ring *rx_rings;
int num_rx_desc;
- int num_rx_queues;
- u32 rx_mask;
+ u64 rx_mask;
u32 rx_process_limit;
-#ifdef IXGBE_TIMESYNC
- u64 last_stamp;
- u64 last_sec;
- u32 last_ns;
+#ifdef IXGBE_IEEE1588
+ /* IEEE 1588 precision time support */
+ struct cyclecounter cycles;
+ struct nettimer clock;
+ struct nettime_compare compare;
+ struct hwtstamp_ctrl hwtstamp;
#endif
/* Misc stats maintained by the driver */
@@ -395,31 +413,12 @@ struct adapter {
struct ixgbe_hw_stats stats;
};
-#ifdef IXGBE_TIMESYNC
/* Precision Time Sync (IEEE 1588) defines */
#define ETHERTYPE_IEEE1588 0x88F7
#define PICOSECS_PER_TICK 20833
#define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
#define IXGBE_ADVTXD_TSTAMP 0x00080000
-/* TIMESYNC IOCTL defines */
-#define IXGBE_TIMESYNC_READTS _IOWR('i', 127, struct ixgbe_tsync_read)
-#define IXGBE_TIMESTAMP 5 /* A unique return value */
-
-/* Used in the READTS IOCTL */
-struct ixgbe_tsync_read {
- int read_current_time;
- struct timespec system_time;
- u64 network_time;
- u64 rx_stamp;
- u64 tx_stamp;
- u16 seqid;
- unsigned char srcid[6];
- int rx_valid;
- int tx_valid;
-};
-
-#endif /* IXGBE_TIMESYNC */
#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF)
diff --git a/sys/dev/ixgbe/ixgbe_82598.c b/sys/dev/ixgbe/ixgbe_82598.c
index fd90d16..fb711c6 100644
--- a/sys/dev/ixgbe/ixgbe_82598.c
+++ b/sys/dev/ixgbe/ixgbe_82598.c
@@ -69,6 +69,8 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
+
/**
* ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
@@ -119,6 +121,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
&ixgbe_get_supported_physical_layer_82598;
mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
+ mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
@@ -416,7 +419,7 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
}
/* Configure pause time (2 TCs per register) */
- reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num));
+ reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
if ((packetbuf_num & 1) == 0)
reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
else
@@ -675,6 +678,7 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
+ s32 phy_status = IXGBE_SUCCESS;
u32 ctrl;
u32 gheccr;
u32 i;
@@ -722,13 +726,16 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
/* PHY ops must be identified and initialized prior to reset */
/* Init PHY and function pointers, perform SFP setup */
- status = hw->phy.ops.init(hw);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ phy_status = hw->phy.ops.init(hw);
+ if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
goto reset_hw_out;
+ else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto no_phy_reset;
hw->phy.ops.reset(hw);
}
+no_phy_reset:
/*
 * Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests before reset
@@ -777,16 +784,20 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
} else if (autoc != hw->mac.orig_autoc)
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
/*
* Store MAC address from RAR0, clear receive address registers, and
* clear the multicast table
*/
hw->mac.ops.init_rx_addrs(hw);
- /* Store the permanent mac address */
- hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
reset_hw_out:
+ if (phy_status != IXGBE_SUCCESS)
+ status = phy_status;
return status;
}
@@ -1103,3 +1114,36 @@ u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
out:
return physical_layer;
}
+
+/**
+ * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
+ * port devices.
+ * @hw: pointer to the HW structure
+ *
+ * Calls the common function and corrects an issue with some single port devices
+ * that enable LAN1 but not LAN0.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u16 pci_gen, pci_ctrl2;
+
+ ixgbe_set_lan_id_multi_port_pcie(hw);
+
+ /* check if LAN0 is disabled */
+ hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
+ if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
+
+ hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
+
+ /* if LAN0 is completely disabled force function to 0 */
+ if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
+
+ bus->func = 0;
+ }
+ }
+}
+
+
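
The LAN0 check above combines three EEPROM control bits. Using the IXGBE_PCIE_CTRL2_* values defined later in this patch (0x1, 0x2, 0x8), a standalone restatement of the predicate and a few sample words look like this; it is illustrative only, not driver code.

#include <stdio.h>
#include <stdint.h>

#define PCIE_CTRL2_DISABLE_SELECT 0x1	/* LAN Disable Select */
#define PCIE_CTRL2_LAN_DISABLE    0x2	/* LAN PCI Disable */
#define PCIE_CTRL2_DUMMY_ENABLE   0x8	/* Dummy Function Enable */

/*
 * Mirror of the test above: LAN0 counts as completely disabled only when
 * the disable bit is set and neither the select nor the dummy bit softens it.
 */
static int
lan0_fully_disabled(uint16_t pci_ctrl2)
{
	return ((pci_ctrl2 & PCIE_CTRL2_LAN_DISABLE) &&
	    !(pci_ctrl2 & PCIE_CTRL2_DISABLE_SELECT) &&
	    !(pci_ctrl2 & PCIE_CTRL2_DUMMY_ENABLE));
}

int
main(void)
{
	printf("0x2 -> %d, 0x3 -> %d, 0xA -> %d\n",
	    lan0_fully_disabled(0x2),	/* 1: force function to 0 */
	    lan0_fully_disabled(0x3),	/* 0: disable-select set */
	    lan0_fully_disabled(0xA));	/* 0: dummy function enabled */
	return (0);
}
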
diff --git a/sys/dev/ixgbe/ixgbe_82599.c b/sys/dev/ixgbe/ixgbe_82599.c
index 4f438a2..c3d23af 100644
--- a/sys/dev/ixgbe/ixgbe_82599.c
+++ b/sys/dev/ixgbe/ixgbe_82599.c
@@ -82,6 +82,8 @@ s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
@@ -174,16 +176,27 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
if (ret_val != IXGBE_SUCCESS)
goto setup_sfp_out;
+ /* PHY config will finish before releasing the semaphore */
+ ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto setup_sfp_out;
+ }
+
hw->eeprom.ops.read(hw, ++data_offset, &data_value);
while (data_value != 0xffff) {
IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
IXGBE_WRITE_FLUSH(hw);
hw->eeprom.ops.read(hw, ++data_offset, &data_value);
}
- /* Now restart DSP */
- IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000102);
- IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000b1d);
- IXGBE_WRITE_FLUSH(hw);
+ /* Now restart DSP by setting Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
+ (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
+
+ /* Release the semaphore */
+ ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /* Delay obtaining semaphore again to allow FW access */
+ msec_delay(hw->eeprom.semaphore_delay);
}
setup_sfp_out:
@@ -270,6 +283,7 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->max_rx_queues = 128;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
+
return ret_val;
}
@@ -383,6 +397,7 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
/* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
break;
@@ -414,6 +429,7 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw)
u32 i;
s32 status = IXGBE_SUCCESS;
+
/* Restart link */
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
@@ -485,6 +501,7 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
u32 speedcnt = 0;
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ u32 i = 0;
bool link_up = FALSE;
bool negotiation;
@@ -495,6 +512,25 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
speed &= link_speed;
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ /*
+ * When the driver changes the link speeds that it can support,
+ * it sets autotry_restart to TRUE to indicate that we need to
+ * initiate a new autotry session with the link partner. To do
+ * so, we set the speed then disable and re-enable the tx laser, to
+ * alert the link partner that it also needs to restart autotry on its
+ * end. This is consistent with TRUE clause 37 autoneg, which also
+ * involves a loss of signal.
+ */
+
/*
* Try each speed one by one, highest priority first. We do this in
* software because 10gb fiber doesn't support speed autonegotiation.
@@ -511,7 +547,7 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
goto out;
- /* Set hardware SDP's */
+ /* Set the module link speed */
esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
@@ -524,15 +560,35 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
if (status != IXGBE_SUCCESS)
goto out;
- msec_delay(100);
+ /* Flap the tx laser if it has not already been done */
+ if (hw->mac.autotry_restart) {
+ /* Disable tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ usec_delay(100);
- /* If we have link, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
- if (status != IXGBE_SUCCESS)
- goto out;
+ /* Enable tx laser; allow 2ms to light up per spec */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ msec_delay(2);
- if (link_up)
- goto out;
+ hw->mac.autotry_restart = FALSE;
+ }
+
+ /* The controller may take up to 500ms at 10g to acquire link */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed,
+ &link_up, FALSE);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if (link_up)
+ goto out;
+ }
}
if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
@@ -548,7 +604,7 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
goto out;
- /* Set hardware SDP's */
+ /* Set the module link speed */
esdp_reg &= ~IXGBE_ESDP_SDP5;
esdp_reg |= IXGBE_ESDP_SDP5_DIR;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
@@ -562,6 +618,22 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
if (status != IXGBE_SUCCESS)
goto out;
+ /* Flap the tx laser if it has not already been done */
+ if (hw->mac.autotry_restart) {
+ /* Disable tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ usec_delay(100);
+
+ /* Enable tx laser; allow 2ms to light up per spec */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ msec_delay(2);
+
+ hw->mac.autotry_restart = FALSE;
+ }
+
+ /* Wait for the link partner to also set speed */
msec_delay(100);
/* If we have link, just jump out */
@@ -654,6 +726,7 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
s32 status = IXGBE_SUCCESS;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ u32 start_autoc = autoc;
u32 orig_autoc = 0;
u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
@@ -669,16 +742,18 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
speed &= link_capabilities;
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
+ status = IXGBE_ERR_LINK_SETUP;
+ goto out;
+ }
+
/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
if (hw->mac.orig_link_settings_stored)
orig_autoc = hw->mac.orig_autoc;
else
orig_autoc = autoc;
-
- if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
- status = IXGBE_ERR_LINK_SETUP;
- } else if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
/* Set KX4/KX/KR support according to speed requested */
@@ -712,7 +787,8 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
}
}
- if (status == IXGBE_SUCCESS) {
+ if (autoc != start_autoc) {
+
/* Restart link */
autoc |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
@@ -893,6 +969,9 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
}
}
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
/*
* Store MAC address from RAR0, clear receive address registers, and
* clear the multicast table. Also reset num_rar_entries to 128,
@@ -901,8 +980,7 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
- /* Store the permanent mac address */
- hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
@@ -1076,13 +1154,14 @@ s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
u32 bitindex;
u32 bits;
u32 first_empty_slot;
+ u32 vt;
if (vlan > 4095)
return IXGBE_ERR_PARAM;
/*
* this is a 2 part operation - first the VFTA, then the
- * VLVF and VLVFB if vind is set
+ * VLVF and VLVFB if VT Mode is set
*/
/* Part 1
@@ -1102,14 +1181,15 @@ s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Part 2
- * If the vind is set
+ * If VT Mode is set
* Either vlan_on
* make sure the vlan is in VLVF
* set the vind bit in the matching VLVFB
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
- if (vind) {
+ vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (vt & IXGBE_VT_CTL_VT_ENABLE) {
/* find the vlanid or the first empty slot */
first_empty_slot = 0;
@@ -1134,46 +1214,49 @@ s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
if (vlan_on) {
/* set the pool bit */
if (vind < 32) {
- bits =
- IXGBE_READ_REG(hw, IXGBE_VLVFB(regindex*2));
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(regindex*2));
bits |= (1 << vind);
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex*2), bits);
+ IXGBE_VLVFB(regindex*2),
+ bits);
} else {
bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex*2)+1));
+ IXGBE_VLVFB((regindex*2)+1));
bits |= (1 << vind);
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex*2)+1), bits);
+ IXGBE_VLVFB((regindex*2)+1),
+ bits);
}
} else {
/* clear the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex*2));
- bits &= ~(1 << vind);
+ IXGBE_VLVFB(regindex*2));
+ bits &= ~(1 << vind);
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex*2), bits);
+ IXGBE_VLVFB(regindex*2),
+ bits);
bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex*2)+1));
+ IXGBE_VLVFB((regindex*2)+1));
} else {
bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex*2)+1));
+ IXGBE_VLVFB((regindex*2)+1));
bits &= ~(1 << vind);
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex*2)+1), bits);
+ IXGBE_VLVFB((regindex*2)+1),
+ bits);
bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex*2));
+ IXGBE_VLVFB(regindex*2));
}
}
if (bits)
IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
- (IXGBE_VLVF_VIEN | vlan));
+ (IXGBE_VLVF_VIEN | vlan));
else
IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
}
-
out:
return IXGBE_SUCCESS;
}
@@ -1221,16 +1304,75 @@ s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
**/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
+ int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
+
+ /*
+ * Before starting reinitialization process,
+ * FDIRCMD.CMD must be zero.
+ */
+ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+ if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ IXGBE_FDIRCMD_CMD_MASK))
+ break;
+ usec_delay(10);
+ }
+ if (i >= IXGBE_FDIRCMD_CMD_POLL) {
+ DEBUGOUT("Flow Director previous command isn't complete, "
+ "aborting table re-initialization. \n");
+ return IXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
IXGBE_WRITE_FLUSH(hw);
+ /*
+ * The 82599 flow director init flow cannot be restarted, so work
+ * around the silicon errata by performing the following steps
+ * before re-writing the FDIRCTRL control register with the same value:
+ * - write 1 to bit 8 of FDIRCMD register &
+ * - write 0 to bit 8 of FDIRCMD register
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ /*
+ * Clear FDIR Hash register to clear any leftover hashes
+ * waiting to be programmed.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
+ IXGBE_WRITE_FLUSH(hw);
+
IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll init-done after we write FDIRCTRL register */
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ usec_delay(10);
+ }
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+ DEBUGOUT("Flow Director Signature poll time exceeded!\n");
+ return IXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ /* Clear FDIR statistics registers (read to clear) */
+ IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
return IXGBE_SUCCESS;
}
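
Both waits in the reinit routine above (FDIRCMD going idle, then FDIRCTRL reporting INIT_DONE) follow the same bounded poll-with-delay shape. Below is a generic standalone sketch of that shape, with read_fake() standing in for IXGBE_READ_REG(); it is not driver code.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

static uint32_t fake_reg = 0x1;		/* pretend bit 0 never clears */

static uint32_t
read_fake(void)
{
	return (fake_reg);
}

/*
 * Spin a bounded number of times checking a masked register condition,
 * sleeping briefly between reads; returns 0 on success, -1 on timeout.
 */
static int
poll_for(uint32_t (*read_reg)(void), uint32_t mask, int want_set,
    int max_polls, useconds_t delay_us)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		uint32_t v = read_reg();

		if (want_set ? (v & mask) != 0 : (v & mask) == 0)
			return (0);
		usleep(delay_us);
	}
	return (-1);
}

int
main(void)
{
	/* Wait for bit 0 to clear; it never does here, so this times out. */
	printf("poll result: %d\n", poll_for(read_fake, 0x1, 0, 10, 10));
	return (0);
}
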
-#define IXGBE_FDIR_INIT_DONE_POLL 10
/**
* ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
* @hw: pointer to hardware structure
@@ -1456,6 +1598,7 @@ u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
*/
union {
+ u64 fill[6];
u32 key[11];
u8 key_stream[44];
} tmp_key;
@@ -1464,23 +1607,30 @@ u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
u8 int_key[44]; /* upper-most bit unused */
u8 hash_str[46]; /* upper-most 2 bits unused */
u16 hash_result = 0;
- u16 tmp = 0;
int i, j, k, h;
- memset(&tmp_key, 0, sizeof(tmp_key));
+ /*
+ * Initialize the fill member to prevent warnings
+ * on some compilers
+ */
+ tmp_key.fill[0] = 0;
+
/* First load the temporary key stream */
- for (i = 0; i < 11; i++)
- tmp_key.key[i] = key;
+ for (i = 0; i < 6; i++) {
+ u64 fillkey = ((u64)key << 32) | key;
+ tmp_key.fill[i] = fillkey;
+ }
/*
* Set the interim key for the hashing. Bit 352 is unused, so we must
* shift and compensate when building the key.
*/
+
int_key[0] = tmp_key.key_stream[0] >> 1;
for (i = 1, j = 0; i < 44; i++) {
- int_key[i] = (tmp_key.key_stream[j] & 0x1) << 7;
+ unsigned int this_key = tmp_key.key_stream[j] << 7;
j++;
- int_key[i] |= tmp_key.key_stream[j] >> 1;
+ int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
}
/*
@@ -1489,11 +1639,11 @@ u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
*/
hash_str[0] = (stream[40] & 0x7f) >> 1;
for (i = 1, j = 40; i < 46; i++) {
- hash_str[i] = (stream[j] & 0x1) << 7;
+ unsigned int this_str = stream[j] << 7;
j++;
if (j > 41)
j = 0;
- hash_str[i] |= stream[j] >> 1;
+ hash_str[i] = (u8)(this_str | (stream[j] >> 1));
}
/*
@@ -1503,7 +1653,7 @@ u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
*/
for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
for (h = 0; h < 8 && k < 351; h++, k++) {
- if ((int_key[j] >> h) & 0x1) {
+ if (int_key[j] & (1 << h)) {
/*
* Key bit is set, XOR in the current 16-bit
* string. Example of processing:
@@ -1518,11 +1668,11 @@ u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key)
* (hash_str[i - 1] & 0xff << 1) |
* (hash_str[i] & 0x80 >> 7)
*/
- tmp = ((hash_str[i] & (0xff << h)) >> h);
- tmp |= ((hash_str[i - 1] & 0xff) << (8 - h));
- tmp |= (hash_str[i - 2] & (0xff >> (8 - h)))
- << (16 - h);
- hash_result ^= tmp;
+ int tmp = (hash_str[i] >> h);
+ tmp |= (hash_str[i - 1] << (8 - h));
+ tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
+ << (16 - h);
+ hash_result ^= (u16)tmp;
}
}
}
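
The change to the key loading above replaces eleven 32-bit stores with six 64-bit stores of the key duplicated into both halves; the byte stream that feeds the hash is unchanged. A standalone check of that equivalence, using the IXGBE_ATR_BUCKET_HASH_KEY value from the ixgbe_type.h hunk below as a sample key:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
	uint32_t key = 0xE214AD3D;	/* IXGBE_ATR_BUCKET_HASH_KEY */
	union {
		uint64_t fill[6];	/* 48 bytes, covers the 44-byte stream */
		uint32_t key32[11];
		uint8_t bytes[48];
	} a, b;
	int i;

	for (i = 0; i < 6; i++)		/* new style: 64-bit fills */
		a.fill[i] = ((uint64_t)key << 32) | key;
	for (i = 0; i < 11; i++)	/* old style: 32-bit stores */
		b.key32[i] = key;

	printf("44-byte key streams %s\n",
	    memcmp(a.bytes, b.bytes, 44) == 0 ? "match" : "differ");
	return (0);
}
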
@@ -2139,6 +2289,11 @@ s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
}
IXGBE_WRITE_FLUSH(hw);
+ /* We need to run link autotry after the driver loads */
+ hw->mac.autotry_restart = TRUE;
+
+ if (ret_val == IXGBE_SUCCESS)
+ ret_val = ixgbe_verify_fw_version_82599(hw);
return ret_val;
}
@@ -2220,6 +2375,8 @@ u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
goto out;
break;
case IXGBE_AUTOC_LMS_10G_SERIAL:
@@ -2442,3 +2599,51 @@ s32 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
san_mac_addr_out:
return status;
}
+
+/**
+ * ixgbe_verify_fw_version_82599 - verify fw version for 82599
+ * @hw: pointer to hardware structure
+ *
+ * Verifies that the installed firmware version is 0.6 or higher
+ * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+ *
+ * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+ * if the FW version is not supported.
+ **/
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM_VERSION;
+ u16 fw_offset, fw_ptp_cfg_offset;
+ u16 fw_version = 0;
+
+ /* firmware check is only necessary for SFI devices */
+ if (hw->phy.media_type != ixgbe_media_type_fiber) {
+ status = IXGBE_SUCCESS;
+ goto fw_version_out;
+ }
+
+ /* get the offset to the Firmware Module block */
+ hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+ if ((fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the offset to the Pass Through Patch Configuration block */
+ hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+ &fw_ptp_cfg_offset);
+
+ if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the firmware version */
+ hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+ IXGBE_FW_PATCH_VERSION_4),
+ &fw_version);
+
+ if (fw_version > 0x5)
+ status = IXGBE_SUCCESS;
+
+fw_version_out:
+ return status;
+}
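
The firmware check above is a two-hop EEPROM pointer walk with 0x0000/0xFFFF treated as "not present" at each hop. The sketch below exercises that walk against a made-up 16-word EEPROM image; the 0x4 and 0x7 offsets are the IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR and IXGBE_FW_PATCH_VERSION_4 values added to ixgbe_type.h in this patch, while the image contents and the location of the firmware pointer are invented purely for illustration.

#include <stdio.h>
#include <stdint.h>

#define FW_PATCH_CONFIG_PTR	0x4	/* IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR */
#define FW_PATCH_VERSION_4	0x7	/* IXGBE_FW_PATCH_VERSION_4 */

/* Made-up EEPROM image, only to exercise the pointer walk. */
static uint16_t eeprom[16] = {
	[0x0] = 0x0008,		/* pretend the FW module pointer is at word 0 */
	[0xC] = 0x0002,		/* 0x8 + 0x4: patch config block lives at 0x2 */
	[0x9] = 0x0006,		/* 0x2 + 0x7: firmware version 0.6 */
};

static int
fw_version_ok(void)
{
	uint16_t fw_offset = eeprom[0x0];
	uint16_t cfg_offset, version;

	if (fw_offset == 0 || fw_offset == 0xFFFF)
		return (0);
	cfg_offset = eeprom[fw_offset + FW_PATCH_CONFIG_PTR];
	if (cfg_offset == 0 || cfg_offset == 0xFFFF)
		return (0);
	version = eeprom[cfg_offset + FW_PATCH_VERSION_4];
	return (version > 0x5);
}

int
main(void)
{
	printf("firmware version acceptable: %d\n", fw_version_ok());
	return (0);
}
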
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index a3e7b35..9c3356f 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -103,6 +103,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
hw->mac.type = ixgbe_mac_82598EB;
break;
case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
case IXGBE_DEV_ID_82599_SFP:
case IXGBE_DEV_ID_82599_CX4:
hw->mac.type = ixgbe_mac_82599EB;
@@ -935,3 +936,32 @@ s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
(hw, regval), IXGBE_NOT_IMPLEMENTED);
}
+
+/**
+ * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
+ (hw, mask), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_release_swfw_semaphore - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through SW_FW_SYNC register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
+{
+ if (hw->mac.ops.release_swfw_sync)
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
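
These wrappers are meant to bracket any hardware access that firmware can also touch: acquire, do the access, release, and on failure release before backing off so the other agent can get in (the i2c read path later in this patch switches to exactly that shape, re-acquiring inside its retry loop). A standalone sketch of the pattern with stand-in functions, not driver API:

#include <stdio.h>

#define MAX_RETRY 10

static int try_acquire(void) { return (1); }		/* always succeeds here */
static void release(void) { }
static int do_transfer(int attempt) { return (attempt >= 2); }	/* fail twice */

int
main(void)
{
	int retry, ok = 0;

	for (retry = 0; retry < MAX_RETRY && !ok; retry++) {
		if (!try_acquire())
			break;			/* could not get the semaphore */
		ok = do_transfer(retry);
		release();			/* let firmware in between tries */
		/* the driver adds msec_delay(100) before the next attempt */
	}
	printf("%s after %d attempt(s)\n", ok ? "succeeded" : "gave up", retry);
	return (0);
}
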
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index ba987ed..654f22f 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -162,5 +162,8 @@ s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
+
#endif /* _IXGBE_API_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index 030605c..30c372d 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -87,6 +87,8 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
+ mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
+ mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
/* LEDs */
mac->ops.led_on = &ixgbe_led_on_generic;
@@ -792,13 +794,10 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_EEPROM;
- u32 timeout;
+ u32 timeout = 2000;
u32 i;
u32 swsm;
- /* Set timeout value based on size of EEPROM */
- timeout = hw->eeprom.word_size + 1;
-
/* Get SMBI software semaphore between device drivers first */
for (i = 0; i < timeout; i++) {
/*
@@ -1078,6 +1077,9 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ /* Delay before attempt to obtain semaphore again to allow FW access */
+ msec_delay(hw->eeprom.semaphore_delay);
}
/**
@@ -1633,6 +1635,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
s32 ret_val = IXGBE_SUCCESS;
u32 mflcn_reg, fccfg_reg;
u32 reg;
+ u32 rx_pba_size;
DEBUGFUNC("ixgbe_fc_enable_generic");
@@ -1699,22 +1702,41 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- /* Set up and enable Rx high/low water mark thresholds, enable XON. */
- if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon) {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num),
- (hw->fc.low_water | IXGBE_FCRTL_XONE));
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num),
- hw->fc.low_water);
- }
+ reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
+ /* Thresholds are different for link flow control when in DCB mode */
+ if (reg & IXGBE_MTQC_RT_ENA) {
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+
+ /* Always disable XON for LFC when in DCB mode */
+ reg = (rx_pba_size >> 5) & 0xFFE0;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
+
+ reg = (rx_pba_size >> 2) & 0xFFE0;
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause)
+ reg |= IXGBE_FCRTH_FCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
+ } else {
+ /* Set up and enable Rx high/low water mark thresholds,
+ * enable XON. */
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ if (hw->fc.send_xon) {
+ IXGBE_WRITE_REG(hw,
+ IXGBE_FCRTL_82599(packetbuf_num),
+ (hw->fc.low_water |
+ IXGBE_FCRTL_XONE));
+ } else {
+ IXGBE_WRITE_REG(hw,
+ IXGBE_FCRTL_82599(packetbuf_num),
+ hw->fc.low_water);
+ }
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
+ (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+ }
}
/* Configure pause time (2 TCs per register) */
- reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num));
+ reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
if ((packetbuf_num & 1) == 0)
reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
else
@@ -2043,9 +2065,6 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
ixgbe_release_eeprom_semaphore(hw);
-
- /* Delay before attempt to obtain semaphore again to allow FW access */
- msec_delay(hw->eeprom.semaphore_delay);
}
/**
@@ -2081,6 +2100,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
if (!link_up) {
+
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
msec_delay(10);
@@ -2104,6 +2125,7 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index 53b64f5..e1bbce0 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -83,6 +83,8 @@
#define true 1
#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
#define PCI_COMMAND_REGISTER PCIR_COMMAND
+#define UNREFERENCED_PARAMETER(_p)
+
#define IXGBE_HTONL htonl
diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c
index afd30ee..daa37d9 100644
--- a/sys/dev/ixgbe/ixgbe_phy.c
+++ b/sys/dev/ixgbe/ixgbe_phy.c
@@ -444,7 +444,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
**/
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
- s32 status = IXGBE_NOT_IMPLEMENTED;
+ s32 status = IXGBE_SUCCESS;
u32 time_out;
u32 max_time_out = 10;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
@@ -485,7 +485,6 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
- status = IXGBE_SUCCESS;
break;
}
}
@@ -760,9 +759,15 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
u8 comp_codes_1g = 0;
u8 comp_codes_10g = 0;
u8 oui_bytes[3] = {0, 0, 0};
- u8 transmission_media = 0;
+ u8 cable_tech = 0;
u16 enforce_sfp = 0;
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
&identifier);
@@ -779,13 +784,16 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* LAN ID is needed for sfp_type determination */
hw->mac.ops.set_lan_id(hw);
- if (identifier == IXGBE_SFF_IDENTIFIER_SFP) {
+ if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
&comp_codes_1g);
hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
&comp_codes_10g);
- hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_TRANSMISSION_MEDIA,
- &transmission_media);
+ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
+ &cable_tech);
/* ID Module
* =========
@@ -798,7 +806,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 6 SFP_SR/LR_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
- if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
hw->phy.sfp_type = ixgbe_sfp_type_sr;
@@ -807,7 +815,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
} else if (hw->mac.type == ixgbe_mac_82599EB) {
- if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core0;
@@ -836,6 +844,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_setup_needed = TRUE;
/* Determine if the SFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = FALSE;
if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
(comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
@@ -861,8 +870,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
switch (vendor_oui) {
case IXGBE_SFF_VENDOR_OUI_TYCO:
- if (transmission_media &
- IXGBE_SFF_TWIN_AX_CAPABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type = ixgbe_phy_tw_tyco;
break;
case IXGBE_SFF_VENDOR_OUI_FTL:
@@ -875,8 +883,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.type = ixgbe_phy_sfp_intel;
break;
default:
- if (transmission_media &
- IXGBE_SFF_TWIN_AX_CAPABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type = ixgbe_phy_tw_unknown;
else
hw->phy.type = ixgbe_phy_sfp_unknown;
@@ -884,16 +891,21 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
}
}
+ /* All passive DA cables are supported */
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+ status = IXGBE_SUCCESS;
+ goto out;
+ }
+
+ /* 1G SFP modules are not supported */
if (comp_codes_10g == 0) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
}
- if (hw->mac.type == ixgbe_mac_82598EB ||
- (hw->phy.sfp_type != ixgbe_sfp_type_sr &&
- hw->phy.sfp_type != ixgbe_sfp_type_lr &&
- hw->phy.sfp_type != ixgbe_sfp_type_srlr_core0 &&
- hw->phy.sfp_type != ixgbe_sfp_type_srlr_core1)) {
+
+ /* Anything else 82598-based is supported */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
status = IXGBE_SUCCESS;
goto out;
}
@@ -1031,7 +1043,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
s32 status = IXGBE_SUCCESS;
- u32 max_retry = 1;
+ u32 max_retry = 10;
u32 retry = 0;
u16 swfw_mask = 0;
bool nack = 1;
@@ -1043,12 +1055,13 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
else
swfw_mask = IXGBE_GSSR_PHY0_SM;
- if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
- status = IXGBE_ERR_SWFW_SYNC;
- goto read_byte_out;
- }
do {
+ if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto read_byte_out;
+ }
+
ixgbe_i2c_start(hw);
/* Device Address and write indication */
@@ -1091,6 +1104,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
+ ixgbe_release_swfw_sync(hw, swfw_mask);
+ msec_delay(100);
ixgbe_i2c_bus_clear(hw);
retry++;
if (retry < max_retry)
diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h
index 047d31d..bfe73de 100644
--- a/sys/dev/ixgbe/ixgbe_phy.h
+++ b/sys/dev/ixgbe/ixgbe_phy.h
@@ -46,10 +46,10 @@
#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
-#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
+#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
/* Bitmasks */
-#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80
+#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index 595a863..50345f4 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -37,6 +37,7 @@
#include "ixgbe_osdep.h"
+
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -55,6 +56,7 @@
#define IXGBE_DEV_ID_82599_KX4 0x10F7
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -280,6 +282,7 @@
#define IXGBE_DTXCTL 0x07E00
#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFDTXGSWC 0x08220
#define IXGBE_DTXMXSZRQ 0x08100
#define IXGBE_DTXTCPFLGL 0x04A88
#define IXGBE_DTXTCPFLGH 0x04A8C
@@ -290,6 +293,8 @@
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
/* Tx DCA Control register : 128 of these (0-127) */
#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -711,14 +716,6 @@
#define IXGBE_PBACLR_82599 0x11068
#define IXGBE_CIAA_82599 0x11088
#define IXGBE_CIAD_82599 0x1108C
-#define IXGBE_PCIE_DIAG_0_82599 0x11090
-#define IXGBE_PCIE_DIAG_1_82599 0x11094
-#define IXGBE_PCIE_DIAG_2_82599 0x11098
-#define IXGBE_PCIE_DIAG_3_82599 0x1109C
-#define IXGBE_PCIE_DIAG_4_82599 0x110A0
-#define IXGBE_PCIE_DIAG_5_82599 0x110A4
-#define IXGBE_PCIE_DIAG_6_82599 0x110A8
-#define IXGBE_PCIE_DIAG_7_82599 0x110C0
#define IXGBE_INTRPT_CSR_82599 0x110B0
#define IXGBE_INTRPT_MASK_82599 0x110B8
#define IXGBE_CDQ_MBR_82599 0x110B4
@@ -785,10 +782,20 @@
#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
#define IXGBE_PCIEECCCTL 0x1106C
+#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
+#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
+#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
+#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/
+#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
+#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
+#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
+#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/
#define IXGBE_PCIEECCCTL0 0x11100
#define IXGBE_PCIEECCCTL1 0x11104
#define IXGBE_RXDBUECC 0x03F70
#define IXGBE_TXDBUECC 0x0CF70
+#define IXGBE_RXDBUEST 0x03F74
+#define IXGBE_TXDBUEST 0x0CF74
#define IXGBE_PBTXECC 0x0C300
#define IXGBE_PBRXECC 0x03300
#define IXGBE_GHECCR 0x110B0
@@ -1108,6 +1115,8 @@
/* VFRE bitmask */
#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
/* RDHMPN and TDHMPN bitmasks */
#define IXGBE_RDHMPN_RDICADDR 0x007FF800
#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
@@ -1351,6 +1360,7 @@
/* VLAN pool filtering masks */
#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
@@ -1363,8 +1373,10 @@
#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
/* ESDP Bit Masks */
-#define IXGBE_ESDP_SDP0 0x00000001
-#define IXGBE_ESDP_SDP1 0x00000002
+#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
@@ -1537,6 +1549,7 @@
#define IXGBE_PBANUM1_PTR 0x16
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
+#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -1580,10 +1593,17 @@
#define IXGBE_EERD_ATTEMPTS 100000
#endif
+#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
+#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
+
#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
+#define IXGBE_FW_PATCH_VERSION_4 0x7
/* PCI Bus Info */
#define IXGBE_PCI_LINK_STATUS 0xB2
@@ -1636,6 +1656,7 @@
/* Transmit Config masks */
#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
/* Enable short packet padding to 64 bytes */
#define IXGBE_TX_PAD_ENABLE 0x00000400
#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
@@ -1782,6 +1803,7 @@
#define IXGBE_PSRTYPE_UDPHDR 0x00000020
#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
/* SRRCTL bit definitions */
#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
@@ -1875,6 +1897,12 @@
#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
+
/* Little Endian defines */
#ifndef __le16
#define __le16 u16
@@ -1947,6 +1975,7 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
+#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
@@ -1957,6 +1986,7 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
#define IXGBE_FDIRCMD_IPV6 0x00000080
+#define IXGBE_FDIRCMD_CLEARHT 0x00000100
#define IXGBE_FDIRCMD_DROP 0x00000200
#define IXGBE_FDIRCMD_INT 0x00000400
#define IXGBE_FDIRCMD_LAST 0x00000800
@@ -1964,6 +1994,8 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
+#define IXGBE_FDIR_INIT_DONE_POLL 10
+#define IXGBE_FDIRCMD_CMD_POLL 10
/* Transmit Descriptor - Legacy */
struct ixgbe_legacy_tx_desc {
@@ -2134,6 +2166,7 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
@@ -2421,6 +2454,8 @@ struct ixgbe_mac_operations {
s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
s32 (*setup_sfp)(struct ixgbe_hw *);
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
+ void (*release_swfw_sync)(struct ixgbe_hw *, u16);
/* Link */
s32 (*setup_link)(struct ixgbe_hw *);
@@ -2504,6 +2539,7 @@ struct ixgbe_mac_info {
bool orig_link_settings_stored;
bool autoneg;
bool autoneg_succeeded;
+ bool autotry_restart;
};
struct ixgbe_phy_info {
@@ -2541,6 +2577,7 @@ struct ixgbe_hw {
#define ixgbe_call_func(hw, func, params, error) \
(func != NULL) ? func params : error
+
/* Error Codes */
#define IXGBE_SUCCESS 0
#define IXGBE_ERR_EEPROM -1
@@ -2565,8 +2602,9 @@ struct ixgbe_hw {
#define IXGBE_ERR_SFP_NOT_PRESENT -20
#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
+#define IXGBE_ERR_FDIR_REINIT_FAILED -23
+#define IXGBE_ERR_EEPROM_VERSION -24
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
-#define UNREFERENCED_PARAMETER(_p)
#endif /* _IXGBE_TYPE_H_ */