Diffstat (limited to 'sys/net/iflib.c')
-rw-r--r--  sys/net/iflib.c | 541
1 file changed, 374 insertions(+), 167 deletions(-)
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
index c11da16..0d54f2c 100644
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
+#include <sys/limits.h>
#include <net/if.h>
@@ -101,7 +102,6 @@ __FBSDID("$FreeBSD$");
* Enable mbuf vectors for compressing long mbuf chains
*/
-
/*
* NB:
* - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
@@ -181,8 +181,10 @@ struct iflib_ctx {
struct sysctl_oid *ifc_sysctl_node;
uint16_t ifc_sysctl_ntxqs;
uint16_t ifc_sysctl_nrxqs;
- uint16_t ifc_sysctl_ntxds;
- uint16_t ifc_sysctl_nrxds;
+ uint16_t ifc_sysctl_qs_eq_override;
+
+ uint16_t ifc_sysctl_ntxds[8];
+ uint16_t ifc_sysctl_nrxds[8];
struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
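The single override_ntxds/override_nrxds tunables become 8-entry arrays here because a qset can contain more than one hardware ring (for instance a completion ring alongside the actual descriptor ring), each needing its own count. A minimal userland sketch, under assumed names (pick_ndesc() is illustrative, not part of iflib), of the selection logic this change feeds into — per-ring override if non-zero, else the driver default, clamped to the driver's min/max, as iflib_device_register does later in this diff:

#include <stdint.h>

/* Sketch only: one call per ring slot i in [0, isc_ntxqs). */
static uint16_t
pick_ndesc(uint16_t override, uint16_t def, uint16_t min, uint16_t max)
{
        uint16_t n = (override != 0) ? override : def;

        if (n < min)
                n = min;
        if (n > max)
                n = max;
        return (n);
}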
@@ -294,10 +296,11 @@ typedef struct iflib_sw_tx_desc_array {
#define IFLIB_RESTART_BUDGET 8
-#define IFC_LEGACY 0x1
-#define IFC_QFLUSH 0x2
-#define IFC_MULTISEG 0x4
-#define IFC_DMAR 0x8
+#define IFC_LEGACY 0x01
+#define IFC_QFLUSH 0x02
+#define IFC_MULTISEG 0x04
+#define IFC_DMAR 0x08
+#define IFC_SC_ALLOCATED 0x10
#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
@@ -311,6 +314,7 @@ struct iflib_txq {
uint8_t ift_db_pending;
uint8_t ift_db_pending_queued;
uint8_t ift_npending;
+ uint8_t ift_br_offset;
/* implicit pad */
uint64_t ift_processed;
uint64_t ift_cleaned;
@@ -414,6 +418,7 @@ struct iflib_rxq {
uint16_t ifr_cq_cidx;
uint16_t ifr_cq_pidx;
uint8_t ifr_cq_gen;
+ uint8_t ifr_fl_offset;
if_ctx_t ifr_ctx;
iflib_fl_t ifr_fl;
@@ -604,7 +609,7 @@ static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
-static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx);
+static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx, int budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, char *str);
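The extra budget argument lets iflib bound how much work isc_rxd_available() reports; the netmap path below passes INT_MAX to keep its old unbounded scan. A hypothetical driver-side sketch, with illustrative names (rxring and RXD_DONE are not from any real driver), of how a ring scan can honor the budget:

#include <stdint.h>

struct rxring {
        uint32_t size;          /* power of two */
        uint8_t *status;        /* per-descriptor "done" flags */
};
#define RXD_DONE 0x01

/* Count ready descriptors starting at cidx, but stop at budget. */
static int
rxd_available(struct rxring *r, uint32_t cidx, int budget)
{
        int cnt = 0;

        while (cnt < budget && (r->status[cidx] & RXD_DONE) != 0) {
                cidx = (cidx + 1) & (r->size - 1);
                cnt++;
        }
        return (cnt);
}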
@@ -875,7 +880,7 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
for (fl = rxq->ifr_fl, i = 0; i < rxq->ifr_nfl; i++, fl++) {
nic_i = fl->ifl_cidx;
nm_i = netmap_idx_n2k(kring, nic_i);
- avail = ctx->isc_rxd_available(ctx->ifc_softc, kring->ring_id, nic_i);
+ avail = ctx->isc_rxd_available(ctx->ifc_softc, kring->ring_id, nic_i, INT_MAX);
for (n = 0; avail > 0; n++, avail--) {
error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
if (error)
@@ -930,7 +935,7 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
/*
* XXX we should be batching this operation - TODO
*/
- ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i, &paddr, &vaddr, 1);
+ ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i, &paddr, &vaddr, 1, fl->ifl_buf_size);
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds[nic_i].ifsd_map,
BUS_DMASYNC_PREREAD);
nm_i = nm_next(nm_i, lim);
@@ -958,6 +963,7 @@ static int
iflib_netmap_attach(if_ctx_t ctx)
{
struct netmap_adapter na;
+ if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
bzero(&na, sizeof(na));
@@ -966,8 +972,8 @@ iflib_netmap_attach(if_ctx_t ctx)
MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);
- na.num_tx_desc = ctx->ifc_sctx->isc_ntxd;
- na.num_rx_desc = ctx->ifc_sctx->isc_ntxd;
+ na.num_tx_desc = scctx->isc_ntxd[0];
+ na.num_rx_desc = scctx->isc_nrxd[0];
na.nm_txsync = iflib_netmap_txsync;
na.nm_rxsync = iflib_netmap_rxsync;
na.nm_register = iflib_netmap_register;
@@ -986,7 +992,7 @@ iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
if (slot == 0)
return;
- for (int i = 0; i < ctx->ifc_sctx->isc_ntxd; i++) {
+ for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
/*
* In netmap mode, set the map for the packet buffer.
@@ -1011,7 +1017,7 @@ iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
if (slot == 0)
return;
sd = rxq->ifr_fl[0].ifl_sds;
- nrxd = ctx->ifc_sctx->isc_nrxd;
+ nrxd = ctx->ifc_softc_ctx.isc_nrxd[0];
for (int i = 0; i < nrxd; i++, sd++) {
int sj = netmap_idx_n2k(&na->rx_rings[rxq->ifr_id], i);
uint64_t paddr;
@@ -1021,7 +1027,7 @@ iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
vaddr = addr = PNMB(na, slot + sj, &paddr);
netmap_load_map(na, rxq->ifr_fl[0].ifl_ifdi->idi_tag, sd->ifsd_map, addr);
/* Update descriptor and the cached value */
- ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, i, &paddr, &vaddr, 1);
+ ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, i, &paddr, &vaddr, 1, rxq->ifr_fl[0].ifl_buf_size);
}
/* preserve queue */
if (ctx->ifc_ifp->if_capenable & IFCAP_NETMAP) {
@@ -1236,7 +1242,8 @@ iflib_txsd_alloc(iflib_txq_t txq)
nsegments = scctx->isc_tx_nsegments;
ntsosegments = scctx->isc_tx_tso_segments_max;
- MPASS(sctx->isc_ntxd > 0);
+ MPASS(scctx->isc_ntxd[0] > 0);
+ MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
MPASS(nsegments > 0);
MPASS(ntsosegments > 0);
/*
@@ -1259,13 +1266,11 @@ iflib_txsd_alloc(iflib_txq_t txq)
sctx->isc_tx_maxsize, nsegments, sctx->isc_tx_maxsegsize);
goto fail;
}
-#ifdef INVARIANTS
+#ifdef IFLIB_DIAGNOSTICS
device_printf(dev,"maxsize: %zd nsegments: %d maxsegsize: %zd\n",
sctx->isc_tx_maxsize, nsegments, sctx->isc_tx_maxsegsize);
+
#endif
- device_printf(dev,"TSO maxsize: %d ntsosegments: %d maxsegsize: %d\n",
- scctx->isc_tx_tso_size_max, ntsosegments,
- scctx->isc_tx_tso_segsize_max);
if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
@@ -1282,21 +1287,21 @@ iflib_txsd_alloc(iflib_txq_t txq)
goto fail;
}
-#ifdef INVARIANTS
+#ifdef IFLIB_DIAGNOSTICS
device_printf(dev,"TSO maxsize: %d ntsosegments: %d maxsegsize: %d\n",
scctx->isc_tx_tso_size_max, ntsosegments,
scctx->isc_tx_tso_segsize_max);
#endif
if (!(txq->ift_sds.ifsd_flags =
(uint8_t *) malloc(sizeof(uint8_t) *
- sctx->isc_ntxd, M_IFLIB, M_NOWAIT | M_ZERO))) {
+ scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
if (!(txq->ift_sds.ifsd_m =
(struct mbuf **) malloc(sizeof(struct mbuf *) *
- sctx->isc_ntxd, M_IFLIB, M_NOWAIT | M_ZERO))) {
+ scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
@@ -1308,13 +1313,13 @@ iflib_txsd_alloc(iflib_txq_t txq)
return (0);
if (!(txq->ift_sds.ifsd_map =
- (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * sctx->isc_ntxd, M_IFLIB, M_NOWAIT | M_ZERO))) {
+ (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer map memory\n");
err = ENOMEM;
goto fail;
}
- for (int i = 0; i < sctx->isc_ntxd; i++) {
+ for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
if (err != 0) {
device_printf(dev, "Unable to create TX DMA map\n");
@@ -1348,9 +1353,8 @@ static void
iflib_txq_destroy(iflib_txq_t txq)
{
if_ctx_t ctx = txq->ift_ctx;
- if_shared_ctx_t sctx = ctx->ifc_sctx;
- for (int i = 0; i < sctx->isc_ntxd; i++)
+ for (int i = 0; i < txq->ift_size; i++)
iflib_txsd_destroy(ctx, txq, i);
if (txq->ift_sds.ifsd_map != NULL) {
free(txq->ift_sds.ifsd_map, M_IFLIB);
@@ -1390,7 +1394,7 @@ iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
bus_dmamap_unload(txq->ift_desc_tag,
txq->ift_sds.ifsd_map[i]);
}
- m_freem(*mp);
+ m_free(*mp);
DBG_COUNTER_INC(tx_frees);
*mp = NULL;
}
@@ -1399,7 +1403,7 @@ static int
iflib_txq_setup(iflib_txq_t txq)
{
if_ctx_t ctx = txq->ift_ctx;
- if_shared_ctx_t sctx = ctx->ifc_sctx;
+ if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
iflib_dma_info_t di;
int i;
@@ -1408,7 +1412,7 @@ iflib_txq_setup(iflib_txq_t txq)
/* Reset indices */
txq->ift_cidx_processed = txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
- txq->ift_size = sctx->isc_ntxd;
+ txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
bzero((void *)di->idi_vaddr, di->idi_size);
@@ -1433,22 +1437,25 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
{
if_ctx_t ctx = rxq->ifr_ctx;
if_shared_ctx_t sctx = ctx->ifc_sctx;
+ if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
device_t dev = ctx->ifc_dev;
iflib_fl_t fl;
iflib_rxsd_t rxsd;
int err;
- MPASS(sctx->isc_nrxd > 0);
+ MPASS(scctx->isc_nrxd[0] > 0);
+ MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);
fl = rxq->ifr_fl;
for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
fl->ifl_sds = malloc(sizeof(struct iflib_sw_rx_desc) *
- sctx->isc_nrxd, M_IFLIB, M_WAITOK | M_ZERO);
+ scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB,
+ M_WAITOK | M_ZERO);
if (fl->ifl_sds == NULL) {
device_printf(dev, "Unable to allocate rx sw desc memory\n");
return (ENOMEM);
}
- fl->ifl_size = sctx->isc_nrxd; /* this isn't necessarily the same */
+ fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
@@ -1468,7 +1475,7 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
}
rxsd = fl->ifl_sds;
- for (int i = 0; i < sctx->isc_nrxd; i++, rxsd++) {
+ for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++, rxsd++) {
err = bus_dmamap_create(fl->ifl_desc_tag, 0, &rxsd->ifsd_map);
if (err) {
device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
@@ -1626,7 +1633,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
}
if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
ctx->isc_rxd_refill(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx,
- fl->ifl_bus_addrs, fl->ifl_vm_addrs, i);
+ fl->ifl_bus_addrs, fl->ifl_vm_addrs, i, fl->ifl_buf_size);
i = 0;
pidx = fl->ifl_pidx;
}
@@ -1854,7 +1861,11 @@ iflib_init_locked(if_ctx_t ctx)
for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
iflib_netmap_rxq_init(ctx, rxq);
}
+#ifdef INVARIANTS
+ i = if_getdrvflags(ifp);
+#endif
IFDI_INIT(ctx);
+ MPASS(if_getdrvflags(ifp) == i);
for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
if (iflib_fl_setup(fl)) {
@@ -1902,7 +1913,6 @@ iflib_stop(if_ctx_t ctx)
iflib_txq_t txq = ctx->ifc_txqs;
iflib_rxq_t rxq = ctx->ifc_rxqs;
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
- if_shared_ctx_t sctx = ctx->ifc_sctx;
iflib_dma_info_t di;
iflib_fl_t fl;
int i, j;
@@ -1920,7 +1930,7 @@ iflib_stop(if_ctx_t ctx)
/* clean any enqueued buffers */
iflib_txq_check_drain(txq, 0);
/* Free any existing tx buffers. */
- for (j = 0; j < sctx->isc_ntxd; j++) {
+ for (j = 0; j < txq->ift_size; j++) {
iflib_txsd_free(ctx, txq, j);
}
txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
@@ -1990,13 +2000,24 @@ assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri)
caddr_t cl;
i = 0;
+ mh = NULL;
do {
sd = rxd_frag_to_sd(rxq, &ri->iri_frags[i], &cltype, TRUE);
MPASS(sd->ifsd_cl != NULL);
MPASS(sd->ifsd_m != NULL);
+
+ /* Don't include zero-length frags */
+ if (ri->iri_frags[i].irf_len == 0) {
+ /* XXX we can save the cluster here, but not the mbuf */
+ m_init(sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
+ m_free(sd->ifsd_m);
+ sd->ifsd_m = NULL;
+ continue;
+ }
+
m = sd->ifsd_m;
- if (i == 0) {
+ if (mh == NULL) {
flags = M_PKTHDR|M_EXT;
mh = mt = m;
padlen = ri->iri_pad;
@@ -2019,14 +2040,12 @@ assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri)
*/
m->m_data += padlen;
ri->iri_len -= padlen;
- m->m_len = ri->iri_len;
+ m->m_len = ri->iri_frags[i].irf_len;
} while (++i < ri->iri_nfrags);
return (mh);
}
-
-
/*
* Process one software descriptor
*/
@@ -2037,13 +2056,14 @@ iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
iflib_rxsd_t sd;
/* should I merge this back in now that the two paths are basically duplicated? */
- if (ri->iri_len <= IFLIB_RX_COPY_THRESH) {
+ if (ri->iri_nfrags == 1 &&
+ ri->iri_frags[0].irf_len <= IFLIB_RX_COPY_THRESH) {
sd = rxd_frag_to_sd(rxq, &ri->iri_frags[0], NULL, FALSE);
m = sd->ifsd_m;
sd->ifsd_m = NULL;
m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
memcpy(m->m_data, sd->ifsd_cl, ri->iri_len);
- m->m_len = ri->iri_len;
+ m->m_len = ri->iri_frags[0].irf_len;
} else {
m = assemble_segments(rxq, ri);
}
@@ -2063,13 +2083,13 @@ iflib_rxeof(iflib_rxq_t rxq, int budget)
{
if_ctx_t ctx = rxq->ifr_ctx;
if_shared_ctx_t sctx = ctx->ifc_sctx;
+ if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
int avail, i;
uint16_t *cidxp;
struct if_rxd_info ri;
int err, budget_left, rx_bytes, rx_pkts;
iflib_fl_t fl;
struct ifnet *ifp;
- struct lro_entry *queued;
int lro_enabled;
/*
* XXX early demux data packets so that if_input processing only handles
@@ -2084,11 +2104,11 @@ iflib_rxeof(iflib_rxq_t rxq, int budget)
mh = mt = NULL;
MPASS(budget > 0);
rx_pkts = rx_bytes = 0;
- if (sctx->isc_flags & IFLIB_HAS_CQ)
+ if (sctx->isc_flags & IFLIB_HAS_RXCQ)
cidxp = &rxq->ifr_cq_cidx;
else
cidxp = &rxq->ifr_fl[0].ifl_cidx;
- if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp)) == 0) {
+ if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) {
for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++)
__iflib_fl_refill_lt(ctx, fl, budget + 8);
DBG_COUNTER_INC(rx_unavail);
@@ -2112,10 +2132,11 @@ iflib_rxeof(iflib_rxq_t rxq, int budget)
/* in lieu of handling correctly - make sure it isn't being unhandled */
MPASS(err == 0);
- if (sctx->isc_flags & IFLIB_HAS_CQ) {
- /* we know we consumed _one_ CQ entry */
- if (++rxq->ifr_cq_cidx == sctx->isc_nrxd) {
- rxq->ifr_cq_cidx = 0;
+ if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
+ *cidxp = ri.iri_cidx;
+ /* Update our consumer index */
+ while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) {
+ rxq->ifr_cq_cidx -= scctx->isc_nrxd[0];
rxq->ifr_cq_gen = 0;
}
/* was this only a completion queue message? */
@@ -2128,7 +2149,7 @@ iflib_rxeof(iflib_rxq_t rxq, int budget)
/* will advance the cidx on the corresponding free lists */
m = iflib_rxd_pkt_get(rxq, &ri);
if (avail == 0 && budget_left)
- avail = iflib_rxd_avail(ctx, rxq, *cidxp);
+ avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left);
if (__predict_false(m == NULL)) {
DBG_COUNTER_INC(rx_mbuf_null);
@@ -2148,7 +2169,6 @@ iflib_rxeof(iflib_rxq_t rxq, int budget)
ifp = ctx->ifc_ifp;
lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
-
while (mh != NULL) {
m = mh;
mh = mh->m_nextpkt;
@@ -2162,32 +2182,33 @@ iflib_rxeof(iflib_rxq_t rxq, int budget)
DBG_COUNTER_INC(rx_if_input);
ifp->if_input(ifp, m);
}
+
if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts);
/*
* Flush any outstanding LRO work
*/
- while ((queued = LIST_FIRST(&rxq->ifr_lc.lro_active)) != NULL) {
- LIST_REMOVE(queued, next);
#if defined(INET6) || defined(INET)
- tcp_lro_flush(&rxq->ifr_lc, queued);
+ tcp_lro_flush_all(&rxq->ifr_lc);
#endif
- }
- return (iflib_rxd_avail(ctx, rxq, *cidxp));
+ if (avail)
+ return true;
+ return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
}
#define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags)
#define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG)
-#define TXQ_MAX_DB_DEFERRED(ctx) (ctx->ifc_sctx->isc_ntxd >> 5)
-#define TXQ_MAX_DB_CONSUMED(ctx) (ctx->ifc_sctx->isc_ntxd >> 4)
+#define TXQ_MAX_DB_DEFERRED(size) (size >> 5)
+#define TXQ_MAX_DB_CONSUMED(size) (size >> 4)
static __inline void
iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring)
{
uint32_t dbval;
- if (ring || txq->ift_db_pending >= TXQ_MAX_DB_DEFERRED(ctx)) {
+ if (ring || txq->ift_db_pending >=
+ TXQ_MAX_DB_DEFERRED(txq->ift_size)) {
/* the lock will only ever be contended in the !min_latency case */
if (!TXDB_TRYLOCK(txq))
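With the thresholds now keyed to the per-queue ring size instead of the shared-context descriptor count, the deferral math follows the actual ring. A worked example with an assumed 1024-entry TX ring:

#include <stdio.h>

#define TXQ_MAX_DB_DEFERRED(size) ((size) >> 5)
#define TXQ_MAX_DB_CONSUMED(size) ((size) >> 4)

int
main(void)
{
        int size = 1024;        /* assumed ring size, not from any driver */

        /* prints "deferred=32 consumed=64": the doorbell write is held
         * back until 32 descriptors are pending, and one drain pass stops
         * after consuming 64 descriptors. */
        printf("deferred=%d consumed=%d\n",
            TXQ_MAX_DB_DEFERRED(size), TXQ_MAX_DB_CONSUMED(size));
        return (0);
}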
@@ -2233,9 +2254,9 @@ static int
iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
{
struct ether_vlan_header *eh;
- struct mbuf *m;
+ struct mbuf *m, *n;
- m = *mp;
+ n = m = *mp;
/*
* Determine where frame payload starts.
* Jump over vlan headers if already present,
@@ -2261,7 +2282,6 @@ iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
{
struct ip *ip = NULL;
struct tcphdr *th = NULL;
- struct mbuf *n;
int minthlen;
minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th));
@@ -2403,37 +2423,31 @@ collapse_pkthdr(struct mbuf *m0)
/*
* If dodgy hardware rejects the scatter gather chain we've handed it
- * we'll need to rebuild the mbuf chain before we can call m_defrag
+ * we'll need to remove the mbuf chain from ifsg_m[] before we can add the
+ * m_defrag'd mbufs
*/
static __noinline struct mbuf *
-iflib_rebuild_mbuf(iflib_txq_t txq)
+iflib_remove_mbuf(iflib_txq_t txq)
{
-
- int ntxd, mhlen, len, i, pidx;
+ int ntxd, i, pidx;
struct mbuf *m, *mh, **ifsd_m;
- if_shared_ctx_t sctx;
pidx = txq->ift_pidx;
ifsd_m = txq->ift_sds.ifsd_m;
- sctx = txq->ift_ctx->ifc_sctx;
- ntxd = sctx->isc_ntxd;
+ ntxd = txq->ift_size;
mh = m = ifsd_m[pidx];
ifsd_m[pidx] = NULL;
#if MEMORY_LOGGING
txq->ift_dequeued++;
#endif
- len = m->m_len;
- mhlen = m->m_pkthdr.len;
i = 1;
- while (len < mhlen && (m->m_next == NULL)) {
- m->m_next = ifsd_m[(pidx + i) & (ntxd-1)];
+ while (m) {
ifsd_m[(pidx + i) & (ntxd -1)] = NULL;
#if MEMORY_LOGGING
txq->ift_dequeued++;
#endif
m = m->m_next;
- len += m->m_len;
i++;
}
return (mh);
@@ -2446,6 +2460,7 @@ iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
{
if_ctx_t ctx;
if_shared_ctx_t sctx;
+ if_softc_ctx_t scctx;
int i, next, pidx, mask, err, maxsegsz, ntxd, count;
struct mbuf *m, *tmp, **ifsd_m, **mp;
@@ -2459,8 +2474,9 @@ iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
ctx = txq->ift_ctx;
sctx = ctx->ifc_sctx;
+ scctx = &ctx->ifc_softc_ctx;
ifsd_m = txq->ift_sds.ifsd_m;
- ntxd = sctx->isc_ntxd;
+ ntxd = txq->ift_size;
pidx = txq->ift_pidx;
if (map != NULL) {
uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;
@@ -2472,13 +2488,12 @@ iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
i = 0;
next = pidx;
- mask = (sctx->isc_ntxd-1);
+ mask = (txq->ift_size-1);
m = *m0;
do {
mp = &ifsd_m[next];
*mp = m;
m = m->m_next;
- (*mp)->m_next = NULL;
if (__predict_false((*mp)->m_len == 0)) {
m_free(*mp);
*mp = NULL;
@@ -2529,13 +2544,12 @@ iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
count++;
tmp = m;
m = m->m_next;
- tmp->m_next = NULL;
} while (m != NULL);
*nsegs = i;
}
return (0);
err:
- *m0 = iflib_rebuild_mbuf(txq);
+ *m0 = iflib_remove_mbuf(txq);
return (EFBIG);
}
@@ -2558,7 +2572,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
sctx = ctx->ifc_sctx;
scctx = &ctx->ifc_softc_ctx;
segs = txq->ift_segs;
- ntxd = sctx->isc_ntxd;
+ ntxd = txq->ift_size;
m_head = *m_headp;
map = NULL;
@@ -2645,14 +2659,14 @@ defrag:
if (map != NULL)
bus_dmamap_unload(desc_tag, map);
DBG_COUNTER_INC(encap_txq_avail_fail);
- if (txq->ift_task.gt_task.ta_pending == 0)
+ if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
GROUPTASK_ENQUEUE(&txq->ift_task);
return (ENOBUFS);
}
pi.ipi_segs = segs;
pi.ipi_nsegs = nsegs;
- MPASS(pidx >= 0 && pidx < sctx->isc_ntxd);
+ MPASS(pidx >= 0 && pidx < txq->ift_size);
#ifdef PKT_DEBUG
print_pkt(&pi);
#endif
@@ -2661,11 +2675,12 @@ defrag:
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
DBG_COUNTER_INC(tx_encap);
- MPASS(pi.ipi_new_pidx >= 0 && pi.ipi_new_pidx < sctx->isc_ntxd);
+ MPASS(pi.ipi_new_pidx >= 0 &&
+ pi.ipi_new_pidx < txq->ift_size);
ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
if (pi.ipi_new_pidx < pi.ipi_pidx) {
- ndesc += sctx->isc_ntxd;
+ ndesc += txq->ift_size;
txq->ift_gen = 1;
}
MPASS(pi.ipi_new_pidx != pidx);
@@ -2678,7 +2693,7 @@ defrag:
txq->ift_pidx = pi.ipi_new_pidx;
txq->ift_npending += pi.ipi_ndescs;
} else if (__predict_false(err == EFBIG && remap < 2)) {
- *m_headp = m_head = iflib_rebuild_mbuf(txq);
+ *m_headp = m_head = iflib_remove_mbuf(txq);
remap = 1;
txq->ift_txd_encap_efbig++;
goto defrag;
@@ -2700,7 +2715,7 @@ defrag_failed:
#define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets)
#define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets)
-#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NRXQSETS(ctx)) + FIRST_QSET(ctx))
+#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx))
#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments))
#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh)
#define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max)
@@ -2712,7 +2727,7 @@ defrag_failed:
*
* ORing with 2 assures that min occupancy is never less than 2 without any conditional logic
*/
-#define TXQ_MIN_OCCUPANCY(ctx) ((ctx->ifc_sctx->isc_ntxd >> 6)| 0x2)
+#define TXQ_MIN_OCCUPANCY(size) ((size >> 6)| 0x2)
static inline int
iflib_txq_min_occupancy(iflib_txq_t txq)
@@ -2720,7 +2735,9 @@ iflib_txq_min_occupancy(iflib_txq_t txq)
if_ctx_t ctx;
ctx = txq->ift_ctx;
- return (get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen) < TXQ_MIN_OCCUPANCY(ctx) + MAX_TX_DESC(ctx));
+ return (get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx,
+ txq->ift_gen) < TXQ_MIN_OCCUPANCY(txq->ift_size) +
+ MAX_TX_DESC(ctx));
}
static void
@@ -2734,7 +2751,7 @@ iflib_tx_desc_free(iflib_txq_t txq, int n)
cidx = txq->ift_cidx;
gen = txq->ift_gen;
- qsize = txq->ift_ctx->ifc_sctx->isc_ntxd;
+ qsize = txq->ift_size;
mask = qsize-1;
hasmap = txq->ift_sds.ifsd_map != NULL;
ifsd_flags = txq->ift_sds.ifsd_flags;
@@ -2760,7 +2777,7 @@ iflib_tx_desc_free(iflib_txq_t txq, int n)
/* XXX we don't support any drivers that batch packets yet */
MPASS(m->m_nextpkt == NULL);
- m_freem(m);
+ m_free(m);
ifsd_m[cidx] = NULL;
#if MEMORY_LOGGING
txq->ift_dequeued++;
@@ -2856,7 +2873,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
DBG_COUNTER_INC(txq_drain_flushing);
for (i = 0; i < avail; i++) {
- m_freem(r->items[(cidx + i) & (r->size-1)]);
+ m_free(r->items[(cidx + i) & (r->size-1)]);
r->items[(cidx + i) & (r->size-1)] = NULL;
}
return (avail);
@@ -2903,7 +2920,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
break;
- if (desc_used > TXQ_MAX_DB_CONSUMED(ctx))
+ if (desc_used > TXQ_MAX_DB_CONSUMED(txq->ift_size))
break;
}
@@ -2924,7 +2941,7 @@ iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
}
static void
-_task_fn_tx(void *context, int pending)
+_task_fn_tx(void *context)
{
iflib_txq_t txq = context;
if_ctx_t ctx = txq->ift_ctx;
@@ -2935,11 +2952,12 @@ _task_fn_tx(void *context, int pending)
}
static void
-_task_fn_rx(void *context, int pending)
+_task_fn_rx(void *context)
{
iflib_rxq_t rxq = context;
if_ctx_t ctx = rxq->ifr_ctx;
bool more;
+ int rc;
DBG_COUNTER_INC(task_fn_rxs);
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
@@ -2950,7 +2968,8 @@ _task_fn_rx(void *context, int pending)
IFDI_INTR_ENABLE(ctx);
else {
DBG_COUNTER_INC(rx_intr_enables);
- IFDI_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
+ rc = IFDI_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
+ KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
}
}
if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
@@ -2960,7 +2979,7 @@ _task_fn_rx(void *context, int pending)
}
static void
-_task_fn_admin(void *context, int pending)
+_task_fn_admin(void *context)
{
if_ctx_t ctx = context;
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
@@ -2990,7 +3009,7 @@ _task_fn_admin(void *context, int pending)
static void
-_task_fn_iov(void *context, int pending)
+_task_fn_iov(void *context)
{
if_ctx_t ctx = context;
@@ -3049,8 +3068,7 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
if_ctx_t ctx = if_getsoftc(ifp);
iflib_txq_t txq;
- struct mbuf *marr[8], **mp, *next;
- int err, i, count, qidx;
+ int err, qidx;
if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
DBG_COUNTER_INC(tx_frees);
@@ -3058,6 +3076,7 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
return (0);
}
+ MPASS(m->m_nextpkt == NULL);
qidx = 0;
if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
qidx = QIDX(ctx, m);
@@ -3077,6 +3096,7 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
return (ENOBUFS);
}
#endif
+#ifdef notyet
qidx = count = 0;
mp = marr;
next = m;
@@ -3098,22 +3118,21 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
next = next->m_nextpkt;
mp[i]->m_nextpkt = NULL;
}
+#endif
DBG_COUNTER_INC(tx_seen);
- err = ifmp_ring_enqueue(txq->ift_br[0], (void **)mp, count, TX_BATCH_SIZE);
+ err = ifmp_ring_enqueue(txq->ift_br[0], (void **)&m, 1, TX_BATCH_SIZE);
- if (iflib_txq_can_drain(txq->ift_br[0]))
- GROUPTASK_ENQUEUE(&txq->ift_task);
if (err) {
+ GROUPTASK_ENQUEUE(&txq->ift_task);
/* support forthcoming later */
#ifdef DRIVER_BACKPRESSURE
txq->ift_closed = TRUE;
#endif
- for (i = 0; i < count; i++)
- m_freem(mp[i]);
ifmp_ring_check_drainage(txq->ift_br[0], TX_BATCH_SIZE);
+ m_freem(m);
+ } else if (TXQ_AVAIL(txq) < (txq->ift_size >> 1)) {
+ GROUPTASK_ENQUEUE(&txq->ift_task);
}
- if (count > nitems(marr))
- free(mp, M_IFLIB);
return (err);
}
@@ -3138,13 +3157,13 @@ iflib_if_qflush(if_t ifp)
if_qflush(ifp);
}
-#define IFCAP_REINIT (IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_TSO6|IFCAP_VLAN_HWTAGGING|IFCAP_VLAN_MTU | \
- IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)
-#define IFCAP_FLAGS (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
+#define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | \
IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)
+#define IFCAP_REINIT IFCAP_FLAGS
+
static int
iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
{
@@ -3428,6 +3447,9 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
if_ctx_t ctx;
if_t ifp;
if_softc_ctx_t scctx;
+ int i;
+ uint16_t main_txq;
+ uint16_t main_rxq;
ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO);
@@ -3435,6 +3457,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
if (sc == NULL) {
sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
device_set_softc(dev, ctx);
+ ctx->ifc_flags |= IFC_SC_ALLOCATED;
}
ctx->ifc_sctx = sctx;
@@ -3447,28 +3470,112 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
return (err);
}
iflib_add_device_sysctl_pre(ctx);
+
+ scctx = &ctx->ifc_softc_ctx;
+ /*
+ * XXX sanity check that ntxd & nrxd are a power of 2
+ */
+ if (ctx->ifc_sysctl_ntxqs != 0)
+ scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
+ if (ctx->ifc_sysctl_nrxqs != 0)
+ scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
+
+ for (i = 0; i < sctx->isc_ntxqs; i++) {
+ if (ctx->ifc_sysctl_ntxds[i] != 0)
+ scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
+ else
+ scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
+ }
+
+ for (i = 0; i < sctx->isc_nrxqs; i++) {
+ if (ctx->ifc_sysctl_nrxds[i] != 0)
+ scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
+ else
+ scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
+ }
+
+ for (i = 0; i < sctx->isc_nrxqs; i++) {
+ if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
+ device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
+ i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
+ scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
+ }
+ if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
+ device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
+ i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
+ scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
+ }
+ }
+
+ for (i = 0; i < sctx->isc_ntxqs; i++) {
+ if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
+ device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
+ i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
+ scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
+ }
+ if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
+ device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
+ i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
+ scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
+ }
+ }
+
if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
return (err);
}
+ if (scctx->isc_ntxqsets_max)
+ scctx->isc_ntxqsets = min(scctx->isc_ntxqsets, scctx->isc_ntxqsets_max);
+ if (scctx->isc_nrxqsets_max)
+ scctx->isc_nrxqsets = min(scctx->isc_nrxqsets, scctx->isc_nrxqsets_max);
+
#ifdef ACPI_DMAR
if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
ctx->ifc_flags |= IFC_DMAR;
#endif
- scctx = &ctx->ifc_softc_ctx;
msix_bar = scctx->isc_msix_bar;
- if (scctx->isc_tx_nsegments > sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION)
- scctx->isc_tx_nsegments = max(1, sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION);
- if (scctx->isc_tx_tso_segments_max > sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION)
- scctx->isc_tx_tso_segments_max = max(1, sctx->isc_ntxd / MAX_SINGLE_PACKET_FRACTION);
-
ifp = ctx->ifc_ifp;
- /*
- * XXX sanity check that ntxd & nrxd are a power of 2
- */
+ if(sctx->isc_flags & IFLIB_HAS_TXCQ)
+ main_txq = 1;
+ else
+ main_txq = 0;
+
+ if(sctx->isc_flags & IFLIB_HAS_RXCQ)
+ main_rxq = 1;
+ else
+ main_rxq = 0;
+
+ /* XXX change for per-queue sizes */
+ device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
+ scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
+ for (i = 0; i < sctx->isc_nrxqs; i++) {
+ if (!powerof2(scctx->isc_nrxd[i])) {
+ /* round down instead? */
+ device_printf(dev, "# rx descriptors must be a power of 2\n");
+ err = EINVAL;
+ goto fail;
+ }
+ }
+ for (i = 0; i < sctx->isc_ntxqs; i++) {
+ if (!powerof2(scctx->isc_ntxd[i])) {
+ device_printf(dev,
+ "# tx descriptors must be a power of 2");
+ err = EINVAL;
+ goto fail;
+ }
+ }
+
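The new power-of-two checks exist because iflib wraps ring indices with a mask rather than a modulo (for example mask = (txq->ift_size-1) in iflib_busdma_load_mbuf_sg earlier in this diff). A minimal standalone illustration of why the mask only works for power-of-two sizes:

#include <assert.h>

int
main(void)
{
        int size = 1024;                /* power of two */
        int mask = size - 1;            /* 0x3ff: all low bits set */
        int idx = 1023;

        idx = (idx + 1) & mask;         /* wraps cleanly to 0 */
        assert(idx == 0);

        /* With e.g. size = 1000, (size - 1) is not an all-ones mask, so
         * "& mask" would skip slots instead of wrapping at the ring end. */
        return (0);
}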
+ if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
+ MAX_SINGLE_PACKET_FRACTION)
+ scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
+ MAX_SINGLE_PACKET_FRACTION);
+ if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
+ MAX_SINGLE_PACKET_FRACTION)
+ scctx->isc_tx_tso_segments_max = max(1,
+ scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
/*
* Protect the stack against modern hardware
@@ -3482,7 +3589,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max;
if (scctx->isc_rss_table_size == 0)
scctx->isc_rss_table_size = 64;
- scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;;
+ scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
/*
** Now setup MSI or MSI/X, should
** return us the number of supported
@@ -3520,7 +3627,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
MPASS(msix == 1);
rid = 1;
}
- if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx, &rid, "irq0")) != 0) {
+ if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
device_printf(dev, "iflib_legacy_setup failed %d\n", err);
goto fail_intr_free;
}
@@ -3536,6 +3643,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
}
*ctxp = ctx;
+ if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
iflib_add_device_sysctl_post(ctx);
return (0);
fail_detach:
@@ -3599,7 +3707,7 @@ iflib_device_deregister(if_ctx_t ctx)
led_destroy(ctx->ifc_led_dev);
/* XXX drain any dependent tasks */
tqg = qgroup_if_io_tqg;
- for (txq = ctx->ifc_txqs, i = 0, rxq = ctx->ifc_rxqs; i < NTXQSETS(ctx); i++, txq++) {
+ for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
callout_drain(&txq->ift_timer);
callout_drain(&txq->ift_db_check);
if (txq->ift_task.gt_uniq != NULL)
@@ -3616,6 +3724,7 @@ iflib_device_deregister(if_ctx_t ctx)
taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
IFDI_DETACH(ctx);
+ device_set_softc(ctx->ifc_dev, NULL);
if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
pci_release_msi(dev);
}
@@ -3633,6 +3742,9 @@ iflib_device_deregister(if_ctx_t ctx)
iflib_tx_structures_free(ctx);
iflib_rx_structures_free(ctx);
+ if (ctx->ifc_flags & IFC_SC_ALLOCATED)
+ free(ctx->ifc_softc, M_IFLIB);
+ free(ctx, M_IFLIB);
return (0);
}
@@ -3782,7 +3894,13 @@ _iflib_assert(if_shared_ctx_t sctx)
MPASS(sctx->isc_txrx->ift_rxd_pkt_get);
MPASS(sctx->isc_txrx->ift_rxd_refill);
MPASS(sctx->isc_txrx->ift_rxd_flush);
- MPASS(sctx->isc_nrxd);
+
+ MPASS(sctx->isc_nrxd_min[0]);
+ MPASS(sctx->isc_nrxd_max[0]);
+ MPASS(sctx->isc_nrxd_default[0]);
+ MPASS(sctx->isc_ntxd_min[0]);
+ MPASS(sctx->isc_ntxd_max[0]);
+ MPASS(sctx->isc_ntxd_default[0]);
}
static int
@@ -3796,7 +3914,6 @@ iflib_register(if_ctx_t ctx)
_iflib_assert(sctx);
CTX_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
- MPASS(ctx->ifc_flags == 0);
ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER);
if (ifp == NULL) {
@@ -3818,7 +3935,6 @@ iflib_register(if_ctx_t ctx)
if_setioctlfn(ifp, iflib_if_ioctl);
if_settransmitfn(ifp, iflib_if_transmit);
if_setqflushfn(ifp, iflib_if_qflush);
- if_setgetcounterfn(ifp, iflib_if_get_counter);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
if_setcapabilities(ifp, 0);
@@ -3842,16 +3958,17 @@ static int
iflib_queues_alloc(if_ctx_t ctx)
{
if_shared_ctx_t sctx = ctx->ifc_sctx;
+ if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
device_t dev = ctx->ifc_dev;
- int nrxqsets = ctx->ifc_softc_ctx.isc_nrxqsets;
- int ntxqsets = ctx->ifc_softc_ctx.isc_ntxqsets;
+ int nrxqsets = scctx->isc_nrxqsets;
+ int ntxqsets = scctx->isc_ntxqsets;
iflib_txq_t txq;
iflib_rxq_t rxq;
iflib_fl_t fl = NULL;
- int i, j, cpu, err, txconf, rxconf, fl_ifdi_offset;
+ int i, j, cpu, err, txconf, rxconf;
iflib_dma_info_t ifdip;
- uint32_t *rxqsizes = sctx->isc_rxqsizes;
- uint32_t *txqsizes = sctx->isc_txqsizes;
+ uint32_t *rxqsizes = scctx->isc_rxqsizes;
+ uint32_t *txqsizes = scctx->isc_txqsizes;
uint8_t nrxqs = sctx->isc_nrxqs;
uint8_t ntxqs = sctx->isc_ntxqs;
int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
@@ -3860,10 +3977,11 @@ iflib_queues_alloc(if_ctx_t ctx)
struct ifmp_ring **brscp;
int nbuf_rings = 1; /* XXX determine dynamically */
- KASSERT(ntxqs > 0, ("number of queues must be at least 1"));
- KASSERT(nrxqs > 0, ("number of queues must be at least 1"));
+ KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
+ KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));
brscp = NULL;
+ txq = NULL;
rxq = NULL;
/* Allocate the TX ring struct memory */
@@ -3891,8 +4009,6 @@ iflib_queues_alloc(if_ctx_t ctx)
ctx->ifc_txqs = txq;
ctx->ifc_rxqs = rxq;
- txq = NULL;
- rxq = NULL;
/*
* XXX handle allocation failure
@@ -3916,6 +4032,11 @@ iflib_queues_alloc(if_ctx_t ctx)
}
txq->ift_ctx = ctx;
txq->ift_id = i;
+ if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
+ txq->ift_br_offset = 1;
+ } else {
+ txq->ift_br_offset = 0;
+ }
/* XXX fix this */
txq->ift_timer.c_cpu = cpu;
txq->ift_db_check.c_cpu = cpu;
@@ -3970,10 +4091,10 @@ iflib_queues_alloc(if_ctx_t ctx)
}
rxq->ifr_ctx = ctx;
rxq->ifr_id = i;
- if (sctx->isc_flags & IFLIB_HAS_CQ) {
- fl_ifdi_offset = 1;
+ if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
+ rxq->ifr_fl_offset = 1;
} else {
- fl_ifdi_offset = 0;
+ rxq->ifr_fl_offset = 0;
}
rxq->ifr_nfl = nfree_lists;
if (!(fl =
@@ -3986,7 +4107,8 @@ iflib_queues_alloc(if_ctx_t ctx)
for (j = 0; j < nfree_lists; j++) {
rxq->ifr_fl[j].ifl_rxq = rxq;
rxq->ifr_fl[j].ifl_id = j;
- rxq->ifr_fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + fl_ifdi_offset];
+ rxq->ifr_fl[j].ifl_ifdi =
+ &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
}
/* Allocate receive buffers for the ring*/
if (iflib_rxsd_alloc(rxq)) {
@@ -4106,12 +4228,13 @@ iflib_rx_structures_setup(if_ctx_t ctx)
for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
#if defined(INET6) || defined(INET)
tcp_lro_free(&rxq->ifr_lc);
- if ((err = tcp_lro_init(&rxq->ifr_lc)) != 0) {
+ if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
+ TCP_LRO_ENTRIES, min(1024,
+ ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
goto fail;
}
rxq->ifr_lro_enabled = TRUE;
- rxq->ifr_lc.ifp = ctx->ifc_ifp;
#endif
IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
}
@@ -4142,7 +4265,7 @@ iflib_rx_structures_free(if_ctx_t ctx)
{
iflib_rxq_t rxq = ctx->ifc_rxqs;
- for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, rxq++) {
+ for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
iflib_rx_sds_free(rxq);
}
}
@@ -4193,7 +4316,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
struct taskqgroup *tqg;
iflib_filter_info_t info;
cpuset_t cpus;
- task_fn_t *fn;
+ gtask_fn_t *fn;
int tqrid, err;
void *q;
@@ -4254,7 +4377,7 @@ iflib_softirq_alloc_generic(if_ctx_t ctx, int rid, iflib_intr_type_t type, void
{
struct grouptask *gtask;
struct taskqgroup *tqg;
- task_fn_t *fn;
+ gtask_fn_t *fn;
void *q;
switch (type) {
@@ -4310,7 +4433,7 @@ iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *
iflib_filter_info_t info;
struct grouptask *gtask;
struct taskqgroup *tqg;
- task_fn_t *fn;
+ gtask_fn_t *fn;
int tqrid;
void *q;
int err;
@@ -4385,7 +4508,7 @@ iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
}
void
-iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, task_fn_t *fn,
+iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, gtask_fn_t *fn,
char *name)
{
@@ -4394,14 +4517,21 @@ iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, task_fn_t *fn,
}
void
-iflib_link_state_change(if_ctx_t ctx, int link_state)
+iflib_config_gtask_deinit(struct grouptask *gtask)
+{
+
+ taskqgroup_detach(qgroup_if_config_tqg, gtask);
+}
+
+void
+iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
{
if_t ifp = ctx->ifc_ifp;
iflib_txq_t txq = ctx->ifc_txqs;
-#if 0
+
if_setbaudrate(ifp, baudrate);
-#endif
+
/* If link down, disable watchdog */
if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) {
for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
@@ -4431,10 +4561,11 @@ iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
}
static int
-iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx)
+iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx, int budget)
{
- return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx));
+ return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
+ budget));
}
void
@@ -4468,8 +4599,9 @@ iflib_msix_init(if_ctx_t ctx)
int iflib_num_tx_queues, iflib_num_rx_queues;
int err, admincnt, bar;
- iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
- iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;
+ iflib_num_tx_queues = scctx->isc_ntxqsets;
+ iflib_num_rx_queues = scctx->isc_nrxqsets;
+
bar = ctx->ifc_softc_ctx.isc_msix_bar;
admincnt = sctx->isc_admin_intrcnt;
/* Override by tuneable */
@@ -4549,18 +4681,31 @@ iflib_msix_init(if_ctx_t ctx)
if (queues > rss_getnumbuckets())
queues = rss_getnumbuckets();
#endif
- if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queues)
- queues = rx_queues = iflib_num_rx_queues;
+ if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
+ rx_queues = iflib_num_rx_queues;
else
rx_queues = queues;
+ /*
+ * We want this to be all logical CPUs by default
+ */
if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
tx_queues = iflib_num_tx_queues;
else
- tx_queues = queues;
+ tx_queues = mp_ncpus;
+
+ if (ctx->ifc_sysctl_qs_eq_override == 0) {
+#ifdef INVARIANTS
+ if (tx_queues != rx_queues)
+ device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
+ min(rx_queues, tx_queues), min(rx_queues, tx_queues));
+#endif
+ tx_queues = min(rx_queues, tx_queues);
+ rx_queues = min(rx_queues, tx_queues);
+ }
device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues);
- vectors = queues + admincnt;
+ vectors = rx_queues + admincnt;
if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
device_printf(dev,
"Using MSIX interrupts with %d vectors\n", vectors);
@@ -4568,6 +4713,7 @@ iflib_msix_init(if_ctx_t ctx)
scctx->isc_nrxqsets = rx_queues;
scctx->isc_ntxqsets = tx_queues;
scctx->isc_intr = IFLIB_INTR_MSIX;
+
return (vectors);
} else {
device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err);
@@ -4617,7 +4763,58 @@ mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
return(rc);
}
+enum iflib_ndesc_handler {
+ IFLIB_NTXD_HANDLER,
+ IFLIB_NRXD_HANDLER,
+};
+static int
+mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
+{
+ if_ctx_t ctx = (void *)arg1;
+ enum iflib_ndesc_handler type = arg2;
+ char buf[256] = {0};
+ uint16_t *ndesc;
+ char *p, *next;
+ int nqs, rc, i;
+
+ MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);
+
+ nqs = 8;
+ switch(type) {
+ case IFLIB_NTXD_HANDLER:
+ ndesc = ctx->ifc_sysctl_ntxds;
+ if (ctx->ifc_sctx)
+ nqs = ctx->ifc_sctx->isc_ntxqs;
+ break;
+ case IFLIB_NRXD_HANDLER:
+ ndesc = ctx->ifc_sysctl_nrxds;
+ if (ctx->ifc_sctx)
+ nqs = ctx->ifc_sctx->isc_nrxqs;
+ break;
+ }
+ if (nqs == 0)
+ nqs = 8;
+
+ for (i=0; i<8; i++) {
+ if (i >= nqs)
+ break;
+ if (i)
+ strcat(buf, ",");
+ sprintf(strchr(buf, 0), "%d", ndesc[i]);
+ }
+
+ rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (rc || req->newptr == NULL)
+ return rc;
+
+ for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
+ i++, p = strsep(&next, " ,")) {
+ ndesc[i] = strtoul(p, NULL, 10);
+ }
+
+ return(rc);
+}
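The handler accepts a comma- or space-separated list of per-ring counts and echoes the current values back in the same format. A minimal userland sketch of the same parse, with an illustrative input string (strsep(3) is declared in <string.h> on FreeBSD):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
        char buf[] = "1024,2048,512"; /* assumed tunable string */
        unsigned long ndesc[8] = {0};
        char *next = buf, *p;
        int i;

        for (i = 0, p = strsep(&next, " ,"); i < 8 && p != NULL;
            i++, p = strsep(&next, " ,"))
                ndesc[i] = strtoul(p, NULL, 10);
        for (int j = 0; j < i; j++)
                printf("ring %d: %lu descriptors\n", j, ndesc[j]);
        return (0);
}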
#define NAME_BUFLEN 32
static void
@@ -4634,19 +4831,29 @@ iflib_add_device_sysctl_pre(if_ctx_t ctx)
CTLFLAG_RD, NULL, "IFLIB fields");
oid_list = SYSCTL_CHILDREN(node);
+ SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
+ CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
+ "driver version");
+
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
"# of txqs to use, 0 => use default #");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
- CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
- "# of txqs to use, 0 => use default #");
- SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxds",
- CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxds, 0,
- "# of tx descriptors to use, 0 => use default #");
- SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxds",
- CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxds, 0,
- "# of rx descriptors to use, 0 => use default #");
-
+ CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
+ "# of rxqs to use, 0 => use default #");
+ SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
+ CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
+ "permit #txq != #rxq");
+
+ /* XXX change for per-queue sizes */
+ SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
+ CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
+ mp_ndesc_handler, "A",
+ "list of # of tx descriptors to use, 0 = use default #");
+ SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
+ CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
+ mp_ndesc_handler, "A",
+ "list of # of rx descriptors to use, 0 = use default #");
}
static void
@@ -4700,7 +4907,7 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
&txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD,
- &txq->ift_mbuf_defrag_failed, "# of times no descriptors were available");
+ &txq->ift_no_desc_avail, "# of times no descriptors were available");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
CTLFLAG_RD,
&txq->ift_map_failed, "# of times dma map failed");
@@ -4763,7 +4970,7 @@ iflib_add_device_sysctl_post(if_ctx_t ctx)
queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
- if (sctx->isc_flags & IFLIB_HAS_CQ) {
+ if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
CTLFLAG_RD,
&rxq->ifr_cq_pidx, 1, "Producer Index");