summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2016-10-31 22:45:11 +0000
committerjhb <jhb@FreeBSD.org>2016-10-31 22:45:11 +0000
commitf918b0afff547f9e3ed4b9998d038bb71e1839bb (patch)
tree13186f31587235fd840fd3a82b87f84058d89075
parent9f53aaf8709508e8f0c33d83b792013332225f14 (diff)
downloadFreeBSD-src-f918b0afff547f9e3ed4b9998d038bb71e1839bb.zip
FreeBSD-src-f918b0afff547f9e3ed4b9998d038bb71e1839bb.tar.gz
MFC 291665,291685,291856,297467,302110,302263: Add support for VIs.
291665: Add support for configuring additional virtual interfaces (VIs) on a port. Each virtual interface has its own MAC address, queues, and statistics. The dedicated netmap interfaces (ncxgbeX / ncxlX) were already implemented as additional VIs on each port. This change allows additional non-netmap interfaces to be configured on each port. Additional virtual interfaces use the naming scheme vcxgbeX or vcxlX. Additional VIs are enabled by setting the hw.cxgbe.num_vis tunable to a value greater than 1 before loading the cxgbe(4) or cxl(4) driver. NB: The first VI on each port is the "main" interface (cxgbeX or cxlX). T4/T5 NICs provide a limited number of MAC addresses for each physical port. As a result, a maximum of six VIs can be configured on each port (including the "main" interface and the netmap interface when netmap is enabled). One user-visible result is that when netmap is enabled, packets received or transmitted via the netmap interface are no longer counted in the stats for the "main" interface, but are not accounted to the netmap interface. The netmap interfaces now also have a new-bus device and export various information sysctl nodes via dev.n(cxgbe|cxl).X. The cxgbetool 'clearstats' command clears the stats for all VIs on the specified port along with the port's stats. There is currently no way to clear the stats of an individual VI. 291685: Fix build for !TCP_OFFLOAD case. 291856: Fix RSS build. 297467: Remove #ifdef's from various structures used in the cxgbe/cxl driver. This provides a constant ABI and layout for these structures (especially struct adapter) avoiding some foot shooting. 302110: cxgbe(4): Merge netmap support from the ncxgbe/ncxl interfaces to the vcxgbe/vcxl interfaces and retire the 'n' interfaces. The main cxgbe/cxl interfaces and tunables related to them are not affected by any of this and will continue to operate as usual. 
The driver used to create an additional 'n' interface for every cxgbe/cxl interface if "device netmap" was in the kernel. The 'n' interface shared the wire with the main interface but was otherwise autonomous (with its own MAC address, etc.). It did not have normal tx/rx but had a specialized netmap-only data path. r291665 added another set of virtual interfaces (the 'v' interfaces) to the driver. These had normal tx/rx but no netmap support. This revision consolidates the features of both the interfaces into the 'v' interface which now has a normal data path, TOE support, and native netmap support. The 'v' interfaces need to be created explicitly with the hw.cxgbe.num_vis tunable. This means "device netmap" will not result in the automatic creation of any virtual interfaces. The following tunables can be used to override the default number of queues allocated for each 'v' interface. nofld* = 0 will disable TOE on the virtual interface and nnm* = 0 will disable native netmap support. # number of normal NIC queues hw.cxgbe.ntxq_vi hw.cxgbe.nrxq_vi # number of TOE queues hw.cxgbe.nofldtxq_vi hw.cxgbe.nofldrxq_vi # number of netmap queues hw.cxgbe.nnmtxq_vi hw.cxgbe.nnmrxq_vi hw.cxgbe.nnm{t,r}xq{10,1}g tunables have been removed. --- tl;dr version --- The workflow for netmap on cxgbe starting with FreeBSD 11 is: 1) "device netmap" in the kernel config. 2) "hw.cxgbe.num_vis=2" in loader.conf. num_vis > 2 is ok too, you'll end up with multiple autonomous netmap-capable interfaces for every port. 3) "dmesg | grep vcxl | grep netmap" to verify that the interface has netmap queues. 4) Use any of the 'v' interfaces for netmap. pkt-gen -i vcxl<n>... . One major improvement is that the netmap interface has a normal data path as expected. 5) Just ignore the cxl interfaces if you want to use netmap only. No need to bring them up. The vcxl interfaces are completely independent and everything should just work. 
--------------------- 302263: cxgbe(4): Do not bring up an interface when IFCAP_TOE is enabled on it. The interface's queues are functional after VI_INIT_DONE (which is short of interface-up) and that's all that's needed for t4_tom to communicate with the chip. Relnotes: yes Sponsored by: Chelsio Communications
-rw-r--r--share/man/man4/cxgbe.412
-rw-r--r--sys/dev/cxgbe/adapter.h184
-rw-r--r--sys/dev/cxgbe/common/t4_hw.c10
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/provider.c9
-rw-r--r--sys/dev/cxgbe/offload.h2
-rw-r--r--sys/dev/cxgbe/t4_main.c1474
-rw-r--r--sys/dev/cxgbe/t4_netmap.c401
-rw-r--r--sys/dev/cxgbe/t4_sge.c545
-rw-r--r--sys/dev/cxgbe/tom/t4_connect.c24
-rw-r--r--sys/dev/cxgbe/tom/t4_cpl_io.c7
-rw-r--r--sys/dev/cxgbe/tom/t4_listen.c91
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.c49
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.h8
13 files changed, 1543 insertions, 1273 deletions
diff --git a/share/man/man4/cxgbe.4 b/share/man/man4/cxgbe.4
index 148bb1d..247e57a 100644
--- a/share/man/man4/cxgbe.4
+++ b/share/man/man4/cxgbe.4
@@ -31,7 +31,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd March 20, 2014
+.Dd December 2, 2015
.Dt CXGBE 4
.Os
.Sh NAME
@@ -171,6 +171,16 @@ number of CPU cores in the system, whichever is less.
.It Va hw.cxgbe.nofldrxq1g
The number of TOE rx queues to use for a 1Gb port.
The default is 1.
+.It Va hw.cxgbe.num_vis
+The number of virtual interfaces (VIs) created for each port.
+Each virtual interface creates a separate network interface.
+The first virtual interface on each port is required and represents
+the primary network interface on the port.
+Additional virtual interfaces on a port are named vcxgbe (T4) or
+vcxl (T5) and only use a single rx and tx queue.
+Additional virtual interfaces use a single pair of queues
+for rx and tx as well an additional pair of queues for TOE rx and tx.
+The default is 1.
.It Va hw.cxgbe.holdoff_timer_idx_10G
.It Va hw.cxgbe.holdoff_timer_idx_1G
The timer index value to use to delay interrupts.
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index ceba9e0..60c4ddb 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -192,49 +192,46 @@ enum {
CXGBE_BUSY = (1 << 9),
/* port flags */
- DOOMED = (1 << 0),
- PORT_INIT_DONE = (1 << 1),
- PORT_SYSCTL_CTX = (1 << 2),
HAS_TRACEQ = (1 << 3),
+
+ /* VI flags */
+ DOOMED = (1 << 0),
+ VI_INIT_DONE = (1 << 1),
+ VI_SYSCTL_CTX = (1 << 2),
INTR_RXQ = (1 << 4), /* All NIC rxq's take interrupts */
INTR_OFLD_RXQ = (1 << 5), /* All TOE rxq's take interrupts */
- INTR_NM_RXQ = (1 << 6), /* All netmap rxq's take interrupts */
- INTR_ALL = (INTR_RXQ | INTR_OFLD_RXQ | INTR_NM_RXQ),
+ INTR_ALL = (INTR_RXQ | INTR_OFLD_RXQ),
/* adapter debug_flags */
DF_DUMP_MBOX = (1 << 0),
};
-#define IS_DOOMED(pi) ((pi)->flags & DOOMED)
-#define SET_DOOMED(pi) do {(pi)->flags |= DOOMED;} while (0)
+#define IS_DOOMED(vi) ((vi)->flags & DOOMED)
+#define SET_DOOMED(vi) do {(vi)->flags |= DOOMED;} while (0)
#define IS_BUSY(sc) ((sc)->flags & CXGBE_BUSY)
#define SET_BUSY(sc) do {(sc)->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc) do {(sc)->flags &= ~CXGBE_BUSY;} while (0)
-struct port_info {
+struct vi_info {
device_t dev;
- struct adapter *adapter;
+ struct port_info *pi;
struct ifnet *ifp;
struct ifmedia media;
- struct mtx pi_lock;
- char lockname[16];
unsigned long flags;
int if_flags;
- uint16_t *rss;
+ uint16_t *rss, *nm_rss;
uint16_t viid;
int16_t xact_addr_filt;/* index of exact MAC address filter */
uint16_t rss_size; /* size of VI's RSS table slice */
uint16_t rss_base; /* start of VI's RSS table slice */
- uint8_t lport; /* associated offload logical port */
- int8_t mdio_addr;
- uint8_t port_type;
- uint8_t mod_type;
- uint8_t port_id;
- uint8_t tx_chan;
- uint8_t rx_chan_map; /* rx MPS channel bitmap */
+
+ eventhandler_tag vlan_c;
+
+ int nintr;
+ int first_intr;
/* These need to be int as they are used in sysctl */
int ntxq; /* # of tx queues */
@@ -242,30 +239,49 @@ struct port_info {
int rsrv_noflowq; /* Reserve queue 0 for non-flowid packets */
int nrxq; /* # of rx queues */
int first_rxq; /* index of first rx queue */
-#ifdef TCP_OFFLOAD
int nofldtxq; /* # of offload tx queues */
int first_ofld_txq; /* index of first offload tx queue */
int nofldrxq; /* # of offload rx queues */
int first_ofld_rxq; /* index of first offload rx queue */
-#endif
-#ifdef DEV_NETMAP
- int nnmtxq; /* # of netmap tx queues */
- int first_nm_txq; /* index of first netmap tx queue */
- int nnmrxq; /* # of netmap rx queues */
- int first_nm_rxq; /* index of first netmap rx queue */
-
- struct ifnet *nm_ifp;
- struct ifmedia nm_media;
- int nmif_flags;
- uint16_t nm_viid;
- int16_t nm_xact_addr_filt;
- uint16_t nm_rss_size; /* size of netmap VI's RSS table slice */
-#endif
+ int nnmtxq;
+ int first_nm_txq;
+ int nnmrxq;
+ int first_nm_rxq;
int tmr_idx;
int pktc_idx;
int qsize_rxq;
int qsize_txq;
+ struct timeval last_refreshed;
+ struct fw_vi_stats_vf stats;
+
+ struct callout tick;
+ struct sysctl_ctx_list ctx; /* from ifconfig up to driver detach */
+
+ uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
+};
+
+struct port_info {
+ device_t dev;
+ struct adapter *adapter;
+
+ struct vi_info *vi;
+ int nvi;
+ int up_vis;
+ int uld_vis;
+
+ struct mtx pi_lock;
+ char lockname[16];
+ unsigned long flags;
+
+ uint8_t lport; /* associated offload logical port */
+ int8_t mdio_addr;
+ uint8_t port_type;
+ uint8_t mod_type;
+ uint8_t port_id;
+ uint8_t tx_chan;
+ uint8_t rx_chan_map; /* rx MPS channel bitmap */
+
int linkdnrc;
struct link_config link_cfg;
@@ -273,14 +289,11 @@ struct port_info {
struct port_stats stats;
u_int tx_parse_error;
- eventhandler_tag vlan_c;
-
struct callout tick;
- struct sysctl_ctx_list ctx; /* from ifconfig up to driver detach */
-
- uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
};
+#define IS_MAIN_VI(vi) ((vi) == &((vi)->pi->vi[0]))
+
/* Where the cluster came from, how it has been carved up. */
struct cluster_layout {
int8_t zidx;
@@ -292,9 +305,7 @@ struct cluster_layout {
struct cluster_metadata {
u_int refcount;
-#ifdef INVARIANTS
struct fl_sdesc *sd; /* For debug only. Could easily be stale */
-#endif
};
struct fl_sdesc {
@@ -333,6 +344,11 @@ enum {
IQS_DISABLED = 0,
IQS_BUSY = 1,
IQS_IDLE = 2,
+
+ /* netmap related flags */
+ NM_OFF = 0,
+ NM_ON = 1,
+ NM_BUSY = 2,
};
/*
@@ -529,7 +545,6 @@ iq_to_rxq(struct sge_iq *iq)
}
-#ifdef TCP_OFFLOAD
/* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_ofld_rxq {
struct sge_iq iq; /* MUST be first */
@@ -542,7 +557,6 @@ iq_to_ofld_rxq(struct sge_iq *iq)
return (__containerof(iq, struct sge_ofld_rxq, iq));
}
-#endif
struct wrqe {
STAILQ_ENTRY(wrqe) link;
@@ -594,9 +608,8 @@ struct sge_wrq {
} __aligned(CACHE_LINE_SIZE);
-#ifdef DEV_NETMAP
struct sge_nm_rxq {
- struct port_info *pi;
+ struct vi_info *vi;
struct iq_desc *iq_desc;
uint16_t iq_abs_id;
@@ -649,7 +662,6 @@ struct sge_nm_txq {
bus_addr_t ba;
int iqidx;
} __aligned(CACHE_LINE_SIZE);
-#endif
struct sge {
int timer_val[SGE_NTIMERS];
@@ -661,14 +673,10 @@ struct sge {
int nrxq; /* total # of Ethernet rx queues */
int ntxq; /* total # of Ethernet tx tx queues */
-#ifdef TCP_OFFLOAD
int nofldrxq; /* total # of TOE rx queues */
int nofldtxq; /* total # of TOE tx queues */
-#endif
-#ifdef DEV_NETMAP
int nnmrxq; /* total # of netmap rx queues */
int nnmtxq; /* total # of netmap tx queues */
-#endif
int niq; /* total # of ingress queues */
int neq; /* total # of egress queues */
@@ -677,14 +685,10 @@ struct sge {
struct sge_wrq *ctrlq; /* Control queues */
struct sge_txq *txq; /* NIC tx queues */
struct sge_rxq *rxq; /* NIC rx queues */
-#ifdef TCP_OFFLOAD
struct sge_wrq *ofld_txq; /* TOE tx queues */
struct sge_ofld_rxq *ofld_rxq; /* TOE rx queues */
-#endif
-#ifdef DEV_NETMAP
struct sge_nm_txq *nm_txq; /* netmap tx queues */
struct sge_nm_rxq *nm_rxq; /* netmap rx queues */
-#endif
uint16_t iq_start;
int eq_start;
@@ -731,8 +735,11 @@ struct adapter {
struct irq {
struct resource *res;
int rid;
+ volatile int nm_state; /* NM_OFF, NM_ON, or NM_BUSY */
void *tag;
- } *irq;
+ struct sge_rxq *rxq;
+ struct sge_nm_rxq *nm_rxq;
+ } __aligned(CACHE_LINE_SIZE) *irq;
bus_dma_tag_t dmat; /* Parent DMA tag */
@@ -743,21 +750,16 @@ struct adapter {
struct port_info *port[MAX_NPORTS];
uint8_t chan_map[NCHAN];
-#ifdef TCP_OFFLOAD
void *tom_softc; /* (struct tom_data *) */
struct tom_tunables tt;
void *iwarp_softc; /* (struct c4iw_dev *) */
void *iscsi_softc;
-#endif
struct l2t_data *l2t; /* L2 table */
struct tid_info tids;
uint16_t doorbells;
- int open_device_map;
-#ifdef TCP_OFFLOAD
int offload_map; /* ports with IFCAP_TOE enabled */
int active_ulds; /* ULDs activated on this adapter */
-#endif
int flags;
int debug_flags;
@@ -798,11 +800,9 @@ struct adapter {
fw_msg_handler_t fw_msg_handler[5]; /* NUM_FW6_TYPES */
cpl_handler_t cpl_handler[0xef]; /* NUM_CPL_CMDS */
-#ifdef INVARIANTS
const char *last_op;
const void *last_op_thr;
int last_op_flags;
-#endif
int sc_do_rxcopy;
};
@@ -863,24 +863,27 @@ struct adapter {
} \
} while (0)
-#define for_each_txq(pi, iter, q) \
- for (q = &pi->adapter->sge.txq[pi->first_txq], iter = 0; \
- iter < pi->ntxq; ++iter, ++q)
-#define for_each_rxq(pi, iter, q) \
- for (q = &pi->adapter->sge.rxq[pi->first_rxq], iter = 0; \
- iter < pi->nrxq; ++iter, ++q)
-#define for_each_ofld_txq(pi, iter, q) \
- for (q = &pi->adapter->sge.ofld_txq[pi->first_ofld_txq], iter = 0; \
- iter < pi->nofldtxq; ++iter, ++q)
-#define for_each_ofld_rxq(pi, iter, q) \
- for (q = &pi->adapter->sge.ofld_rxq[pi->first_ofld_rxq], iter = 0; \
- iter < pi->nofldrxq; ++iter, ++q)
-#define for_each_nm_txq(pi, iter, q) \
- for (q = &pi->adapter->sge.nm_txq[pi->first_nm_txq], iter = 0; \
- iter < pi->nnmtxq; ++iter, ++q)
-#define for_each_nm_rxq(pi, iter, q) \
- for (q = &pi->adapter->sge.nm_rxq[pi->first_nm_rxq], iter = 0; \
- iter < pi->nnmrxq; ++iter, ++q)
+#define for_each_txq(vi, iter, q) \
+ for (q = &vi->pi->adapter->sge.txq[vi->first_txq], iter = 0; \
+ iter < vi->ntxq; ++iter, ++q)
+#define for_each_rxq(vi, iter, q) \
+ for (q = &vi->pi->adapter->sge.rxq[vi->first_rxq], iter = 0; \
+ iter < vi->nrxq; ++iter, ++q)
+#define for_each_ofld_txq(vi, iter, q) \
+ for (q = &vi->pi->adapter->sge.ofld_txq[vi->first_ofld_txq], iter = 0; \
+ iter < vi->nofldtxq; ++iter, ++q)
+#define for_each_ofld_rxq(vi, iter, q) \
+ for (q = &vi->pi->adapter->sge.ofld_rxq[vi->first_ofld_rxq], iter = 0; \
+ iter < vi->nofldrxq; ++iter, ++q)
+#define for_each_nm_txq(vi, iter, q) \
+ for (q = &vi->pi->adapter->sge.nm_txq[vi->first_nm_txq], iter = 0; \
+ iter < vi->nnmtxq; ++iter, ++q)
+#define for_each_nm_rxq(vi, iter, q) \
+ for (q = &vi->pi->adapter->sge.nm_rxq[vi->first_nm_rxq], iter = 0; \
+ iter < vi->nnmrxq; ++iter, ++q)
+#define for_each_vi(_pi, _iter, _vi) \
+ for ((_vi) = (_pi)->vi, (_iter) = 0; (_iter) < (_pi)->nvi; \
+ ++(_iter), ++(_vi))
#define IDXINCR(idx, incr, wrap) do { \
idx = wrap - idx > incr ? idx + incr : incr - (wrap - idx); \
@@ -972,7 +975,7 @@ static inline void
t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
{
- bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);
+ bcopy(hw_addr, sc->port[idx]->vi[0].hw_addr, ETHER_ADDR_LEN);
}
static inline bool
@@ -1008,18 +1011,22 @@ int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
int t4_register_an_handler(struct adapter *, an_handler_t);
int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
-int begin_synchronized_op(struct adapter *, struct port_info *, int, char *);
+int begin_synchronized_op(struct adapter *, struct vi_info *, int, char *);
+void doom_vi(struct adapter *, struct vi_info *);
void end_synchronized_op(struct adapter *, int);
int update_mac_settings(struct ifnet *, int);
int adapter_full_init(struct adapter *);
int adapter_full_uninit(struct adapter *);
-int port_full_init(struct port_info *);
-int port_full_uninit(struct port_info *);
+uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
+int vi_full_init(struct vi_info *);
+int vi_full_uninit(struct vi_info *);
+void vi_sysctls(struct vi_info *);
+void vi_tick(void *);
#ifdef DEV_NETMAP
/* t4_netmap.c */
-int create_netmap_ifnet(struct port_info *);
-int destroy_netmap_ifnet(struct port_info *);
+void cxgbe_nm_attach(struct vi_info *);
+void cxgbe_nm_detach(struct vi_info *);
void t4_nm_intr(void *);
#endif
@@ -1036,10 +1043,11 @@ void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *,
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
-int t4_setup_port_queues(struct port_info *);
-int t4_teardown_port_queues(struct port_info *);
+int t4_setup_vi_queues(struct vi_info *);
+int t4_teardown_vi_queues(struct vi_info *);
void t4_intr_all(void *);
void t4_intr(void *);
+void t4_vi_intr(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index cac1c9c..31e8668 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -5720,11 +5720,11 @@ int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
if (ret < 0)
return ret;
- p->viid = ret;
+ p->vi[0].viid = ret;
p->tx_chan = j;
p->rx_chan_map = get_mps_bg_map(adap, j);
p->lport = j;
- p->rss_size = rss_size;
+ p->vi[0].rss_size = rss_size;
t4_os_set_hw_addr(adap, p->port_id, addr);
ret = ntohl(c.u.info.lstatus_to_modtype);
@@ -5737,13 +5737,13 @@ int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
- V_FW_PARAMS_PARAM_YZ(p->viid);
+ V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
if (ret)
- p->rss_base = 0xffff;
+ p->vi[0].rss_base = 0xffff;
else {
/* MPASS((val >> 16) == rss_size); */
- p->rss_base = val & 0xffff;
+ p->vi[0].rss_base = val & 0xffff;
}
return 0;
diff --git a/sys/dev/cxgbe/iw_cxgbe/provider.c b/sys/dev/cxgbe/iw_cxgbe/provider.c
index 6b1dfa1..d7ce079 100644
--- a/sys/dev/cxgbe/iw_cxgbe/provider.c
+++ b/sys/dev/cxgbe/iw_cxgbe/provider.c
@@ -296,7 +296,7 @@ c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid)
if (port == 0 || port > sc->params.nports)
return (-EINVAL);
pi = sc->port[port - 1];
- memcpy(&gid->raw[0], pi->hw_addr, sizeof(pi->hw_addr));
+ memcpy(&gid->raw[0], pi->vi[0].hw_addr, ETHER_ADDR_LEN);
return (0);
}
@@ -309,7 +309,8 @@ c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
CTR3(KTR_IW_CXGBE, "%s ibdev %p, props %p", __func__, ibdev, props);
memset(props, 0, sizeof *props);
- memcpy(&props->sys_image_guid, sc->port[0]->hw_addr, 6);
+ memcpy(&props->sys_image_guid, sc->port[0]->vi[0].hw_addr,
+ ETHER_ADDR_LEN);
props->hw_ver = sc->params.chipid;
props->fw_ver = sc->params.fw_vers;
props->device_cap_flags = dev->device_cap_flags;
@@ -352,7 +353,7 @@ c4iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
if (port > sc->params.nports)
return (-EINVAL);
pi = sc->port[port - 1];
- ifp = pi->ifp;
+ ifp = pi->vi[0].ifp;
memset(props, 0, sizeof(struct ib_port_attr));
props->max_mtu = IB_MTU_4096;
@@ -397,7 +398,7 @@ c4iw_register_device(struct c4iw_dev *dev)
BUG_ON(!sc->port[0]);
strlcpy(ibdev->name, device_get_nameunit(sc->dev), sizeof(ibdev->name));
memset(&ibdev->node_guid, 0, sizeof(ibdev->node_guid));
- memcpy(&ibdev->node_guid, sc->port[0]->hw_addr, 6);
+ memcpy(&ibdev->node_guid, sc->port[0]->vi[0].hw_addr, ETHER_ADDR_LEN);
ibdev->owner = THIS_MODULE;
dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
if (fastreg_support)
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
index 2a12283..bebaad9 100644
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -125,7 +125,6 @@ struct t4_virt_res { /* virtualized HW resources */
struct t4_range l2t;
};
-#ifdef TCP_OFFLOAD
enum {
ULD_TOM = 0,
ULD_IWARP,
@@ -152,6 +151,7 @@ struct tom_tunables {
int tx_align;
};
+#ifdef TCP_OFFLOAD
int t4_register_uld(struct uld_info *);
int t4_unregister_uld(struct uld_info *);
int t4_activate_uld(struct adapter *, int);
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index d3099a6..392cd0c 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -107,6 +107,22 @@ static driver_t cxgbe_driver = {
sizeof(struct port_info)
};
+/* T4 VI (vcxgbe) interface */
+static int vcxgbe_probe(device_t);
+static int vcxgbe_attach(device_t);
+static int vcxgbe_detach(device_t);
+static device_method_t vcxgbe_methods[] = {
+ DEVMETHOD(device_probe, vcxgbe_probe),
+ DEVMETHOD(device_attach, vcxgbe_attach),
+ DEVMETHOD(device_detach, vcxgbe_detach),
+ { 0, 0 }
+};
+static driver_t vcxgbe_driver = {
+ "vcxgbe",
+ vcxgbe_methods,
+ sizeof(struct vi_info)
+};
+
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;
@@ -143,6 +159,13 @@ static driver_t cxl_driver = {
sizeof(struct port_info)
};
+/* T5 VI (vcxl) interface */
+static driver_t vcxl_driver = {
+ "vcxl",
+ vcxgbe_methods,
+ sizeof(struct vi_info)
+};
+
static struct cdevsw t5_cdevsw = {
.d_version = D_VERSION,
.d_flags = 0,
@@ -203,6 +226,14 @@ TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
+#define NTXQ_VI 1
+static int t4_ntxq_vi = -1;
+TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);
+
+#define NRXQ_VI 1
+static int t4_nrxq_vi = -1;
+TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);
+
static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
@@ -222,24 +253,24 @@ TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
-#endif
-#ifdef DEV_NETMAP
-#define NNMTXQ_10G 2
-static int t4_nnmtxq10g = -1;
-TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);
+#define NOFLDTXQ_VI 1
+static int t4_nofldtxq_vi = -1;
+TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);
-#define NNMRXQ_10G 2
-static int t4_nnmrxq10g = -1;
-TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);
+#define NOFLDRXQ_VI 1
+static int t4_nofldrxq_vi = -1;
+TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
+#endif
-#define NNMTXQ_1G 1
-static int t4_nnmtxq1g = -1;
-TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);
+#ifdef DEV_NETMAP
+#define NNMTXQ_VI 2
+static int t4_nnmtxq_vi = -1;
+TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);
-#define NNMRXQ_1G 1
-static int t4_nnmrxq1g = -1;
-TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
+#define NNMRXQ_VI 2
+static int t4_nnmrxq_vi = -1;
+TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif
/*
@@ -327,6 +358,19 @@ TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
+static int t4_num_vis = 1;
+TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);
+
+/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
+static int vi_mac_funcs[] = {
+ FW_VI_FUNC_OFLD,
+ FW_VI_FUNC_IWARP,
+ FW_VI_FUNC_OPENISCSI,
+ FW_VI_FUNC_OPENFCOE,
+ FW_VI_FUNC_FOISCSI,
+ FW_VI_FUNC_FOFCOE,
+};
+
struct intrs_and_queues {
uint16_t intr_type; /* INTx, MSI, or MSI-X */
uint16_t nirq; /* Total # of vectors */
@@ -337,18 +381,18 @@ struct intrs_and_queues {
uint16_t ntxq1g; /* # of NIC txq's for each 1G port */
uint16_t nrxq1g; /* # of NIC rxq's for each 1G port */
uint16_t rsrv_noflowq; /* Flag whether to reserve queue 0 */
-#ifdef TCP_OFFLOAD
uint16_t nofldtxq10g; /* # of TOE txq's for each 10G port */
uint16_t nofldrxq10g; /* # of TOE rxq's for each 10G port */
uint16_t nofldtxq1g; /* # of TOE txq's for each 1G port */
uint16_t nofldrxq1g; /* # of TOE rxq's for each 1G port */
-#endif
-#ifdef DEV_NETMAP
- uint16_t nnmtxq10g; /* # of netmap txq's for each 10G port */
- uint16_t nnmrxq10g; /* # of netmap rxq's for each 10G port */
- uint16_t nnmtxq1g; /* # of netmap txq's for each 1G port */
- uint16_t nnmrxq1g; /* # of netmap rxq's for each 1G port */
-#endif
+
+ /* The vcxgbe/vcxl interfaces use these and not the ones above. */
+ uint16_t ntxq_vi; /* # of NIC txq's */
+ uint16_t nrxq_vi; /* # of NIC rxq's */
+ uint16_t nofldtxq_vi; /* # of TOE txq's */
+ uint16_t nofldrxq_vi; /* # of TOE rxq's */
+ uint16_t nnmtxq_vi; /* # of netmap txq's */
+ uint16_t nnmrxq_vi; /* # of netmap rxq's */
};
struct filter_entry {
@@ -370,7 +414,7 @@ static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
-static int cfg_itype_and_nqueues(struct adapter *, int, int,
+static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
@@ -380,8 +424,8 @@ static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
-static int cxgbe_init_synchronized(struct port_info *);
-static int cxgbe_uninit_synchronized(struct port_info *);
+static int cxgbe_init_synchronized(struct vi_info *);
+static int cxgbe_uninit_synchronized(struct vi_info *);
static int setup_intr_handlers(struct adapter *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
@@ -393,6 +437,7 @@ static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
+static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
@@ -400,8 +445,8 @@ static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
-static int t4_sysctls(struct adapter *);
-static int cxgbe_sysctls(struct port_info *);
+static void t4_sysctls(struct adapter *);
+static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
@@ -459,7 +504,7 @@ static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
-static int toe_capability(struct port_info *, int);
+static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
@@ -604,7 +649,7 @@ static int
t4_attach(device_t dev)
{
struct adapter *sc;
- int rc = 0, i, n10g, n1g, rqidx, tqidx;
+ int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
struct intrs_and_queues iaq;
struct sge *s;
#ifdef TCP_OFFLOAD
@@ -613,6 +658,7 @@ t4_attach(device_t dev)
#ifdef DEV_NETMAP
int nm_rqidx, nm_tqidx;
#endif
+ int num_vis;
sc = device_get_softc(dev);
sc->dev = dev;
@@ -646,7 +692,7 @@ t4_attach(device_t dev)
mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
TAILQ_INIT(&sc->sfl);
- callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
+ callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
mtx_init(&sc->regwin_lock, "register and memory window", 0, MTX_DEF);
@@ -731,6 +777,24 @@ t4_attach(device_t dev)
goto done; /* error message displayed already */
/*
+ * Number of VIs to create per-port. The first VI is the "main" regular
+ * VI for the port. The rest are additional virtual interfaces on the
+ * same physical port. Note that the main VI does not have native
+ * netmap support but the extra VIs do.
+ *
+ * Limit the number of VIs per port to the number of available
+ * MAC addresses per port.
+ */
+ if (t4_num_vis >= 1)
+ num_vis = t4_num_vis;
+ else
+ num_vis = 1;
+ if (num_vis > nitems(vi_mac_funcs)) {
+ num_vis = nitems(vi_mac_funcs);
+ device_printf(dev, "Number of VIs limited to %d\n", num_vis);
+ }
+
+ /*
* First pass over all the ports - allocate VIs and initialize some
* basic parameters like mac address, port type, etc. We also figure
* out whether a port is 10G or 1G and use that information when
@@ -746,12 +810,22 @@ t4_attach(device_t dev)
/* These must be set before t4_port_init */
pi->adapter = sc;
pi->port_id = i;
+ /*
+ * XXX: vi[0] is special so we can't delay this allocation until
+ * pi->nvi's final value is known.
+ */
+ pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
+ M_ZERO | M_WAITOK);
- /* Allocate the vi and initialize parameters like mac addr */
+ /*
+ * Allocate the "main" VI and initialize parameters
+ * like mac addr.
+ */
rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
if (rc != 0) {
device_printf(dev, "unable to initialize port %d: %d\n",
i, rc);
+ free(pi->vi, M_CXGBE);
free(pi, M_CXGBE);
sc->port[i] = NULL;
goto done;
@@ -765,6 +839,7 @@ t4_attach(device_t dev)
rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
if (rc != 0) {
device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
+ free(pi->vi, M_CXGBE);
free(pi, M_CXGBE);
sc->port[i] = NULL;
goto done;
@@ -777,20 +852,12 @@ t4_attach(device_t dev)
if (is_10G_port(pi) || is_40G_port(pi)) {
n10g++;
- pi->tmr_idx = t4_tmr_idx_10g;
- pi->pktc_idx = t4_pktc_idx_10g;
} else {
n1g++;
- pi->tmr_idx = t4_tmr_idx_1g;
- pi->pktc_idx = t4_pktc_idx_1g;
}
- pi->xact_addr_filt = -1;
pi->linkdnrc = -1;
- pi->qsize_rxq = t4_qsize_rxq;
- pi->qsize_txq = t4_qsize_txq;
-
pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
if (pi->dev == NULL) {
device_printf(dev,
@@ -798,15 +865,18 @@ t4_attach(device_t dev)
rc = ENXIO;
goto done;
}
+ pi->vi[0].dev = pi->dev;
device_set_softc(pi->dev, pi);
}
/*
* Interrupt type, # of interrupts, # of rx/tx queues, etc.
*/
- rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
+ rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
if (rc != 0)
goto done; /* error message displayed already */
+ if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
+ num_vis = 1;
sc->intr_type = iaq.intr_type;
sc->intr_count = iaq.nirq;
@@ -814,6 +884,10 @@ t4_attach(device_t dev)
s = &sc->sge;
s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
+ if (num_vis > 1) {
+ s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
+ s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
+ }
s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
@@ -821,6 +895,12 @@ t4_attach(device_t dev)
if (is_offload(sc)) {
s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
+ if (num_vis > 1) {
+ s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
+ iaq.nofldrxq_vi;
+ s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
+ iaq.nofldtxq_vi;
+ }
s->neq += s->nofldtxq + s->nofldrxq;
s->niq += s->nofldrxq;
@@ -831,8 +911,10 @@ t4_attach(device_t dev)
}
#endif
#ifdef DEV_NETMAP
- s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
- s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
+ if (num_vis > 1) {
+ s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
+ s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
+ }
s->neq += s->nnmtxq + s->nnmrxq;
s->niq += s->nnmrxq;
@@ -871,57 +953,70 @@ t4_attach(device_t dev)
#endif
for_each_port(sc, i) {
struct port_info *pi = sc->port[i];
+ struct vi_info *vi;
if (pi == NULL)
continue;
- pi->first_rxq = rqidx;
- pi->first_txq = tqidx;
- if (is_10G_port(pi) || is_40G_port(pi)) {
- pi->flags |= iaq.intr_flags_10g;
- pi->nrxq = iaq.nrxq10g;
- pi->ntxq = iaq.ntxq10g;
- } else {
- pi->flags |= iaq.intr_flags_1g;
- pi->nrxq = iaq.nrxq1g;
- pi->ntxq = iaq.ntxq1g;
- }
+ pi->nvi = num_vis;
+ for_each_vi(pi, j, vi) {
+ vi->pi = pi;
+ vi->qsize_rxq = t4_qsize_rxq;
+ vi->qsize_txq = t4_qsize_txq;
- if (pi->ntxq > 1)
- pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
- else
- pi->rsrv_noflowq = 0;
+ vi->first_rxq = rqidx;
+ vi->first_txq = tqidx;
+ if (is_10G_port(pi) || is_40G_port(pi)) {
+ vi->tmr_idx = t4_tmr_idx_10g;
+ vi->pktc_idx = t4_pktc_idx_10g;
+ vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
+ vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
+ vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
+ } else {
+ vi->tmr_idx = t4_tmr_idx_1g;
+ vi->pktc_idx = t4_pktc_idx_1g;
+ vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
+ vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
+ vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
+ }
+ rqidx += vi->nrxq;
+ tqidx += vi->ntxq;
+
+ if (j == 0 && vi->ntxq > 1)
+ vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
+ else
+ vi->rsrv_noflowq = 0;
- rqidx += pi->nrxq;
- tqidx += pi->ntxq;
#ifdef TCP_OFFLOAD
- if (is_offload(sc)) {
- pi->first_ofld_rxq = ofld_rqidx;
- pi->first_ofld_txq = ofld_tqidx;
+ vi->first_ofld_rxq = ofld_rqidx;
+ vi->first_ofld_txq = ofld_tqidx;
if (is_10G_port(pi) || is_40G_port(pi)) {
- pi->nofldrxq = iaq.nofldrxq10g;
- pi->nofldtxq = iaq.nofldtxq10g;
+ vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
+ vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
+ iaq.nofldrxq_vi;
+ vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
+ iaq.nofldtxq_vi;
} else {
- pi->nofldrxq = iaq.nofldrxq1g;
- pi->nofldtxq = iaq.nofldtxq1g;
+ vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
+ vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
+ iaq.nofldrxq_vi;
+ vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
+ iaq.nofldtxq_vi;
}
- ofld_rqidx += pi->nofldrxq;
- ofld_tqidx += pi->nofldtxq;
- }
+ ofld_rqidx += vi->nofldrxq;
+ ofld_tqidx += vi->nofldtxq;
#endif
#ifdef DEV_NETMAP
- pi->first_nm_rxq = nm_rqidx;
- pi->first_nm_txq = nm_tqidx;
- if (is_10G_port(pi) || is_40G_port(pi)) {
- pi->nnmrxq = iaq.nnmrxq10g;
- pi->nnmtxq = iaq.nnmtxq10g;
- } else {
- pi->nnmrxq = iaq.nnmrxq1g;
- pi->nnmtxq = iaq.nnmtxq1g;
- }
- nm_rqidx += pi->nnmrxq;
- nm_tqidx += pi->nnmtxq;
+ if (j > 0) {
+ vi->first_nm_rxq = nm_rqidx;
+ vi->first_nm_txq = nm_tqidx;
+ vi->nnmrxq = iaq.nnmrxq_vi;
+ vi->nnmtxq = iaq.nnmtxq_vi;
+ nm_rqidx += vi->nnmrxq;
+ nm_tqidx += vi->nnmtxq;
+ }
#endif
+ }
}
rc = setup_intr_handlers(sc);
@@ -996,11 +1091,12 @@ t4_detach(device_t dev)
for (i = 0; i < MAX_NPORTS; i++) {
pi = sc->port[i];
if (pi) {
- t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->viid);
+ t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
if (pi->dev)
device_delete_child(dev, pi->dev);
mtx_destroy(&pi->pi_lock);
+ free(pi->vi, M_CXGBE);
free(pi, M_CXGBE);
}
}
@@ -1052,6 +1148,7 @@ t4_detach(device_t dev)
mtx_destroy(&sc->sc_lock);
}
+ callout_drain(&sc->sfl_callout);
if (mtx_initialized(&sc->tids.ftid_lock))
mtx_destroy(&sc->tids.ftid_lock);
if (mtx_initialized(&sc->sfl_lock))
@@ -1084,12 +1181,13 @@ cxgbe_probe(device_t dev)
#define T4_CAP_ENABLE (T4_CAP)
static int
-cxgbe_attach(device_t dev)
+cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
- struct port_info *pi = device_get_softc(dev);
struct ifnet *ifp;
- char *s;
- int n, o;
+ struct sbuf *sb;
+
+ vi->xact_addr_filt = -1;
+ callout_init(&vi->tick, 1);
/* Allocate an ifnet and set it up */
ifp = if_alloc(IFT_ETHER);
@@ -1097,10 +1195,8 @@ cxgbe_attach(device_t dev)
device_printf(dev, "Cannot allocate ifnet\n");
return (ENOMEM);
}
- pi->ifp = ifp;
- ifp->if_softc = pi;
-
- callout_init(&pi->tick, CALLOUT_MPSAFE);
+ vi->ifp = ifp;
+ ifp->if_softc = vi;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
@@ -1112,7 +1208,7 @@ cxgbe_attach(device_t dev)
ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
- if (is_offload(pi->adapter))
+ if (vi->nofldrxq != 0)
ifp->if_capabilities |= IFCAP_TOE;
#endif
ifp->if_capenable = T4_CAP_ENABLE;
@@ -1123,99 +1219,120 @@ cxgbe_attach(device_t dev)
ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
ifp->if_hw_tsomaxsegsize = 65536;
- /* Initialize ifmedia for this port */
- ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
+ /* Initialize ifmedia for this VI */
+ ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
cxgbe_media_status);
- build_medialist(pi, &pi->media);
+ build_medialist(vi->pi, &vi->media);
- pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
+ vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
EVENTHANDLER_PRI_ANY);
- ether_ifattach(ifp, pi->hw_addr);
-
- n = 128;
- s = malloc(n, M_CXGBE, M_WAITOK);
- o = snprintf(s, n, "%d txq, %d rxq (NIC)", pi->ntxq, pi->nrxq);
- MPASS(n > o);
+ ether_ifattach(ifp, vi->hw_addr);
+#ifdef DEV_NETMAP
+ if (vi->nnmrxq != 0)
+ cxgbe_nm_attach(vi);
+#endif
+ sb = sbuf_new_auto();
+ sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
- if (is_offload(pi->adapter)) {
- o += snprintf(s + o, n - o, "; %d txq, %d rxq (TOE)",
- pi->nofldtxq, pi->nofldrxq);
- MPASS(n > o);
- }
+ if (ifp->if_capabilities & IFCAP_TOE)
+ sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
+ vi->nofldtxq, vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
- o += snprintf(s + o, n - o, "; %d txq, %d rxq (netmap)", pi->nnmtxq,
- pi->nnmrxq);
- MPASS(n > o);
+ if (ifp->if_capabilities & IFCAP_NETMAP)
+ sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
+ vi->nnmtxq, vi->nnmrxq);
#endif
- device_printf(dev, "%s\n", s);
- free(s, M_CXGBE);
+ sbuf_finish(sb);
+ device_printf(dev, "%s\n", sbuf_data(sb));
+ sbuf_delete(sb);
+
+ vi_sysctls(vi);
+
+ return (0);
+}
+
+static int
+cxgbe_attach(device_t dev)
+{
+ struct port_info *pi = device_get_softc(dev);
+ struct vi_info *vi;
+ int i, rc;
+
+ callout_init_mtx(&pi->tick, &pi->pi_lock, 0);
+
+ rc = cxgbe_vi_attach(dev, &pi->vi[0]);
+ if (rc)
+ return (rc);
+
+ for_each_vi(pi, i, vi) {
+ if (i == 0)
+ continue;
+ vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
+ "vcxgbe" : "vcxl", -1);
+ if (vi->dev == NULL) {
+ device_printf(dev, "failed to add VI %d\n", i);
+ continue;
+ }
+ device_set_softc(vi->dev, vi);
+ }
-#ifdef DEV_NETMAP
- /* nm_media handled here to keep implementation private to this file */
- ifmedia_init(&pi->nm_media, IFM_IMASK, cxgbe_media_change,
- cxgbe_media_status);
- build_medialist(pi, &pi->nm_media);
- create_netmap_ifnet(pi); /* logs errors it something fails */
-#endif
cxgbe_sysctls(pi);
+ bus_generic_attach(dev);
+
return (0);
}
+static void
+cxgbe_vi_detach(struct vi_info *vi)
+{
+ struct ifnet *ifp = vi->ifp;
+
+ ether_ifdetach(ifp);
+
+ if (vi->vlan_c)
+ EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);
+
+ /* Let detach proceed even if these fail. */
+#ifdef DEV_NETMAP
+ if (ifp->if_capabilities & IFCAP_NETMAP)
+ cxgbe_nm_detach(vi);
+#endif
+ cxgbe_uninit_synchronized(vi);
+ callout_drain(&vi->tick);
+ vi_full_uninit(vi);
+
+ ifmedia_removeall(&vi->media);
+ if_free(vi->ifp);
+ vi->ifp = NULL;
+}
+
static int
cxgbe_detach(device_t dev)
{
struct port_info *pi = device_get_softc(dev);
struct adapter *sc = pi->adapter;
- struct ifnet *ifp = pi->ifp;
+ int rc;
- /* Tell if_ioctl and if_init that the port is going away */
- ADAPTER_LOCK(sc);
- SET_DOOMED(pi);
- wakeup(&sc->flags);
- while (IS_BUSY(sc))
- mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
- SET_BUSY(sc);
-#ifdef INVARIANTS
- sc->last_op = "t4detach";
- sc->last_op_thr = curthread;
- sc->last_op_flags = 0;
-#endif
- ADAPTER_UNLOCK(sc);
+ /* Detach the extra VIs first. */
+ rc = bus_generic_detach(dev);
+ if (rc)
+ return (rc);
+ device_delete_children(dev);
+
+ doom_vi(sc, &pi->vi[0]);
if (pi->flags & HAS_TRACEQ) {
sc->traceq = -1; /* cloner should not create ifnet */
t4_tracer_port_detach(sc);
}
- if (pi->vlan_c)
- EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);
-
- PORT_LOCK(pi);
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
- callout_stop(&pi->tick);
- PORT_UNLOCK(pi);
+ cxgbe_vi_detach(&pi->vi[0]);
callout_drain(&pi->tick);
- /* Let detach proceed even if these fail. */
- cxgbe_uninit_synchronized(pi);
- port_full_uninit(pi);
-
- ifmedia_removeall(&pi->media);
- ether_ifdetach(pi->ifp);
- if_free(pi->ifp);
-
-#ifdef DEV_NETMAP
- /* XXXNM: equivalent of cxgbe_uninit_synchronized to ifdown nm_ifp */
- destroy_netmap_ifnet(pi);
-#endif
-
- ADAPTER_LOCK(sc);
- CLR_BUSY(sc);
- wakeup(&sc->flags);
- ADAPTER_UNLOCK(sc);
+ end_synchronized_op(sc, 0);
return (0);
}
@@ -1223,12 +1340,12 @@ cxgbe_detach(device_t dev)
static void
cxgbe_init(void *arg)
{
- struct port_info *pi = arg;
- struct adapter *sc = pi->adapter;
+ struct vi_info *vi = arg;
+ struct adapter *sc = vi->pi->adapter;
- if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
+ if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
return;
- cxgbe_init_synchronized(pi);
+ cxgbe_init_synchronized(vi);
end_synchronized_op(sc, 0);
}
@@ -1236,8 +1353,8 @@ static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
int rc = 0, mtu, flags, can_sleep;
- struct port_info *pi = ifp->if_softc;
- struct adapter *sc = pi->adapter;
+ struct vi_info *vi = ifp->if_softc;
+ struct adapter *sc = vi->pi->adapter;
struct ifreq *ifr = (struct ifreq *)data;
uint32_t mask;
@@ -1247,11 +1364,11 @@ cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
return (EINVAL);
- rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
+ rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
if (rc)
return (rc);
ifp->if_mtu = mtu;
- if (pi->flags & PORT_INIT_DONE) {
+ if (vi->flags & VI_INIT_DONE) {
t4_update_fl_bufsize(ifp);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
rc = update_mac_settings(ifp, XGMAC_MTU);
@@ -1262,14 +1379,14 @@ cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
case SIOCSIFFLAGS:
can_sleep = 0;
redo_sifflags:
- rc = begin_synchronized_op(sc, pi,
+ rc = begin_synchronized_op(sc, vi,
can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
if (rc)
return (rc);
if (ifp->if_flags & IFF_UP) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- flags = pi->if_flags;
+ flags = vi->if_flags;
if ((ifp->if_flags ^ flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
if (can_sleep == 1) {
@@ -1286,23 +1403,23 @@ redo_sifflags:
can_sleep = 1;
goto redo_sifflags;
}
- rc = cxgbe_init_synchronized(pi);
+ rc = cxgbe_init_synchronized(vi);
}
- pi->if_flags = ifp->if_flags;
+ vi->if_flags = ifp->if_flags;
} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
if (can_sleep == 0) {
end_synchronized_op(sc, LOCK_HELD);
can_sleep = 1;
goto redo_sifflags;
}
- rc = cxgbe_uninit_synchronized(pi);
+ rc = cxgbe_uninit_synchronized(vi);
}
end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
break;
case SIOCADDMULTI:
case SIOCDELMULTI: /* these two are called with a mutex held :-( */
- rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
+ rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
if (rc)
return (rc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
@@ -1311,7 +1428,7 @@ redo_sifflags:
break;
case SIOCSIFCAP:
- rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
+ rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
if (rc)
return (rc);
@@ -1373,7 +1490,7 @@ redo_sifflags:
struct sge_rxq *rxq;
ifp->if_capenable ^= IFCAP_LRO;
- for_each_rxq(pi, i, rxq) {
+ for_each_rxq(vi, i, rxq) {
if (ifp->if_capenable & IFCAP_LRO)
rxq->iq.flags |= IQ_LRO_ENABLED;
else
@@ -1385,7 +1502,7 @@ redo_sifflags:
if (mask & IFCAP_TOE) {
int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
- rc = toe_capability(pi, enable);
+ rc = toe_capability(vi, enable);
if (rc != 0)
goto fail;
@@ -1416,7 +1533,7 @@ fail:
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
- ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
+ ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
break;
case SIOCGI2C: {
@@ -1433,10 +1550,10 @@ fail:
rc = EINVAL;
break;
}
- rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4i2c");
+ rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
if (rc)
return (rc);
- rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
+ rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
i2c.offset, i2c.len, &i2c.data[0]);
end_synchronized_op(sc, 0);
if (rc == 0)
@@ -1454,7 +1571,8 @@ fail:
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
- struct port_info *pi = ifp->if_softc;
+ struct vi_info *vi = ifp->if_softc;
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
struct sge_txq *txq;
void *items[1];
@@ -1476,10 +1594,10 @@ cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
}
/* Select a txq. */
- txq = &sc->sge.txq[pi->first_txq];
+ txq = &sc->sge.txq[vi->first_txq];
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
- txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq)) +
- pi->rsrv_noflowq);
+ txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
+ vi->rsrv_noflowq);
items[0] = m;
rc = mp_ring_enqueue(txq->r, items, 1, 4096);
@@ -1492,13 +1610,13 @@ cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
static void
cxgbe_qflush(struct ifnet *ifp)
{
- struct port_info *pi = ifp->if_softc;
+ struct vi_info *vi = ifp->if_softc;
struct sge_txq *txq;
int i;
- /* queues do not exist if !PORT_INIT_DONE. */
- if (pi->flags & PORT_INIT_DONE) {
- for_each_txq(pi, i, txq) {
+ /* queues do not exist if !VI_INIT_DONE. */
+ if (vi->flags & VI_INIT_DONE) {
+ for_each_txq(vi, i, txq) {
TXQ_LOCK(txq);
txq->eq.flags &= ~EQ_ENABLED;
TXQ_UNLOCK(txq);
@@ -1514,9 +1632,9 @@ cxgbe_qflush(struct ifnet *ifp)
static int
cxgbe_media_change(struct ifnet *ifp)
{
- struct port_info *pi = ifp->if_softc;
+ struct vi_info *vi = ifp->if_softc;
- device_printf(pi->dev, "%s unimplemented.\n", __func__);
+ device_printf(vi->dev, "%s unimplemented.\n", __func__);
return (EOPNOTSUPP);
}
@@ -1524,20 +1642,12 @@ cxgbe_media_change(struct ifnet *ifp)
static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
- struct port_info *pi = ifp->if_softc;
- struct ifmedia *media = NULL;
+ struct vi_info *vi = ifp->if_softc;
+ struct port_info *pi = vi->pi;
struct ifmedia_entry *cur;
int speed = pi->link_cfg.speed;
- if (ifp == pi->ifp)
- media = &pi->media;
-#ifdef DEV_NETMAP
- else if (ifp == pi->nm_ifp)
- media = &pi->nm_media;
-#endif
- MPASS(media != NULL);
-
- cur = media->ifm_cur;
+ cur = vi->media.ifm_cur;
ifmr->ifm_status = IFM_AVALID;
if (!pi->link_cfg.link_ok)
@@ -1563,6 +1673,84 @@ cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
speed));
}
+static int
+vcxgbe_probe(device_t dev)
+{
+ char buf[128];
+ struct vi_info *vi = device_get_softc(dev);
+
+ snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
+ vi - vi->pi->vi);
+ device_set_desc_copy(dev, buf);
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+vcxgbe_attach(device_t dev)
+{
+ struct vi_info *vi;
+ struct port_info *pi;
+ struct adapter *sc;
+ int func, index, rc;
+ u32 param, val;
+
+ vi = device_get_softc(dev);
+ pi = vi->pi;
+ sc = pi->adapter;
+
+ index = vi - pi->vi;
+ KASSERT(index < nitems(vi_mac_funcs),
+ ("%s: VI %s doesn't have a MAC func", __func__,
+ device_get_nameunit(dev)));
+ func = vi_mac_funcs[index];
+ rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
+ vi->hw_addr, &vi->rss_size, func, 0);
+ if (rc < 0) {
+ device_printf(dev, "Failed to allocate virtual interface "
+ "for port %d: %d\n", pi->port_id, -rc);
+ return (-rc);
+ }
+ vi->viid = rc;
+
+ param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
+ V_FW_PARAMS_PARAM_YZ(vi->viid);
+ rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+ if (rc)
+ vi->rss_base = 0xffff;
+ else {
+ /* MPASS((val >> 16) == rss_size); */
+ vi->rss_base = val & 0xffff;
+ }
+
+ rc = cxgbe_vi_attach(dev, vi);
+ if (rc) {
+ t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
+ return (rc);
+ }
+ return (0);
+}
+
+static int
+vcxgbe_detach(device_t dev)
+{
+ struct vi_info *vi;
+ struct adapter *sc;
+
+ vi = device_get_softc(dev);
+ sc = vi->pi->adapter;
+
+ doom_vi(sc, vi);
+
+ cxgbe_vi_detach(vi);
+ t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
+
+ end_synchronized_op(sc, 0);
+
+ return (0);
+}
+
void
t4_fatal_err(struct adapter *sc)
{
@@ -1884,33 +2072,34 @@ position_memwin(struct adapter *sc, int n, uint32_t addr)
}
static int
-cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
+cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
struct intrs_and_queues *iaq)
{
int rc, itype, navail, nrxq10g, nrxq1g, n;
int nofldrxq10g = 0, nofldrxq1g = 0;
- int nnmrxq10g = 0, nnmrxq1g = 0;
bzero(iaq, sizeof(*iaq));
iaq->ntxq10g = t4_ntxq10g;
iaq->ntxq1g = t4_ntxq1g;
+ iaq->ntxq_vi = t4_ntxq_vi;
iaq->nrxq10g = nrxq10g = t4_nrxq10g;
iaq->nrxq1g = nrxq1g = t4_nrxq1g;
+ iaq->nrxq_vi = t4_nrxq_vi;
iaq->rsrv_noflowq = t4_rsrv_noflowq;
#ifdef TCP_OFFLOAD
if (is_offload(sc)) {
iaq->nofldtxq10g = t4_nofldtxq10g;
iaq->nofldtxq1g = t4_nofldtxq1g;
+ iaq->nofldtxq_vi = t4_nofldtxq_vi;
iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
+ iaq->nofldrxq_vi = t4_nofldrxq_vi;
}
#endif
#ifdef DEV_NETMAP
- iaq->nnmtxq10g = t4_nnmtxq10g;
- iaq->nnmtxq1g = t4_nnmtxq1g;
- iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g;
- iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g;
+ iaq->nnmtxq_vi = t4_nnmtxq_vi;
+ iaq->nnmrxq_vi = t4_nnmrxq_vi;
#endif
for (itype = INTR_MSIX; itype; itype >>= 1) {
@@ -1934,12 +2123,17 @@ restart:
/*
* Best option: an interrupt vector for errors, one for the
- * firmware event queue, and one for every rxq (NIC, TOE, and
- * netmap).
+ * firmware event queue, and one for every rxq (NIC and TOE) of
+ * every VI. The VIs that support netmap use the same
+ * interrupts for the NIC rx queues and the netmap rx queues
+ * because only one set of queues is active at a time.
*/
iaq->nirq = T4_EXTRA_INTR;
- iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g);
- iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g);
+ iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
+ iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
+ iaq->nirq += (n10g + n1g) * (num_vis - 1) *
+ max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */
+ iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi;
if (iaq->nirq <= navail &&
(itype != INTR_MSI || powerof2(iaq->nirq))) {
iaq->intr_flags_10g = INTR_ALL;
@@ -1947,41 +2141,44 @@ restart:
goto allocate;
}
+ /* Disable the VIs (and netmap) if there aren't enough intrs */
+ if (num_vis > 1) {
+ device_printf(sc->dev, "virtual interfaces disabled "
+ "because num_vis=%u with current settings "
+ "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, "
+ "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, "
+ "nnmrxq_vi=%u) would need %u interrupts but "
+ "only %u are available.\n", num_vis, nrxq10g,
+ nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi,
+ iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq,
+ navail);
+ num_vis = 1;
+ iaq->ntxq_vi = iaq->nrxq_vi = 0;
+ iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
+ iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
+ goto restart;
+ }
+
/*
* Second best option: a vector for errors, one for the firmware
* event queue, and vectors for either all the NIC rx queues or
* all the TOE rx queues. The queues that don't get vectors
* will forward their interrupts to those that do.
- *
- * Note: netmap rx queues cannot be created early and so they
- * can't be setup to receive forwarded interrupts for others.
*/
iaq->nirq = T4_EXTRA_INTR;
if (nrxq10g >= nofldrxq10g) {
iaq->intr_flags_10g = INTR_RXQ;
iaq->nirq += n10g * nrxq10g;
-#ifdef DEV_NETMAP
- iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g);
-#endif
} else {
iaq->intr_flags_10g = INTR_OFLD_RXQ;
iaq->nirq += n10g * nofldrxq10g;
-#ifdef DEV_NETMAP
- iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g);
-#endif
}
if (nrxq1g >= nofldrxq1g) {
iaq->intr_flags_1g = INTR_RXQ;
iaq->nirq += n1g * nrxq1g;
-#ifdef DEV_NETMAP
- iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g);
-#endif
} else {
iaq->intr_flags_1g = INTR_OFLD_RXQ;
iaq->nirq += n1g * nofldrxq1g;
-#ifdef DEV_NETMAP
- iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g);
-#endif
}
if (iaq->nirq <= navail &&
(itype != INTR_MSI || powerof2(iaq->nirq)))
@@ -1989,9 +2186,9 @@ restart:
/*
* Next best option: an interrupt vector for errors, one for the
- * firmware event queue, and at least one per port. At this
- * point we know we'll have to downsize nrxq and/or nofldrxq
- * and/or nnmrxq to fit what's available to us.
+ * firmware event queue, and at least one per main-VI. At this
+ * point we know we'll have to downsize nrxq and/or nofldrxq to
+ * fit what's available to us.
*/
iaq->nirq = T4_EXTRA_INTR;
iaq->nirq += n10g + n1g;
@@ -2014,9 +2211,6 @@ restart:
#ifdef TCP_OFFLOAD
iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
-#ifdef DEV_NETMAP
- iaq->nnmrxq10g = min(n, nnmrxq10g);
-#endif
}
if (n1g > 0) {
@@ -2035,9 +2229,6 @@ restart:
#ifdef TCP_OFFLOAD
iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
-#ifdef DEV_NETMAP
- iaq->nnmrxq1g = min(n, nnmrxq1g);
-#endif
}
if (itype != INTR_MSI || powerof2(iaq->nirq))
@@ -2053,10 +2244,6 @@ restart:
if (is_offload(sc))
iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif
-#ifdef DEV_NETMAP
- iaq->nnmrxq10g = iaq->nnmrxq1g = 1;
-#endif
-
allocate:
navail = iaq->nirq;
rc = 0;
@@ -2965,25 +3152,14 @@ int
update_mac_settings(struct ifnet *ifp, int flags)
{
int rc = 0;
- struct port_info *pi = ifp->if_softc;
+ struct vi_info *vi = ifp->if_softc;
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
- uint16_t viid = 0xffff;
- int16_t *xact_addr_filt = NULL;
ASSERT_SYNCHRONIZED_OP(sc);
KASSERT(flags, ("%s: not told what to update.", __func__));
- if (ifp == pi->ifp) {
- viid = pi->viid;
- xact_addr_filt = &pi->xact_addr_filt;
- }
-#ifdef DEV_NETMAP
- else if (ifp == pi->nm_ifp) {
- viid = pi->nm_viid;
- xact_addr_filt = &pi->nm_xact_addr_filt;
- }
-#endif
if (flags & XGMAC_MTU)
mtu = ifp->if_mtu;
@@ -2997,8 +3173,8 @@ update_mac_settings(struct ifnet *ifp, int flags)
vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
- rc = -t4_set_rxmode(sc, sc->mbox, viid, mtu, promisc, allmulti,
- 1, vlanex, false);
+ rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
+ allmulti, 1, vlanex, false);
if (rc) {
if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
rc);
@@ -3010,14 +3186,14 @@ update_mac_settings(struct ifnet *ifp, int flags)
uint8_t ucaddr[ETHER_ADDR_LEN];
bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
- rc = t4_change_mac(sc, sc->mbox, viid, *xact_addr_filt, ucaddr,
- true, true);
+ rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
+ ucaddr, true, true);
if (rc < 0) {
rc = -rc;
if_printf(ifp, "change_mac failed: %d\n", rc);
return (rc);
} else {
- *xact_addr_filt = rc;
+ vi->xact_addr_filt = rc;
rc = 0;
}
}
@@ -3039,8 +3215,8 @@ update_mac_settings(struct ifnet *ifp, int flags)
i++;
if (i == FW_MAC_EXACT_CHUNK) {
- rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del,
- i, mcaddr, NULL, &hash, 0);
+ rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
+ del, i, mcaddr, NULL, &hash, 0);
if (rc < 0) {
rc = -rc;
for (j = 0; j < i; j++) {
@@ -3060,7 +3236,7 @@ update_mac_settings(struct ifnet *ifp, int flags)
}
}
if (i > 0) {
- rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del, i,
+ rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
mcaddr, NULL, &hash, 0);
if (rc < 0) {
rc = -rc;
@@ -3078,7 +3254,7 @@ update_mac_settings(struct ifnet *ifp, int flags)
}
}
- rc = -t4_set_addr_hash(sc, sc->mbox, viid, 0, hash, 0);
+ rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
if (rc != 0)
if_printf(ifp, "failed to set mc address hash: %d", rc);
mcfail:
@@ -3092,7 +3268,7 @@ mcfail:
* {begin|end}_synchronized_op must be called from the same thread.
*/
int
-begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
+begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
char *wmesg)
{
int rc, pri;
@@ -3100,7 +3276,8 @@ begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
#ifdef WITNESS
/* the caller thinks it's ok to sleep, but is it really? */
if (flags & SLEEP_OK)
- pause("t4slptst", 1);
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "begin_synchronized_op");
#endif
if (INTR_OK)
@@ -3111,7 +3288,7 @@ begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
ADAPTER_LOCK(sc);
for (;;) {
- if (pi && IS_DOOMED(pi)) {
+ if (vi && IS_DOOMED(vi)) {
rc = ENXIO;
goto done;
}
@@ -3148,6 +3325,29 @@ done:
}
/*
+ * Tell if_ioctl and if_init that the VI is going away. This is
+ * special variant of begin_synchronized_op and must be paired with a
+ * call to end_synchronized_op.
+ */
+void
+doom_vi(struct adapter *sc, struct vi_info *vi)
+{
+
+ ADAPTER_LOCK(sc);
+ SET_DOOMED(vi);
+ wakeup(&sc->flags);
+ while (IS_BUSY(sc))
+ mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
+ SET_BUSY(sc);
+#ifdef INVARIANTS
+ sc->last_op = "t4detach";
+ sc->last_op_thr = curthread;
+ sc->last_op_flags = 0;
+#endif
+ ADAPTER_UNLOCK(sc);
+}
+
+/*
* {begin|end}_synchronized_op must be called from the same thread.
*/
void
@@ -3166,34 +3366,32 @@ end_synchronized_op(struct adapter *sc, int flags)
}
static int
-cxgbe_init_synchronized(struct port_info *pi)
+cxgbe_init_synchronized(struct vi_info *vi)
{
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
- struct ifnet *ifp = pi->ifp;
+ struct ifnet *ifp = vi->ifp;
int rc = 0, i;
struct sge_txq *txq;
ASSERT_SYNCHRONIZED_OP(sc);
- if (isset(&sc->open_device_map, pi->port_id)) {
- KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
- ("mismatch between open_device_map and if_drv_flags"));
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
return (0); /* already running */
- }
if (!(sc->flags & FULL_INIT_DONE) &&
((rc = adapter_full_init(sc)) != 0))
return (rc); /* error message displayed already */
- if (!(pi->flags & PORT_INIT_DONE) &&
- ((rc = port_full_init(pi)) != 0))
+ if (!(vi->flags & VI_INIT_DONE) &&
+ ((rc = vi_full_init(vi)) != 0))
return (rc); /* error message displayed already */
rc = update_mac_settings(ifp, XGMAC_ALL);
if (rc)
goto done; /* error message displayed already */
- rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
+ rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
if (rc != 0) {
if_printf(ifp, "enable_vi failed: %d\n", rc);
goto done;
@@ -3204,7 +3402,7 @@ cxgbe_init_synchronized(struct port_info *pi)
* if this changes.
*/
- for_each_txq(pi, i, txq) {
+ for_each_txq(vi, i, txq) {
TXQ_LOCK(txq);
txq->eq.flags |= EQ_ENABLED;
TXQ_UNLOCK(txq);
@@ -3213,8 +3411,8 @@ cxgbe_init_synchronized(struct port_info *pi)
/*
* The first iq of the first port to come up is used for tracing.
*/
- if (sc->traceq < 0) {
- sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
+ if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
+ sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
V_QUEUENUMBER(sc->traceq));
@@ -3222,15 +3420,18 @@ cxgbe_init_synchronized(struct port_info *pi)
}
/* all ok */
- setbit(&sc->open_device_map, pi->port_id);
PORT_LOCK(pi);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
- PORT_UNLOCK(pi);
+ pi->up_vis++;
- callout_reset(&pi->tick, hz, cxgbe_tick, pi);
+ if (pi->nvi > 1)
+ callout_reset(&vi->tick, hz, vi_tick, vi);
+ else
+ callout_reset(&pi->tick, hz, cxgbe_tick, pi);
+ PORT_UNLOCK(pi);
done:
if (rc != 0)
- cxgbe_uninit_synchronized(pi);
+ cxgbe_uninit_synchronized(vi);
return (rc);
}
@@ -3239,18 +3440,19 @@ done:
* Idempotent.
*/
static int
-cxgbe_uninit_synchronized(struct port_info *pi)
+cxgbe_uninit_synchronized(struct vi_info *vi)
{
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
- struct ifnet *ifp = pi->ifp;
+ struct ifnet *ifp = vi->ifp;
int rc, i;
struct sge_txq *txq;
ASSERT_SYNCHRONIZED_OP(sc);
- if (!(pi->flags & PORT_INIT_DONE)) {
+ if (!(vi->flags & VI_INIT_DONE)) {
KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING),
- ("uninited port is running"));
+ ("uninited VI is running"));
return (0);
}
@@ -3261,21 +3463,33 @@ cxgbe_uninit_synchronized(struct port_info *pi)
* holding in its RAM (for an offloaded connection) even after the VI is
* disabled.
*/
- rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
+ rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
if (rc) {
if_printf(ifp, "disable_vi failed: %d\n", rc);
return (rc);
}
- for_each_txq(pi, i, txq) {
+ for_each_txq(vi, i, txq) {
TXQ_LOCK(txq);
txq->eq.flags &= ~EQ_ENABLED;
TXQ_UNLOCK(txq);
}
- clrbit(&sc->open_device_map, pi->port_id);
PORT_LOCK(pi);
+ if (pi->nvi == 1)
+ callout_stop(&pi->tick);
+ else
+ callout_stop(&vi->tick);
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ PORT_UNLOCK(pi);
+ return (0);
+ }
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ pi->up_vis--;
+ if (pi->up_vis > 0) {
+ PORT_UNLOCK(pi);
+ return (0);
+ }
PORT_UNLOCK(pi);
pi->link_cfg.link_ok = 0;
@@ -3293,10 +3507,12 @@ cxgbe_uninit_synchronized(struct port_info *pi)
static int
setup_intr_handlers(struct adapter *sc)
{
- int rc, rid, p, q;
+ int rc, rid, p, q, v;
char s[8];
struct irq *irq;
struct port_info *pi;
+ struct vi_info *vi;
+ struct sge *sge = &sc->sge;
struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
struct sge_ofld_rxq *ofld_rxq;
@@ -3325,7 +3541,7 @@ setup_intr_handlers(struct adapter *sc)
rid++;
/* The second one is always the firmware event queue */
- rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
+ rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
if (rc != 0)
return (rc);
irq++;
@@ -3333,44 +3549,64 @@ setup_intr_handlers(struct adapter *sc)
for_each_port(sc, p) {
pi = sc->port[p];
+ for_each_vi(pi, v, vi) {
+ vi->first_intr = rid - 1;
- if (pi->flags & INTR_RXQ) {
- for_each_rxq(pi, q, rxq) {
- snprintf(s, sizeof(s), "%d.%d", p, q);
- rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
- s);
- if (rc != 0)
- return (rc);
- irq++;
- rid++;
+ if (vi->nnmrxq > 0) {
+ int n = max(vi->nrxq, vi->nnmrxq);
+
+ MPASS(vi->flags & INTR_RXQ);
+
+ rxq = &sge->rxq[vi->first_rxq];
+#ifdef DEV_NETMAP
+ nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
+#endif
+ for (q = 0; q < n; q++) {
+ snprintf(s, sizeof(s), "%x%c%x", p,
+ 'a' + v, q);
+ if (q < vi->nrxq)
+ irq->rxq = rxq++;
+#ifdef DEV_NETMAP
+ if (q < vi->nnmrxq)
+ irq->nm_rxq = nm_rxq++;
+#endif
+ rc = t4_alloc_irq(sc, irq, rid,
+ t4_vi_intr, irq, s);
+ if (rc != 0)
+ return (rc);
+ irq++;
+ rid++;
+ vi->nintr++;
+ }
+ } else if (vi->flags & INTR_RXQ) {
+ for_each_rxq(vi, q, rxq) {
+ snprintf(s, sizeof(s), "%x%c%x", p,
+ 'a' + v, q);
+ rc = t4_alloc_irq(sc, irq, rid,
+ t4_intr, rxq, s);
+ if (rc != 0)
+ return (rc);
+ irq++;
+ rid++;
+ vi->nintr++;
+ }
}
- }
#ifdef TCP_OFFLOAD
- if (pi->flags & INTR_OFLD_RXQ) {
- for_each_ofld_rxq(pi, q, ofld_rxq) {
- snprintf(s, sizeof(s), "%d,%d", p, q);
- rc = t4_alloc_irq(sc, irq, rid, t4_intr,
- ofld_rxq, s);
- if (rc != 0)
- return (rc);
- irq++;
- rid++;
+ if (vi->flags & INTR_OFLD_RXQ) {
+ for_each_ofld_rxq(vi, q, ofld_rxq) {
+ snprintf(s, sizeof(s), "%x%c%x", p,
+ 'A' + v, q);
+ rc = t4_alloc_irq(sc, irq, rid,
+ t4_intr, ofld_rxq, s);
+ if (rc != 0)
+ return (rc);
+ irq++;
+ rid++;
+ vi->nintr++;
+ }
}
- }
#endif
-#ifdef DEV_NETMAP
- if (pi->flags & INTR_NM_RXQ) {
- for_each_nm_rxq(pi, q, nm_rxq) {
- snprintf(s, sizeof(s), "%d-%d", p, q);
- rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr,
- nm_rxq, s);
- if (rc != 0)
- return (rc);
- irq++;
- rid++;
- }
}
-#endif
}
MPASS(irq == &sc->irq[sc->intr_count]);
@@ -3501,10 +3737,10 @@ hashen_to_hashconfig(int hashen)
#endif
int
-port_full_init(struct port_info *pi)
+vi_full_init(struct vi_info *vi)
{
- struct adapter *sc = pi->adapter;
- struct ifnet *ifp = pi->ifp;
+ struct adapter *sc = vi->pi->adapter;
+ struct ifnet *ifp = vi->ifp;
uint16_t *rss;
struct sge_rxq *rxq;
int rc, i, j, hashen;
@@ -3517,36 +3753,36 @@ port_full_init(struct port_info *pi)
#endif
ASSERT_SYNCHRONIZED_OP(sc);
- KASSERT((pi->flags & PORT_INIT_DONE) == 0,
- ("%s: PORT_INIT_DONE already", __func__));
+ KASSERT((vi->flags & VI_INIT_DONE) == 0,
+ ("%s: VI_INIT_DONE already", __func__));
- sysctl_ctx_init(&pi->ctx);
- pi->flags |= PORT_SYSCTL_CTX;
+ sysctl_ctx_init(&vi->ctx);
+ vi->flags |= VI_SYSCTL_CTX;
/*
- * Allocate tx/rx/fl queues for this port.
+ * Allocate tx/rx/fl queues for this VI.
*/
- rc = t4_setup_port_queues(pi);
+ rc = t4_setup_vi_queues(vi);
if (rc != 0)
goto done; /* error message displayed already */
/*
- * Setup RSS for this port. Save a copy of the RSS table for later use.
+ * Setup RSS for this VI. Save a copy of the RSS table for later use.
*/
- if (pi->nrxq > pi->rss_size) {
+ if (vi->nrxq > vi->rss_size) {
if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
- "some queues will never receive traffic.\n", pi->nrxq,
- pi->rss_size);
- } else if (pi->rss_size % pi->nrxq) {
+ "some queues will never receive traffic.\n", vi->nrxq,
+ vi->rss_size);
+ } else if (vi->rss_size % vi->nrxq) {
if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
- "expect uneven traffic distribution.\n", pi->nrxq,
- pi->rss_size);
+ "expect uneven traffic distribution.\n", vi->nrxq,
+ vi->rss_size);
}
#ifdef RSS
MPASS(RSS_KEYSIZE == 40);
- if (pi->nrxq != nbuckets) {
+ if (vi->nrxq != nbuckets) {
if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);"
- "performance will be impacted.\n", pi->nrxq, nbuckets);
+ "performance will be impacted.\n", vi->nrxq, nbuckets);
}
rss_getkey((void *)&raw_rss_key[0]);
@@ -3555,24 +3791,24 @@ port_full_init(struct port_info *pi)
}
t4_write_rss_key(sc, (void *)&rss_key[0], -1);
#endif
- rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
- for (i = 0; i < pi->rss_size;) {
+ rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
+ for (i = 0; i < vi->rss_size;) {
#ifdef RSS
j = rss_get_indirection_to_bucket(i);
- j %= pi->nrxq;
- rxq = &sc->sge.rxq[pi->first_rxq + j];
+ j %= vi->nrxq;
+ rxq = &sc->sge.rxq[vi->first_rxq + j];
rss[i++] = rxq->iq.abs_id;
#else
- for_each_rxq(pi, j, rxq) {
+ for_each_rxq(vi, j, rxq) {
rss[i++] = rxq->iq.abs_id;
- if (i == pi->rss_size)
+ if (i == vi->rss_size)
break;
}
#endif
}
- rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
- pi->rss_size);
+ rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
+ vi->rss_size);
if (rc != 0) {
if_printf(ifp, "rss_config failed: %d\n", rc);
goto done;
@@ -3620,17 +3856,17 @@ port_full_init(struct port_info *pi)
F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
#endif
- rc = -t4_config_vi_rss(sc, sc->mbox, pi->viid, hashen, rss[0]);
+ rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0]);
if (rc != 0) {
if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
goto done;
}
- pi->rss = rss;
- pi->flags |= PORT_INIT_DONE;
+ vi->rss = rss;
+ vi->flags |= VI_INIT_DONE;
done:
if (rc != 0)
- port_full_uninit(pi);
+ vi_full_uninit(vi);
return (rc);
}
@@ -3639,8 +3875,9 @@ done:
* Idempotent.
*/
int
-port_full_uninit(struct port_info *pi)
+vi_full_uninit(struct vi_info *vi)
{
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
int i;
struct sge_rxq *rxq;
@@ -3650,38 +3887,41 @@ port_full_uninit(struct port_info *pi)
struct sge_wrq *ofld_txq;
#endif
- if (pi->flags & PORT_INIT_DONE) {
+ if (vi->flags & VI_INIT_DONE) {
/* Need to quiesce queues. */
- quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
+ /* XXX: Only for the first VI? */
+ if (IS_MAIN_VI(vi))
+ quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
- for_each_txq(pi, i, txq) {
+ for_each_txq(vi, i, txq) {
quiesce_txq(sc, txq);
}
#ifdef TCP_OFFLOAD
- for_each_ofld_txq(pi, i, ofld_txq) {
+ for_each_ofld_txq(vi, i, ofld_txq) {
quiesce_wrq(sc, ofld_txq);
}
#endif
- for_each_rxq(pi, i, rxq) {
+ for_each_rxq(vi, i, rxq) {
quiesce_iq(sc, &rxq->iq);
quiesce_fl(sc, &rxq->fl);
}
#ifdef TCP_OFFLOAD
- for_each_ofld_rxq(pi, i, ofld_rxq) {
+ for_each_ofld_rxq(vi, i, ofld_rxq) {
quiesce_iq(sc, &ofld_rxq->iq);
quiesce_fl(sc, &ofld_rxq->fl);
}
#endif
- free(pi->rss, M_CXGBE);
+ free(vi->rss, M_CXGBE);
+ free(vi->nm_rss, M_CXGBE);
}
- t4_teardown_port_queues(pi);
- pi->flags &= ~PORT_INIT_DONE;
+ t4_teardown_vi_queues(vi);
+ vi->flags &= ~VI_INIT_DONE;
return (0);
}
@@ -3739,9 +3979,9 @@ quiesce_fl(struct adapter *sc, struct sge_fl *fl)
FL_LOCK(fl);
fl->flags |= FL_DOOMED;
FL_UNLOCK(fl);
+ callout_stop(&sc->sfl_callout);
mtx_unlock(&sc->sfl_lock);
- callout_drain(&sc->sfl_callout);
KASSERT((fl->flags & FL_STARVING) == 0,
("%s: still starving", __func__));
}
@@ -4474,10 +4714,127 @@ t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
+#define A_PL_INDIR_CMD 0x1f8
+
+#define S_PL_AUTOINC 31
+#define M_PL_AUTOINC 0x1U
+#define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
+#define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
+
+#define S_PL_VFID 20
+#define M_PL_VFID 0xffU
+#define V_PL_VFID(x) ((x) << S_PL_VFID)
+#define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
+
+#define S_PL_ADDR 0
+#define M_PL_ADDR 0xfffffU
+#define V_PL_ADDR(x) ((x) << S_PL_ADDR)
+#define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
+
+#define A_PL_INDIR_DATA 0x1fc
+
+static uint64_t
+read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
+{
+ u32 stats[2];
+
+ mtx_assert(&sc->regwin_lock, MA_OWNED);
+ t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
+ V_PL_VFID(G_FW_VIID_VIN(viid)) | V_PL_ADDR(VF_MPS_REG(reg)));
+ stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
+ stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
+ return (((uint64_t)stats[1]) << 32 | stats[0]);
+}
+
+static void
+t4_get_vi_stats(struct adapter *sc, unsigned int viid,
+ struct fw_vi_stats_vf *stats)
+{
+
+#define GET_STAT(name) \
+ read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)
+
+ stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
+ stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
+ stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
+ stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
+ stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
+ stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
+ stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
+ stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
+ stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
+ stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
+ stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
+ stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
+ stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
+ stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
+ stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
+ stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
+
+#undef GET_STAT
+}
+
+static void
+t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
+{
+ int reg;
+
+ t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
+ V_PL_VFID(G_FW_VIID_VIN(viid)) |
+ V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
+ for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
+ reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
+ t4_write_reg(sc, A_PL_INDIR_DATA, 0);
+}
+
+static void
+vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
+{
+ struct ifnet *ifp = vi->ifp;
+ struct sge_txq *txq;
+ int i, drops;
+ struct fw_vi_stats_vf *s = &vi->stats;
+ struct timeval tv;
+ const struct timeval interval = {0, 250000}; /* 250ms */
+
+ if (!(vi->flags & VI_INIT_DONE))
+ return;
+
+ getmicrotime(&tv);
+ timevalsub(&tv, &interval);
+ if (timevalcmp(&tv, &vi->last_refreshed, <))
+ return;
+
+ mtx_lock(&sc->regwin_lock);
+ t4_get_vi_stats(sc, vi->viid, &vi->stats);
+
+ ifp->if_ipackets = s->rx_bcast_frames + s->rx_mcast_frames +
+ s->rx_ucast_frames;
+ ifp->if_ierrors = s->rx_err_frames;
+ ifp->if_opackets = s->tx_bcast_frames + s->tx_mcast_frames +
+ s->tx_ucast_frames + s->tx_offload_frames;
+ ifp->if_oerrors = s->tx_drop_frames;
+ ifp->if_ibytes = s->rx_bcast_bytes + s->rx_mcast_bytes +
+ s->rx_ucast_bytes;
+ ifp->if_obytes = s->tx_bcast_bytes + s->tx_mcast_bytes +
+ s->tx_ucast_bytes + s->tx_offload_bytes;
+ ifp->if_imcasts = s->rx_mcast_frames;
+ ifp->if_omcasts = s->tx_mcast_frames;
+
+ drops = 0;
+ for_each_txq(vi, i, txq)
+ drops += counter_u64_fetch(txq->r->drops);
+ ifp->if_snd.ifq_drops = drops;
+
+ getmicrotime(&vi->last_refreshed);
+ mtx_unlock(&sc->regwin_lock);
+}
+
static void
cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
{
- struct ifnet *ifp = pi->ifp;
+ struct vi_info *vi = &pi->vi[0];
+ struct ifnet *ifp = vi->ifp;
struct sge_txq *txq;
int i, drops;
struct port_stats *s = &pi->stats;
@@ -4513,7 +4870,7 @@ cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
}
drops = s->tx_drop;
- for_each_txq(pi, i, txq)
+ for_each_txq(vi, i, txq)
drops += counter_u64_fetch(txq->r->drops);
ifp->if_snd.ifq_drops = drops;
@@ -4529,18 +4886,22 @@ cxgbe_tick(void *arg)
{
struct port_info *pi = arg;
struct adapter *sc = pi->adapter;
- struct ifnet *ifp = pi->ifp;
-
- PORT_LOCK(pi);
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- PORT_UNLOCK(pi);
- return; /* without scheduling another callout */
- }
+ PORT_LOCK_ASSERT_OWNED(pi);
cxgbe_refresh_stats(sc, pi);
callout_schedule(&pi->tick, hz);
- PORT_UNLOCK(pi);
+}
+
+void
+vi_tick(void *arg)
+{
+ struct vi_info *vi = arg;
+ struct adapter *sc = vi->pi->adapter;
+
+ vi_refresh_stats(sc, vi);
+
+ callout_schedule(&vi->tick, hz);
}
static void
@@ -4647,7 +5008,7 @@ t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
return (0);
}
-static int
+static void
t4_sysctls(struct adapter *sc)
{
struct sysctl_ctx_list *ctx;
@@ -4952,91 +5313,112 @@ t4_sysctls(struct adapter *sc)
CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
}
#endif
-
-
- return (0);
}
-static int
-cxgbe_sysctls(struct port_info *pi)
+void
+vi_sysctls(struct vi_info *vi)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid *oid;
struct sysctl_oid_list *children;
- struct adapter *sc = pi->adapter;
- ctx = device_get_sysctl_ctx(pi->dev);
+ ctx = device_get_sysctl_ctx(vi->dev);
/*
- * dev.cxgbe.X.
+ * dev.v?(cxgbe|cxl).X.
*/
- oid = device_get_sysctl_tree(pi->dev);
+ oid = device_get_sysctl_tree(vi->dev);
children = SYSCTL_CHILDREN(oid);
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
- CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
- if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
- CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
- "PHY temperature (in Celsius)");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
- CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
- "PHY firmware version");
- }
+ SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
+ vi->viid, "VI identifer");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
- &pi->nrxq, 0, "# of rx queues");
+ &vi->nrxq, 0, "# of rx queues");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
- &pi->ntxq, 0, "# of tx queues");
+ &vi->ntxq, 0, "# of tx queues");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
- &pi->first_rxq, 0, "index of first rx queue");
+ &vi->first_rxq, 0, "index of first rx queue");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
- &pi->first_txq, 0, "index of first tx queue");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
- CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
- "Reserve queue 0 for non-flowid packets");
+ &vi->first_txq, 0, "index of first tx queue");
+
+ if (IS_MAIN_VI(vi)) {
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
+ CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
+ "Reserve queue 0 for non-flowid packets");
+ }
#ifdef TCP_OFFLOAD
- if (is_offload(sc)) {
+ if (vi->nofldrxq != 0) {
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
- &pi->nofldrxq, 0,
+ &vi->nofldrxq, 0,
"# of rx queues for offloaded TCP connections");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
- &pi->nofldtxq, 0,
+ &vi->nofldtxq, 0,
"# of tx queues for offloaded TCP connections");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
- CTLFLAG_RD, &pi->first_ofld_rxq, 0,
+ CTLFLAG_RD, &vi->first_ofld_rxq, 0,
"index of first TOE rx queue");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
- CTLFLAG_RD, &pi->first_ofld_txq, 0,
+ CTLFLAG_RD, &vi->first_ofld_txq, 0,
"index of first TOE tx queue");
}
#endif
#ifdef DEV_NETMAP
- SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
- &pi->nnmrxq, 0, "# of rx queues for netmap");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
- &pi->nnmtxq, 0, "# of tx queues for netmap");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
- CTLFLAG_RD, &pi->first_nm_rxq, 0,
- "index of first netmap rx queue");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
- CTLFLAG_RD, &pi->first_nm_txq, 0,
- "index of first netmap tx queue");
+ if (vi->nnmrxq != 0) {
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
+ &vi->nnmrxq, 0, "# of netmap rx queues");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
+ &vi->nnmtxq, 0, "# of netmap tx queues");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
+ CTLFLAG_RD, &vi->first_nm_rxq, 0,
+ "index of first netmap rx queue");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
+ CTLFLAG_RD, &vi->first_nm_txq, 0,
+ "index of first netmap tx queue");
+ }
#endif
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
- CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
+ CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
"holdoff timer index");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
- CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
+ CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
"holdoff packet counter index");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
- CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
+ CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
"rx queue size");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
- CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
+ CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
"tx queue size");
+}
+
+static void
+cxgbe_sysctls(struct port_info *pi)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children;
+ struct adapter *sc = pi->adapter;
+
+ ctx = device_get_sysctl_ctx(pi->dev);
+
+ /*
+ * dev.cxgbe.X.
+ */
+ oid = device_get_sysctl_tree(pi->dev);
+ children = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
+ CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
+ if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
+ CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
+ "PHY temperature (in Celsius)");
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
+ CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
+ "PHY firmware version");
+ }
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings,
@@ -5200,8 +5582,6 @@ cxgbe_sysctls(struct port_info *pi)
"# of buffer-group 3 truncated packets");
#undef SYSCTL_ADD_T4_PORTSTAT
-
- return (0);
}
static int
@@ -5253,7 +5633,7 @@ sysctl_btphy(SYSCTL_HANDLER_ARGS)
u_int v;
int rc;
- rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
+ rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
if (rc)
return (rc);
/* XXX: magic numbers */
@@ -5272,18 +5652,18 @@ sysctl_btphy(SYSCTL_HANDLER_ARGS)
static int
sysctl_noflowq(SYSCTL_HANDLER_ARGS)
{
- struct port_info *pi = arg1;
+ struct vi_info *vi = arg1;
int rc, val;
- val = pi->rsrv_noflowq;
+ val = vi->rsrv_noflowq;
rc = sysctl_handle_int(oidp, &val, 0, req);
if (rc != 0 || req->newptr == NULL)
return (rc);
- if ((val >= 1) && (pi->ntxq > 1))
- pi->rsrv_noflowq = 1;
+ if ((val >= 1) && (vi->ntxq > 1))
+ vi->rsrv_noflowq = 1;
else
- pi->rsrv_noflowq = 0;
+ vi->rsrv_noflowq = 0;
return (rc);
}
@@ -5291,8 +5671,8 @@ sysctl_noflowq(SYSCTL_HANDLER_ARGS)
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
- struct port_info *pi = arg1;
- struct adapter *sc = pi->adapter;
+ struct vi_info *vi = arg1;
+ struct adapter *sc = vi->pi->adapter;
int idx, rc, i;
struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
@@ -5300,7 +5680,7 @@ sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
#endif
uint8_t v;
- idx = pi->tmr_idx;
+ idx = vi->tmr_idx;
rc = sysctl_handle_int(oidp, &idx, 0, req);
if (rc != 0 || req->newptr == NULL)
@@ -5309,13 +5689,13 @@ sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
if (idx < 0 || idx >= SGE_NTIMERS)
return (EINVAL);
- rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+ rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
"t4tmr");
if (rc)
return (rc);
- v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
- for_each_rxq(pi, i, rxq) {
+ v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
+ for_each_rxq(vi, i, rxq) {
#ifdef atomic_store_rel_8
atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
@@ -5323,7 +5703,7 @@ sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
#endif
}
#ifdef TCP_OFFLOAD
- for_each_ofld_rxq(pi, i, ofld_rxq) {
+ for_each_ofld_rxq(vi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
@@ -5331,7 +5711,7 @@ sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
#endif
}
#endif
- pi->tmr_idx = idx;
+ vi->tmr_idx = idx;
end_synchronized_op(sc, LOCK_HELD);
return (0);
@@ -5340,11 +5720,11 @@ sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
static int
sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
{
- struct port_info *pi = arg1;
- struct adapter *sc = pi->adapter;
+ struct vi_info *vi = arg1;
+ struct adapter *sc = vi->pi->adapter;
int idx, rc;
- idx = pi->pktc_idx;
+ idx = vi->pktc_idx;
rc = sysctl_handle_int(oidp, &idx, 0, req);
if (rc != 0 || req->newptr == NULL)
@@ -5353,15 +5733,15 @@ sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
if (idx < -1 || idx >= SGE_NCOUNTERS)
return (EINVAL);
- rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+ rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
"t4pktc");
if (rc)
return (rc);
- if (pi->flags & PORT_INIT_DONE)
+ if (vi->flags & VI_INIT_DONE)
rc = EBUSY; /* cannot be changed once the queues are created */
else
- pi->pktc_idx = idx;
+ vi->pktc_idx = idx;
end_synchronized_op(sc, LOCK_HELD);
return (rc);
@@ -5370,11 +5750,11 @@ sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
static int
sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
{
- struct port_info *pi = arg1;
- struct adapter *sc = pi->adapter;
+ struct vi_info *vi = arg1;
+ struct adapter *sc = vi->pi->adapter;
int qsize, rc;
- qsize = pi->qsize_rxq;
+ qsize = vi->qsize_rxq;
rc = sysctl_handle_int(oidp, &qsize, 0, req);
if (rc != 0 || req->newptr == NULL)
@@ -5383,15 +5763,15 @@ sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
if (qsize < 128 || (qsize & 7))
return (EINVAL);
- rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+ rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
"t4rxqs");
if (rc)
return (rc);
- if (pi->flags & PORT_INIT_DONE)
+ if (vi->flags & VI_INIT_DONE)
rc = EBUSY; /* cannot be changed once the queues are created */
else
- pi->qsize_rxq = qsize;
+ vi->qsize_rxq = qsize;
end_synchronized_op(sc, LOCK_HELD);
return (rc);
@@ -5400,11 +5780,11 @@ sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
static int
sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
{
- struct port_info *pi = arg1;
- struct adapter *sc = pi->adapter;
+ struct vi_info *vi = arg1;
+ struct adapter *sc = vi->pi->adapter;
int qsize, rc;
- qsize = pi->qsize_txq;
+ qsize = vi->qsize_txq;
rc = sysctl_handle_int(oidp, &qsize, 0, req);
if (rc != 0 || req->newptr == NULL)
@@ -5413,15 +5793,15 @@ sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
if (qsize < 128 || qsize > 65536)
return (EINVAL);
- rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+ rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
"t4txqs");
if (rc)
return (rc);
- if (pi->flags & PORT_INIT_DONE)
+ if (vi->flags & VI_INIT_DONE)
rc = EBUSY; /* cannot be changed once the queues are created */
else
- pi->qsize_txq = qsize;
+ vi->qsize_txq = qsize;
end_synchronized_op(sc, LOCK_HELD);
return (rc);
@@ -5469,7 +5849,8 @@ sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
if (n & ~(PAUSE_TX | PAUSE_RX))
return (EINVAL); /* some other bit is set too */
- rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4PAUSE");
+ rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
+ "t4PAUSE");
if (rc)
return (rc);
if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
@@ -7985,6 +8366,7 @@ static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
struct port_info *pi = NULL;
+ struct vi_info *vi;
struct sge_txq *txq;
uint32_t fw_mnem, fw_queue, fw_class;
int i, rc;
@@ -8003,8 +8385,10 @@ set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
goto done;
}
+ /* XXX: Only supported for the main VI. */
pi = sc->port[p->port];
- if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
+ vi = &pi->vi[0];
+ if (!in_range(p->queue, 0, vi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
rc = EINVAL;
goto done;
}
@@ -8022,7 +8406,7 @@ set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
* on a single specified TX queue.
*/
if (p->queue >= 0) {
- txq = &sc->sge.txq[pi->first_txq + p->queue];
+ txq = &sc->sge.txq[vi->first_txq + p->queue];
fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
&fw_class);
@@ -8033,7 +8417,7 @@ set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
* Change the scheduling on all the TX queues for the
* interface.
*/
- for_each_txq(pi, i, txq) {
+ for_each_txq(vi, i, txq) {
fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
&fw_class);
@@ -8085,26 +8469,29 @@ void
t4_os_portmod_changed(const struct adapter *sc, int idx)
{
struct port_info *pi = sc->port[idx];
+ struct vi_info *vi;
+ struct ifnet *ifp;
+ int v;
static const char *mod_str[] = {
NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
};
- build_medialist(pi, &pi->media);
-#ifdef DEV_NETMAP
- build_medialist(pi, &pi->nm_media);
-#endif
+ for_each_vi(pi, v, vi) {
+ build_medialist(pi, &vi->media);
+ }
+ ifp = pi->vi[0].ifp;
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
- if_printf(pi->ifp, "transceiver unplugged.\n");
+ if_printf(ifp, "transceiver unplugged.\n");
else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
- if_printf(pi->ifp, "unknown transceiver inserted.\n");
+ if_printf(ifp, "unknown transceiver inserted.\n");
else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
- if_printf(pi->ifp, "unsupported transceiver inserted.\n");
+ if_printf(ifp, "unsupported transceiver inserted.\n");
else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
- if_printf(pi->ifp, "%s transceiver inserted.\n",
+ if_printf(ifp, "%s transceiver inserted.\n",
mod_str[pi->mod_type]);
} else {
- if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
+ if_printf(ifp, "transceiver (type %d) inserted.\n",
pi->mod_type);
}
}
@@ -8113,16 +8500,27 @@ void
t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
{
struct port_info *pi = sc->port[idx];
- struct ifnet *ifp = pi->ifp;
+ struct vi_info *vi;
+ struct ifnet *ifp;
+ int v;
- if (link_stat) {
+ if (link_stat)
pi->linkdnrc = -1;
- ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
- if_link_state_change(ifp, LINK_STATE_UP);
- } else {
+ else {
if (reason >= 0)
pi->linkdnrc = reason;
- if_link_state_change(ifp, LINK_STATE_DOWN);
+ }
+ for_each_vi(pi, v, vi) {
+ ifp = vi->ifp;
+ if (ifp == NULL)
+ continue;
+
+ if (link_stat) {
+ ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
+ if_link_state_change(ifp, LINK_STATE_UP);
+ } else {
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ }
}
}
@@ -8242,9 +8640,10 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
rc = read_i2c(sc, (struct t4_i2c_data *)data);
break;
case CHELSIO_T4_CLEAR_STATS: {
- int i;
+ int i, v;
u_int port_id = *(uint32_t *)data;
struct port_info *pi;
+ struct vi_info *vi;
if (port_id >= sc->params.nports)
return (EINVAL);
@@ -8253,46 +8652,61 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
/* MAC stats */
t4_clr_port_stats(sc, pi->tx_chan);
pi->tx_parse_error = 0;
+ mtx_lock(&sc->regwin_lock);
+ for_each_vi(pi, v, vi) {
+ if (vi->flags & VI_INIT_DONE)
+ t4_clr_vi_stats(sc, vi->viid);
+ }
+ mtx_unlock(&sc->regwin_lock);
- if (pi->flags & PORT_INIT_DONE) {
- struct sge_rxq *rxq;
- struct sge_txq *txq;
- struct sge_wrq *wrq;
+ /*
+ * Since this command accepts a port, clear stats for
+ * all VIs on this port.
+ */
+ for_each_vi(pi, v, vi) {
+ if (vi->flags & VI_INIT_DONE) {
+ struct sge_rxq *rxq;
+ struct sge_txq *txq;
+ struct sge_wrq *wrq;
- for_each_rxq(pi, i, rxq) {
+ for_each_rxq(vi, i, rxq) {
#if defined(INET) || defined(INET6)
- rxq->lro.lro_queued = 0;
- rxq->lro.lro_flushed = 0;
+ rxq->lro.lro_queued = 0;
+ rxq->lro.lro_flushed = 0;
#endif
- rxq->rxcsum = 0;
- rxq->vlan_extraction = 0;
- }
+ rxq->rxcsum = 0;
+ rxq->vlan_extraction = 0;
+ }
- for_each_txq(pi, i, txq) {
- txq->txcsum = 0;
- txq->tso_wrs = 0;
- txq->vlan_insertion = 0;
- txq->imm_wrs = 0;
- txq->sgl_wrs = 0;
- txq->txpkt_wrs = 0;
- txq->txpkts0_wrs = 0;
- txq->txpkts1_wrs = 0;
- txq->txpkts0_pkts = 0;
- txq->txpkts1_pkts = 0;
- mp_ring_reset_stats(txq->r);
- }
+ for_each_txq(vi, i, txq) {
+ txq->txcsum = 0;
+ txq->tso_wrs = 0;
+ txq->vlan_insertion = 0;
+ txq->imm_wrs = 0;
+ txq->sgl_wrs = 0;
+ txq->txpkt_wrs = 0;
+ txq->txpkts0_wrs = 0;
+ txq->txpkts1_wrs = 0;
+ txq->txpkts0_pkts = 0;
+ txq->txpkts1_pkts = 0;
+ mp_ring_reset_stats(txq->r);
+ }
#ifdef TCP_OFFLOAD
- /* nothing to clear for each ofld_rxq */
+ /* nothing to clear for each ofld_rxq */
- for_each_ofld_txq(pi, i, wrq) {
- wrq->tx_wrs_direct = 0;
- wrq->tx_wrs_copied = 0;
- }
+ for_each_ofld_txq(vi, i, wrq) {
+ wrq->tx_wrs_direct = 0;
+ wrq->tx_wrs_copied = 0;
+ }
#endif
- wrq = &sc->sge.ctrlq[pi->port_id];
- wrq->tx_wrs_direct = 0;
- wrq->tx_wrs_copied = 0;
+
+ if (IS_MAIN_VI(vi)) {
+ wrq = &sc->sge.ctrlq[pi->port_id];
+ wrq->tx_wrs_direct = 0;
+ wrq->tx_wrs_copied = 0;
+ }
+ }
}
break;
}
@@ -8320,8 +8734,8 @@ void
t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
const unsigned int *pgsz_order)
{
- struct port_info *pi = ifp->if_softc;
- struct adapter *sc = pi->adapter;
+ struct vi_info *vi = ifp->if_softc;
+ struct adapter *sc = vi->pi->adapter;
t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
@@ -8330,9 +8744,10 @@ t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
}
static int
-toe_capability(struct port_info *pi, int enable)
+toe_capability(struct vi_info *vi, int enable)
{
int rc;
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
ASSERT_SYNCHRONIZED_OP(sc);
@@ -8341,19 +8756,32 @@ toe_capability(struct port_info *pi, int enable)
return (ENODEV);
if (enable) {
+ if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
+ /* TOE is already enabled. */
+ return (0);
+ }
+
/*
* We need the port's queues around so that we're able to send
* and receive CPLs to/from the TOE even if the ifnet for this
* port has never been UP'd administratively.
*/
- if (!(pi->flags & PORT_INIT_DONE)) {
- rc = cxgbe_init_synchronized(pi);
+ if (!(vi->flags & VI_INIT_DONE)) {
+ rc = vi_full_init(vi);
+ if (rc)
+ return (rc);
+ }
+ if (!(pi->vi[0].flags & VI_INIT_DONE)) {
+ rc = vi_full_init(&pi->vi[0]);
if (rc)
return (rc);
}
- if (isset(&sc->offload_map, pi->port_id))
+ if (isset(&sc->offload_map, pi->port_id)) {
+ /* TOE is enabled on another VI of this port. */
+ pi->uld_vis++;
return (0);
+ }
if (!uld_active(sc, ULD_TOM)) {
rc = t4_activate_uld(sc, ULD_TOM);
@@ -8376,9 +8804,12 @@ toe_capability(struct port_info *pi, int enable)
if (!uld_active(sc, ULD_ISCSI))
(void) t4_activate_uld(sc, ULD_ISCSI);
+ pi->uld_vis++;
setbit(&sc->offload_map, pi->port_id);
} else {
- if (!isset(&sc->offload_map, pi->port_id))
+ pi->uld_vis--;
+
+ if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
return (0);
KASSERT(uld_active(sc, ULD_TOM),
@@ -8540,6 +8971,9 @@ tweak_tunables(void)
#endif
}
+ if (t4_ntxq_vi < 1)
+ t4_ntxq_vi = min(nc, NTXQ_VI);
+
if (t4_nrxq10g < 1) {
#ifdef RSS
t4_nrxq10g = rss_getnumbuckets();
@@ -8557,6 +8991,9 @@ tweak_tunables(void)
#endif
}
+ if (t4_nrxq_vi < 1)
+ t4_nrxq_vi = min(nc, NRXQ_VI);
+
#ifdef TCP_OFFLOAD
if (t4_nofldtxq10g < 1)
t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
@@ -8564,12 +9001,18 @@ tweak_tunables(void)
if (t4_nofldtxq1g < 1)
t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
+ if (t4_nofldtxq_vi < 1)
+ t4_nofldtxq_vi = min(nc, NOFLDTXQ_VI);
+
if (t4_nofldrxq10g < 1)
t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
if (t4_nofldrxq1g < 1)
t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
+ if (t4_nofldrxq_vi < 1)
+ t4_nofldrxq_vi = min(nc, NOFLDRXQ_VI);
+
if (t4_toecaps_allowed == -1)
t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
@@ -8578,17 +9021,11 @@ tweak_tunables(void)
#endif
#ifdef DEV_NETMAP
- if (t4_nnmtxq10g < 1)
- t4_nnmtxq10g = min(nc, NNMTXQ_10G);
-
- if (t4_nnmtxq1g < 1)
- t4_nnmtxq1g = min(nc, NNMTXQ_1G);
+ if (t4_nnmtxq_vi < 1)
+ t4_nnmtxq_vi = min(nc, NNMTXQ_VI);
- if (t4_nnmrxq10g < 1)
- t4_nnmrxq10g = min(nc, NNMRXQ_10G);
-
- if (t4_nnmrxq1g < 1)
- t4_nnmrxq1g = min(nc, NNMRXQ_1G);
+ if (t4_nnmrxq_vi < 1)
+ t4_nnmrxq_vi = min(nc, NNMRXQ_VI);
#endif
if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
@@ -8694,6 +9131,7 @@ done_unload:
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;
+static devclass_t vcxgbe_devclass, vcxl_devclass;
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
@@ -8708,3 +9146,9 @@ MODULE_VERSION(cxgbe, 1);
DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);
+
+DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
+MODULE_VERSION(vcxgbe, 1);
+
+DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
+MODULE_VERSION(vcxl, 1);
diff --git a/sys/dev/cxgbe/t4_netmap.c b/sys/dev/cxgbe/t4_netmap.c
index f54a67f..80ec2f9 100644
--- a/sys/dev/cxgbe/t4_netmap.c
+++ b/sys/dev/cxgbe/t4_netmap.c
@@ -33,10 +33,11 @@ __FBSDID("$FreeBSD$");
#ifdef DEV_NETMAP
#include <sys/param.h>
+#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
-#include <sys/types.h>
#include <sys/mbuf.h>
+#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
@@ -86,187 +87,20 @@ SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
static int nm_cong_drop = 1;
TUNABLE_INT("hw.cxgbe.nm_cong_drop", &nm_cong_drop);
-/* netmap ifnet routines */
-static void cxgbe_nm_init(void *);
-static int cxgbe_nm_ioctl(struct ifnet *, unsigned long, caddr_t);
-static int cxgbe_nm_transmit(struct ifnet *, struct mbuf *);
-static void cxgbe_nm_qflush(struct ifnet *);
-
-static int cxgbe_nm_init_synchronized(struct port_info *);
-static int cxgbe_nm_uninit_synchronized(struct port_info *);
-
-static void
-cxgbe_nm_init(void *arg)
-{
- struct port_info *pi = arg;
- struct adapter *sc = pi->adapter;
-
- if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nminit") != 0)
- return;
- cxgbe_nm_init_synchronized(pi);
- end_synchronized_op(sc, 0);
-
- return;
-}
-
-static int
-cxgbe_nm_init_synchronized(struct port_info *pi)
-{
- struct adapter *sc = pi->adapter;
- struct ifnet *ifp = pi->nm_ifp;
- int rc = 0;
-
- ASSERT_SYNCHRONIZED_OP(sc);
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- return (0); /* already running */
-
- if (!(sc->flags & FULL_INIT_DONE) &&
- ((rc = adapter_full_init(sc)) != 0))
- return (rc); /* error message displayed already */
-
- if (!(pi->flags & PORT_INIT_DONE) &&
- ((rc = port_full_init(pi)) != 0))
- return (rc); /* error message displayed already */
-
- rc = update_mac_settings(ifp, XGMAC_ALL);
- if (rc)
- return (rc); /* error message displayed already */
-
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
-
- return (rc);
-}
-
-static int
-cxgbe_nm_uninit_synchronized(struct port_info *pi)
-{
-#ifdef INVARIANTS
- struct adapter *sc = pi->adapter;
-#endif
- struct ifnet *ifp = pi->nm_ifp;
-
- ASSERT_SYNCHRONIZED_OP(sc);
-
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
-
- return (0);
-}
-
-static int
-cxgbe_nm_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
-{
- int rc = 0, mtu, flags;
- struct port_info *pi = ifp->if_softc;
- struct adapter *sc = pi->adapter;
- struct ifreq *ifr = (struct ifreq *)data;
- uint32_t mask;
-
- MPASS(pi->nm_ifp == ifp);
-
- switch (cmd) {
- case SIOCSIFMTU:
- mtu = ifr->ifr_mtu;
- if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
- return (EINVAL);
-
- rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmtu");
- if (rc)
- return (rc);
- ifp->if_mtu = mtu;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- rc = update_mac_settings(ifp, XGMAC_MTU);
- end_synchronized_op(sc, 0);
- break;
-
- case SIOCSIFFLAGS:
- rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nflg");
- if (rc)
- return (rc);
-
- if (ifp->if_flags & IFF_UP) {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- flags = pi->nmif_flags;
- if ((ifp->if_flags ^ flags) &
- (IFF_PROMISC | IFF_ALLMULTI)) {
- rc = update_mac_settings(ifp,
- XGMAC_PROMISC | XGMAC_ALLMULTI);
- }
- } else
- rc = cxgbe_nm_init_synchronized(pi);
- pi->nmif_flags = ifp->if_flags;
- } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- rc = cxgbe_nm_uninit_synchronized(pi);
- end_synchronized_op(sc, 0);
- break;
-
- case SIOCADDMULTI:
- case SIOCDELMULTI: /* these two are called with a mutex held :-( */
- rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4nmulti");
- if (rc)
- return (rc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- rc = update_mac_settings(ifp, XGMAC_MCADDRS);
- end_synchronized_op(sc, LOCK_HELD);
- break;
-
- case SIOCSIFCAP:
- mask = ifr->ifr_reqcap ^ ifp->if_capenable;
- if (mask & IFCAP_TXCSUM) {
- ifp->if_capenable ^= IFCAP_TXCSUM;
- ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
- }
- if (mask & IFCAP_TXCSUM_IPV6) {
- ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
- ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
- }
- if (mask & IFCAP_RXCSUM)
- ifp->if_capenable ^= IFCAP_RXCSUM;
- if (mask & IFCAP_RXCSUM_IPV6)
- ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
- break;
-
- case SIOCSIFMEDIA:
- case SIOCGIFMEDIA:
- ifmedia_ioctl(ifp, ifr, &pi->nm_media, cmd);
- break;
-
- default:
- rc = ether_ioctl(ifp, cmd, data);
- }
-
- return (rc);
-}
-
-static int
-cxgbe_nm_transmit(struct ifnet *ifp, struct mbuf *m)
-{
-
- m_freem(m);
- return (0);
-}
-
-static void
-cxgbe_nm_qflush(struct ifnet *ifp)
-{
-
- return;
-}
-
static int
-alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int cong)
+alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
{
int rc, cntxt_id, i;
__be32 v;
- struct adapter *sc = pi->adapter;
- struct netmap_adapter *na = NA(pi->nm_ifp);
+ struct adapter *sc = vi->pi->adapter;
+ struct netmap_adapter *na = NA(vi->ifp);
struct fw_iq_cmd c;
MPASS(na != NULL);
MPASS(nm_rxq->iq_desc != NULL);
MPASS(nm_rxq->fl_desc != NULL);
- bzero(nm_rxq->iq_desc, pi->qsize_rxq * IQ_ESIZE);
+ bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + spg_len);
bzero(&c, sizeof(c));
@@ -275,7 +109,7 @@ alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int cong)
V_FW_IQ_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
FW_LEN16(c));
- if (pi->flags & INTR_NM_RXQ) {
+ if (vi->flags & INTR_RXQ) {
KASSERT(nm_rxq->intr_idx < sc->intr_count,
("%s: invalid direct intr_idx %d", __func__,
nm_rxq->intr_idx));
@@ -287,13 +121,13 @@ alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int cong)
}
c.type_to_iqandstindex = htobe32(v |
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
- V_FW_IQ_CMD_VIID(pi->nm_viid) |
+ V_FW_IQ_CMD_VIID(vi->viid) |
V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
- c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
- c.iqsize = htobe16(pi->qsize_rxq);
+ c.iqsize = htobe16(vi->qsize_rxq);
c.iqaddr = htobe64(nm_rxq->iq_ba);
if (cong >= 0) {
c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
@@ -319,7 +153,7 @@ alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int cong)
}
nm_rxq->iq_cidx = 0;
- MPASS(nm_rxq->iq_sidx == pi->qsize_rxq - spg_len / IQ_ESIZE);
+ MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - spg_len / IQ_ESIZE);
nm_rxq->iq_gen = F_RSPD_GEN;
nm_rxq->iq_cntxt_id = be16toh(c.iqid);
nm_rxq->iq_abs_id = be16toh(c.physiqid);
@@ -380,9 +214,9 @@ alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int cong)
}
static int
-free_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
+free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
- struct adapter *sc = pi->adapter;
+ struct adapter *sc = vi->pi->adapter;
int rc;
rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
@@ -394,12 +228,12 @@ free_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
}
static int
-alloc_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
+alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
int rc, cntxt_id;
size_t len;
- struct adapter *sc = pi->adapter;
- struct netmap_adapter *na = NA(pi->nm_ifp);
+ struct adapter *sc = vi->pi->adapter;
+ struct netmap_adapter *na = NA(vi->ifp);
struct fw_eq_eth_cmd c;
MPASS(na != NULL);
@@ -415,10 +249,10 @@ alloc_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
- F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(pi->nm_viid));
+ F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
+ V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
@@ -427,7 +261,7 @@ alloc_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
if (rc != 0) {
- device_printf(pi->dev,
+ device_printf(vi->dev,
"failed to create netmap egress queue: %d\n", rc);
return (rc);
}
@@ -467,9 +301,9 @@ alloc_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
}
static int
-free_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
+free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
- struct adapter *sc = pi->adapter;
+ struct adapter *sc = vi->pi->adapter;
int rc;
rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
@@ -480,7 +314,7 @@ free_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
}
static int
-cxgbe_netmap_on(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
+cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
struct netmap_adapter *na)
{
struct netmap_slot *slot;
@@ -488,11 +322,10 @@ cxgbe_netmap_on(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
struct sge_nm_txq *nm_txq;
int rc, i, j, hwidx;
struct hw_buf_info *hwb;
- uint16_t *rss;
ASSERT_SYNCHRONIZED_OP(sc);
- if ((pi->flags & PORT_INIT_DONE) == 0 ||
+ if ((vi->flags & VI_INIT_DONE) == 0 ||
(ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
return (EAGAIN);
@@ -511,8 +344,10 @@ cxgbe_netmap_on(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
/* Must set caps before calling netmap_reset */
nm_set_native_flags(na);
- for_each_nm_rxq(pi, i, nm_rxq) {
- alloc_nm_rxq_hwq(pi, nm_rxq, tnl_cong(pi, nm_cong_drop));
+ for_each_nm_rxq(vi, i, nm_rxq) {
+ struct irq *irq = &sc->irq[vi->first_intr + i];
+
+ alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
nm_rxq->fl_hwidx = hwidx;
slot = netmap_reset(na, NR_RX, i, 0);
MPASS(slot != NULL); /* XXXNM: error check, not assert */
@@ -533,38 +368,37 @@ cxgbe_netmap_on(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
wmb();
t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
nm_rxq->fl_db_val | V_PIDX(j));
+
+ atomic_cmpset_int(&irq->nm_state, NM_OFF, NM_ON);
}
- for_each_nm_txq(pi, i, nm_txq) {
- alloc_nm_txq_hwq(pi, nm_txq);
+ for_each_nm_txq(vi, i, nm_txq) {
+ alloc_nm_txq_hwq(vi, nm_txq);
slot = netmap_reset(na, NR_TX, i, 0);
MPASS(slot != NULL); /* XXXNM: error check, not assert */
}
- rss = malloc(pi->nm_rss_size * sizeof (*rss), M_CXGBE, M_ZERO |
- M_WAITOK);
- for (i = 0; i < pi->nm_rss_size;) {
- for_each_nm_rxq(pi, j, nm_rxq) {
- rss[i++] = nm_rxq->iq_abs_id;
- if (i == pi->nm_rss_size)
+ if (vi->nm_rss == NULL) {
+ vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
+ M_ZERO | M_WAITOK);
+ }
+ for (i = 0; i < vi->rss_size;) {
+ for_each_nm_rxq(vi, j, nm_rxq) {
+ vi->nm_rss[i++] = nm_rxq->iq_abs_id;
+ if (i == vi->rss_size)
break;
}
}
- rc = -t4_config_rss_range(sc, sc->mbox, pi->nm_viid, 0, pi->nm_rss_size,
- rss, pi->nm_rss_size);
+ rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
+ vi->nm_rss, vi->rss_size);
if (rc != 0)
if_printf(ifp, "netmap rss_config failed: %d\n", rc);
- free(rss, M_CXGBE);
-
- rc = -t4_enable_vi(sc, sc->mbox, pi->nm_viid, true, true);
- if (rc != 0)
- if_printf(ifp, "netmap enable_vi failed: %d\n", rc);
return (rc);
}
static int
-cxgbe_netmap_off(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
+cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
struct netmap_adapter *na)
{
int rc, i;
@@ -573,12 +407,16 @@ cxgbe_netmap_off(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
ASSERT_SYNCHRONIZED_OP(sc);
- rc = -t4_enable_vi(sc, sc->mbox, pi->nm_viid, false, false);
+ if ((vi->flags & VI_INIT_DONE) == 0)
+ return (0);
+
+ rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
+ vi->rss, vi->rss_size);
if (rc != 0)
- if_printf(ifp, "netmap disable_vi failed: %d\n", rc);
+ if_printf(ifp, "failed to restore RSS config: %d\n", rc);
nm_clear_native_flags(na);
- for_each_nm_txq(pi, i, nm_txq) {
+ for_each_nm_txq(vi, i, nm_txq) {
struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
/* Wait for hw pidx to catch up ... */
@@ -589,10 +427,15 @@ cxgbe_netmap_off(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
while (spg->pidx != spg->cidx)
pause("nmcidx", 1);
- free_nm_txq_hwq(pi, nm_txq);
+ free_nm_txq_hwq(vi, nm_txq);
}
- for_each_nm_rxq(pi, i, nm_rxq) {
- free_nm_rxq_hwq(pi, nm_rxq);
+ for_each_nm_rxq(vi, i, nm_rxq) {
+ struct irq *irq = &sc->irq[vi->first_intr + i];
+
+ while (!atomic_cmpset_int(&irq->nm_state, NM_ON, NM_OFF))
+ pause("nmst", 1);
+
+ free_nm_rxq_hwq(vi, nm_rxq);
}
return (rc);
@@ -602,17 +445,17 @@ static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
struct ifnet *ifp = na->ifp;
- struct port_info *pi = ifp->if_softc;
- struct adapter *sc = pi->adapter;
+ struct vi_info *vi = ifp->if_softc;
+ struct adapter *sc = vi->pi->adapter;
int rc;
- rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmreg");
+ rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
if (rc != 0)
return (rc);
if (on)
- rc = cxgbe_netmap_on(sc, pi, ifp, na);
+ rc = cxgbe_netmap_on(sc, vi, ifp, na);
else
- rc = cxgbe_netmap_off(sc, pi, ifp, na);
+ rc = cxgbe_netmap_off(sc, vi, ifp, na);
end_synchronized_op(sc, 0);
return (rc);
@@ -861,9 +704,9 @@ cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
struct netmap_adapter *na = kring->na;
struct ifnet *ifp = na->ifp;
- struct port_info *pi = ifp->if_softc;
- struct adapter *sc = pi->adapter;
- struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[pi->first_nm_txq + kring->ring_id];
+ struct vi_info *vi = ifp->if_softc;
+ struct adapter *sc = vi->pi->adapter;
+ struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
const u_int head = kring->rhead;
u_int reclaimed = 0;
int n, d, npkt_remaining, ndesc_remaining, txcsum;
@@ -928,9 +771,9 @@ cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
struct netmap_adapter *na = kring->na;
struct netmap_ring *ring = kring->ring;
struct ifnet *ifp = na->ifp;
- struct port_info *pi = ifp->if_softc;
- struct adapter *sc = pi->adapter;
- struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[pi->first_nm_rxq + kring->ring_id];
+ struct vi_info *vi = ifp->if_softc;
+ struct adapter *sc = vi->pi->adapter;
+ struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id];
u_int const head = nm_rxsync_prologue(kring);
u_int n;
int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
@@ -998,83 +841,26 @@ cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
return (0);
}
-/*
- * Create an ifnet solely for netmap use and register it with the kernel.
- */
-int
-create_netmap_ifnet(struct port_info *pi)
+void
+cxgbe_nm_attach(struct vi_info *vi)
{
- struct adapter *sc = pi->adapter;
+ struct port_info *pi;
+ struct adapter *sc;
struct netmap_adapter na;
- struct ifnet *ifp;
- device_t dev = pi->dev;
- uint8_t mac[ETHER_ADDR_LEN];
- int rc;
-
- if (pi->nnmtxq <= 0 || pi->nnmrxq <= 0)
- return (0);
- MPASS(pi->nm_ifp == NULL);
-
- /*
- * Allocate a virtual interface exclusively for netmap use. Give it the
- * MAC address normally reserved for use by a TOE interface. (The TOE
- * driver on FreeBSD doesn't use it).
- */
- rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, &mac[0],
- &pi->nm_rss_size, FW_VI_FUNC_OFLD, 0);
- if (rc < 0) {
- device_printf(dev, "unable to allocate netmap virtual "
- "interface for port %d: %d\n", pi->port_id, -rc);
- return (-rc);
- }
- pi->nm_viid = rc;
- pi->nm_xact_addr_filt = -1;
-
- ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "Cannot allocate netmap ifnet\n");
- return (ENOMEM);
- }
- pi->nm_ifp = ifp;
- ifp->if_softc = pi;
-
- if_initname(ifp, is_t4(pi->adapter) ? "ncxgbe" : "ncxl",
- device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = cxgbe_nm_init;
- ifp->if_ioctl = cxgbe_nm_ioctl;
- ifp->if_transmit = cxgbe_nm_transmit;
- ifp->if_qflush = cxgbe_nm_qflush;
+ MPASS(vi->nnmrxq > 0);
+ MPASS(vi->ifp != NULL);
- /*
- * netmap(4) says "netmap does not use features such as checksum
- * offloading, TCP segmentation offloading, encryption, VLAN
- * encapsulation/decapsulation, etc."
- *
- * By default we comply with the statement above. But we do declare the
- * ifnet capable of L3/L4 checksumming so that a user can override
- * netmap and have the hardware do the L3/L4 checksums.
- */
- ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU |
- IFCAP_HWCSUM_IPV6;
- ifp->if_capenable = 0;
- ifp->if_hwassist = 0;
-
- /* nm_media has already been setup by the caller */
+ pi = vi->pi;
+ sc = pi->adapter;
- ether_ifattach(ifp, mac);
-
- /*
- * Register with netmap in the kernel.
- */
bzero(&na, sizeof(na));
- na.ifp = pi->nm_ifp;
+ na.ifp = vi->ifp;
na.na_flags = NAF_BDG_MAYSLEEP;
/* Netmap doesn't know about the space reserved for the status page. */
- na.num_tx_desc = pi->qsize_txq - spg_len / EQ_ESIZE;
+ na.num_tx_desc = vi->qsize_txq - spg_len / EQ_ESIZE;
/*
* The freelist's cidx/pidx drives netmap's rx cidx/pidx. So
@@ -1082,32 +868,23 @@ create_netmap_ifnet(struct port_info *pi)
* freelist, and not the number of entries in the iq. (These two are
* not exactly the same due to the space taken up by the status page).
*/
- na.num_rx_desc = (pi->qsize_rxq / 8) * 8;
+ na.num_rx_desc = (vi->qsize_rxq / 8) * 8;
na.nm_txsync = cxgbe_netmap_txsync;
na.nm_rxsync = cxgbe_netmap_rxsync;
na.nm_register = cxgbe_netmap_reg;
- na.num_tx_rings = pi->nnmtxq;
- na.num_rx_rings = pi->nnmrxq;
+ na.num_tx_rings = vi->nnmtxq;
+ na.num_rx_rings = vi->nnmrxq;
netmap_attach(&na); /* This adds IFCAP_NETMAP to if_capabilities */
-
- return (0);
}
-int
-destroy_netmap_ifnet(struct port_info *pi)
+void
+cxgbe_nm_detach(struct vi_info *vi)
{
- struct adapter *sc = pi->adapter;
- if (pi->nm_ifp == NULL)
- return (0);
-
- netmap_detach(pi->nm_ifp);
- ifmedia_removeall(&pi->nm_media);
- ether_ifdetach(pi->nm_ifp);
- if_free(pi->nm_ifp);
- t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->nm_viid);
+ MPASS(vi->nnmrxq > 0);
+ MPASS(vi->ifp != NULL);
- return (0);
+ netmap_detach(vi->ifp);
}
static void
@@ -1134,9 +911,9 @@ void
t4_nm_intr(void *arg)
{
struct sge_nm_rxq *nm_rxq = arg;
- struct port_info *pi = nm_rxq->pi;
- struct adapter *sc = pi->adapter;
- struct ifnet *ifp = pi->nm_ifp;
+ struct vi_info *vi = nm_rxq->vi;
+ struct adapter *sc = vi->pi->adapter;
+ struct ifnet *ifp = vi->ifp;
struct netmap_adapter *na = NA(ifp);
struct netmap_kring *kring = &na->rx_rings[nm_rxq->nid];
struct netmap_ring *ring = kring->ring;
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 5c7eb48..5216dd3 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -171,44 +171,44 @@ static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
bus_addr_t *, void **);
static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
void *);
-static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
+static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *,
int, int);
-static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
+static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *);
static void add_fl_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
struct sge_fl *);
static int alloc_fwq(struct adapter *);
static int free_fwq(struct adapter *);
static int alloc_mgmtq(struct adapter *);
static int free_mgmtq(struct adapter *);
-static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int,
+static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int,
struct sysctl_oid *);
-static int free_rxq(struct port_info *, struct sge_rxq *);
+static int free_rxq(struct vi_info *, struct sge_rxq *);
#ifdef TCP_OFFLOAD
-static int alloc_ofld_rxq(struct port_info *, struct sge_ofld_rxq *, int, int,
+static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int,
struct sysctl_oid *);
-static int free_ofld_rxq(struct port_info *, struct sge_ofld_rxq *);
+static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
#endif
#ifdef DEV_NETMAP
-static int alloc_nm_rxq(struct port_info *, struct sge_nm_rxq *, int, int,
+static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int,
struct sysctl_oid *);
-static int free_nm_rxq(struct port_info *, struct sge_nm_rxq *);
-static int alloc_nm_txq(struct port_info *, struct sge_nm_txq *, int, int,
+static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *);
+static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int,
struct sysctl_oid *);
-static int free_nm_txq(struct port_info *, struct sge_nm_txq *);
+static int free_nm_txq(struct vi_info *, struct sge_nm_txq *);
#endif
static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
-static int eth_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
+static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
#ifdef TCP_OFFLOAD
-static int ofld_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
+static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
#endif
-static int alloc_eq(struct adapter *, struct port_info *, struct sge_eq *);
+static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *);
static int free_eq(struct adapter *, struct sge_eq *);
-static int alloc_wrq(struct adapter *, struct port_info *, struct sge_wrq *,
+static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
struct sysctl_oid *);
static int free_wrq(struct adapter *, struct sge_wrq *);
-static int alloc_txq(struct port_info *, struct sge_txq *, int,
+static int alloc_txq(struct vi_info *, struct sge_txq *, int,
struct sysctl_oid *);
-static int free_txq(struct port_info *, struct sge_txq *);
+static int free_txq(struct vi_info *, struct sge_txq *);
static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
static inline void ring_fl_db(struct adapter *, struct sge_fl *);
static int refill_fl(struct adapter *, struct sge_fl *, int);
@@ -833,51 +833,25 @@ t4_teardown_adapter_queues(struct adapter *sc)
}
static inline int
-port_intr_count(struct port_info *pi)
+first_vector(struct vi_info *vi)
{
- int rc = 0;
-
- if (pi->flags & INTR_RXQ)
- rc += pi->nrxq;
-#ifdef TCP_OFFLOAD
- if (pi->flags & INTR_OFLD_RXQ)
- rc += pi->nofldrxq;
-#endif
-#ifdef DEV_NETMAP
- if (pi->flags & INTR_NM_RXQ)
- rc += pi->nnmrxq;
-#endif
- return (rc);
-}
-
-static inline int
-first_vector(struct port_info *pi)
-{
- struct adapter *sc = pi->adapter;
- int rc = T4_EXTRA_INTR, i;
+ struct adapter *sc = vi->pi->adapter;
if (sc->intr_count == 1)
return (0);
- for_each_port(sc, i) {
- if (i == pi->port_id)
- break;
-
- rc += port_intr_count(sc->port[i]);
- }
-
- return (rc);
+ return (vi->first_intr);
}
/*
* Given an arbitrary "index," come up with an iq that can be used by other
- * queues (of this port) for interrupt forwarding, SGE egress updates, etc.
+ * queues (of this VI) for interrupt forwarding, SGE egress updates, etc.
* The iq returned is guaranteed to be something that takes direct interrupts.
*/
static struct sge_iq *
-port_intr_iq(struct port_info *pi, int idx)
+vi_intr_iq(struct vi_info *vi, int idx)
{
- struct adapter *sc = pi->adapter;
+ struct adapter *sc = vi->pi->adapter;
struct sge *s = &sc->sge;
struct sge_iq *iq = NULL;
int nintr, i;
@@ -885,43 +859,35 @@ port_intr_iq(struct port_info *pi, int idx)
if (sc->intr_count == 1)
return (&sc->sge.fwq);
- nintr = port_intr_count(pi);
+ nintr = vi->nintr;
KASSERT(nintr != 0,
- ("%s: pi %p has no exclusive interrupts, total interrupts = %d",
- __func__, pi, sc->intr_count));
-#ifdef DEV_NETMAP
- /* Exclude netmap queues as they can't take anyone else's interrupts */
- if (pi->flags & INTR_NM_RXQ)
- nintr -= pi->nnmrxq;
- KASSERT(nintr > 0,
- ("%s: pi %p has nintr %d after netmap adjustment of %d", __func__,
- pi, nintr, pi->nnmrxq));
-#endif
+ ("%s: vi %p has no exclusive interrupts, total interrupts = %d",
+ __func__, vi, sc->intr_count));
i = idx % nintr;
- if (pi->flags & INTR_RXQ) {
- if (i < pi->nrxq) {
- iq = &s->rxq[pi->first_rxq + i].iq;
+ if (vi->flags & INTR_RXQ) {
+ if (i < vi->nrxq) {
+ iq = &s->rxq[vi->first_rxq + i].iq;
goto done;
}
- i -= pi->nrxq;
+ i -= vi->nrxq;
}
#ifdef TCP_OFFLOAD
- if (pi->flags & INTR_OFLD_RXQ) {
- if (i < pi->nofldrxq) {
- iq = &s->ofld_rxq[pi->first_ofld_rxq + i].iq;
+ if (vi->flags & INTR_OFLD_RXQ) {
+ if (i < vi->nofldrxq) {
+ iq = &s->ofld_rxq[vi->first_ofld_rxq + i].iq;
goto done;
}
- i -= pi->nofldrxq;
+ i -= vi->nofldrxq;
}
#endif
- panic("%s: pi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__,
- pi, pi->flags & INTR_ALL, idx, nintr);
+ panic("%s: vi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__,
+ vi, vi->flags & INTR_ALL, idx, nintr);
done:
MPASS(iq != NULL);
KASSERT(iq->flags & IQ_INTR,
- ("%s: iq %p (port %p, intr_flags 0x%lx, idx %d)", __func__, iq, pi,
- pi->flags & INTR_ALL, idx));
+ ("%s: iq %p (vi %p, intr_flags 0x%lx, idx %d)", __func__, iq, vi,
+ vi->flags & INTR_ALL, idx));
return (iq);
}
@@ -948,7 +914,7 @@ mtu_to_max_payload(struct adapter *sc, int mtu, const int toe)
}
int
-t4_setup_port_queues(struct port_info *pi)
+t4_setup_vi_queues(struct vi_info *vi)
{
int rc = 0, i, j, intr_idx, iqid;
struct sge_rxq *rxq;
@@ -959,18 +925,55 @@ t4_setup_port_queues(struct port_info *pi)
struct sge_wrq *ofld_txq;
#endif
#ifdef DEV_NETMAP
+ int saved_idx;
struct sge_nm_rxq *nm_rxq;
struct sge_nm_txq *nm_txq;
#endif
char name[16];
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
- struct ifnet *ifp = pi->ifp;
- struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev);
+ struct ifnet *ifp = vi->ifp;
+ struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev);
struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
int maxp, mtu = ifp->if_mtu;
/* Interrupt vector to start from (when using multiple vectors) */
- intr_idx = first_vector(pi);
+ intr_idx = first_vector(vi);
+
+#ifdef DEV_NETMAP
+ saved_idx = intr_idx;
+ if (ifp->if_capabilities & IFCAP_NETMAP) {
+
+ /* netmap is supported with direct interrupts only. */
+ MPASS(vi->flags & INTR_RXQ);
+
+ /*
+ * We don't have buffers to back the netmap rx queues
+ * right now so we create the queues in a way that
+ * doesn't set off any congestion signal in the chip.
+ */
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq",
+ CTLFLAG_RD, NULL, "rx queues");
+ for_each_nm_rxq(vi, i, nm_rxq) {
+ rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid);
+ if (rc != 0)
+ goto done;
+ intr_idx++;
+ }
+
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq",
+ CTLFLAG_RD, NULL, "tx queues");
+ for_each_nm_txq(vi, i, nm_txq) {
+ iqid = vi->first_nm_rxq + (i % vi->nnmrxq);
+ rc = alloc_nm_txq(vi, nm_txq, iqid, i, oid);
+ if (rc != 0)
+ goto done;
+ }
+ }
+
+ /* Normal rx queues and netmap rx queues share the same interrupts. */
+ intr_idx = saved_idx;
+#endif
/*
* First pass over all NIC and TOE rx queues:
@@ -978,62 +981,49 @@ t4_setup_port_queues(struct port_info *pi)
* b) allocate queue iff it will take direct interrupts.
*/
maxp = mtu_to_max_payload(sc, mtu, 0);
- if (pi->flags & INTR_RXQ) {
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq",
+ if (vi->flags & INTR_RXQ) {
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
CTLFLAG_RD, NULL, "rx queues");
}
- for_each_rxq(pi, i, rxq) {
+ for_each_rxq(vi, i, rxq) {
- init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, pi->qsize_rxq);
+ init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq);
snprintf(name, sizeof(name), "%s rxq%d-fl",
- device_get_nameunit(pi->dev), i);
- init_fl(sc, &rxq->fl, pi->qsize_rxq / 8, maxp, name);
+ device_get_nameunit(vi->dev), i);
+ init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);
- if (pi->flags & INTR_RXQ) {
+ if (vi->flags & INTR_RXQ) {
rxq->iq.flags |= IQ_INTR;
- rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
+ rc = alloc_rxq(vi, rxq, intr_idx, i, oid);
if (rc != 0)
goto done;
intr_idx++;
}
}
+#ifdef DEV_NETMAP
+ if (ifp->if_capabilities & IFCAP_NETMAP)
+ intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq);
+#endif
#ifdef TCP_OFFLOAD
maxp = mtu_to_max_payload(sc, mtu, 1);
- if (is_offload(sc) && pi->flags & INTR_OFLD_RXQ) {
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq",
+ if (vi->flags & INTR_OFLD_RXQ) {
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
CTLFLAG_RD, NULL,
"rx queues for offloaded TCP connections");
}
- for_each_ofld_rxq(pi, i, ofld_rxq) {
+ for_each_ofld_rxq(vi, i, ofld_rxq) {
- init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
- pi->qsize_rxq);
+ init_iq(&ofld_rxq->iq, sc, vi->tmr_idx, vi->pktc_idx,
+ vi->qsize_rxq);
snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
- device_get_nameunit(pi->dev), i);
- init_fl(sc, &ofld_rxq->fl, pi->qsize_rxq / 8, maxp, name);
+ device_get_nameunit(vi->dev), i);
+ init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);
- if (pi->flags & INTR_OFLD_RXQ) {
+ if (vi->flags & INTR_OFLD_RXQ) {
ofld_rxq->iq.flags |= IQ_INTR;
- rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid);
- if (rc != 0)
- goto done;
- intr_idx++;
- }
- }
-#endif
-#ifdef DEV_NETMAP
- /*
- * We don't have buffers to back the netmap rx queues right now so we
- * create the queues in a way that doesn't set off any congestion signal
- * in the chip.
- */
- if (pi->flags & INTR_NM_RXQ) {
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_rxq",
- CTLFLAG_RD, NULL, "rx queues for netmap");
- for_each_nm_rxq(pi, i, nm_rxq) {
- rc = alloc_nm_rxq(pi, nm_rxq, intr_idx, i, oid);
+ rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid);
if (rc != 0)
goto done;
intr_idx++;
@@ -1046,88 +1036,73 @@ t4_setup_port_queues(struct port_info *pi)
* their interrupts are allocated now.
*/
j = 0;
- if (!(pi->flags & INTR_RXQ)) {
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq",
+ if (!(vi->flags & INTR_RXQ)) {
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
CTLFLAG_RD, NULL, "rx queues");
- for_each_rxq(pi, i, rxq) {
+ for_each_rxq(vi, i, rxq) {
MPASS(!(rxq->iq.flags & IQ_INTR));
- intr_idx = port_intr_iq(pi, j)->abs_id;
+ intr_idx = vi_intr_iq(vi, j)->abs_id;
- rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
+ rc = alloc_rxq(vi, rxq, intr_idx, i, oid);
if (rc != 0)
goto done;
j++;
}
}
#ifdef TCP_OFFLOAD
- if (is_offload(sc) && !(pi->flags & INTR_OFLD_RXQ)) {
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq",
+ if (vi->nofldrxq != 0 && !(vi->flags & INTR_OFLD_RXQ)) {
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
CTLFLAG_RD, NULL,
"rx queues for offloaded TCP connections");
- for_each_ofld_rxq(pi, i, ofld_rxq) {
+ for_each_ofld_rxq(vi, i, ofld_rxq) {
MPASS(!(ofld_rxq->iq.flags & IQ_INTR));
- intr_idx = port_intr_iq(pi, j)->abs_id;
+ intr_idx = vi_intr_iq(vi, j)->abs_id;
- rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid);
+ rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid);
if (rc != 0)
goto done;
j++;
}
}
#endif
-#ifdef DEV_NETMAP
- if (!(pi->flags & INTR_NM_RXQ))
- CXGBE_UNIMPLEMENTED(__func__);
-#endif
/*
* Now the tx queues. Only one pass needed.
*/
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
NULL, "tx queues");
j = 0;
- for_each_txq(pi, i, txq) {
- iqid = port_intr_iq(pi, j)->cntxt_id;
+ for_each_txq(vi, i, txq) {
+ iqid = vi_intr_iq(vi, j)->cntxt_id;
snprintf(name, sizeof(name), "%s txq%d",
- device_get_nameunit(pi->dev), i);
- init_eq(&txq->eq, EQ_ETH, pi->qsize_txq, pi->tx_chan, iqid,
+ device_get_nameunit(vi->dev), i);
+ init_eq(&txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, iqid,
name);
- rc = alloc_txq(pi, txq, i, oid);
+ rc = alloc_txq(vi, txq, i, oid);
if (rc != 0)
goto done;
j++;
}
#ifdef TCP_OFFLOAD
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_txq",
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq",
CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections");
- for_each_ofld_txq(pi, i, ofld_txq) {
+ for_each_ofld_txq(vi, i, ofld_txq) {
struct sysctl_oid *oid2;
- iqid = port_intr_iq(pi, j)->cntxt_id;
+ iqid = vi_intr_iq(vi, j)->cntxt_id;
snprintf(name, sizeof(name), "%s ofld_txq%d",
- device_get_nameunit(pi->dev), i);
- init_eq(&ofld_txq->eq, EQ_OFLD, pi->qsize_txq, pi->tx_chan,
+ device_get_nameunit(vi->dev), i);
+ init_eq(&ofld_txq->eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan,
iqid, name);
snprintf(name, sizeof(name), "%d", i);
- oid2 = SYSCTL_ADD_NODE(&pi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
name, CTLFLAG_RD, NULL, "offload tx queue");
- rc = alloc_wrq(sc, pi, ofld_txq, oid2);
- if (rc != 0)
- goto done;
- j++;
- }
-#endif
-#ifdef DEV_NETMAP
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_txq",
- CTLFLAG_RD, NULL, "tx queues for netmap use");
- for_each_nm_txq(pi, i, nm_txq) {
- iqid = pi->first_nm_rxq + (j % pi->nnmrxq);
- rc = alloc_nm_txq(pi, nm_txq, iqid, i, oid);
+ rc = alloc_wrq(sc, vi, ofld_txq, oid2);
if (rc != 0)
goto done;
j++;
@@ -1137,17 +1112,19 @@ t4_setup_port_queues(struct port_info *pi)
/*
* Finally, the control queue.
*/
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
+ if (!IS_MAIN_VI(vi))
+ goto done;
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
NULL, "ctrl queue");
ctrlq = &sc->sge.ctrlq[pi->port_id];
- iqid = port_intr_iq(pi, 0)->cntxt_id;
- snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(pi->dev));
+ iqid = vi_intr_iq(vi, 0)->cntxt_id;
+ snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev));
init_eq(&ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name);
- rc = alloc_wrq(sc, pi, ctrlq, oid);
+ rc = alloc_wrq(sc, vi, ctrlq, oid);
done:
if (rc)
- t4_teardown_port_queues(pi);
+ t4_teardown_vi_queues(vi);
return (rc);
}
@@ -1156,9 +1133,10 @@ done:
* Idempotent
*/
int
-t4_teardown_port_queues(struct port_info *pi)
+t4_teardown_vi_queues(struct vi_info *vi)
{
int i;
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
struct sge_rxq *rxq;
struct sge_txq *txq;
@@ -1172,63 +1150,68 @@ t4_teardown_port_queues(struct port_info *pi)
#endif
/* Do this before freeing the queues */
- if (pi->flags & PORT_SYSCTL_CTX) {
- sysctl_ctx_free(&pi->ctx);
- pi->flags &= ~PORT_SYSCTL_CTX;
+ if (vi->flags & VI_SYSCTL_CTX) {
+ sysctl_ctx_free(&vi->ctx);
+ vi->flags &= ~VI_SYSCTL_CTX;
}
+#ifdef DEV_NETMAP
+ if (vi->ifp->if_capabilities & IFCAP_NETMAP) {
+ for_each_nm_txq(vi, i, nm_txq) {
+ free_nm_txq(vi, nm_txq);
+ }
+
+ for_each_nm_rxq(vi, i, nm_rxq) {
+ free_nm_rxq(vi, nm_rxq);
+ }
+ }
+#endif
+
/*
* Take down all the tx queues first, as they reference the rx queues
* (for egress updates, etc.).
*/
- free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
+ if (IS_MAIN_VI(vi))
+ free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
- for_each_txq(pi, i, txq) {
- free_txq(pi, txq);
+ for_each_txq(vi, i, txq) {
+ free_txq(vi, txq);
}
#ifdef TCP_OFFLOAD
- for_each_ofld_txq(pi, i, ofld_txq) {
+ for_each_ofld_txq(vi, i, ofld_txq) {
free_wrq(sc, ofld_txq);
}
#endif
-#ifdef DEV_NETMAP
- for_each_nm_txq(pi, i, nm_txq)
- free_nm_txq(pi, nm_txq);
-#endif
/*
* Then take down the rx queues that forward their interrupts, as they
* reference other rx queues.
*/
- for_each_rxq(pi, i, rxq) {
+ for_each_rxq(vi, i, rxq) {
if ((rxq->iq.flags & IQ_INTR) == 0)
- free_rxq(pi, rxq);
+ free_rxq(vi, rxq);
}
#ifdef TCP_OFFLOAD
- for_each_ofld_rxq(pi, i, ofld_rxq) {
+ for_each_ofld_rxq(vi, i, ofld_rxq) {
if ((ofld_rxq->iq.flags & IQ_INTR) == 0)
- free_ofld_rxq(pi, ofld_rxq);
+ free_ofld_rxq(vi, ofld_rxq);
}
#endif
-#ifdef DEV_NETMAP
- for_each_nm_rxq(pi, i, nm_rxq)
- free_nm_rxq(pi, nm_rxq);
-#endif
/*
* Then take down the rx queues that take direct interrupts.
*/
- for_each_rxq(pi, i, rxq) {
+ for_each_rxq(vi, i, rxq) {
if (rxq->iq.flags & IQ_INTR)
- free_rxq(pi, rxq);
+ free_rxq(vi, rxq);
}
#ifdef TCP_OFFLOAD
- for_each_ofld_rxq(pi, i, ofld_rxq) {
+ for_each_ofld_rxq(vi, i, ofld_rxq) {
if (ofld_rxq->iq.flags & IQ_INTR)
- free_ofld_rxq(pi, ofld_rxq);
+ free_ofld_rxq(vi, ofld_rxq);
}
#endif
@@ -1284,6 +1267,21 @@ t4_intr(void *arg)
}
}
+void
+t4_vi_intr(void *arg)
+{
+ struct irq *irq = arg;
+
+#ifdef DEV_NETMAP
+ if (atomic_cmpset_int(&irq->nm_state, NM_ON, NM_BUSY)) {
+ t4_nm_intr(irq->nm_rxq);
+ atomic_cmpset_int(&irq->nm_state, NM_BUSY, NM_ON);
+ }
+#endif
+ if (irq->rxq != NULL)
+ t4_intr(irq->rxq);
+}
+
/*
* Deals with anything and everything on the given ingress queue.
*/
@@ -1887,8 +1885,8 @@ t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr)
void
t4_update_fl_bufsize(struct ifnet *ifp)
{
- struct port_info *pi = ifp->if_softc;
- struct adapter *sc = pi->adapter;
+ struct vi_info *vi = ifp->if_softc;
+ struct adapter *sc = vi->pi->adapter;
struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
struct sge_ofld_rxq *ofld_rxq;
@@ -1897,7 +1895,7 @@ t4_update_fl_bufsize(struct ifnet *ifp)
int i, maxp, mtu = ifp->if_mtu;
maxp = mtu_to_max_payload(sc, mtu, 0);
- for_each_rxq(pi, i, rxq) {
+ for_each_rxq(vi, i, rxq) {
fl = &rxq->fl;
FL_LOCK(fl);
@@ -1906,7 +1904,7 @@ t4_update_fl_bufsize(struct ifnet *ifp)
}
#ifdef TCP_OFFLOAD
maxp = mtu_to_max_payload(sc, mtu, 1);
- for_each_ofld_rxq(pi, i, ofld_rxq) {
+ for_each_ofld_rxq(vi, i, ofld_rxq) {
fl = &ofld_rxq->fl;
FL_LOCK(fl);
@@ -2328,7 +2326,8 @@ eth_tx(struct mp_ring *r, u_int cidx, u_int pidx)
struct sge_txq *txq = r->cookie;
struct sge_eq *eq = &txq->eq;
struct ifnet *ifp = txq->ifp;
- struct port_info *pi = (void *)ifp->if_softc;
+ struct vi_info *vi = ifp->if_softc;
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
u_int total, remaining; /* # of packets */
u_int available, dbdiff; /* # of hardware descriptors */
@@ -2556,12 +2555,13 @@ free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
* the abs_id of the ingress queue to which its interrupts should be forwarded.
*/
static int
-alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
+alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
int intr_idx, int cong)
{
int rc, i, cntxt_id;
size_t len;
struct fw_iq_cmd c;
+ struct port_info *pi = vi->pi;
struct adapter *sc = iq->adapter;
__be32 v = 0;
@@ -2592,7 +2592,7 @@ alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
c.type_to_iqandstindex = htobe32(v |
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
- V_FW_IQ_CMD_VIID(pi->viid) |
+ V_FW_IQ_CMD_VIID(vi->viid) |
V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
F_FW_IQ_CMD_IQGTSMODE |
@@ -2744,7 +2744,7 @@ alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
}
static int
-free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
+free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
{
int rc;
struct adapter *sc = iq->adapter;
@@ -2753,7 +2753,7 @@ free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
if (sc == NULL)
return (0); /* nothing to do */
- dev = pi ? pi->dev : sc->dev;
+ dev = vi ? vi->dev : sc->dev;
if (iq->flags & IQ_ALLOCATED) {
rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
@@ -2835,7 +2835,7 @@ alloc_fwq(struct adapter *sc)
init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE);
fwq->flags |= IQ_INTR; /* always */
intr_idx = sc->intr_count > 1 ? 1 : 0;
- rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1);
+ rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1);
if (rc != 0) {
device_printf(sc->dev,
"failed to create firmware event queue: %d\n", rc);
@@ -2910,15 +2910,15 @@ tnl_cong(struct port_info *pi, int drop)
}
static int
-alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx,
+alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx,
struct sysctl_oid *oid)
{
int rc;
struct sysctl_oid_list *children;
char name[16];
- rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx,
- tnl_cong(pi, cong_drop));
+ rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx,
+ tnl_cong(vi->pi, cong_drop));
if (rc != 0)
return (rc);
@@ -2927,55 +2927,55 @@ alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx,
* fill it up a bit more.
*/
FL_LOCK(&rxq->fl);
- refill_fl(pi->adapter, &rxq->fl, 128);
+ refill_fl(vi->pi->adapter, &rxq->fl, 128);
FL_UNLOCK(&rxq->fl);
#if defined(INET) || defined(INET6)
rc = tcp_lro_init(&rxq->lro);
if (rc != 0)
return (rc);
- rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */
+ rxq->lro.ifp = vi->ifp; /* also indicates LRO init'ed */
- if (pi->ifp->if_capenable & IFCAP_LRO)
+ if (vi->ifp->if_capenable & IFCAP_LRO)
rxq->iq.flags |= IQ_LRO_ENABLED;
#endif
- rxq->ifp = pi->ifp;
+ rxq->ifp = vi->ifp;
children = SYSCTL_CHILDREN(oid);
snprintf(name, sizeof(name), "%d", idx);
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
NULL, "rx queue");
children = SYSCTL_CHILDREN(oid);
- SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
+ SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "abs_id",
CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I",
"absolute id of the queue");
- SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
+ SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cntxt_id",
CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I",
"SGE context id of the queue");
- SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
+ SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I",
"consumer index");
#if defined(INET) || defined(INET6)
- SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
+ SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
&rxq->lro.lro_queued, 0, NULL);
- SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
+ SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
&rxq->lro.lro_flushed, 0, NULL);
#endif
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
&rxq->rxcsum, "# of times hardware assisted with checksum");
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction",
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction",
CTLFLAG_RD, &rxq->vlan_extraction,
"# of times hardware extracted 802.1Q tag");
- add_fl_sysctls(&pi->ctx, oid, &rxq->fl);
+ add_fl_sysctls(&vi->ctx, oid, &rxq->fl);
return (rc);
}
static int
-free_rxq(struct port_info *pi, struct sge_rxq *rxq)
+free_rxq(struct vi_info *vi, struct sge_rxq *rxq)
{
int rc;
@@ -2986,7 +2986,7 @@ free_rxq(struct port_info *pi, struct sge_rxq *rxq)
}
#endif
- rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
+ rc = free_iq_fl(vi, &rxq->iq, &rxq->fl);
if (rc == 0)
bzero(rxq, sizeof(*rxq));
@@ -2995,46 +2995,46 @@ free_rxq(struct port_info *pi, struct sge_rxq *rxq)
#ifdef TCP_OFFLOAD
static int
-alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq,
+alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq,
int intr_idx, int idx, struct sysctl_oid *oid)
{
int rc;
struct sysctl_oid_list *children;
char name[16];
- rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx,
- pi->rx_chan_map);
+ rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx,
+ vi->pi->rx_chan_map);
if (rc != 0)
return (rc);
children = SYSCTL_CHILDREN(oid);
snprintf(name, sizeof(name), "%d", idx);
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
NULL, "rx queue");
children = SYSCTL_CHILDREN(oid);
- SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
+ SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "abs_id",
CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16,
"I", "absolute id of the queue");
- SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
+ SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cntxt_id",
CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16,
"I", "SGE context id of the queue");
- SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
+ SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I",
"consumer index");
- add_fl_sysctls(&pi->ctx, oid, &ofld_rxq->fl);
+ add_fl_sysctls(&vi->ctx, oid, &ofld_rxq->fl);
return (rc);
}
static int
-free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq)
+free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq)
{
int rc;
- rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl);
+ rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl);
if (rc == 0)
bzero(ofld_rxq, sizeof(*ofld_rxq));
@@ -3044,7 +3044,7 @@ free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq)
#ifdef DEV_NETMAP
static int
-alloc_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int intr_idx,
+alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx,
int idx, struct sysctl_oid *oid)
{
int rc;
@@ -3052,12 +3052,12 @@ alloc_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int intr_idx,
struct sysctl_ctx_list *ctx;
char name[16];
size_t len;
- struct adapter *sc = pi->adapter;
- struct netmap_adapter *na = NA(pi->nm_ifp);
+ struct adapter *sc = vi->pi->adapter;
+ struct netmap_adapter *na = NA(vi->ifp);
MPASS(na != NULL);
- len = pi->qsize_rxq * IQ_ESIZE;
+ len = vi->qsize_rxq * IQ_ESIZE;
rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map,
&nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc);
if (rc != 0)
@@ -3069,16 +3069,16 @@ alloc_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int intr_idx,
if (rc != 0)
return (rc);
- nm_rxq->pi = pi;
+ nm_rxq->vi = vi;
nm_rxq->nid = idx;
nm_rxq->iq_cidx = 0;
- nm_rxq->iq_sidx = pi->qsize_rxq - spg_len / IQ_ESIZE;
+ nm_rxq->iq_sidx = vi->qsize_rxq - spg_len / IQ_ESIZE;
nm_rxq->iq_gen = F_RSPD_GEN;
nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
nm_rxq->fl_sidx = na->num_rx_desc;
nm_rxq->intr_idx = intr_idx;
- ctx = &pi->ctx;
+ ctx = &vi->ctx;
children = SYSCTL_CHILDREN(oid);
snprintf(name, sizeof(name), "%d", idx);
@@ -3114,9 +3114,9 @@ alloc_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int intr_idx,
static int
-free_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
+free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
- struct adapter *sc = pi->adapter;
+ struct adapter *sc = vi->pi->adapter;
free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba,
nm_rxq->iq_desc);
@@ -3127,13 +3127,14 @@ free_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
}
static int
-alloc_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq, int iqidx, int idx,
+alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx,
struct sysctl_oid *oid)
{
int rc;
size_t len;
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
- struct netmap_adapter *na = NA(pi->nm_ifp);
+ struct netmap_adapter *na = NA(vi->ifp);
char name[16];
struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
@@ -3148,19 +3149,20 @@ alloc_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq, int iqidx, int idx
nm_txq->nid = idx;
nm_txq->iqidx = iqidx;
nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf));
+ V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_VF_VLD(1) |
+ V_TXPKT_VF(vi->viid));
snprintf(name, sizeof(name), "%d", idx);
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
NULL, "netmap tx queue");
children = SYSCTL_CHILDREN(oid);
- SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
+ SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
&nm_txq->cntxt_id, 0, "SGE context id of the queue");
- SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
+ SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I",
"consumer index");
- SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx",
+ SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I",
"producer index");
@@ -3168,9 +3170,9 @@ alloc_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq, int iqidx, int idx
}
static int
-free_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq)
+free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
- struct adapter *sc = pi->adapter;
+ struct adapter *sc = vi->pi->adapter;
free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba,
nm_txq->desc);
@@ -3224,7 +3226,7 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
}
static int
-eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
+eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
int rc, cntxt_id;
struct fw_eq_eth_cmd c;
@@ -3238,7 +3240,7 @@ eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
- F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(pi->viid));
+ F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
@@ -3250,7 +3252,7 @@ eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
if (rc != 0) {
- device_printf(pi->dev,
+ device_printf(vi->dev,
"failed to create Ethernet egress queue: %d\n", rc);
return (rc);
}
@@ -3268,7 +3270,7 @@ eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
#ifdef TCP_OFFLOAD
static int
-ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
+ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
int rc, cntxt_id;
struct fw_eq_ofld_cmd c;
@@ -3293,7 +3295,7 @@ ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
if (rc != 0) {
- device_printf(pi->dev,
+ device_printf(vi->dev,
"failed to create egress queue for TCP offload: %d\n", rc);
return (rc);
}
@@ -3311,7 +3313,7 @@ ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
#endif
static int
-alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
+alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
int rc, qsize;
size_t len;
@@ -3335,12 +3337,12 @@ alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
break;
case EQ_ETH:
- rc = eth_eq_alloc(sc, pi, eq);
+ rc = eth_eq_alloc(sc, vi, eq);
break;
#ifdef TCP_OFFLOAD
case EQ_OFLD:
- rc = ofld_eq_alloc(sc, pi, eq);
+ rc = ofld_eq_alloc(sc, vi, eq);
break;
#endif
@@ -3423,14 +3425,14 @@ free_eq(struct adapter *sc, struct sge_eq *eq)
}
static int
-alloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq,
+alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq,
struct sysctl_oid *oid)
{
int rc;
- struct sysctl_ctx_list *ctx = pi ? &pi->ctx : &sc->ctx;
+ struct sysctl_ctx_list *ctx = vi ? &vi->ctx : &sc->ctx;
struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
- rc = alloc_eq(sc, pi, &wrq->eq);
+ rc = alloc_eq(sc, vi, &wrq->eq);
if (rc)
return (rc);
@@ -3471,10 +3473,11 @@ free_wrq(struct adapter *sc, struct sge_wrq *wrq)
}
static int
-alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx,
+alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx,
struct sysctl_oid *oid)
{
int rc;
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
struct sge_eq *eq = &txq->eq;
char name[16];
@@ -3487,7 +3490,7 @@ alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx,
return (rc);
}
- rc = alloc_eq(sc, pi, eq);
+ rc = alloc_eq(sc, vi, eq);
if (rc != 0) {
mp_ring_free(txq->r);
txq->r = NULL;
@@ -3497,69 +3500,70 @@ alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx,
/* Can't fail after this point. */
TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq);
- txq->ifp = pi->ifp;
+ txq->ifp = vi->ifp;
txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf));
+ V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_VF_VLD(1) |
+ V_TXPKT_VF(vi->viid));
txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE,
M_ZERO | M_WAITOK);
snprintf(name, sizeof(name), "%d", idx);
- oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
NULL, "tx queue");
children = SYSCTL_CHILDREN(oid);
- SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
+ SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
&eq->cntxt_id, 0, "SGE context id of the queue");
- SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
+ SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I",
"consumer index");
- SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx",
+ SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I",
"producer index");
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
&txq->txcsum, "# of times hardware assisted with checksum");
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion",
CTLFLAG_RD, &txq->vlan_insertion,
"# of times hardware inserted 802.1Q tag");
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
&txq->tso_wrs, "# of TSO work requests");
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
&txq->imm_wrs, "# of work requests with immediate data");
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
&txq->sgl_wrs, "# of work requests with direct SGL");
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
&txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts0_wrs",
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs",
CTLFLAG_RD, &txq->txpkts0_wrs,
"# of txpkts (type 0) work requests");
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts1_wrs",
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs",
CTLFLAG_RD, &txq->txpkts1_wrs,
"# of txpkts (type 1) work requests");
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts0_pkts",
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts",
CTLFLAG_RD, &txq->txpkts0_pkts,
"# of frames tx'd using type0 txpkts work requests");
- SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts1_pkts",
+ SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts",
CTLFLAG_RD, &txq->txpkts1_pkts,
"# of frames tx'd using type1 txpkts work requests");
- SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_enqueues",
+ SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_enqueues",
CTLFLAG_RD, &txq->r->enqueues,
"# of enqueues to the mp_ring for this queue");
- SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_drops",
+ SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_drops",
CTLFLAG_RD, &txq->r->drops,
"# of drops in the mp_ring for this queue");
- SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_starts",
+ SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_starts",
CTLFLAG_RD, &txq->r->starts,
"# of normal consumer starts in the mp_ring for this queue");
- SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_stalls",
+ SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_stalls",
CTLFLAG_RD, &txq->r->stalls,
"# of consumer stalls in the mp_ring for this queue");
- SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_restarts",
+ SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_restarts",
CTLFLAG_RD, &txq->r->restarts,
"# of consumer restarts in the mp_ring for this queue");
- SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_abdications",
+ SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_abdications",
CTLFLAG_RD, &txq->r->abdications,
"# of consumer abdications in the mp_ring for this queue");
@@ -3567,10 +3571,10 @@ alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx,
}
static int
-free_txq(struct port_info *pi, struct sge_txq *txq)
+free_txq(struct vi_info *vi, struct sge_txq *txq)
{
int rc;
- struct adapter *sc = pi->adapter;
+ struct adapter *sc = vi->pi->adapter;
struct sge_eq *eq = &txq->eq;
rc = free_eq(sc, eq);
@@ -3750,7 +3754,7 @@ refill_sfl(void *arg)
struct adapter *sc = arg;
struct sge_fl *fl, *fl_temp;
- mtx_lock(&sc->sfl_lock);
+ mtx_assert(&sc->sfl_lock, MA_OWNED);
TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
FL_LOCK(fl);
refill_fl(sc, fl, 64);
@@ -3763,7 +3767,6 @@ refill_sfl(void *arg)
if (!TAILQ_EMPTY(&sc->sfl))
callout_schedule(&sc->sfl_callout, hz / 5);
- mtx_unlock(&sc->sfl_lock);
}
static int
diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c
index 941f4d4..60f1e6c 100644
--- a/sys/dev/cxgbe/tom/t4_connect.c
+++ b/sys/dev/cxgbe/tom/t4_connect.c
@@ -233,7 +233,7 @@ static uint32_t
calc_opt2a(struct socket *so, struct toepcb *toep)
{
struct tcpcb *tp = so_sototcpcb(so);
- struct port_info *pi = toep->port;
+ struct port_info *pi = toep->vi->pi;
struct adapter *sc = pi->adapter;
uint32_t opt2;
@@ -321,7 +321,7 @@ t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
struct toepcb *toep = NULL;
struct wrqe *wr = NULL;
struct ifnet *rt_ifp = rt->rt_ifp;
- struct port_info *pi;
+ struct vi_info *vi;
int mtu_idx, rscale, qid_atid, rc, isipv6;
struct inpcb *inp = sotoinpcb(so);
struct tcpcb *tp = intotcpcb(inp);
@@ -332,17 +332,17 @@ t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));
if (rt_ifp->if_type == IFT_ETHER)
- pi = rt_ifp->if_softc;
+ vi = rt_ifp->if_softc;
else if (rt_ifp->if_type == IFT_L2VLAN) {
struct ifnet *ifp = VLAN_COOKIE(rt_ifp);
- pi = ifp->if_softc;
+ vi = ifp->if_softc;
} else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS); /* XXX: implement lagg+TOE */
else
DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);
- toep = alloc_toepcb(pi, -1, -1, M_NOWAIT);
+ toep = alloc_toepcb(vi, -1, -1, M_NOWAIT);
if (toep == NULL)
DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
@@ -350,7 +350,7 @@ t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
if (toep->tid < 0)
DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
- toep->l2te = t4_l2t_get(pi, rt_ifp,
+ toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
if (toep->l2te == NULL)
DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
@@ -398,13 +398,13 @@ t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
if (is_t4(sc)) {
INIT_TP_WR(cpl, 0);
- cpl->params = select_ntuple(pi, toep->l2te);
+ cpl->params = select_ntuple(vi, toep->l2te);
} else {
struct cpl_t5_act_open_req6 *c5 = (void *)cpl;
INIT_TP_WR(c5, 0);
c5->iss = htobe32(tp->iss);
- c5->params = select_ntuple(pi, toep->l2te);
+ c5->params = select_ntuple(vi, toep->l2te);
}
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
qid_atid));
@@ -414,7 +414,7 @@ t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
cpl->peer_port = inp->inp_fport;
cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
- cpl->opt0 = calc_opt0(so, pi, toep->l2te, mtu_idx, rscale,
+ cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
toep->rx_credits, toep->ulp_mode);
cpl->opt2 = calc_opt2a(so, toep);
} else {
@@ -422,19 +422,19 @@ t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
if (is_t4(sc)) {
INIT_TP_WR(cpl, 0);
- cpl->params = select_ntuple(pi, toep->l2te);
+ cpl->params = select_ntuple(vi, toep->l2te);
} else {
struct cpl_t5_act_open_req *c5 = (void *)cpl;
INIT_TP_WR(c5, 0);
c5->iss = htobe32(tp->iss);
- c5->params = select_ntuple(pi, toep->l2te);
+ c5->params = select_ntuple(vi, toep->l2te);
}
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
qid_atid));
inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
&cpl->peer_ip, &cpl->peer_port);
- cpl->opt0 = calc_opt0(so, pi, toep->l2te, mtu_idx, rscale,
+ cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
toep->rx_credits, toep->ulp_mode);
cpl->opt2 = calc_opt2a(so, toep);
}
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index 6935b44..f1ac76e 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -104,9 +104,10 @@ send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
struct wrqe *wr;
struct fw_flowc_wr *flowc;
unsigned int nparams = ftxp ? 8 : 6, flowclen;
- struct port_info *pi = toep->port;
+ struct vi_info *vi = toep->vi;
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
- unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN;
+ unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN;
struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
@@ -513,7 +514,7 @@ write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
if (txalign > 0) {
struct tcpcb *tp = intotcpcb(toep->inp);
- if (plen < 2 * tp->t_maxseg || is_10G_port(toep->port))
+ if (plen < 2 * tp->t_maxseg || is_10G_port(toep->vi->pi))
txwr->lsodisable_to_flags |=
htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE);
else
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index e6cbfe4..fa2385d 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -72,7 +72,7 @@ static void free_stid(struct adapter *, struct listen_ctx *);
/* lctx services */
static struct listen_ctx *alloc_lctx(struct adapter *, struct inpcb *,
- struct port_info *);
+ struct vi_info *);
static int free_lctx(struct adapter *, struct listen_ctx *);
static void hold_lctx(struct listen_ctx *);
static void listen_hash_add(struct adapter *, struct listen_ctx *);
@@ -80,7 +80,7 @@ static struct listen_ctx *listen_hash_find(struct adapter *, struct inpcb *);
static struct listen_ctx *listen_hash_del(struct adapter *, struct inpcb *);
static struct inpcb *release_lctx(struct adapter *, struct listen_ctx *);
-static inline void save_qids_in_mbuf(struct mbuf *, struct port_info *);
+static inline void save_qids_in_mbuf(struct mbuf *, struct vi_info *);
static inline void get_qids_from_mbuf(struct mbuf *m, int *, int *);
static void send_reset_synqe(struct toedev *, struct synq_entry *);
@@ -187,7 +187,7 @@ free_stid(struct adapter *sc, struct listen_ctx *lctx)
}
static struct listen_ctx *
-alloc_lctx(struct adapter *sc, struct inpcb *inp, struct port_info *pi)
+alloc_lctx(struct adapter *sc, struct inpcb *inp, struct vi_info *vi)
{
struct listen_ctx *lctx;
@@ -214,8 +214,8 @@ alloc_lctx(struct adapter *sc, struct inpcb *inp, struct port_info *pi)
}
}
- lctx->ctrlq = &sc->sge.ctrlq[pi->port_id];
- lctx->ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
+ lctx->ctrlq = &sc->sge.ctrlq[vi->pi->port_id];
+ lctx->ofld_rxq = &sc->sge.ofld_rxq[vi->first_ofld_rxq];
refcount_init(&lctx->refcount, 1);
TAILQ_INIT(&lctx->synq);
@@ -346,7 +346,8 @@ send_reset_synqe(struct toedev *tod, struct synq_entry *synqe)
struct adapter *sc = tod->tod_softc;
struct mbuf *m = synqe->syn;
struct ifnet *ifp = m->m_pkthdr.rcvif;
- struct port_info *pi = ifp->if_softc;
+ struct vi_info *vi = ifp->if_softc;
+ struct port_info *pi = vi->pi;
struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx];
struct wrqe *wr;
struct fw_flowc_wr *flowc;
@@ -355,7 +356,7 @@ send_reset_synqe(struct toedev *tod, struct synq_entry *synqe)
struct sge_wrq *ofld_txq;
struct sge_ofld_rxq *ofld_rxq;
const int nparams = 6;
- unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN;
+ unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN;
INP_WLOCK_ASSERT(synqe->lctx->inp);
@@ -495,17 +496,18 @@ destroy_server(struct adapter *sc, struct listen_ctx *lctx)
/*
* Start a listening server by sending a passive open request to HW.
*
- * Can't take adapter lock here and access to sc->flags, sc->open_device_map,
+ * Can't take adapter lock here and access to sc->flags,
* sc->offload_map, if_capenable are all race prone.
*/
int
t4_listen_start(struct toedev *tod, struct tcpcb *tp)
{
struct adapter *sc = tod->tod_softc;
+ struct vi_info *vi;
struct port_info *pi;
struct inpcb *inp = tp->t_inpcb;
struct listen_ctx *lctx;
- int i, rc;
+ int i, rc, v;
INP_WLOCK_ASSERT(inp);
@@ -527,12 +529,9 @@ t4_listen_start(struct toedev *tod, struct tcpcb *tp)
("%s: TOM not initialized", __func__));
#endif
- if ((sc->open_device_map & sc->offload_map) == 0)
- goto done; /* no port that's UP with IFCAP_TOE enabled */
-
/*
- * Find a running port with IFCAP_TOE (4 or 6). We'll use the first
- * such port's queues to send the passive open and receive the reply to
+ * Find an initialized VI with IFCAP_TOE (4 or 6). We'll use the first
+ * such VI's queues to send the passive open and receive the reply to
* it.
*
* XXX: need a way to mark a port in use by offload. if_cxgbe should
@@ -540,18 +539,20 @@ t4_listen_start(struct toedev *tod, struct tcpcb *tp)
* attempts to disable IFCAP_TOE on that port too?).
*/
for_each_port(sc, i) {
- if (isset(&sc->open_device_map, i) &&
- sc->port[i]->ifp->if_capenable & IFCAP_TOE)
- break;
+ pi = sc->port[i];
+ for_each_vi(pi, v, vi) {
+ if (vi->flags & VI_INIT_DONE &&
+ vi->ifp->if_capenable & IFCAP_TOE)
+ goto found;
+ }
}
- KASSERT(i < sc->params.nports,
- ("%s: no running port with TOE capability enabled.", __func__));
- pi = sc->port[i];
+ goto done; /* no initialized VI with IFCAP_TOE enabled */
+found:
if (listen_hash_find(sc, inp) != NULL)
goto done; /* already setup */
- lctx = alloc_lctx(sc, inp, pi);
+ lctx = alloc_lctx(sc, inp, vi);
if (lctx == NULL) {
log(LOG_ERR,
"%s: listen request ignored, %s couldn't allocate lctx\n",
@@ -822,7 +823,7 @@ done_with_synqe(struct adapter *sc, struct synq_entry *synqe)
{
struct listen_ctx *lctx = synqe->lctx;
struct inpcb *inp = lctx->inp;
- struct port_info *pi = synqe->syn->m_pkthdr.rcvif->if_softc;
+ struct vi_info *vi = synqe->syn->m_pkthdr.rcvif->if_softc;
struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx];
INP_WLOCK_ASSERT(inp);
@@ -832,7 +833,7 @@ done_with_synqe(struct adapter *sc, struct synq_entry *synqe)
if (inp)
INP_WUNLOCK(inp);
remove_tid(sc, synqe->tid);
- release_tid(sc, synqe->tid, &sc->sge.ctrlq[pi->port_id]);
+ release_tid(sc, synqe->tid, &sc->sge.ctrlq[vi->pi->port_id]);
t4_l2t_release(e);
release_synqe(synqe); /* removed from synq list */
}
@@ -943,12 +944,12 @@ t4_offload_socket(struct toedev *tod, void *arg, struct socket *so)
}
static inline void
-save_qids_in_mbuf(struct mbuf *m, struct port_info *pi)
+save_qids_in_mbuf(struct mbuf *m, struct vi_info *vi)
{
uint32_t txqid, rxqid;
- txqid = (arc4random() % pi->nofldtxq) + pi->first_ofld_txq;
- rxqid = (arc4random() % pi->nofldrxq) + pi->first_ofld_rxq;
+ txqid = (arc4random() % vi->nofldtxq) + vi->first_ofld_txq;
+ rxqid = (arc4random() % vi->nofldrxq) + vi->first_ofld_rxq;
m->m_pkthdr.flowid = (txqid << 16) | (rxqid & 0xffff);
}
@@ -1224,11 +1225,12 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
struct tcphdr th;
struct tcpopt to;
struct port_info *pi;
+ struct vi_info *vi;
struct ifnet *hw_ifp, *ifp;
struct l2t_entry *e = NULL;
int rscale, mtu_idx, rx_credits, rxqid, ulp_mode;
struct synq_entry *synqe = NULL;
- int reject_reason;
+ int reject_reason, v;
uint16_t vid;
#ifdef INVARIANTS
unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
@@ -1245,7 +1247,26 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
t4opt_to_tcpopt(&cpl->tcpopt, &to);
pi = sc->port[G_SYN_INTF(be16toh(cpl->l2info))];
- hw_ifp = pi->ifp; /* the cxgbeX ifnet */
+
+ /*
+ * Use the MAC index to lookup the associated VI. If this SYN
+ * didn't match a perfect MAC filter, punt.
+ */
+ if (!(be16toh(cpl->l2info) & F_SYN_XACT_MATCH)) {
+ m_freem(m);
+ m = NULL;
+ REJECT_PASS_ACCEPT();
+ }
+ for_each_vi(pi, v, vi) {
+ if (vi->xact_addr_filt == G_SYN_MAC_IDX(be16toh(cpl->l2info)))
+ goto found;
+ }
+ m_freem(m);
+ m = NULL;
+ REJECT_PASS_ACCEPT();
+
+found:
+ hw_ifp = vi->ifp; /* the (v)cxgbeX ifnet */
m->m_pkthdr.rcvif = hw_ifp;
tod = TOEDEV(hw_ifp);
@@ -1344,7 +1365,7 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
SOCKBUF_UNLOCK(&so->so_rcv);
- save_qids_in_mbuf(m, pi);
+ save_qids_in_mbuf(m, vi);
get_qids_from_mbuf(m, NULL, &rxqid);
if (is_t4(sc))
@@ -1359,7 +1380,7 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
synqe->flags |= TPF_SYNQE_TCPDDP;
} else
ulp_mode = ULP_MODE_NONE;
- rpl->opt0 = calc_opt0(so, pi, e, mtu_idx, rscale, rx_credits, ulp_mode);
+ rpl->opt0 = calc_opt0(so, vi, e, mtu_idx, rscale, rx_credits, ulp_mode);
rpl->opt2 = calc_opt2p(sc, pi, rxqid, &cpl->tcpopt, &th, ulp_mode);
synqe->tid = tid;
@@ -1484,7 +1505,7 @@ do_pass_establish(struct sge_iq *iq, const struct rss_header *rss,
struct mbuf *m)
{
struct adapter *sc = iq->adapter;
- struct port_info *pi;
+ struct vi_info *vi;
struct ifnet *ifp;
const struct cpl_pass_establish *cpl = (const void *)(rss + 1);
#if defined(KTR) || defined(INVARIANTS)
@@ -1532,16 +1553,16 @@ do_pass_establish(struct sge_iq *iq, const struct rss_header *rss,
}
ifp = synqe->syn->m_pkthdr.rcvif;
- pi = ifp->if_softc;
- KASSERT(pi->adapter == sc,
- ("%s: pi %p, sc %p mismatch", __func__, pi, sc));
+ vi = ifp->if_softc;
+ KASSERT(vi->pi->adapter == sc,
+ ("%s: vi %p, sc %p mismatch", __func__, vi, sc));
get_qids_from_mbuf(synqe->syn, &txqid, &rxqid);
KASSERT(rxqid == iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0],
("%s: CPL arrived on unexpected rxq. %d %d", __func__, rxqid,
(int)(iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0])));
- toep = alloc_toepcb(pi, txqid, rxqid, M_NOWAIT);
+ toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT);
if (toep == NULL) {
reset:
/*
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 2d88d3a..8d3cb2f 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -104,8 +104,9 @@ static eventhandler_tag ifaddr_evhandler;
static struct timeout_task clip_task;
struct toepcb *
-alloc_toepcb(struct port_info *pi, int txqid, int rxqid, int flags)
+alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags)
{
+ struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
struct toepcb *toep;
int tx_credits, txsd_total, len;
@@ -127,18 +128,18 @@ alloc_toepcb(struct port_info *pi, int txqid, int rxqid, int flags)
howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);
if (txqid < 0)
- txqid = (arc4random() % pi->nofldtxq) + pi->first_ofld_txq;
- KASSERT(txqid >= pi->first_ofld_txq &&
- txqid < pi->first_ofld_txq + pi->nofldtxq,
- ("%s: txqid %d for port %p (first %d, n %d)", __func__, txqid, pi,
- pi->first_ofld_txq, pi->nofldtxq));
+ txqid = (arc4random() % vi->nofldtxq) + vi->first_ofld_txq;
+ KASSERT(txqid >= vi->first_ofld_txq &&
+ txqid < vi->first_ofld_txq + vi->nofldtxq,
+ ("%s: txqid %d for vi %p (first %d, n %d)", __func__, txqid, vi,
+ vi->first_ofld_txq, vi->nofldtxq));
if (rxqid < 0)
- rxqid = (arc4random() % pi->nofldrxq) + pi->first_ofld_rxq;
- KASSERT(rxqid >= pi->first_ofld_rxq &&
- rxqid < pi->first_ofld_rxq + pi->nofldrxq,
- ("%s: rxqid %d for port %p (first %d, n %d)", __func__, rxqid, pi,
- pi->first_ofld_rxq, pi->nofldrxq));
+ rxqid = (arc4random() % vi->nofldrxq) + vi->first_ofld_rxq;
+ KASSERT(rxqid >= vi->first_ofld_rxq &&
+ rxqid < vi->first_ofld_rxq + vi->nofldrxq,
+ ("%s: rxqid %d for vi %p (first %d, n %d)", __func__, rxqid, vi,
+ vi->first_ofld_rxq, vi->nofldrxq));
len = offsetof(struct toepcb, txsd) +
txsd_total * sizeof(struct ofld_tx_sdesc);
@@ -148,7 +149,7 @@ alloc_toepcb(struct port_info *pi, int txqid, int rxqid, int flags)
return (NULL);
toep->td = sc->tom_softc;
- toep->port = pi;
+ toep->vi = vi;
toep->tx_total = tx_credits;
toep->tx_credits = tx_credits;
toep->ofld_txq = &sc->sge.ofld_txq[txqid];
@@ -509,7 +510,7 @@ extern int always_keepalive;
* socket so could be a listening socket too.
*/
uint64_t
-calc_opt0(struct socket *so, struct port_info *pi, struct l2t_entry *e,
+calc_opt0(struct socket *so, struct vi_info *vi, struct l2t_entry *e,
int mtu_idx, int rscale, int rx_credits, int ulp_mode)
{
uint64_t opt0;
@@ -533,20 +534,20 @@ calc_opt0(struct socket *so, struct port_info *pi, struct l2t_entry *e,
if (e != NULL)
opt0 |= V_L2T_IDX(e->idx);
- if (pi != NULL) {
- opt0 |= V_SMAC_SEL(VIID_SMACIDX(pi->viid));
- opt0 |= V_TX_CHAN(pi->tx_chan);
+ if (vi != NULL) {
+ opt0 |= V_SMAC_SEL(VIID_SMACIDX(vi->viid));
+ opt0 |= V_TX_CHAN(vi->pi->tx_chan);
}
return htobe64(opt0);
}
uint64_t
-select_ntuple(struct port_info *pi, struct l2t_entry *e)
+select_ntuple(struct vi_info *vi, struct l2t_entry *e)
{
- struct adapter *sc = pi->adapter;
+ struct adapter *sc = vi->pi->adapter;
struct tp_params *tp = &sc->params.tp;
- uint16_t viid = pi->viid;
+ uint16_t viid = vi->viid;
uint64_t ntuple = 0;
/*
@@ -961,7 +962,8 @@ t4_tom_activate(struct adapter *sc)
{
struct tom_data *td;
struct toedev *tod;
- int i, rc;
+ struct vi_info *vi;
+ int i, rc, v;
ASSERT_SYNCHRONIZED_OP(sc);
@@ -1020,8 +1022,11 @@ t4_tom_activate(struct adapter *sc)
tod->tod_offload_socket = t4_offload_socket;
tod->tod_ctloutput = t4_ctloutput;
- for_each_port(sc, i)
- TOEDEV(sc->port[i]->ifp) = &td->tod;
+ for_each_port(sc, i) {
+ for_each_vi(sc->port[i], v, vi) {
+ TOEDEV(vi->ifp) = &td->tod;
+ }
+ }
sc->tom_softc = td;
register_toedev(sc->tom_softc);
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index c54f6be..cc5d3b4 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -96,7 +96,7 @@ struct toepcb {
u_int flags; /* miscellaneous flags */
struct tom_data *td;
struct inpcb *inp; /* backpointer to host stack's PCB */
- struct port_info *port; /* physical port */
+ struct vi_info *vi; /* virtual interface */
struct sge_wrq *ofld_txq;
struct sge_ofld_rxq *ofld_rxq;
struct sge_wrq *ctrlq;
@@ -221,7 +221,7 @@ td_adapter(struct tom_data *td)
}
/* t4_tom.c */
-struct toepcb *alloc_toepcb(struct port_info *, int, int, int);
+struct toepcb *alloc_toepcb(struct vi_info *, int, int, int);
void free_toepcb(struct toepcb *);
void offload_socket(struct socket *, struct toepcb *);
void undo_offload_socket(struct socket *);
@@ -234,9 +234,9 @@ void release_tid(struct adapter *, int, struct sge_wrq *);
int find_best_mtu_idx(struct adapter *, struct in_conninfo *, int);
u_long select_rcv_wnd(struct socket *);
int select_rcv_wscale(void);
-uint64_t calc_opt0(struct socket *, struct port_info *, struct l2t_entry *,
+uint64_t calc_opt0(struct socket *, struct vi_info *, struct l2t_entry *,
int, int, int, int);
-uint64_t select_ntuple(struct port_info *, struct l2t_entry *);
+uint64_t select_ntuple(struct vi_info *, struct l2t_entry *);
void set_tcpddp_ulp_mode(struct toepcb *);
int negative_advice(int);
struct clip_entry *hold_lip(struct tom_data *, struct in6_addr *);
OpenPOWER on IntegriCloud