author      np <np@FreeBSD.org>    2016-03-08 00:23:56 +0000
committer   np <np@FreeBSD.org>    2016-03-08 00:23:56 +0000
commit      1dda046f140e9de5102860c2f63761a18fac5602 (patch)
tree        8e56b670332748711d5438289b1bfaf5d7e1aa78
parent      936c6d85ac48298ab8398ba9b140b7391fafa041 (diff)
download    FreeBSD-src-1dda046f140e9de5102860c2f63761a18fac5602.zip
            FreeBSD-src-1dda046f140e9de5102860c2f63761a18fac5602.tar.gz
cxgbe(4): Add a struct sge_params to store per-adapter SGE parameters.

Move the code that reads all the parameters to t4_init_sge_params in the
shared code.  Use these per-adapter values instead of globals.

Sponsored by:   Chelsio Communications
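
Below is a minimal, self-contained sketch (not part of the commit) of the
pattern this change converts the driver to: queue geometry is derived from the
adapter's own params.sge fields rather than from the former file-scope globals
such as spg_len and fl_pktshift.  The structures are trimmed to just the fields
used here, and EQ_ESIZE is assumed to be the 64-byte egress descriptor size
referred to in the diff; eq_usable_desc() is a hypothetical helper, not a
driver function.

/*
 * Illustrative sketch only: trimmed copies of the driver structures,
 * showing per-adapter SGE parameters in place of the old globals.
 */
struct sge_params {
	int spg_len;		/* status page length: 64 or 128 bytes */
	int eq_s_qpp;		/* log2 of egress doorbells per BAR2 page */
	int fl_pktshift;	/* payload DMA offset in an rx buffer */
};

struct adapter_params {
	struct sge_params sge;
};

struct adapter {
	struct adapter_params params;
};

#define EQ_ESIZE 64	/* assumed: one egress queue descriptor is 64 bytes */

/*
 * Usable descriptors in an egress queue: the ring size minus the slots
 * taken by the status page (the eq->sidx calculation in t4_sge.c).
 */
static inline int
eq_usable_desc(const struct adapter *sc, int qsize)
{
	return (qsize - sc->params.sge.spg_len / EQ_ESIZE);
}

With the values stored per adapter, two adapters whose register settings differ
(for example 64- vs 128-byte status pages) no longer share a single global,
which is the point of routing everything through t4_init_sge_params.
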
-rw-r--r--  sys/dev/cxgbe/adapter.h            |   9
-rw-r--r--  sys/dev/cxgbe/common/common.h      |  24
-rw-r--r--  sys/dev/cxgbe/common/t4_hw.c       |  70
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/device.c    |  19
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h  |   1
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/qp.c        |   6
-rw-r--r--  sys/dev/cxgbe/t4_main.c            |   8
-rw-r--r--  sys/dev/cxgbe/t4_netmap.c          |  18
-rw-r--r--  sys/dev/cxgbe/t4_sge.c             | 155
9 files changed, 172 insertions, 138 deletions
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index c91e6a5..88807d0 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -671,13 +671,6 @@ struct sge_nm_txq {
#endif
struct sge {
- int timer_val[SGE_NTIMERS];
- int counter_val[SGE_NCOUNTERS];
- int fl_starve_threshold;
- int fl_starve_threshold2;
- int eq_s_qpp;
- int iq_s_qpp;
-
int nrxq; /* total # of Ethernet rx queues */
int ntxq; /* total # of Ethernet tx tx queues */
#ifdef TCP_OFFLOAD
@@ -710,8 +703,6 @@ struct sge {
struct sge_iq **iqmap; /* iq->cntxt_id to iq mapping */
struct sge_eq **eqmap; /* eq->cntxt_id to eq mapping */
- int pad_boundary;
- int pack_boundary;
int8_t safe_hwidx1; /* may not have room for metadata */
int8_t safe_hwidx2; /* with room for metadata and maybe more */
struct sw_zone_info sw_zone_info[SW_ZONE_SIZES];
diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h
index 49e0285..142ab73 100644
--- a/sys/dev/cxgbe/common/common.h
+++ b/sys/dev/cxgbe/common/common.h
@@ -219,6 +219,20 @@ struct tp_rdma_stats {
u32 rqe_dfr_mod;
};
+struct sge_params {
+ int timer_val[SGE_NTIMERS];
+ int counter_val[SGE_NCOUNTERS];
+ int fl_starve_threshold;
+ int fl_starve_threshold2;
+ int page_shift;
+ int eq_s_qpp;
+ int iq_s_qpp;
+ int spg_len;
+ int pad_boundary;
+ int pack_boundary;
+ int fl_pktshift;
+};
+
struct tp_params {
unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
@@ -272,6 +286,7 @@ struct chip_params {
};
struct adapter_params {
+ struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
@@ -406,6 +421,14 @@ static inline unsigned int us_to_core_ticks(const struct adapter *adap,
return (us * adap->params.vpd.cclk) / 1000;
}
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+ unsigned int ticks)
+{
+ /* add Core Clock / 2 to round ticks to nearest uS */
+ return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
+ adapter->params.vpd.cclk);
+}
+
static inline unsigned int dack_ticks_to_usec(const struct adapter *adap,
unsigned int ticks)
{
@@ -465,6 +488,7 @@ int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_check_fw_version(struct adapter *adapter);
int t4_init_hw(struct adapter *adapter, u32 fw_params);
int t4_prep_adapter(struct adapter *adapter);
+int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct port_info *p, int mbox, int pf, int vf);
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index 1a72737..32d0f53 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2012 Chelsio Communications, Inc.
+ * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -7633,6 +7633,74 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
}
/**
+ * t4_init_sge_params - initialize adap->params.sge
+ * @adapter: the adapter
+ *
+ * Initialize various fields of the adapter's SGE Parameters structure.
+ */
+int t4_init_sge_params(struct adapter *adapter)
+{
+ u32 r;
+ struct sge_params *sp = &adapter->params.sge;
+
+ r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
+ sp->counter_val[0] = G_THRESHOLD_0(r);
+ sp->counter_val[1] = G_THRESHOLD_1(r);
+ sp->counter_val[2] = G_THRESHOLD_2(r);
+ sp->counter_val[3] = G_THRESHOLD_3(r);
+
+ r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
+ sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r));
+ sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r));
+ r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
+ sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r));
+ sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r));
+ r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
+ sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r));
+ sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r));
+
+ r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
+ sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
+ if (is_t4(adapter))
+ sp->fl_starve_threshold2 = sp->fl_starve_threshold;
+ else
+ sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
+
+ /* egress queues: log2 of # of doorbells per BAR2 page */
+ r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
+ r >>= S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
+ sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
+
+ /* ingress queues: log2 of # of doorbells per BAR2 page */
+ r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
+ r >>= S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
+ sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
+
+ r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
+ r >>= S_HOSTPAGESIZEPF0 +
+ (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
+ sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
+
+ r = t4_read_reg(adapter, A_SGE_CONTROL);
+ sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
+ sp->fl_pktshift = G_PKTSHIFT(r);
+ sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5);
+ if (is_t4(adapter))
+ sp->pack_boundary = sp->pad_boundary;
+ else {
+ r = t4_read_reg(adapter, A_SGE_CONTROL2);
+ if (G_INGPACKBOUNDARY(r) == 0)
+ sp->pack_boundary = 16;
+ else
+ sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
+ }
+
+ return 0;
+}
+
+/**
* t4_init_tp_params - initialize adap->params.tp
* @adap: the adapter
*
diff --git a/sys/dev/cxgbe/iw_cxgbe/device.c b/sys/dev/cxgbe/iw_cxgbe/device.c
index 6de0de6..ea04190 100644
--- a/sys/dev/cxgbe/iw_cxgbe/device.c
+++ b/sys/dev/cxgbe/iw_cxgbe/device.c
@@ -45,8 +45,6 @@ __FBSDID("$FreeBSD$");
#ifdef TCP_OFFLOAD
#include "iw_cxgbe.h"
-int spg_creds = 2; /* Default status page size is 2 credits = 128B */
-
void
c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
struct c4iw_dev_ucontext *uctx)
@@ -89,27 +87,24 @@ static int
c4iw_rdev_open(struct c4iw_rdev *rdev)
{
struct adapter *sc = rdev->adap;
+ struct sge_params *sp = &sc->params.sge;
int rc;
c4iw_init_dev_ucontext(rdev, &rdev->uctx);
- /* Save the status page size set by if_cxgbe */
- spg_creds = (t4_read_reg(sc, A_SGE_CONTROL) & F_EGRSTATUSPAGESIZE) ?
- 2 : 1;
-
/* XXX: we can probably make this work */
- if (sc->sge.eq_s_qpp > PAGE_SHIFT || sc->sge.iq_s_qpp > PAGE_SHIFT) {
+ if (sp->eq_s_qpp > PAGE_SHIFT || sp->iq_s_qpp > PAGE_SHIFT) {
device_printf(sc->dev,
"doorbell density too high (eq %d, iq %d, pg %d).\n",
- sc->sge.eq_s_qpp, sc->sge.eq_s_qpp, PAGE_SHIFT);
+ sp->eq_s_qpp, sp->eq_s_qpp, PAGE_SHIFT);
rc = -EINVAL;
goto err1;
}
- rdev->qpshift = PAGE_SHIFT - sc->sge.eq_s_qpp;
- rdev->qpmask = (1 << sc->sge.eq_s_qpp) - 1;
- rdev->cqshift = PAGE_SHIFT - sc->sge.iq_s_qpp;
- rdev->cqmask = (1 << sc->sge.iq_s_qpp) - 1;
+ rdev->qpshift = PAGE_SHIFT - sp->eq_s_qpp;
+ rdev->qpmask = (1 << sp->eq_s_qpp) - 1;
+ rdev->cqshift = PAGE_SHIFT - sp->iq_s_qpp;
+ rdev->cqmask = (1 << sp->iq_s_qpp) - 1;
if (c4iw_num_stags(rdev) == 0) {
rc = -EINVAL;
diff --git a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
index f6c8a59..c232f70 100644
--- a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
+++ b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
@@ -1040,5 +1040,4 @@ void your_reg_device(struct c4iw_dev *dev);
#define SGE_CTRLQ_NUM 0
-extern int spg_creds;/* Status Page size in credit units(1 unit = 64) */
#endif
diff --git a/sys/dev/cxgbe/iw_cxgbe/qp.c b/sys/dev/cxgbe/iw_cxgbe/qp.c
index 38c61ea..1c0381c 100644
--- a/sys/dev/cxgbe/iw_cxgbe/qp.c
+++ b/sys/dev/cxgbe/iw_cxgbe/qp.c
@@ -215,7 +215,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res->u.sqrq.op = FW_RI_RES_OP_WRITE;
/* eqsize is the number of 64B entries plus the status page size. */
- eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + spg_creds;
+ eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
+ (sc->params.sge.spg_len / EQ_ESIZE);
res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
@@ -237,7 +238,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res->u.sqrq.op = FW_RI_RES_OP_WRITE;
/* eqsize is the number of 64B entries plus the status page size. */
- eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + spg_creds ;
+ eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
+ (sc->params.sge.spg_len / EQ_ESIZE);
res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 5f62dec..da81228 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -4495,13 +4495,13 @@ t4_sysctls(struct adapter *sc)
sc->params.vpd.cclk, "core clock frequency (in KHz)");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
- CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
- sizeof(sc->sge.timer_val), sysctl_int_array, "A",
+ CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
+ sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
"interrupt holdoff timer values (us)");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
- CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
- sizeof(sc->sge.counter_val), sysctl_int_array, "A",
+ CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
+ sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
"interrupt holdoff packet counter values");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
diff --git a/sys/dev/cxgbe/t4_netmap.c b/sys/dev/cxgbe/t4_netmap.c
index d05812d..6f6df10 100644
--- a/sys/dev/cxgbe/t4_netmap.c
+++ b/sys/dev/cxgbe/t4_netmap.c
@@ -56,8 +56,6 @@ __FBSDID("$FreeBSD$");
#include "common/t4_regs_values.h"
extern int fl_pad; /* XXXNM */
-extern int spg_len; /* XXXNM */
-extern int fl_pktshift; /* XXXNM */
SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe netmap parameters");
@@ -285,6 +283,7 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
int rc, cntxt_id, i;
__be32 v;
struct adapter *sc = vi->pi->adapter;
+ struct sge_params *sp = &sc->params.sge;
struct netmap_adapter *na = NA(vi->ifp);
struct fw_iq_cmd c;
@@ -293,7 +292,7 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
MPASS(nm_rxq->fl_desc != NULL);
bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
- bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + spg_len);
+ bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len);
bzero(&c, sizeof(c));
c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
@@ -334,7 +333,7 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
c.fl0dcaen_to_fl0cidxfthresh =
htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) |
V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
- c.fl0size = htobe16(na->num_rx_desc / 8 + spg_len / EQ_ESIZE);
+ c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE);
c.fl0addr = htobe64(nm_rxq->fl_ba);
rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
@@ -345,7 +344,7 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
}
nm_rxq->iq_cidx = 0;
- MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - spg_len / IQ_ESIZE);
+ MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
nm_rxq->iq_gen = F_RSPD_GEN;
nm_rxq->iq_cntxt_id = be16toh(c.iqid);
nm_rxq->iq_abs_id = be16toh(c.physiqid);
@@ -430,7 +429,7 @@ alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
MPASS(na != NULL);
MPASS(nm_txq->desc != NULL);
- len = na->num_tx_desc * EQ_ESIZE + spg_len;
+ len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
bzero(nm_txq->desc, len);
bzero(&c, sizeof(c));
@@ -472,7 +471,7 @@ alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
- uint32_t s_qpp = sc->sge.eq_s_qpp;
+ uint32_t s_qpp = sc->params.sge.eq_s_qpp;
uint32_t mask = (1 << s_qpp) - 1;
volatile uint8_t *udb;
@@ -1112,7 +1111,7 @@ ncxgbe_attach(device_t dev)
na.na_flags = NAF_BDG_MAYSLEEP;
/* Netmap doesn't know about the space reserved for the status page. */
- na.num_tx_desc = vi->qsize_txq - spg_len / EQ_ESIZE;
+ na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;
/*
* The freelist's cidx/pidx drives netmap's rx cidx/pidx. So
@@ -1220,7 +1219,8 @@ t4_nm_intr(void *arg)
(const void *)&d->cpl[0]);
break;
case CPL_RX_PKT:
- ring->slot[fl_cidx].len = G_RSPD_LEN(lq) - fl_pktshift;
+ ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
+ sc->params.sge.fl_pktshift;
ring->slot[fl_cidx].flags = kring->nkr_slot_flags;
fl_cidx += (lq & F_RSPD_NEWBUF) ? 1 : 0;
fl_credits += (lq & F_RSPD_NEWBUF) ? 1 : 0;
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 7f1236b..33d8d48 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -166,8 +166,8 @@ static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *);
static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int);
static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *);
-static inline void init_eq(struct sge_eq *, int, int, uint8_t, uint16_t,
- char *);
+static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t,
+ uint16_t, char *);
static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
bus_addr_t *, void **);
static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
@@ -495,7 +495,7 @@ t4_tweak_chip_settings(struct adapter *sc)
static inline int
hwsz_ok(struct adapter *sc, int hwsz)
{
- int mask = fl_pad ? sc->sge.pad_boundary - 1 : 16 - 1;
+ int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;
return (hwsz >= 64 && (hwsz & mask) == 0);
}
@@ -507,6 +507,7 @@ int
t4_read_chip_settings(struct adapter *sc)
{
struct sge *s = &sc->sge;
+ struct sge_params *sp = &sc->params.sge;
int i, j, n, rc = 0;
uint32_t m, v, r;
uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
@@ -521,36 +522,21 @@ t4_read_chip_settings(struct adapter *sc)
struct sw_zone_info *swz, *safe_swz;
struct hw_buf_info *hwb;
- m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
- v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
- V_EGRSTATUSPAGESIZE(spg_len == 128);
+ t4_init_sge_params(sc);
+
+ m = F_RXPKTCPLMODE;
+ v = F_RXPKTCPLMODE;
r = t4_read_reg(sc, A_SGE_CONTROL);
if ((r & m) != v) {
device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
rc = EINVAL;
}
- s->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5);
-
- if (is_t4(sc))
- s->pack_boundary = s->pad_boundary;
- else {
- r = t4_read_reg(sc, A_SGE_CONTROL2);
- if (G_INGPACKBOUNDARY(r) == 0)
- s->pack_boundary = 16;
- else
- s->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
- }
- v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
- V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
- V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
- V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
- V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
- V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
- V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
- V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
- r = t4_read_reg(sc, A_SGE_HOST_PAGE_SIZE);
- if (r != v) {
+ /*
+ * If this changes then every single use of PAGE_SHIFT in the driver
+ * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
+ */
+ if (sp->page_shift != PAGE_SHIFT) {
device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
rc = EINVAL;
}
@@ -589,7 +575,7 @@ t4_read_chip_settings(struct adapter *sc)
if (swz->size < PAGE_SIZE) {
MPASS(powerof2(swz->size));
- if (fl_pad && (swz->size % sc->sge.pad_boundary != 0))
+ if (fl_pad && (swz->size % sp->pad_boundary != 0))
continue;
}
@@ -602,7 +588,7 @@ t4_read_chip_settings(struct adapter *sc)
continue;
#ifdef INVARIANTS
if (fl_pad)
- MPASS(hwb->size % sc->sge.pad_boundary == 0);
+ MPASS(hwb->size % sp->pad_boundary == 0);
#endif
hwb->zidx = i;
if (head == -1)
@@ -653,7 +639,7 @@ t4_read_chip_settings(struct adapter *sc)
hwb = &s->hw_buf_info[i];
#ifdef INVARIANTS
if (fl_pad)
- MPASS(hwb->size % sc->sge.pad_boundary == 0);
+ MPASS(hwb->size % sp->pad_boundary == 0);
#endif
spare = safe_swz->size - hwb->size;
if (spare >= CL_METADATA_SIZE) {
@@ -663,22 +649,6 @@ t4_read_chip_settings(struct adapter *sc)
}
}
- r = t4_read_reg(sc, A_SGE_INGRESS_RX_THRESHOLD);
- s->counter_val[0] = G_THRESHOLD_0(r);
- s->counter_val[1] = G_THRESHOLD_1(r);
- s->counter_val[2] = G_THRESHOLD_2(r);
- s->counter_val[3] = G_THRESHOLD_3(r);
-
- r = t4_read_reg(sc, A_SGE_TIMER_VALUE_0_AND_1);
- s->timer_val[0] = G_TIMERVALUE0(r) / core_ticks_per_usec(sc);
- s->timer_val[1] = G_TIMERVALUE1(r) / core_ticks_per_usec(sc);
- r = t4_read_reg(sc, A_SGE_TIMER_VALUE_2_AND_3);
- s->timer_val[2] = G_TIMERVALUE2(r) / core_ticks_per_usec(sc);
- s->timer_val[3] = G_TIMERVALUE3(r) / core_ticks_per_usec(sc);
- r = t4_read_reg(sc, A_SGE_TIMER_VALUE_4_AND_5);
- s->timer_val[4] = G_TIMERVALUE4(r) / core_ticks_per_usec(sc);
- s->timer_val[5] = G_TIMERVALUE5(r) / core_ticks_per_usec(sc);
-
v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
if (r != v) {
@@ -702,25 +672,6 @@ t4_read_chip_settings(struct adapter *sc)
rc = EINVAL;
}
- r = t4_read_reg(sc, A_SGE_CONM_CTRL);
- s->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
- if (is_t4(sc))
- s->fl_starve_threshold2 = s->fl_starve_threshold;
- else
- s->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
-
- /* egress queues: log2 of # of doorbells per BAR2 page */
- r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
- r >>= S_QUEUESPERPAGEPF0 +
- (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf;
- s->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
-
- /* ingress queues: log2 of # of doorbells per BAR2 page */
- r = t4_read_reg(sc, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
- r >>= S_QUEUESPERPAGEPF0 +
- (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf;
- s->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
-
t4_init_tp_params(sc);
t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
@@ -750,25 +701,26 @@ void
t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *children)
{
+ struct sge_params *sp = &sc->params.sge;
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A",
"freelist buffer sizes");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
- NULL, fl_pktshift, "payload DMA offset in rx buffer (bytes)");
+ NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
- NULL, sc->sge.pad_boundary, "payload pad boundary (bytes)");
+ NULL, sp->pad_boundary, "payload pad boundary (bytes)");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
- NULL, spg_len, "status page size (bytes)");
+ NULL, sp->spg_len, "status page size (bytes)");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
NULL, cong_drop, "congestion drop setting");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
- NULL, sc->sge.pack_boundary, "payload pack boundary (bytes)");
+ NULL, sp->pack_boundary, "payload pack boundary (bytes)");
}
int
@@ -907,8 +859,8 @@ mtu_to_max_payload(struct adapter *sc, int mtu, const int toe)
} else {
#endif
/* large enough even when hw VLAN extraction is disabled */
- payload = fl_pktshift + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
- mtu;
+ payload = sc->params.sge.fl_pktshift + ETHER_HDR_LEN +
+ ETHER_VLAN_ENCAP_LEN + mtu;
#ifdef TCP_OFFLOAD
}
#endif
@@ -1069,7 +1021,7 @@ t4_setup_vi_queues(struct vi_info *vi)
iqid = vi_intr_iq(vi, j)->cntxt_id;
snprintf(name, sizeof(name), "%s txq%d",
device_get_nameunit(vi->dev), i);
- init_eq(&txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, iqid,
+ init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, iqid,
name);
rc = alloc_txq(vi, txq, i, oid);
@@ -1086,7 +1038,7 @@ t4_setup_vi_queues(struct vi_info *vi)
iqid = vi_intr_iq(vi, j)->cntxt_id;
snprintf(name, sizeof(name), "%s ofld_txq%d",
device_get_nameunit(vi->dev), i);
- init_eq(&ofld_txq->eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan,
+ init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan,
iqid, name);
snprintf(name, sizeof(name), "%d", i);
@@ -1110,7 +1062,8 @@ t4_setup_vi_queues(struct vi_info *vi)
ctrlq = &sc->sge.ctrlq[pi->port_id];
iqid = vi_intr_iq(vi, 0)->cntxt_id;
snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev));
- init_eq(&ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name);
+ init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid,
+ name);
rc = alloc_wrq(sc, vi, ctrlq, oid);
done:
@@ -1690,6 +1643,7 @@ t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
{
struct sge_rxq *rxq = iq_to_rxq(iq);
struct ifnet *ifp = rxq->ifp;
+ struct adapter *sc = iq->adapter;
const struct cpl_rx_pkt *cpl = (const void *)(rss + 1);
#if defined(INET) || defined(INET6)
struct lro_ctrl *lro = &rxq->lro;
@@ -1704,9 +1658,9 @@ t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__,
rss->opcode));
- m0->m_pkthdr.len -= fl_pktshift;
- m0->m_len -= fl_pktshift;
- m0->m_data += fl_pktshift;
+ m0->m_pkthdr.len -= sc->params.sge.fl_pktshift;
+ m0->m_len -= sc->params.sge.fl_pktshift;
+ m0->m_data += sc->params.sge.fl_pktshift;
m0->m_pkthdr.rcvif = ifp;
M_HASHTYPE_SET(m0, sw_hashtype[rss->hash_type][rss->ipv6]);
@@ -2445,7 +2399,7 @@ init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
iq->intr_pktc_idx = pktc_idx;
}
iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */
- iq->sidx = iq->qsize - spg_len / IQ_ESIZE;
+ iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE;
}
static inline void
@@ -2453,7 +2407,7 @@ init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name)
{
fl->qsize = qsize;
- fl->sidx = qsize - spg_len / EQ_ESIZE;
+ fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
strlcpy(fl->lockname, name, sizeof(fl->lockname));
if (sc->flags & BUF_PACKING_OK &&
((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */
@@ -2464,15 +2418,15 @@ init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name)
}
static inline void
-init_eq(struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan,
- uint16_t iqid, char *name)
+init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
+ uint8_t tx_chan, uint16_t iqid, char *name)
{
KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype));
eq->flags = eqtype & EQ_TYPEMASK;
eq->tx_chan = tx_chan;
eq->iqid = iqid;
- eq->sidx = qsize - spg_len / EQ_ESIZE;
+ eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
strlcpy(eq->lockname, name, sizeof(eq->lockname));
}
@@ -2543,6 +2497,7 @@ alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
struct fw_iq_cmd c;
struct port_info *pi = vi->pi;
struct adapter *sc = iq->adapter;
+ struct sge_params *sp = &sc->params.sge;
__be32 v = 0;
len = iq->qsize * IQ_ESIZE;
@@ -2602,14 +2557,14 @@ alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
}
if (fl->flags & FL_BUF_PACKING) {
- fl->lowat = roundup2(sc->sge.fl_starve_threshold2, 8);
- fl->buf_boundary = sc->sge.pack_boundary;
+ fl->lowat = roundup2(sp->fl_starve_threshold2, 8);
+ fl->buf_boundary = sp->pack_boundary;
} else {
- fl->lowat = roundup2(sc->sge.fl_starve_threshold, 8);
+ fl->lowat = roundup2(sp->fl_starve_threshold, 8);
fl->buf_boundary = 16;
}
- if (fl_pad && fl->buf_boundary < sc->sge.pad_boundary)
- fl->buf_boundary = sc->sge.pad_boundary;
+ if (fl_pad && fl->buf_boundary < sp->pad_boundary)
+ fl->buf_boundary = sp->pad_boundary;
c.iqns_to_fl0congen |=
htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
@@ -2667,7 +2622,7 @@ alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
qid = fl->cntxt_id;
if (isset(&sc->doorbells, DOORBELL_UDB)) {
- uint32_t s_qpp = sc->sge.eq_s_qpp;
+ uint32_t s_qpp = sc->params.sge.eq_s_qpp;
uint32_t mask = (1 << s_qpp) - 1;
volatile uint8_t *udb;
@@ -2856,7 +2811,7 @@ alloc_mgmtq(struct adapter *sc)
NULL, "management queue");
snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev));
- init_eq(&mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
+ init_eq(sc, &mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
sc->sge.fwq.cntxt_id, name);
rc = alloc_wrq(sc, NULL, mgmtq, oid);
if (rc != 0) {
@@ -3041,7 +2996,7 @@ alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx,
if (rc != 0)
return (rc);
- len = na->num_rx_desc * EQ_ESIZE + spg_len;
+ len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len;
rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map,
&nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc);
if (rc != 0)
@@ -3050,7 +3005,7 @@ alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx,
nm_rxq->vi = vi;
nm_rxq->nid = idx;
nm_rxq->iq_cidx = 0;
- nm_rxq->iq_sidx = vi->qsize_rxq - spg_len / IQ_ESIZE;
+ nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE;
nm_rxq->iq_gen = F_RSPD_GEN;
nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
nm_rxq->fl_sidx = na->num_rx_desc;
@@ -3116,7 +3071,7 @@ alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx,
char name[16];
struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
- len = na->num_tx_desc * EQ_ESIZE + spg_len;
+ len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map,
&nm_txq->ba, (void **)&nm_txq->desc);
if (rc)
@@ -3164,7 +3119,7 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
{
int rc, cntxt_id;
struct fw_eq_ctrl_cmd c;
- int qsize = eq->sidx + spg_len / EQ_ESIZE;
+ int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
bzero(&c, sizeof(c));
@@ -3208,7 +3163,7 @@ eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
int rc, cntxt_id;
struct fw_eq_eth_cmd c;
- int qsize = eq->sidx + spg_len / EQ_ESIZE;
+ int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
bzero(&c, sizeof(c));
@@ -3252,7 +3207,7 @@ ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
{
int rc, cntxt_id;
struct fw_eq_ofld_cmd c;
- int qsize = eq->sidx + spg_len / EQ_ESIZE;
+ int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
bzero(&c, sizeof(c));
@@ -3298,7 +3253,7 @@ alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
- qsize = eq->sidx + spg_len / EQ_ESIZE;
+ qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
len = qsize * EQ_ESIZE;
rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
&eq->ba, (void **)&eq->desc);
@@ -3337,7 +3292,7 @@ alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
if (isset(&eq->doorbells, DOORBELL_UDB) ||
isset(&eq->doorbells, DOORBELL_UDBWC) ||
isset(&eq->doorbells, DOORBELL_WCWR)) {
- uint32_t s_qpp = sc->sge.eq_s_qpp;
+ uint32_t s_qpp = sc->params.sge.eq_s_qpp;
uint32_t mask = (1 << s_qpp) - 1;
volatile uint8_t *udb;
@@ -4523,10 +4478,10 @@ done:
* Do not inline mbufs if doing so would violate the pad/pack
* boundary alignment requirement.
*/
- if (fl_pad && (MSIZE % sc->sge.pad_boundary) != 0)
+ if (fl_pad && (MSIZE % sc->params.sge.pad_boundary) != 0)
continue;
if (fl->flags & FL_BUF_PACKING &&
- (MSIZE % sc->sge.pack_boundary) != 0)
+ (MSIZE % sc->params.sge.pack_boundary) != 0)
continue;
if (spare < CL_METADATA_SIZE + MSIZE)
@@ -4612,7 +4567,7 @@ find_safe_refill_source(struct adapter *sc, struct sge_fl *fl)
fl->cll_alt.hwidx = hwidx;
fl->cll_alt.zidx = hwb->zidx;
if (allow_mbufs_in_cluster &&
- (fl_pad == 0 || (MSIZE % sc->sge.pad_boundary) == 0))
+ (fl_pad == 0 || (MSIZE % sc->params.sge.pad_boundary) == 0))
fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE;
else
fl->cll_alt.region1 = 0;
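
A note on the core_ticks_to_us() helper added to common/common.h above: it
converts SGE timer values from core-clock ticks to microseconds, rounding to
nearest, where vpd.cclk is the core clock in kHz (per the sysctl description in
t4_main.c).  A small illustrative check, using a made-up 250 MHz core clock and
a standalone copy of the formula:

/* Illustrative only: rounding behaviour of core_ticks_to_us(). */
#include <assert.h>

static unsigned int
ticks_to_us(unsigned int cclk_khz, unsigned int ticks)
{
	/* Same formula as the helper: add cclk/2 so truncation rounds to nearest. */
	return ((ticks * 1000 + cclk_khz / 2) / cclk_khz);
}

int
main(void)
{
	assert(ticks_to_us(250000, 1300) == 5);	/* 5.2 us rounds down */
	assert(ticks_to_us(250000, 1400) == 6);	/* 5.6 us rounds up */
	return (0);
}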