Diffstat (limited to 'sys')
-rw-r--r--  sys/dev/cxgb/cxgb_ioctl.h |  29
-rw-r--r--  sys/dev/cxgb/cxgb_main.c  | 273
2 files changed, 302 insertions(+), 0 deletions(-)
diff --git a/sys/dev/cxgb/cxgb_ioctl.h b/sys/dev/cxgb/cxgb_ioctl.h
index e4b4875..3f3ac5a 100644
--- a/sys/dev/cxgb/cxgb_ioctl.h
+++ b/sys/dev/cxgb/cxgb_ioctl.h
@@ -59,6 +59,9 @@ enum {
CH_CLEAR_STATS,
CH_GET_UP_LA,
CH_GET_UP_IOQS,
+ CH_SET_FILTER,
+ CH_DEL_FILTER,
+ CH_GET_FILTER,
};
/* statistics categories */
@@ -215,6 +218,29 @@ struct ch_up_ioqs {
struct t3_ioq_entry *data;
};
+struct ch_filter_tuple {
+ uint32_t sip;
+ uint32_t dip;
+ uint16_t sport;
+ uint16_t dport;
+ uint16_t vlan:12;
+ uint16_t vlan_prio:3;
+};
+
+struct ch_filter {
+ uint32_t filter_id;
+ struct ch_filter_tuple val;
+ struct ch_filter_tuple mask;
+ uint16_t mac_addr_idx;
+ uint8_t mac_hit:1;
+ uint8_t proto:2;
+
+ uint8_t want_filter_id:1;
+ uint8_t pass:1;
+ uint8_t rss:1;
+ uint8_t qset;
+};
+
#define CHELSIO_SETREG _IOW('f', CH_SETREG, struct ch_reg)
#define CHELSIO_GETREG _IOWR('f', CH_GETREG, struct ch_reg)
#define CHELSIO_GETMTUTAB _IOR('f', CH_GETMTUTAB, struct ch_mtus)
@@ -239,4 +265,7 @@ struct ch_up_ioqs {
#define CHELSIO_GET_EEPROM _IOWR('f', CH_GET_EEPROM, struct ch_eeprom)
#define CHELSIO_GET_UP_LA _IOWR('f', CH_GET_UP_LA, struct ch_up_la)
#define CHELSIO_GET_UP_IOQS _IOWR('f', CH_GET_UP_IOQS, struct ch_up_ioqs)
+#define CHELSIO_SET_FILTER _IOW('f', CH_SET_FILTER, struct ch_filter)
+#define CHELSIO_DEL_FILTER _IOW('f', CH_DEL_FILTER, struct ch_filter)
+#define CHELSIO_GET_FILTER _IOWR('f', CH_GET_FILTER, struct ch_filter)
#endif
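
[Editor's note: for reference, a minimal userland sketch of driving the new CHELSIO_SET_FILTER ioctl; it is not part of the change. The control device node name (/dev/cxgbc0) and the include path of cxgb_ioctl.h are assumptions; the field constraints follow the sanity checks in the CHELSIO_SET_FILTER handler in cxgb_main.c below.]

#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#include "cxgb_ioctl.h"			/* assumed install location of the header above */

int
set_example_filter(void)
{
	struct ch_filter f;
	int fd, rc;

	fd = open("/dev/cxgbc0", O_RDWR);	/* assumed control device node */
	if (fd < 0)
		return (-1);

	memset(&f, 0, sizeof(f));
	f.filter_id = 0;			/* slot within the HW filter region */
	f.val.dip = ntohl(inet_addr("10.0.0.1"));	/* handler expects host byte order */
	f.mask.dip = 0xffffffff;		/* required once val.dip is non-zero */
	f.val.dport = 80;
	f.mask.dport = 0xffff;			/* required once val.dport is non-zero */
	f.mac_addr_idx = 0xffff;		/* no exact-MAC match */
	f.pass = 1;				/* pass the packet rather than drop it */
	f.rss = 0;				/* steer to a fixed queue set ... */
	f.qset = 1;				/* ... namely qset 1 */

	rc = ioctl(fd, CHELSIO_SET_FILTER, &f);
	close(fd);
	return (rc);
}
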
diff --git a/sys/dev/cxgb/cxgb_main.c b/sys/dev/cxgb/cxgb_main.c
index 01095c4..ad2caed 100644
--- a/sys/dev/cxgb/cxgb_main.c
+++ b/sys/dev/cxgb/cxgb_main.c
@@ -99,6 +99,13 @@ static void cxgb_ext_intr_handler(void *, int);
static void cxgb_tick_handler(void *, int);
static void cxgb_tick(void *);
static void setup_rss(adapter_t *sc);
+static int alloc_filters(struct adapter *);
+static int setup_hw_filters(struct adapter *);
+static int set_filter(struct adapter *, int, const struct filter_info *);
+static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
+ unsigned int, u64, u64);
+static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
+ unsigned int, u64, u64);
/* Attachment glue for the PCI controller end of the device. Each port of
* the device is attached separately, as defined later.
@@ -1661,6 +1668,13 @@ cxgb_up(struct adapter *sc)
if ((err = update_tpsram(sc)))
goto out;
+ if (is_offload(sc)) {
+ sc->params.mc5.nservers = 0;
+ sc->params.mc5.nroutes = 0;
+ sc->params.mc5.nfilters = t3_mc5_size(&sc->mc5) -
+ MC5_MIN_TIDS;
+ }
+
err = t3_init_hw(sc, 0);
if (err)
goto out;
@@ -1672,6 +1686,7 @@ cxgb_up(struct adapter *sc)
if (err)
goto out;
+ alloc_filters(sc);
setup_rss(sc);
t3_intr_clear(sc);
@@ -1698,6 +1713,7 @@ cxgb_up(struct adapter *sc)
if (!(sc->flags & QUEUES_BOUND)) {
bind_qsets(sc);
+ setup_hw_filters(sc);
sc->flags |= QUEUES_BOUND;
}
@@ -3076,6 +3092,139 @@ cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
free(buf, M_DEVBUF);
break;
}
+ case CHELSIO_SET_FILTER: {
+ struct ch_filter *f = (struct ch_filter *)data;
+ struct filter_info *p;
+ unsigned int nfilters = sc->params.mc5.nfilters;
+
+ if (!is_offload(sc))
+ return (EOPNOTSUPP); /* No TCAM */
+ if (!(sc->flags & FULL_INIT_DONE))
+ return (EAGAIN); /* mc5 not setup yet */
+ if (nfilters == 0)
+ return (EBUSY); /* TOE will use TCAM */
+
+ /* sanity checks */
+ if (f->filter_id >= nfilters ||
+ (f->val.dip && f->mask.dip != 0xffffffff) ||
+ (f->val.sport && f->mask.sport != 0xffff) ||
+ (f->val.dport && f->mask.dport != 0xffff) ||
+ (f->val.vlan && f->mask.vlan != 0xfff) ||
+ (f->val.vlan_prio &&
+ f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
+ (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
+ f->qset >= SGE_QSETS ||
+ sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
+ return (EINVAL);
+
+ /* Was allocated with M_WAITOK */
+ KASSERT(sc->filters, ("filter table NULL\n"));
+
+ p = &sc->filters[f->filter_id];
+ if (p->locked)
+ return (EPERM);
+
+ bzero(p, sizeof(*p));
+ p->sip = f->val.sip;
+ p->sip_mask = f->mask.sip;
+ p->dip = f->val.dip;
+ p->sport = f->val.sport;
+ p->dport = f->val.dport;
+ p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
+ p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
+ FILTER_NO_VLAN_PRI;
+ p->mac_hit = f->mac_hit;
+ p->mac_vld = f->mac_addr_idx != 0xffff;
+ p->mac_idx = f->mac_addr_idx;
+ p->pkt_type = f->proto;
+ p->report_filter_id = f->want_filter_id;
+ p->pass = f->pass;
+ p->rss = f->rss;
+ p->qset = f->qset;
+
+ error = set_filter(sc, f->filter_id, p);
+ if (error == 0)
+ p->valid = 1;
+ break;
+ }
+ case CHELSIO_DEL_FILTER: {
+ struct ch_filter *f = (struct ch_filter *)data;
+ struct filter_info *p;
+ unsigned int nfilters = sc->params.mc5.nfilters;
+
+ if (!is_offload(sc))
+ return (EOPNOTSUPP);
+ if (!(sc->flags & FULL_INIT_DONE))
+ return (EAGAIN);
+ if (nfilters == 0 || sc->filters == NULL)
+ return (EINVAL);
+ if (f->filter_id >= nfilters)
+ return (EINVAL);
+
+ p = &sc->filters[f->filter_id];
+ if (p->locked)
+ return (EPERM);
+ if (!p->valid)
+ return (EFAULT); /* Read "Bad address" as "Bad index" */
+
+ bzero(p, sizeof(*p));
+ p->sip = p->sip_mask = 0xffffffff;
+ p->vlan = 0xfff;
+ p->vlan_prio = FILTER_NO_VLAN_PRI;
+ p->pkt_type = 1;
+ error = set_filter(sc, f->filter_id, p);
+ break;
+ }
+ case CHELSIO_GET_FILTER: {
+ struct ch_filter *f = (struct ch_filter *)data;
+ struct filter_info *p;
+ unsigned int i, nfilters = sc->params.mc5.nfilters;
+
+ if (!is_offload(sc))
+ return (EOPNOTSUPP);
+ if (!(sc->flags & FULL_INIT_DONE))
+ return (EAGAIN);
+ if (nfilters == 0 || sc->filters == NULL)
+ return (EINVAL);
+
+ i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
+ for (; i < nfilters; i++) {
+ p = &sc->filters[i];
+ if (!p->valid)
+ continue;
+
+ bzero(f, sizeof(*f));
+
+ f->filter_id = i;
+ f->val.sip = p->sip;
+ f->mask.sip = p->sip_mask;
+ f->val.dip = p->dip;
+ f->mask.dip = p->dip ? 0xffffffff : 0;
+ f->val.sport = p->sport;
+ f->mask.sport = p->sport ? 0xffff : 0;
+ f->val.dport = p->dport;
+ f->mask.dport = p->dport ? 0xffff : 0;
+ f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
+ f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
+ f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
+ 0 : p->vlan_prio;
+ f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
+ 0 : FILTER_NO_VLAN_PRI;
+ f->mac_hit = p->mac_hit;
+ f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
+ f->proto = p->pkt_type;
+ f->want_filter_id = p->report_filter_id;
+ f->pass = p->pass;
+ f->rss = p->rss;
+ f->qset = p->qset;
+
+ break;
+ }
+
+ if (i == nfilters)
+ f->filter_id = 0xffffffff;
+ break;
+ }
default:
return (EOPNOTSUPP);
break;
@@ -3130,5 +3279,129 @@ cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
+static int
+alloc_filters(struct adapter *sc)
+{
+ struct filter_info *p;
+ unsigned int nfilters = sc->params.mc5.nfilters;
+
+ if (nfilters == 0)
+ return (0);
+
+ p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
+ sc->filters = p;
+
+ p = &sc->filters[nfilters - 1];
+ p->vlan = 0xfff;
+ p->vlan_prio = FILTER_NO_VLAN_PRI;
+ p->pass = p->rss = p->valid = p->locked = 1;
+
+ return (0);
+}
+
+static int
+setup_hw_filters(struct adapter *sc)
+{
+ int i, rc;
+ unsigned int nfilters = sc->params.mc5.nfilters;
+
+ if (!sc->filters)
+ return (0);
+
+ t3_enable_filters(sc);
+
+ for (i = rc = 0; i < nfilters && !rc; i++) {
+ if (sc->filters[i].locked)
+ rc = set_filter(sc, i, &sc->filters[i]);
+ }
+
+ return (rc);
+}
+
+static int
+set_filter(struct adapter *sc, int id, const struct filter_info *f)
+{
+ int len;
+ struct mbuf *m;
+ struct ulp_txpkt *txpkt;
+ struct work_request_hdr *wr;
+ struct cpl_pass_open_req *oreq;
+ struct cpl_set_tcb_field *sreq;
+
+ len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
+ KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));
+
+ id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
+ sc->params.mc5.nfilters;
+
+ m = m_gethdr(M_WAITOK, MT_DATA);
+ m->m_len = m->m_pkthdr.len = len;
+ bzero(mtod(m, char *), len);
+
+ wr = mtod(m, struct work_request_hdr *);
+ wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);
+
+ oreq = (struct cpl_pass_open_req *)(wr + 1);
+ txpkt = (struct ulp_txpkt *)oreq;
+ txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
+ txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
+ OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
+ oreq->local_port = htons(f->dport);
+ oreq->peer_port = htons(f->sport);
+ oreq->local_ip = htonl(f->dip);
+ oreq->peer_ip = htonl(f->sip);
+ oreq->peer_netmask = htonl(f->sip_mask);
+ oreq->opt0h = 0;
+ oreq->opt0l = htonl(F_NO_OFFLOAD);
+ oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
+ V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
+ V_VLAN_PRI(f->vlan_prio >> 1) |
+ V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
+ V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
+ V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));
+
+ sreq = (struct cpl_set_tcb_field *)(oreq + 1);
+ set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
+ (f->report_filter_id << 15) | (1 << 23) |
+ ((u64)f->pass << 35) | ((u64)!f->rss << 36));
+ set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
+ t3_mgmt_tx(sc, m);
+
+ if (f->pass && !f->rss) {
+ len = sizeof(*sreq);
+ m = m_gethdr(M_WAITOK, MT_DATA);
+ m->m_len = m->m_pkthdr.len = len;
+ bzero(mtod(m, char *), len);
+ sreq = mtod(m, struct cpl_set_tcb_field *);
+ sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ mk_set_tcb_field(sreq, id, 25, 0x3f80000,
+ (u64)sc->rrss_map[f->qset] << 19);
+ t3_mgmt_tx(sc, m);
+ }
+ return (0);
+}
+
+static inline void
+mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
+ unsigned int word, u64 mask, u64 val)
+{
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ req->reply = V_NO_REPLY(1);
+ req->cpu_idx = 0;
+ req->word = htons(word);
+ req->mask = htobe64(mask);
+ req->val = htobe64(val);
+}
+
+static inline void
+set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
+ unsigned int word, u64 mask, u64 val)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
+
+ txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
+ txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
+ mk_set_tcb_field(req, tid, word, mask, val);
+}
MODULE_DEPEND(if_cxgb, cxgb_t3fw, 1, 1, 1);
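
[Editor's note: for reference, a minimal sketch of walking the installed filters with the new CHELSIO_GET_FILTER ioctl; it is not part of the change. Per the handler above, passing filter_id = 0xffffffff asks for the first valid filter, each subsequent call resumes from the returned index + 1, and filter_id comes back as 0xffffffff once the table is exhausted. The device node name is again an assumption.]

#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "cxgb_ioctl.h"			/* assumed install location of the header */

void
dump_filters(void)
{
	struct ch_filter f;
	int fd;

	fd = open("/dev/cxgbc0", O_RDWR);	/* assumed control device node */
	if (fd < 0)
		return;

	memset(&f, 0, sizeof(f));
	f.filter_id = 0xffffffff;		/* ask for the first valid filter */
	while (ioctl(fd, CHELSIO_GET_FILTER, &f) == 0 &&
	    f.filter_id != 0xffffffff) {
		printf("filter %u: dport %u qset %u\n",
		    f.filter_id, f.val.dport, f.qset);
		/* f.filter_id now holds the returned index; the next call
		   continues the walk from filter_id + 1. */
	}
	close(fd);
}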