path: root/sys/dev/oce/oce_if.c
Diffstat (limited to 'sys/dev/oce/oce_if.c')
-rw-r--r--	sys/dev/oce/oce_if.c	240
1 file changed, 202 insertions(+), 38 deletions(-)
diff --git a/sys/dev/oce/oce_if.c b/sys/dev/oce/oce_if.c
index 426acd9..48cdb16 100644
--- a/sys/dev/oce/oce_if.c
+++ b/sys/dev/oce/oce_if.c
@@ -36,6 +36,7 @@
* Costa Mesa, CA 92626
*/
+
/* $FreeBSD$ */
#include "opt_inet6.h"
@@ -94,7 +95,8 @@ static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
struct oce_async_cqe_link_state *acqe);
-
+static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
+static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
/* IP specific */
#if defined(INET6) || defined(INET)
@@ -266,8 +268,6 @@ oce_attach(device_t dev)
rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
if (rc)
goto stats_free;
-#ifdef DEV_NETMAP
-#endif /* DEV_NETMAP */
return 0;
@@ -485,7 +485,7 @@ oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
struct oce_wq *wq = NULL;
int queue_index = 0;
int status = 0;
-
+
if ((m->m_flags & M_FLOWID) != 0)
queue_index = m->m_pkthdr.flowid % sc->nwqs;
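The flow id carried in the mbuf header is reduced modulo the number of work queues, so every packet of a given flow lands on the same transmit queue. A tiny standalone sketch of that mapping (the 4-queue count is an illustrative assumption, not a driver value):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for "queue_index = m->m_pkthdr.flowid % sc->nwqs". */
static int
pick_tx_queue(uint32_t flowid, int nwqs)
{
	return (int)(flowid % (uint32_t)nwqs);
}

int
main(void)
{
	printf("queue %d\n", pick_tx_queue(74565u, 4)); /* prints "queue 1" */
	return (0);
}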
@@ -568,6 +568,7 @@ oce_intr(void *arg, int pending)
eq_arm:
oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
+
return;
}
@@ -633,6 +634,8 @@ oce_fast_isr(void *arg)
taskqueue_enqueue_fast(ii->tq, &ii->task);
+ ii->eq->intr++;
+
return FILTER_HANDLED;
}
@@ -780,6 +783,7 @@ oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
struct oce_nic_frag_wqe *nicfrag;
int num_wqes;
uint32_t reg_value;
+ boolean_t complete = TRUE;
m = *mpp;
if (!m)
@@ -790,6 +794,15 @@ oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
goto free_ret;
}
+ if(oce_tx_asic_stall_verify(sc, m)) {
+ m = oce_insert_vlan_tag(sc, m, &complete);
+ if(!m) {
+ device_printf(sc->dev, "VLAN tag insertion failed\n");
+ return 0;
+ }
+ }
+
if (m->m_pkthdr.csum_flags & CSUM_TSO) {
/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
@@ -837,15 +850,15 @@ retry:
nichdr->u0.dw[2] = 0;
nichdr->u0.dw[3] = 0;
- nichdr->u0.s.complete = 1;
+ nichdr->u0.s.complete = complete;
nichdr->u0.s.event = 1;
nichdr->u0.s.crc = 1;
nichdr->u0.s.forward = 0;
nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
nichdr->u0.s.udpcs =
- (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
+ (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
nichdr->u0.s.tcpcs =
- (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
+ (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
nichdr->u0.s.num_wqe = num_wqes;
nichdr->u0.s.total_length = m->m_pkthdr.len;
if (m->m_flags & M_VLANTAG) {
@@ -895,7 +908,7 @@ retry:
wq->tx_stats.tx_wrbs += num_wqes;
wq->tx_stats.tx_bytes += m->m_pkthdr.len;
wq->tx_stats.tx_pkts++;
-
+
bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
reg_value = (num_wqes << 16) | wq->wq_id;
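The doorbell write that follows packs the number of new WQEs into the upper 16 bits of the register and the work queue id into the lower 16. A self-contained sketch of the packing (field layout taken from the line above):

#include <stdint.h>
#include <stdio.h>

/* Mirror of "reg_value = (num_wqes << 16) | wq->wq_id": WQE count in
 * bits 31:16, queue id in bits 15:0. */
static uint32_t
tx_doorbell_word(uint32_t num_wqes, uint32_t wq_id)
{
	return ((num_wqes << 16) | (wq_id & 0xffff));
}

int
main(void)
{
	printf("0x%08x\n", (unsigned)tx_doorbell_word(2, 5)); /* 0x00020005 */
	return (0);
}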
@@ -1081,6 +1094,9 @@ oce_start(struct ifnet *ifp)
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING)
return;
+
+ if (!sc->link_status)
+ return;
do {
IF_DEQUEUE(&sc->ifp->if_snd, m);
@@ -1303,7 +1319,6 @@ oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
#if defined(INET6) || defined(INET)
/* Try to queue to LRO */
if (IF_LRO_ENABLED(sc) &&
- !(m->m_flags & M_VLANTAG) &&
(cqe->u0.s.ip_cksum_pass) &&
(cqe->u0.s.l4_cksum_pass) &&
(!cqe->u0.s.ip_ver) &&
@@ -1343,13 +1358,6 @@ oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
int num_frags = cqe->u0.s.num_fragments;
- if (IS_XE201(sc) && cqe->u0.s.error) {
- /* Lancer A0 workaround
- * num_frags will be 1 more than actual in case of error
- */
- if (num_frags)
- num_frags -= 1;
- }
for (i = 0; i < num_frags; i++) {
if (rq->packets_out == rq->packets_in) {
device_printf(sc->dev,
@@ -1458,7 +1466,7 @@ oce_free_lro(POCE_SOFTC sc)
tcp_lro_free(lro);
}
}
-#endif /* INET6 || INET */
+#endif
int
oce_alloc_rx_bufs(struct oce_rq *rq, int count)
@@ -1471,7 +1479,7 @@ oce_alloc_rx_bufs(struct oce_rq *rq, int count)
struct oce_nic_rqe *rqe;
pd_rxulp_db_t rxdb_reg;
-
+ bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
for (i = 0; i < count; i++) {
in = rq->packets_in + 1;
if (in == OCE_RQ_PACKET_ARRAY_SIZE)
@@ -1512,14 +1520,12 @@ oce_alloc_rx_bufs(struct oce_rq *rq, int count)
}
if (added != 0) {
for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
- DELAY(1);
rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
rxdb_reg.bits.qid = rq->rq_id;
OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
added -= OCE_MAX_RQ_POSTS;
}
if (added > 0) {
- DELAY(1);
rxdb_reg.bits.qid = rq->rq_id;
rxdb_reg.bits.num_posted = added;
OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
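With the DELAY(1) calls dropped above, the posting loop still honors the doorbell's limit: it rings once per full chunk of OCE_MAX_RQ_POSTS buffers and once more for any remainder. A minimal userspace sketch of the chunking (the chunk size 255 is an assumed illustrative value, not necessarily the driver's constant):

#include <stdio.h>

/* ring() stands in for the OCE_WRITE_REG32(... PD_RXULP_DB ...) write. */
static void
ring(int qid, int num_posted)
{
	printf("qid %d: post %d\n", qid, num_posted);
}

/* Post "added" buffers in doorbell-sized chunks, as the loop above does. */
static void
post_rx_buffers(int qid, int added, int max_posts)
{
	int i;

	for (i = added / max_posts; i > 0; i--) {
		ring(qid, max_posts);
		added -= max_posts;
	}
	if (added > 0)
		ring(qid, added);
}

int
main(void)
{
	post_rx_buffers(7, 600, 255); /* rings 255, 255, then 90 */
	return (0);
}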
@@ -1554,13 +1560,8 @@ oce_rq_handler(void *arg)
} else {
rq->rx_stats.rxcp_err++;
sc->ifp->if_ierrors++;
- if (IS_XE201(sc))
- /* Lancer A0 no buffer workaround */
- oce_discard_rx_comp(rq, cqe);
- else
- /* Post L3/L4 errors to stack.*/
- oce_rx(rq, cqe->u0.s.frag_index, cqe);
-
+ /* Post L3/L4 errors to stack.*/
+ oce_rx(rq, cqe->u0.s.frag_index, cqe);
}
rq->rx_stats.rx_compl++;
cqe->u0.dw[2] = 0;
@@ -1757,18 +1758,18 @@ oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
uint32_t req_size;
struct mbx_hdr req;
OCE_DMA_MEM dma_mem;
-
+ struct mbx_common_get_cntl_attr *fw_cmd;
if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
return EFAULT;
-
+
if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
return EINVAL;
-
+
ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
return EFAULT;
-
+
req_size = le32toh(req.u0.req.request_length);
if (req_size > 65536)
return EINVAL;
@@ -1792,12 +1793,86 @@ oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
rc = EFAULT;
+ /*
+ * The firmware fills in all the attributes for this ioctl except
+ * the driver version, so fill it in here.
+ */
+ if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
+ fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
+ strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
+ COMPONENT_REVISION, strlen(COMPONENT_REVISION));
+ }
+
dma_free:
oce_dma_free(sc, &dma_mem);
return rc;
}
+static void
+oce_eqd_set_periodic(POCE_SOFTC sc)
+{
+ struct oce_set_eqd set_eqd[OCE_MAX_EQ];
+ struct oce_aic_obj *aic;
+ struct oce_eq *eqo;
+ uint64_t now = 0, delta;
+ int eqd, i, num = 0;
+ uint32_t ips = 0;
+ int tps;
+
+ for (i = 0 ; i < sc->neqs; i++) {
+ eqo = sc->eq[i];
+ aic = &sc->aic_obj[i];
+ /* When setting the static eq delay from the user space */
+ /* When a static eq delay has been set from user space */
+ eqd = aic->et_eqd;
+ goto modify_eqd;
+ }
+
+ now = ticks;
+
+ /* Overflow check */
+ if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
+ goto done;
+
+ delta = now - aic->ticks;
+ tps = delta/hz;
+
+ /* Interrupt rate based on elapsed ticks */
+ if(tps)
+ ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
+
+ if (ips > INTR_RATE_HWM)
+ eqd = aic->cur_eqd + 20;
+ else if (ips < INTR_RATE_LWM)
+ eqd = aic->cur_eqd / 2;
+ else
+ goto done;
+
+ if (eqd < 10)
+ eqd = 0;
+
+ /* Make sure that the eq delay is in the known range */
+ eqd = min(eqd, aic->max_eqd);
+ eqd = max(eqd, aic->min_eqd);
+
+modify_eqd:
+ if (eqd != aic->cur_eqd) {
+ set_eqd[num].delay_multiplier = (eqd * 65)/100;
+ set_eqd[num].eq_id = eqo->eq_id;
+ aic->cur_eqd = eqd;
+ num++;
+ }
+done:
+ aic->intr_prev = eqo->intr;
+ aic->ticks = now;
+ }
+
+ /* Is there at least one eq that needs to be modified? */
+ if(num)
+ oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
+
+}
static void
oce_local_timer(void *arg)
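The new oce_eqd_set_periodic() applies a simple hysteresis on each timer pass: measure interrupts per second for every EQ, add 20 to the delay above INTR_RATE_HWM, halve it below INTR_RATE_LWM, zero anything under 10, then clamp to the [min_eqd, max_eqd] range before batching the changes into one mailbox call. A standalone sketch of just that policy (the watermark values here are illustrative assumptions, not the driver's constants):

#include <stdint.h>

#define EX_INTR_RATE_HWM 20000 /* assumed high watermark, intr/s */
#define EX_INTR_RATE_LWM 5000  /* assumed low watermark, intr/s */

/* Next EQ delay from the measured interrupt rate, mirroring the policy
 * in oce_eqd_set_periodic(); returns cur_eqd unchanged when the rate
 * sits between the watermarks. */
static int
next_eq_delay(uint32_t ips, int cur_eqd, int min_eqd, int max_eqd)
{
	int eqd;

	if (ips > EX_INTR_RATE_HWM)
		eqd = cur_eqd + 20; /* interrupt storm: lengthen the delay */
	else if (ips < EX_INTR_RATE_LWM)
		eqd = cur_eqd / 2;  /* mostly idle: shorten it */
	else
		return (cur_eqd);   /* in band: leave it alone */

	if (eqd < 10)
		eqd = 0;
	if (eqd > max_eqd) /* keep the delay in the known range */
		eqd = max_eqd;
	if (eqd < min_eqd)
		eqd = min_eqd;
	return (eqd);
}

The chosen delay is then scaled to the firmware's delay multiplier as (eqd * 65) / 100 and sent for all modified EQs in a single oce_mbox_eqd_modify_periodic() call.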
@@ -1813,6 +1888,10 @@ oce_local_timer(void *arg)
for (i = 0; i < sc->nwqs; i++)
oce_tx_restart(sc, sc->wq[i]);
+ /* calculate and set the eq delay for optimal interrupt rate */
+ if (IS_BE(sc))
+ oce_eqd_set_periodic(sc);
+
callout_reset(&sc->timer, hz, oce_local_timer, sc);
}
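oce_local_timer() re-arms itself with callout_reset() for hz ticks, so the EQ-delay tuning above runs roughly once a second on BE adapters. A minimal self-rearming callout in the same style (FreeBSD callout(9) KPI; initialization of the callout is assumed to happen at attach time):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/callout.h>

/* Periodic tick in the style of oce_local_timer(): do the per-second
 * maintenance, then re-arm one second (hz ticks) out. */
static void
example_tick(void *arg)
{
	struct callout *co = arg;

	/* ... per-second work (stats refresh, tx restart, eqd tuning) ... */
	callout_reset(co, hz, example_tick, co);
}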
@@ -1849,17 +1928,17 @@ oce_if_deactivate(POCE_SOFTC sc)
/* Stop intrs and finish any bottom halves pending */
oce_hw_intr_disable(sc);
- /* Since taskqueue_drain takes a Giant Lock, We should not acquire
- any other lock. So unlock device lock and require after
- completing taskqueue_drain.
- */
- UNLOCK(&sc->dev_lock);
+ /* Since taskqueue_drain takes a Giant Lock, we should not acquire
+ any other lock. So unlock the device lock and reacquire it after
+ completing taskqueue_drain.
+ */
+ UNLOCK(&sc->dev_lock);
for (i = 0; i < sc->intr_count; i++) {
if (sc->intrs[i].tq != NULL) {
taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
}
}
- LOCK(&sc->dev_lock);
+ LOCK(&sc->dev_lock);
/* Delete RX queue in card with flush param */
oce_stop_rx(sc);
@@ -1874,7 +1953,7 @@ oce_if_deactivate(POCE_SOFTC sc)
/* But still we need to get MCC aync events.
So enable intrs and also arm first EQ
- */
+ */
oce_hw_intr_enable(sc);
oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
@@ -1947,6 +2026,7 @@ oce_mq_handler(void *arg)
struct oce_mq_cqe *cqe;
struct oce_async_cqe_link_state *acqe;
struct oce_async_event_grp5_pvid_state *gcqe;
+ struct oce_async_event_qnq *dbgcqe;
bus_dmamap_sync(cq->ring->dma.tag,
@@ -1973,6 +2053,14 @@ oce_mq_handler(void *arg)
sc->pvid = 0;
}
+ else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
+ optype == ASYNC_EVENT_DEBUG_QNQ) {
+ dbgcqe =
+ (struct oce_async_event_qnq *)cqe;
+ if(dbgcqe->valid)
+ sc->qnqid = dbgcqe->vlan_tag;
+ sc->qnq_debug_event = TRUE;
+ }
}
cqe->u0.dw[3] = 0;
RING_GET(cq->ring, 1);
@@ -2032,3 +2120,79 @@ update_queues_got(POCE_SOFTC sc)
}
}
+static int
+oce_check_ipv6_ext_hdr(struct mbuf *m)
+{
+ struct ether_header *eh = mtod(m, struct ether_header *);
+ caddr_t m_datatemp = m->m_data;
+
+ if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
+ m->m_data += sizeof(struct ether_header);
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+
+ if((ip6->ip6_nxt != IPPROTO_TCP) &&
+ (ip6->ip6_nxt != IPPROTO_UDP)) {
+ struct ip6_ext *ip6e = NULL;
+ m->m_data += sizeof(struct ip6_hdr);
+
+ ip6e = mtod(m, struct ip6_ext *);
+ if(ip6e->ip6e_len == 0xff) {
+ m->m_data = m_datatemp;
+ return TRUE;
+ }
+ }
+ m->m_data = m_datatemp;
+ }
+ return FALSE;
+}
+
+static int
+is_be3_a1(POCE_SOFTC sc)
+{
+ if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static struct mbuf *
+oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
+{
+ uint16_t vlan_tag = 0;
+
+ if(!M_WRITABLE(m))
+ return NULL;
+
+ /* Embed vlan tag in the packet if it is not part of it */
+ if(m->m_flags & M_VLANTAG) {
+ vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
+ m->m_flags &= ~M_VLANTAG;
+ }
+
+ /* if UMC, ignore vlan tag insertion and instead insert pvid */
+ if(sc->pvid) {
+ if(!vlan_tag)
+ vlan_tag = sc->pvid;
+ *complete = FALSE;
+ }
+
+ if(vlan_tag) {
+ m = ether_vlanencap(m, vlan_tag);
+ }
+
+ if(sc->qnqid) {
+ m = ether_vlanencap(m, sc->qnqid);
+ *complete = FALSE;
+ }
+ return m;
+}
+
+static int
+oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
+{
+ if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) &&
+ oce_check_ipv6_ext_hdr(m)) {
+ return TRUE;
+ }
+ return FALSE;
+}
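oce_check_ipv6_ext_hdr() above only inspects the fixed IPv6 header's next-header byte and the length byte of the first extension header. A flat-buffer sketch of the same probe, with field offsets per the Ethernet and IPv6 header layouts (no mbuf handling; constants duplicated here for self-containment):

#include <stddef.h>
#include <stdint.h>

#define EX_ETHERTYPE_IPV6 0x86dd
#define EX_ETHER_HDR_LEN 14 /* Ethernet header */
#define EX_IP6_HDR_LEN 40   /* fixed IPv6 header */

/* Returns 1 when the frame is IPv6, its next header is neither TCP (6)
 * nor UDP (17), and the first extension header's ip6e_len byte is 0xff,
 * mirroring the checks in oce_check_ipv6_ext_hdr(). */
static int
has_suspect_ipv6_ext_hdr(const uint8_t *frame, size_t len)
{
	uint16_t etype;
	uint8_t nxt;

	if (len < EX_ETHER_HDR_LEN + EX_IP6_HDR_LEN + 2)
		return (0);
	etype = (uint16_t)((frame[12] << 8) | frame[13]);
	if (etype != EX_ETHERTYPE_IPV6)
		return (0);
	nxt = frame[EX_ETHER_HDR_LEN + 6]; /* IPv6 next-header field */
	if (nxt == 6 || nxt == 17) /* TCP or UDP: no extension header */
		return (0);
	/* ip6e_len is the second byte of the first extension header. */
	return (frame[EX_ETHER_HDR_LEN + EX_IP6_HDR_LEN + 1] == 0xff);
}

When the gate fires, oce_insert_vlan_tag() re-tags the frame in software via ether_vlanencap() (twice when a QnQ outer id is configured) and clears the complete flag that feeds nichdr->u0.s.complete in the transmit hunk above.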