Diffstat (limited to 'sys/dev')
 sys/dev/cxgbe/tom/t4_cpl_io.c |  23
 sys/dev/cxgbe/tom/t4_ddp.c    | 269
 sys/dev/cxgbe/tom/t4_tom.c    |   9
 sys/dev/cxgbe/tom/t4_tom.h    |  32
 4 files changed, 170 insertions(+), 163 deletions(-)
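In brief: this commit collapses the loose ddp_* fields of struct toepcb into a single embedded struct ddp_pcb (defined in the t4_tom.h hunk at the bottom), so every access of the form toep->ddp_flags becomes toep->ddp.flags. A schematic sketch of the pattern follows; the field list is abbreviated here and the authoritative definition is the one in the header diff:

    /* Sketch only -- the real struct ddp_pcb also carries the buffer
     * array, pageset cache, AIO job queue, requeue task, and mutex. */
    struct ddp_pcb {
            u_int flags;            /* was toep->ddp_flags          */
            u_int waiting_count;    /* was toep->ddp_waiting_count  */
            /* ... */
    };

    struct toepcb {
            /* ... unrelated connection state ... */
            struct ddp_pcb ddp;     /* toep->ddp_flags -> toep->ddp.flags */
    };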
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index 36e325d..51c5715 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -1162,7 +1162,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
so = inp->inp_socket;
if (toep->ulp_mode == ULP_MODE_TCPDDP) {
DDP_LOCK(toep);
- if (__predict_false(toep->ddp_flags &
+ if (__predict_false(toep->ddp.flags &
(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)))
handle_ddp_close(toep, tp, cpl->rcv_nxt);
DDP_UNLOCK(toep);
@@ -1538,23 +1538,23 @@ do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
toep->rx_credits += newsize - hiwat;
}
- if (toep->ddp_waiting_count != 0 || toep->ddp_active_count != 0)
- CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)", __func__,
- tid, len);
-
if (toep->ulp_mode == ULP_MODE_TCPDDP) {
- int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off;
+ int changed = !(toep->ddp.flags & DDP_ON) ^ cpl->ddp_off;
+
+ if (toep->ddp.waiting_count != 0 || toep->ddp.active_count != 0)
+ CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)",
+ __func__, tid, len);
if (changed) {
- if (toep->ddp_flags & DDP_SC_REQ)
- toep->ddp_flags ^= DDP_ON | DDP_SC_REQ;
+ if (toep->ddp.flags & DDP_SC_REQ)
+ toep->ddp.flags ^= DDP_ON | DDP_SC_REQ;
else {
KASSERT(cpl->ddp_off == 1,
("%s: DDP switched on by itself.",
__func__));
/* Fell out of DDP mode */
- toep->ddp_flags &= ~DDP_ON;
+ toep->ddp.flags &= ~DDP_ON;
CTR1(KTR_CXGBE, "%s: fell out of DDP mode",
__func__);
@@ -1562,7 +1562,7 @@ do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
}
}
- if (toep->ddp_flags & DDP_ON) {
+ if (toep->ddp.flags & DDP_ON) {
/*
* CPL_RX_DATA with DDP on can only be an indicate.
* Start posting queued AIO requests via DDP. The
@@ -1588,7 +1588,8 @@ do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
tp->rcv_adv += credits;
}
- if (toep->ddp_waiting_count > 0 && sbavail(sb) != 0) {
+ if (toep->ulp_mode == ULP_MODE_TCPDDP && toep->ddp.waiting_count > 0 &&
+ sbavail(sb) != 0) {
CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__,
tid);
ddp_queue_toep(toep);
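One behavioral detail in the hunk above: the ddp.waiting_count check at the tail of do_rx_data() is now also gated on toep->ulp_mode == ULP_MODE_TCPDDP. Because ddp_init_toep() no longer runs for every connection (see the t4_tom.c hunks further down), the DDP queues and counters are only valid once a connection has been placed in TCPDDP mode. A sketch of the resulting access rule, illustrative only:

    /* Only touch toep->ddp.* after confirming DDP state was initialized. */
    if (toep->ulp_mode == ULP_MODE_TCPDDP && toep->ddp.waiting_count > 0 &&
        sbavail(sb) != 0)
            ddp_queue_toep(toep);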
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index 17ddf5f..efcc174 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -157,11 +157,11 @@ recycle_pageset(struct toepcb *toep, struct pageset *ps)
{
DDP_ASSERT_LOCKED(toep);
- if (!(toep->ddp_flags & DDP_DEAD) && ps->flags & PS_WIRED) {
- KASSERT(toep->ddp_cached_count + toep->ddp_active_count <
- nitems(toep->db), ("too many wired pagesets"));
- TAILQ_INSERT_HEAD(&toep->ddp_cached_pagesets, ps, link);
- toep->ddp_cached_count++;
+ if (!(toep->ddp.flags & DDP_DEAD) && ps->flags & PS_WIRED) {
+ KASSERT(toep->ddp.cached_count + toep->ddp.active_count <
+ nitems(toep->ddp.db), ("too many wired pagesets"));
+ TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link);
+ toep->ddp.cached_count++;
} else
free_pageset(toep->td, ps);
}
@@ -206,17 +206,18 @@ void
ddp_init_toep(struct toepcb *toep)
{
- TAILQ_INIT(&toep->ddp_aiojobq);
- TASK_INIT(&toep->ddp_requeue_task, 0, aio_ddp_requeue_task, toep);
- toep->ddp_active_id = -1;
- mtx_init(&toep->ddp_lock, "t4 ddp", NULL, MTX_DEF);
+ TAILQ_INIT(&toep->ddp.aiojobq);
+ TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task, toep);
+ toep->ddp.flags = DDP_OK;
+ toep->ddp.active_id = -1;
+ mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF);
}
void
ddp_uninit_toep(struct toepcb *toep)
{
- mtx_destroy(&toep->ddp_lock);
+ mtx_destroy(&toep->ddp.lock);
}
void
@@ -227,11 +228,11 @@ release_ddp_resources(struct toepcb *toep)
DDP_LOCK(toep);
- toep->ddp_flags |= DDP_DEAD;
+ toep->ddp.flags |= DDP_DEAD;
- for (i = 0; i < nitems(toep->db); i++) {
- free_ddp_buffer(toep->td, &toep->db[i]);
+ for (i = 0; i < nitems(toep->ddp.db); i++) {
+ free_ddp_buffer(toep->td, &toep->ddp.db[i]);
}
- while ((ps = TAILQ_FIRST(&toep->ddp_cached_pagesets)) != NULL) {
- TAILQ_REMOVE(&toep->ddp_cached_pagesets, ps, link);
+ while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) {
+ TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
free_pageset(toep->td, ps);
}
ddp_complete_all(toep, 0);
@@ -244,13 +245,13 @@ ddp_assert_empty(struct toepcb *toep)
{
int i;
- MPASS(!(toep->ddp_flags & DDP_TASK_ACTIVE));
- for (i = 0; i < nitems(toep->db); i++) {
- MPASS(toep->db[i].job == NULL);
- MPASS(toep->db[i].ps == NULL);
+ MPASS(!(toep->ddp.flags & DDP_TASK_ACTIVE));
+ for (i = 0; i < nitems(toep->ddp.db); i++) {
+ MPASS(toep->ddp.db[i].job == NULL);
+ MPASS(toep->ddp.db[i].ps == NULL);
}
- MPASS(TAILQ_EMPTY(&toep->ddp_cached_pagesets));
- MPASS(TAILQ_EMPTY(&toep->ddp_aiojobq));
+ MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets));
+ MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq));
}
#endif
@@ -260,21 +261,21 @@ complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db,
{
unsigned int db_flag;
- toep->ddp_active_count--;
- if (toep->ddp_active_id == db_idx) {
- if (toep->ddp_active_count == 0) {
- KASSERT(toep->db[db_idx ^ 1].job == NULL,
+ toep->ddp.active_count--;
+ if (toep->ddp.active_id == db_idx) {
+ if (toep->ddp.active_count == 0) {
+ KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL,
("%s: active_count mismatch", __func__));
- toep->ddp_active_id = -1;
+ toep->ddp.active_id = -1;
} else
- toep->ddp_active_id ^= 1;
+ toep->ddp.active_id ^= 1;
#ifdef VERBOSE_TRACES
CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
- toep->ddp_active_id);
+ toep->ddp.active_id);
#endif
} else {
- KASSERT(toep->ddp_active_count != 0 &&
- toep->ddp_active_id != -1,
+ KASSERT(toep->ddp.active_count != 0 &&
+ toep->ddp.active_id != -1,
("%s: active count mismatch", __func__));
}
@@ -284,10 +285,10 @@ complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db,
db->ps = NULL;
db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
- KASSERT(toep->ddp_flags & db_flag,
+ KASSERT(toep->ddp.flags & db_flag,
("%s: DDP buffer not active. toep %p, ddp_flags 0x%x",
- __func__, toep, toep->ddp_flags));
- toep->ddp_flags &= ~db_flag;
+ __func__, toep, toep->ddp.flags));
+ toep->ddp.flags &= ~db_flag;
}
/* XXX: handle_ddp_data code duplication */
@@ -315,12 +316,12 @@ insert_ddp_data(struct toepcb *toep, uint32_t n)
#endif
CTR2(KTR_CXGBE, "%s: placed %u bytes before falling out of DDP",
__func__, n);
- while (toep->ddp_active_count > 0) {
- MPASS(toep->ddp_active_id != -1);
- db_idx = toep->ddp_active_id;
+ while (toep->ddp.active_count > 0) {
+ MPASS(toep->ddp.active_id != -1);
+ db_idx = toep->ddp.active_id;
db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
- MPASS((toep->ddp_flags & db_flag) != 0);
- db = &toep->db[db_idx];
+ MPASS((toep->ddp.flags & db_flag) != 0);
+ db = &toep->ddp.db[db_idx];
job = db->job;
copied = job->aio_received;
placed = n;
@@ -342,8 +343,8 @@ insert_ddp_data(struct toepcb *toep, uint32_t n)
/* XXX: This always completes if there is some data. */
aio_complete(job, copied + placed, 0);
} else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
- TAILQ_INSERT_HEAD(&toep->ddp_aiojobq, job, list);
- toep->ddp_waiting_count++;
+ TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
+ toep->ddp.waiting_count++;
} else
aio_cancel(job);
n -= placed;
@@ -503,10 +504,10 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
sb = &so->so_rcv;
DDP_LOCK(toep);
- KASSERT(toep->ddp_active_id == db_idx,
+ KASSERT(toep->ddp.active_id == db_idx,
("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx,
- toep->ddp_active_id, toep->tid));
- db = &toep->db[db_idx];
+ toep->ddp.active_id, toep->tid));
+ db = &toep->ddp.db[db_idx];
job = db->job;
if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
@@ -597,7 +598,7 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
completed:
complete_ddp_buffer(toep, db, db_idx);
- if (toep->ddp_waiting_count > 0)
+ if (toep->ddp.waiting_count > 0)
ddp_queue_toep(toep);
out:
DDP_UNLOCK(toep);
@@ -611,9 +612,9 @@ handle_ddp_indicate(struct toepcb *toep)
{
DDP_ASSERT_LOCKED(toep);
- MPASS(toep->ddp_active_count == 0);
- MPASS((toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0);
- if (toep->ddp_waiting_count == 0) {
+ MPASS(toep->ddp.active_count == 0);
+ MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0);
+ if (toep->ddp.waiting_count == 0) {
/*
* The pending requests that triggered the request for an
* indicate were cancelled. Those cancels should have
@@ -623,7 +624,7 @@ handle_ddp_indicate(struct toepcb *toep)
return;
}
CTR3(KTR_CXGBE, "%s: tid %d indicated (%d waiting)", __func__,
- toep->tid, toep->ddp_waiting_count);
+ toep->tid, toep->ddp.waiting_count);
ddp_queue_toep(toep);
}
@@ -653,7 +654,7 @@ handle_ddp_tcb_rpl(struct toepcb *toep, const struct cpl_set_tcb_rpl *cpl)
db_idx = G_COOKIE(cpl->cookie) - DDP_BUF0_INVALIDATED;
INP_WLOCK(inp);
DDP_LOCK(toep);
- db = &toep->db[db_idx];
+ db = &toep->ddp.db[db_idx];
/*
* handle_ddp_data() should leave the job around until
@@ -686,7 +687,7 @@ handle_ddp_tcb_rpl(struct toepcb *toep, const struct cpl_set_tcb_rpl *cpl)
}
complete_ddp_buffer(toep, db, db_idx);
- if (toep->ddp_waiting_count > 0)
+ if (toep->ddp.waiting_count > 0)
ddp_queue_toep(toep);
DDP_UNLOCK(toep);
INP_WUNLOCK(inp);
@@ -715,12 +716,12 @@ handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt)
toep->rx_credits += len;
#endif
- while (toep->ddp_active_count > 0) {
- MPASS(toep->ddp_active_id != -1);
- db_idx = toep->ddp_active_id;
+ while (toep->ddp.active_count > 0) {
+ MPASS(toep->ddp.active_id != -1);
+ db_idx = toep->ddp.active_id;
db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
- MPASS((toep->ddp_flags & db_flag) != 0);
- db = &toep->db[db_idx];
+ MPASS((toep->ddp.flags & db_flag) != 0);
+ db = &toep->ddp.db[db_idx];
job = db->job;
copied = job->aio_received;
placed = len;
@@ -808,15 +809,15 @@ static void
enable_ddp(struct adapter *sc, struct toepcb *toep)
{
- KASSERT((toep->ddp_flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
+ KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
("%s: toep %p has bad ddp_flags 0x%x",
- __func__, toep, toep->ddp_flags));
+ __func__, toep, toep->ddp.flags));
CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
__func__, toep->tid, time_uptime);
DDP_ASSERT_LOCKED(toep);
- toep->ddp_flags |= DDP_SC_REQ;
+ toep->ddp.flags |= DDP_SC_REQ;
t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_RX_DDP_FLAGS,
V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
@@ -1333,11 +1334,11 @@ hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps)
/*
* Try to reuse a cached pageset.
*/
- TAILQ_FOREACH(ps, &toep->ddp_cached_pagesets, link) {
+ TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) {
if (pscmp(ps, vm, start, n, pgoff,
job->uaiocb.aio_nbytes) == 0) {
- TAILQ_REMOVE(&toep->ddp_cached_pagesets, ps, link);
- toep->ddp_cached_count--;
+ TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
+ toep->ddp.cached_count--;
*pps = ps;
return (0);
}
@@ -1347,15 +1348,15 @@ hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps)
* If there are too many cached pagesets to create a new one,
* free a pageset before creating a new one.
*/
- KASSERT(toep->ddp_active_count + toep->ddp_cached_count <=
- nitems(toep->db), ("%s: too many wired pagesets", __func__));
- if (toep->ddp_active_count + toep->ddp_cached_count ==
- nitems(toep->db)) {
- KASSERT(toep->ddp_cached_count > 0,
+ KASSERT(toep->ddp.active_count + toep->ddp.cached_count <=
+ nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__));
+ if (toep->ddp.active_count + toep->ddp.cached_count ==
+ nitems(toep->ddp.db)) {
+ KASSERT(toep->ddp.cached_count > 0,
("no cached pageset to free"));
- ps = TAILQ_LAST(&toep->ddp_cached_pagesets, pagesetq);
- TAILQ_REMOVE(&toep->ddp_cached_pagesets, ps, link);
- toep->ddp_cached_count--;
+ ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq);
+ TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
+ toep->ddp.cached_count--;
free_pageset(toep->td, ps);
}
DDP_UNLOCK(toep);
@@ -1395,10 +1396,10 @@ ddp_complete_all(struct toepcb *toep, int error)
struct kaiocb *job;
DDP_ASSERT_LOCKED(toep);
- while (!TAILQ_EMPTY(&toep->ddp_aiojobq)) {
- job = TAILQ_FIRST(&toep->ddp_aiojobq);
- TAILQ_REMOVE(&toep->ddp_aiojobq, job, list);
- toep->ddp_waiting_count--;
+ while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) {
+ job = TAILQ_FIRST(&toep->ddp.aiojobq);
+ TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
+ toep->ddp.waiting_count--;
if (aio_clear_cancel_function(job))
ddp_complete_one(job, error);
}
@@ -1431,10 +1432,10 @@ aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job)
{
DDP_ASSERT_LOCKED(toep);
- if (!(toep->ddp_flags & DDP_DEAD) &&
+ if (!(toep->ddp.flags & DDP_DEAD) &&
aio_set_cancel_function(job, t4_aio_cancel_queued)) {
- TAILQ_INSERT_HEAD(&toep->ddp_aiojobq, job, list);
- toep->ddp_waiting_count++;
+ TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
+ toep->ddp.waiting_count++;
} else
aio_ddp_cancel_one(job);
}
@@ -1458,18 +1459,18 @@ aio_ddp_requeue(struct toepcb *toep)
DDP_ASSERT_LOCKED(toep);
restart:
- if (toep->ddp_flags & DDP_DEAD) {
- MPASS(toep->ddp_waiting_count == 0);
- MPASS(toep->ddp_active_count == 0);
+ if (toep->ddp.flags & DDP_DEAD) {
+ MPASS(toep->ddp.waiting_count == 0);
+ MPASS(toep->ddp.active_count == 0);
return;
}
- if (toep->ddp_waiting_count == 0 ||
- toep->ddp_active_count == nitems(toep->db)) {
+ if (toep->ddp.waiting_count == 0 ||
+ toep->ddp.active_count == nitems(toep->ddp.db)) {
return;
}
- job = TAILQ_FIRST(&toep->ddp_aiojobq);
+ job = TAILQ_FIRST(&toep->ddp.aiojobq);
so = job->fd_file->f_data;
sb = &so->so_rcv;
SOCKBUF_LOCK(sb);
@@ -1481,14 +1482,14 @@ restart:
return;
}
- KASSERT(toep->ddp_active_count == 0 || sbavail(sb) == 0,
+ KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0,
("%s: pending sockbuf data and DDP is active", __func__));
/* Abort if socket has reported problems. */
/* XXX: Wait for any queued DDP's to finish and/or flush them? */
if (so->so_error && sbavail(sb) == 0) {
- toep->ddp_waiting_count--;
- TAILQ_REMOVE(&toep->ddp_aiojobq, job, list);
+ toep->ddp.waiting_count--;
+ TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
if (!aio_clear_cancel_function(job)) {
SOCKBUF_UNLOCK(sb);
goto restart;
@@ -1519,7 +1520,7 @@ restart:
*/
if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
SOCKBUF_UNLOCK(sb);
- if (toep->ddp_active_count != 0)
+ if (toep->ddp.active_count != 0)
return;
ddp_complete_all(toep, 0);
return;
@@ -1529,7 +1530,7 @@ restart:
* If DDP is not enabled and there is no pending socket buffer
* data, try to enable DDP.
*/
- if (sbavail(sb) == 0 && (toep->ddp_flags & DDP_ON) == 0) {
+ if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) {
SOCKBUF_UNLOCK(sb);
/*
@@ -1543,7 +1544,7 @@ restart:
* XXX: Might want to limit the indicate size to the size
* of the first queued request.
*/
- if ((toep->ddp_flags & DDP_SC_REQ) == 0)
+ if ((toep->ddp.flags & DDP_SC_REQ) == 0)
enable_ddp(sc, toep);
return;
}
@@ -1553,21 +1554,21 @@ restart:
* If another thread is queueing a buffer for DDP, let it
* drain any work and return.
*/
- if (toep->ddp_queueing != NULL)
+ if (toep->ddp.queueing != NULL)
return;
/* Take the next job to prep it for DDP. */
- toep->ddp_waiting_count--;
- TAILQ_REMOVE(&toep->ddp_aiojobq, job, list);
+ toep->ddp.waiting_count--;
+ TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
if (!aio_clear_cancel_function(job))
goto restart;
- toep->ddp_queueing = job;
+ toep->ddp.queueing = job;
/* NB: This drops DDP_LOCK while it holds the backing VM pages. */
error = hold_aio(toep, job, &ps);
if (error != 0) {
ddp_complete_one(job, error);
- toep->ddp_queueing = NULL;
+ toep->ddp.queueing = NULL;
goto restart;
}
@@ -1578,7 +1579,7 @@ restart:
SOCKBUF_UNLOCK(sb);
recycle_pageset(toep, ps);
aio_complete(job, copied, 0);
- toep->ddp_queueing = NULL;
+ toep->ddp.queueing = NULL;
goto restart;
}
@@ -1587,26 +1588,26 @@ restart:
SOCKBUF_UNLOCK(sb);
recycle_pageset(toep, ps);
aio_complete(job, -1, error);
- toep->ddp_queueing = NULL;
+ toep->ddp.queueing = NULL;
goto restart;
}
if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
SOCKBUF_UNLOCK(sb);
recycle_pageset(toep, ps);
- if (toep->ddp_active_count != 0) {
+ if (toep->ddp.active_count != 0) {
/*
* The door is closed, but there are still pending
* DDP buffers. Requeue. These jobs will all be
* completed once those buffers drain.
*/
aio_ddp_requeue_one(toep, job);
- toep->ddp_queueing = NULL;
+ toep->ddp.queueing = NULL;
return;
}
ddp_complete_one(job, 0);
ddp_complete_all(toep, 0);
- toep->ddp_queueing = NULL;
+ toep->ddp.queueing = NULL;
return;
}
@@ -1615,7 +1616,7 @@ sbcopy:
* If the toep is dead, there shouldn't be any data in the socket
* buffer, so the above case should have handled this.
*/
- MPASS(!(toep->ddp_flags & DDP_DEAD));
+ MPASS(!(toep->ddp.flags & DDP_DEAD));
/*
* If there is pending data in the socket buffer (either
@@ -1627,7 +1628,7 @@ sbcopy:
MPASS(job->aio_received <= job->uaiocb.aio_nbytes);
resid = job->uaiocb.aio_nbytes - job->aio_received;
m = sb->sb_mb;
- KASSERT(m == NULL || toep->ddp_active_count == 0,
+ KASSERT(m == NULL || toep->ddp.active_count == 0,
("%s: sockbuf data with active DDP", __func__));
while (m != NULL && resid > 0) {
struct iovec iov[1];
@@ -1678,7 +1679,7 @@ sbcopy:
}
t4_rcvd_locked(&toep->td->tod, intotcpcb(inp));
INP_WUNLOCK(inp);
- if (resid == 0 || toep->ddp_flags & DDP_DEAD) {
+ if (resid == 0 || toep->ddp.flags & DDP_DEAD) {
/*
* We filled the entire buffer with socket
* data, DDP is not being used, or the socket
@@ -1688,7 +1689,7 @@ sbcopy:
SOCKBUF_UNLOCK(sb);
recycle_pageset(toep, ps);
aio_complete(job, copied, 0);
- toep->ddp_queueing = NULL;
+ toep->ddp.queueing = NULL;
goto restart;
}
@@ -1697,11 +1698,11 @@ sbcopy:
* This will either enable DDP or wait for more data to
* arrive on the socket buffer.
*/
- if ((toep->ddp_flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) {
+ if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) {
SOCKBUF_UNLOCK(sb);
recycle_pageset(toep, ps);
aio_ddp_requeue_one(toep, job);
- toep->ddp_queueing = NULL;
+ toep->ddp.queueing = NULL;
goto restart;
}
@@ -1718,7 +1719,7 @@ sbcopy:
if (prep_pageset(sc, toep, ps) == 0) {
recycle_pageset(toep, ps);
aio_ddp_requeue_one(toep, job);
- toep->ddp_queueing = NULL;
+ toep->ddp.queueing = NULL;
/*
* XXX: Need to retry this later. Mostly need a trigger
@@ -1729,10 +1730,10 @@ sbcopy:
}
/* Determine which DDP buffer to use. */
- if (toep->db[0].job == NULL) {
+ if (toep->ddp.db[0].job == NULL) {
db_idx = 0;
} else {
- MPASS(toep->db[1].job == NULL);
+ MPASS(toep->ddp.db[1].job == NULL);
db_idx = 1;
}
@@ -1755,11 +1756,11 @@ sbcopy:
V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1);
buf_flag = DDP_BUF1_ACTIVE;
}
- MPASS((toep->ddp_flags & buf_flag) == 0);
- if ((toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) {
+ MPASS((toep->ddp.flags & buf_flag) == 0);
+ if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) {
MPASS(db_idx == 0);
- MPASS(toep->ddp_active_id == -1);
- MPASS(toep->ddp_active_count == 0);
+ MPASS(toep->ddp.active_id == -1);
+ MPASS(toep->ddp.active_count == 0);
ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1);
}
@@ -1776,7 +1777,7 @@ sbcopy:
if (wr == NULL) {
recycle_pageset(toep, ps);
aio_ddp_requeue_one(toep, job);
- toep->ddp_queueing = NULL;
+ toep->ddp.queueing = NULL;
/*
* XXX: Need a way to kick a retry here.
@@ -1794,7 +1795,7 @@ sbcopy:
free_wrqe(wr);
recycle_pageset(toep, ps);
aio_ddp_cancel_one(job);
- toep->ddp_queueing = NULL;
+ toep->ddp.queueing = NULL;
goto restart;
}
@@ -1804,18 +1805,18 @@ sbcopy:
#endif
/* Give the chip the go-ahead. */
t4_wrq_tx(sc, wr);
- db = &toep->db[db_idx];
+ db = &toep->ddp.db[db_idx];
db->cancel_pending = 0;
db->job = job;
db->ps = ps;
- toep->ddp_queueing = NULL;
- toep->ddp_flags |= buf_flag;
- toep->ddp_active_count++;
- if (toep->ddp_active_count == 1) {
- MPASS(toep->ddp_active_id == -1);
- toep->ddp_active_id = db_idx;
+ toep->ddp.queueing = NULL;
+ toep->ddp.flags |= buf_flag;
+ toep->ddp.active_count++;
+ if (toep->ddp.active_count == 1) {
+ MPASS(toep->ddp.active_id == -1);
+ toep->ddp.active_id = db_idx;
CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
- toep->ddp_active_id);
+ toep->ddp.active_id);
}
goto restart;
}
@@ -1825,11 +1826,11 @@ ddp_queue_toep(struct toepcb *toep)
{
DDP_ASSERT_LOCKED(toep);
- if (toep->ddp_flags & DDP_TASK_ACTIVE)
+ if (toep->ddp.flags & DDP_TASK_ACTIVE)
return;
- toep->ddp_flags |= DDP_TASK_ACTIVE;
+ toep->ddp.flags |= DDP_TASK_ACTIVE;
hold_toepcb(toep);
- soaio_enqueue(&toep->ddp_requeue_task);
+ soaio_enqueue(&toep->ddp.requeue_task);
}
static void
@@ -1839,7 +1840,7 @@ aio_ddp_requeue_task(void *context, int pending)
DDP_LOCK(toep);
aio_ddp_requeue(toep);
- toep->ddp_flags &= ~DDP_TASK_ACTIVE;
+ toep->ddp.flags &= ~DDP_TASK_ACTIVE;
DDP_UNLOCK(toep);
free_toepcb(toep);
@@ -1862,10 +1863,10 @@ t4_aio_cancel_active(struct kaiocb *job)
return;
}
- for (i = 0; i < nitems(toep->db); i++) {
- if (toep->db[i].job == job) {
+ for (i = 0; i < nitems(toep->ddp.db); i++) {
+ if (toep->ddp.db[i].job == job) {
/* Should only ever get one cancel request for a job. */
- MPASS(toep->db[i].cancel_pending == 0);
+ MPASS(toep->ddp.db[i].cancel_pending == 0);
/*
* Invalidate this buffer. It will be
@@ -1878,7 +1879,7 @@ t4_aio_cancel_active(struct kaiocb *job)
W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1,
i + DDP_BUF0_INVALIDATED,
toep->ofld_rxq->iq.abs_id);
- toep->db[i].cancel_pending = 1;
+ toep->ddp.db[i].cancel_pending = 1;
CTR2(KTR_CXGBE, "%s: request %p marked pending",
__func__, job);
break;
@@ -1896,9 +1897,9 @@ t4_aio_cancel_queued(struct kaiocb *job)
DDP_LOCK(toep);
if (!aio_cancel_cleared(job)) {
- TAILQ_REMOVE(&toep->ddp_aiojobq, job, list);
- toep->ddp_waiting_count--;
- if (toep->ddp_waiting_count == 0)
+ TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
+ toep->ddp.waiting_count--;
+ if (toep->ddp.waiting_count == 0)
ddp_queue_toep(toep);
}
CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job);
@@ -1931,9 +1932,9 @@ t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
#endif
if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
panic("new job was cancelled");
- TAILQ_INSERT_TAIL(&toep->ddp_aiojobq, job, list);
- toep->ddp_waiting_count++;
- toep->ddp_flags |= DDP_OK;
+ TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list);
+ toep->ddp.waiting_count++;
+ toep->ddp.flags |= DDP_OK;
/*
* Try to handle this request synchronously. If this has
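The t4_ddp.c changes are an almost mechanical rename, but two invariants visible above are worth noting: ddp.active_count plus ddp.cached_count may never exceed nitems(ddp.db) (the two hardware DDP buffers per connection), and ddp.active_id flips between buffer 0 and 1 by XOR as completions arrive, dropping to -1 when nothing is in flight. A minimal sketch of the flip logic from complete_ddp_buffer(), assuming the field layout in the header diff:

    /* On buffer completion: retire the active id or switch to the peer
     * buffer; -1 means no DDP buffer is currently active. */
    toep->ddp.active_count--;
    if (toep->ddp.active_id == db_idx) {
            if (toep->ddp.active_count == 0)
                    toep->ddp.active_id = -1;
            else
                    toep->ddp.active_id ^= 1;       /* 0 <-> 1 */
    }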
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 837c80e..61d18cb 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -171,7 +171,6 @@ alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags)
toep->txsd_pidx = 0;
toep->txsd_cidx = 0;
aiotx_init_toep(toep);
- ddp_init_toep(toep);
return (toep);
}
@@ -196,7 +195,8 @@ free_toepcb(struct toepcb *toep)
KASSERT(!(toep->flags & TPF_CPL_PENDING),
("%s: CPL pending", __func__));
- ddp_uninit_toep(toep);
+ if (toep->ulp_mode == ULP_MODE_TCPDDP)
+ ddp_uninit_toep(toep);
free(toep, M_CXGBE);
}
@@ -301,7 +301,8 @@ release_offload_resources(struct toepcb *toep)
MPASS(mbufq_len(&toep->ulp_pduq) == 0);
MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0);
#ifdef INVARIANTS
- ddp_assert_empty(toep);
+ if (toep->ulp_mode == ULP_MODE_TCPDDP)
+ ddp_assert_empty(toep);
#endif
if (toep->l2te)
@@ -617,7 +618,7 @@ set_tcpddp_ulp_mode(struct toepcb *toep)
{
toep->ulp_mode = ULP_MODE_TCPDDP;
- toep->ddp_flags = DDP_OK;
+ ddp_init_toep(toep);
}
int
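Taken together, the t4_tom.c hunks make DDP initialization lazy: alloc_toepcb()/free_toepcb() no longer unconditionally create and destroy the DDP mutex. Instead ddp_init_toep() runs from set_tcpddp_ulp_mode() (and now also sets DDP_OK, which set_tcpddp_ulp_mode() used to assign by hand), while teardown and the empty-state assertion are skipped for non-TCPDDP connections. A sketch of the resulting pairing, error paths elided:

    /* Lifecycle sketch: DDP state exists only on TCPDDP connections. */
    set_tcpddp_ulp_mode(toep);      /* -> ddp_init_toep(): queues, task,
                                     *    ddp.flags = DDP_OK, mtx_init   */
    /* ... connection lifetime: DDP_LOCK/DDP_UNLOCK around ddp.* ...    */
    if (toep->ulp_mode == ULP_MODE_TCPDDP)
            ddp_uninit_toep(toep);  /* -> mtx_destroy(&toep->ddp.lock)  */
    free(toep, M_CXGBE);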
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index 86d4bec..881dadb 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -132,6 +132,20 @@ struct ddp_buffer {
int cancel_pending;
};
+struct ddp_pcb {
+ u_int flags;
+ struct ddp_buffer db[2];
+ TAILQ_HEAD(, pageset) cached_pagesets;
+ TAILQ_HEAD(, kaiocb) aiojobq;
+ u_int waiting_count;
+ u_int active_count;
+ u_int cached_count;
+ int active_id; /* the currently active DDP buffer */
+ struct task requeue_task;
+ struct kaiocb *queueing;
+ struct mtx lock;
+};
+
struct aiotx_buffer {
struct pageset ps;
struct kaiocb *job;
@@ -169,17 +183,7 @@ struct toepcb {
struct mbufq ulp_pduq; /* PDUs waiting to be sent out. */
struct mbufq ulp_pdu_reclaimq;
- u_int ddp_flags;
- struct ddp_buffer db[2];
- TAILQ_HEAD(, pageset) ddp_cached_pagesets;
- TAILQ_HEAD(, kaiocb) ddp_aiojobq;
- u_int ddp_waiting_count;
- u_int ddp_active_count;
- u_int ddp_cached_count;
- int ddp_active_id; /* the currently active DDP buffer */
- struct task ddp_requeue_task;
- struct kaiocb *ddp_queueing;
- struct mtx ddp_lock;
+ struct ddp_pcb ddp;
TAILQ_HEAD(, kaiocb) aiotx_jobq;
struct task aiotx_task;
@@ -193,9 +197,9 @@ struct toepcb {
struct ofld_tx_sdesc txsd[];
};
-#define DDP_LOCK(toep) mtx_lock(&(toep)->ddp_lock)
-#define DDP_UNLOCK(toep) mtx_unlock(&(toep)->ddp_lock)
-#define DDP_ASSERT_LOCKED(toep) mtx_assert(&(toep)->ddp_lock, MA_OWNED)
+#define DDP_LOCK(toep) mtx_lock(&(toep)->ddp.lock)
+#define DDP_UNLOCK(toep) mtx_unlock(&(toep)->ddp.lock)
+#define DDP_ASSERT_LOCKED(toep) mtx_assert(&(toep)->ddp.lock, MA_OWNED)
struct flowc_tx_params {
uint32_t snd_nxt;
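Because the DDP_LOCK/DDP_UNLOCK/DDP_ASSERT_LOCKED macros were redirected at the embedded mutex, call sites keep their exact shape; only the backing field moved. A usage sketch, illustrative only, for a toepcb already in TCPDDP mode:

    DDP_LOCK(toep);                 /* mtx_lock(&toep->ddp.lock) */
    if (!(toep->ddp.flags & DDP_DEAD) && toep->ddp.waiting_count > 0)
            ddp_queue_toep(toep);   /* requeue task picks up queued jobs */
    DDP_UNLOCK(toep);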