Diffstat (limited to 'drivers/infiniband/hw/hfi1/user_sdma.c')
-rw-r--r--	drivers/infiniband/hw/hfi1/user_sdma.c	230
1 file changed, 102 insertions(+), 128 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index d55339f..8f02707 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -94,43 +94,13 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
/* Number of BTH.PSN bits used for sequence number in expected rcvs */
#define BTH_SEQ_MASK 0x7ffull
-/*
- * Define fields in the KDETH header so we can update the header
- * template.
- */
-#define KDETH_OFFSET_SHIFT 0
-#define KDETH_OFFSET_MASK 0x7fff
-#define KDETH_OM_SHIFT 15
-#define KDETH_OM_MASK 0x1
-#define KDETH_TID_SHIFT 16
-#define KDETH_TID_MASK 0x3ff
-#define KDETH_TIDCTRL_SHIFT 26
-#define KDETH_TIDCTRL_MASK 0x3
-#define KDETH_INTR_SHIFT 28
-#define KDETH_INTR_MASK 0x1
-#define KDETH_SH_SHIFT 29
-#define KDETH_SH_MASK 0x1
-#define KDETH_HCRC_UPPER_SHIFT 16
-#define KDETH_HCRC_UPPER_MASK 0xff
-#define KDETH_HCRC_LOWER_SHIFT 24
-#define KDETH_HCRC_LOWER_MASK 0xff
-
#define AHG_KDETH_INTR_SHIFT 12
#define AHG_KDETH_SH_SHIFT 13
+#define AHG_KDETH_ARRAY_SIZE 9
#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
-#define KDETH_GET(val, field) \
- (((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
-#define KDETH_SET(dw, field, val) do { \
- u32 dwval = le32_to_cpu(dw); \
- dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
- dwval |= (((val) & KDETH_##field##_MASK) << \
- KDETH_##field##_SHIFT); \
- dw = cpu_to_le32(dwval); \
- } while (0)
-
#define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
do { \
if ((idx) < ARRAY_SIZE((arr))) \
@@ -141,23 +111,10 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
return -ERANGE; \
} while (0)
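
The PBC2LRH/LRH2PBC macros retained above convert between PBC.PbcLengthDWs (a dword count) and the LRH packet length in bytes; the two differ by exactly one dword. A runnable round-trip example (variable names are illustrative, only the macros come from the file):

/* worked example, not part of the patch */
#include <stdio.h>

#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)

int main(void)
{
	unsigned int pbc_dws = 33;                 /* PBC.PbcLengthDWs */
	unsigned int lrh_bytes = PBC2LRH(pbc_dws); /* 33 * 4 - 4 = 128 */

	printf("%u dws -> %u bytes -> %u dws\n",
	       pbc_dws, lrh_bytes, LRH2PBC(lrh_bytes)); /* 33 -> 128 -> 33 */
	return 0;
}
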
-/* KDETH OM multipliers and switch over point */
-#define KDETH_OM_SMALL 4
-#define KDETH_OM_SMALL_SHIFT 2
-#define KDETH_OM_LARGE 64
-#define KDETH_OM_LARGE_SHIFT 6
-#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
-
/* Tx request flag bits */
#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
-/* SDMA request flag bits */
-#define SDMA_REQ_FOR_THREAD 1
-#define SDMA_REQ_SEND_DONE 2
-#define SDMA_REQ_HAS_ERROR 3
-#define SDMA_REQ_DONE_ERROR 4
-
#define SDMA_PKT_Q_INACTIVE BIT(0)
#define SDMA_PKT_Q_ACTIVE BIT(1)
#define SDMA_PKT_Q_DEFERRED BIT(2)
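
The KDETH field accessors and OM constants removed in the two hunks above are not dead code: KDETH_OM_MAX_SIZE and the KDETH_OM_*_SHIFT values are still used by set_txreq_header_ahg() later in this file, so the definitions were presumably relocated to a shared header (the destination is outside this diff). A minimal userspace sketch of the shift/mask pattern they implement, assuming a little-endian host in place of the kernel's le32_to_cpu()/cpu_to_le32() wrapping (helper names are mine; the shift/mask values come from the removed block):

#include <assert.h>
#include <stdint.h>

#define KDETH_TID_SHIFT 16
#define KDETH_TID_MASK  0x3ff

static uint32_t kdeth_get_tid(uint32_t dw)
{
	return (dw >> KDETH_TID_SHIFT) & KDETH_TID_MASK;
}

static void kdeth_set_tid(uint32_t *dw, uint32_t val)
{
	*dw &= ~((uint32_t)KDETH_TID_MASK << KDETH_TID_SHIFT);
	*dw |= (val & KDETH_TID_MASK) << KDETH_TID_SHIFT;
}

int main(void)
{
	uint32_t dw = 0xffffffffu;

	kdeth_set_tid(&dw, 0x2a);
	assert(kdeth_get_tid(dw) == 0x2a); /* bits outside the field untouched */
	return 0;
}
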
@@ -204,25 +161,41 @@ struct evict_data {
};
struct user_sdma_request {
- struct sdma_req_info info;
- struct hfi1_user_sdma_pkt_q *pq;
- struct hfi1_user_sdma_comp_q *cq;
/* This is the original header from user space */
struct hfi1_pkt_header hdr;
+
+ /* Read mostly fields */
+ struct hfi1_user_sdma_pkt_q *pq ____cacheline_aligned_in_smp;
+ struct hfi1_user_sdma_comp_q *cq;
/*
* Pointer to the SDMA engine for this request.
 * Since different requests could be on different VLs,
 * each request will need its own engine pointer.
*/
struct sdma_engine *sde;
- s8 ahg_idx;
- u32 ahg[9];
+ struct sdma_req_info info;
+ /* TID array values copied from the tid_iov vector */
+ u32 *tids;
+ /* total length of the data in the request */
+ u32 data_len;
+ /* number of elements copied to the tids array */
+ u16 n_tids;
/*
- * KDETH.Offset (Eager) field
- * We need to remember the initial value so the headers
- * can be updated properly.
+ * We copy the iovs for this request (based on
+ * info.iovcnt). These are only the data vectors
*/
- u32 koffset;
+ u8 data_iovs;
+ s8 ahg_idx;
+
+ /* Writeable fields shared with interrupt */
+ u64 seqcomp ____cacheline_aligned_in_smp;
+ u64 seqsubmitted;
+ /* status of the last txreq completed */
+ int status;
+
+ /* Send side fields */
+ struct list_head txps ____cacheline_aligned_in_smp;
+ u64 seqnum;
/*
* KDETH.OFFSET (TID) field
* The offset can cover multiple packets, depending on the
@@ -230,29 +203,21 @@ struct user_sdma_request {
*/
u32 tidoffset;
/*
- * We copy the iovs for this request (based on
- * info.iovcnt). These are only the data vectors
+ * KDETH.Offset (Eager) field
+ * We need to remember the initial value so the headers
+ * can be updated properly.
*/
- unsigned data_iovs;
- /* total length of the data in the request */
- u32 data_len;
+ u32 koffset;
+ u32 sent;
+ /* TID index copied from the tid_iov vector */
+ u16 tididx;
/* progress index moving along the iovs array */
- unsigned iov_idx;
+ u8 iov_idx;
+ u8 done;
+ u8 has_error;
+
struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
- /* number of elements copied to the tids array */
- u16 n_tids;
- /* TID array values copied from the tid_iov vector */
- u32 *tids;
- u16 tididx;
- u32 sent;
- u64 seqnum;
- u64 seqcomp;
- u64 seqsubmitted;
- struct list_head txps;
- unsigned long flags;
- /* status of the last txreq completed */
- int status;
-};
+} ____cacheline_aligned_in_smp;
/*
* A single txreq could span up to 3 physical pages when the MTU
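
The user_sdma_request reshuffle above is a false-sharing fix: fields are regrouped by writer, per the new "Read mostly fields", "shared with interrupt", and "Send side fields" comments, and each group starts on its own cache line. A minimal sketch of the effect with a hypothetical struct; only the annotation pattern comes from the patch:

/* userspace stand-in for the kernel annotation, for illustration */
#include <stdint.h>

#define CACHE_LINE_BYTES 64
#define ____cacheline_aligned_in_smp \
	__attribute__((aligned(CACHE_LINE_BYTES)))

struct demo_request {
	/* read mostly: written once at submit time */
	uint32_t data_len;

	/* written from the completion interrupt */
	uint64_t seqcomp ____cacheline_aligned_in_smp;

	/* written from the send path */
	uint64_t seqnum ____cacheline_aligned_in_smp;
} ____cacheline_aligned_in_smp;

/* each group lands on its own line, so interrupt-side writes do not
 * bounce the cache line holding the read-mostly submit-side fields */
_Static_assert(sizeof(struct demo_request) % CACHE_LINE_BYTES == 0,
	       "trailing annotation pads the struct to a line multiple");
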
@@ -307,7 +272,8 @@ static int defer_packet_queue(
struct sdma_engine *sde,
struct iowait *wait,
struct sdma_txreq *txreq,
- unsigned int seq);
+ uint seq,
+ bool pkts_sent);
static void activate_packet_queue(struct iowait *wait, int reason);
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
unsigned long len);
@@ -329,7 +295,8 @@ static int defer_packet_queue(
struct sdma_engine *sde,
struct iowait *wait,
struct sdma_txreq *txreq,
- unsigned seq)
+ uint seq,
+ bool pkts_sent)
{
struct hfi1_user_sdma_pkt_q *pq =
container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
@@ -349,7 +316,7 @@ static int defer_packet_queue(
xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
write_seqlock(&dev->iowait_lock);
if (list_empty(&pq->busy.list))
- list_add_tail(&pq->busy.list, &sde->dmawait);
+ iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
write_sequnlock(&dev->iowait_lock);
return -EBUSY;
eagain:
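
defer_packet_queue() now passes pkts_sent through to iowait_queue(), telling the wait list whether this queue made forward progress before blocking. The helper's body is not part of this diff; the sketch below is a plausible reconstruction of the fairness idea only, not the actual implementation:

static inline void iowait_queue(bool pkts_sent, struct iowait *w,
				struct list_head *wait_head)
{
	if (pkts_sent)
		list_add_tail(&w->list, wait_head);	/* made progress: back of line */
	else
		list_add(&w->list, wait_head);		/* starved: front of line */
}
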
@@ -379,7 +346,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
struct hfi1_devdata *dd;
struct hfi1_user_sdma_comp_q *cq;
struct hfi1_user_sdma_pkt_q *pq;
- unsigned long flags;
if (!uctxt || !fd)
return -EBADF;
@@ -393,7 +359,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
if (!pq)
return -ENOMEM;
- INIT_LIST_HEAD(&pq->list);
pq->dd = dd;
pq->ctxt = uctxt->ctxt;
pq->subctxt = fd->subctxt;
@@ -454,10 +419,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
fd->pq = pq;
fd->cq = cq;
- spin_lock_irqsave(&uctxt->sdma_qlock, flags);
- list_add(&pq->list, &uctxt->sdma_queues);
- spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
-
return 0;
pq_mmu_fail:
@@ -476,11 +437,10 @@ pq_reqs_nomem:
return ret;
}
-int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
+int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt)
{
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_user_sdma_pkt_q *pq;
- unsigned long flags;
hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
uctxt->ctxt, fd->subctxt);
@@ -488,10 +448,6 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
if (pq) {
if (pq->handler)
hfi1_mmu_rb_unregister(pq->handler);
- spin_lock_irqsave(&uctxt->sdma_qlock, flags);
- if (!list_empty(&pq->list))
- list_del_init(&pq->list);
- spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
iowait_sdma_drain(&pq->busy);
/* Wait until all requests have been freed. */
wait_event_interruptible(
@@ -607,12 +563,20 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
uctxt->ctxt, fd->subctxt, info.comp_idx);
req = pq->reqs + info.comp_idx;
- memset(req, 0, sizeof(*req));
req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
+ req->data_len = 0;
req->pq = pq;
req->cq = cq;
req->status = -1;
req->ahg_idx = -1;
+ req->iov_idx = 0;
+ req->sent = 0;
+ req->seqnum = 0;
+ req->seqcomp = 0;
+ req->seqsubmitted = 0;
+ req->tids = NULL;
+ req->done = 0;
+ req->has_error = 0;
INIT_LIST_HEAD(&req->txps);
memcpy(&req->info, &info, sizeof(info));
@@ -701,12 +665,14 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
/* Save all the IO vector structures */
for (i = 0; i < req->data_iovs; i++) {
+ req->iovs[i].offset = 0;
INIT_LIST_HEAD(&req->iovs[i].list);
memcpy(&req->iovs[i].iov,
iovec + idx++,
sizeof(req->iovs[i].iov));
ret = pin_vector_pages(req, &req->iovs[i]);
if (ret) {
+ req->data_iovs = i;
req->status = ret;
goto free_req;
}
@@ -749,6 +715,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
}
req->tids = tmp;
req->n_tids = ntids;
+ req->tididx = 0;
idx++;
}
@@ -791,12 +758,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
* request have been submitted to the SDMA engine. However, it
* will not wait for send completions.
*/
- while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
+ while (req->seqsubmitted != req->info.npkts) {
ret = user_sdma_send_pkts(req, pcount);
if (ret < 0) {
if (ret != -EBUSY) {
req->status = ret;
- set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
+ WRITE_ONCE(req->has_error, 1);
if (ACCESS_ONCE(req->seqcomp) ==
req->seqsubmitted - 1)
goto free_req;
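
Here and below, the old SDMA_REQ_* bits in req->flags give way to the dedicated done/has_error bytes accessed with WRITE_ONCE()/READ_ONCE(). A sketch of the single-writer pattern this relies on (demo struct and functions, not the driver's): each former flag has exactly one writer, so a plain byte written with WRITE_ONCE() and polled with READ_ONCE() gives the same visibility as the atomic set_bit()/test_bit() pair without the locked read-modify-write; note these macros only prevent torn or merged accesses, they impose no memory ordering.

struct demo_req {
	u8 done;	/* set once by the submit path */
	u8 has_error;	/* set once by the completion callback */
};

static void demo_complete_cb(struct demo_req *r, int status)
{
	if (status)
		WRITE_ONCE(r->has_error, 1);
}

static int demo_send_more(struct demo_req *r)
{
	if (READ_ONCE(r->has_error))	/* bail out, as user_sdma_send_pkts() does */
		return -EFAULT;
	return 0;
}
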
@@ -898,10 +865,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
pq = req->pq;
/* If tx completion has reported an error, we are done. */
- if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
- set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
+ if (READ_ONCE(req->has_error))
return -EFAULT;
- }
/*
* Check if we might have sent the entire request already
@@ -924,10 +889,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
* with errors. If so, we are not going to process any
* more packets from this request.
*/
- if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
- set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
+ if (READ_ONCE(req->has_error))
return -EFAULT;
- }
tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
if (!tx)
@@ -1024,11 +987,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
datalen);
if (changes < 0)
goto free_tx;
- sdma_txinit_ahg(&tx->txreq,
- SDMA_TXREQ_F_USE_AHG,
- datalen, req->ahg_idx, changes,
- req->ahg, sizeof(req->hdr),
- user_sdma_txreq_cb);
}
} else {
ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
@@ -1105,7 +1063,7 @@ dosend:
ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
req->seqsubmitted += count;
if (req->seqsubmitted == req->info.npkts) {
- set_bit(SDMA_REQ_SEND_DONE, &req->flags);
+ WRITE_ONCE(req->done, 1);
/*
* The txreq has already been submitted to the HW queue
* so we can free the AHG entry now. Corruption will not
@@ -1155,14 +1113,23 @@ static int pin_vector_pages(struct user_sdma_request *req,
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct sdma_mmu_node *node = NULL;
struct mmu_rb_node *rb_node;
-
- rb_node = hfi1_mmu_rb_extract(pq->handler,
- (unsigned long)iovec->iov.iov_base,
- iovec->iov.iov_len);
- if (rb_node)
+ bool extracted;
+
+ extracted =
+ hfi1_mmu_rb_remove_unless_exact(pq->handler,
+ (unsigned long)
+ iovec->iov.iov_base,
+ iovec->iov.iov_len, &rb_node);
+ if (rb_node) {
node = container_of(rb_node, struct sdma_mmu_node, rb);
- else
- rb_node = NULL;
+ if (!extracted) {
+ atomic_inc(&node->refcount);
+ iovec->pages = node->pages;
+ iovec->npages = node->npages;
+ iovec->node = node;
+ return 0;
+ }
+ }
if (!node) {
node = kzalloc(sizeof(*node), GFP_KERNEL);
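
pin_vector_pages() switches from hfi1_mmu_rb_extract() to hfi1_mmu_rb_remove_unless_exact(), whose definition is outside this diff. The summary below is inferred from the call site and the helper's name, not from its source:

/*
 * Inferred semantics:
 *   ret == true,  *out != NULL: an overlapping but inexact node was
 *                               removed from the tree so the caller
 *                               can extend, re-pin and reinsert it;
 *   ret == false, *out != NULL: a node exactly matching (base, len)
 *                               stays cached; the caller takes a
 *                               reference and reuses its pinned pages;
 *   ret == false, *out == NULL: no overlap; pin from scratch.
 */
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len,
				     struct mmu_rb_node **out);
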
@@ -1423,21 +1390,22 @@ done:
}
static int set_txreq_header_ahg(struct user_sdma_request *req,
- struct user_sdma_txreq *tx, u32 len)
+ struct user_sdma_txreq *tx, u32 datalen)
{
+ u32 ahg[AHG_KDETH_ARRAY_SIZE];
int diff = 0;
u8 omfactor; /* KDETH.OM */
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct hfi1_pkt_header *hdr = &req->hdr;
u16 pbclen = le16_to_cpu(hdr->pbc[0]);
- u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(len));
+ u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
if (PBC2LRH(pbclen) != lrhlen) {
/* PBC.PbcLengthDWs */
- AHG_HEADER_SET(req->ahg, diff, 0, 0, 12,
+ AHG_HEADER_SET(ahg, diff, 0, 0, 12,
cpu_to_le16(LRH2PBC(lrhlen)));
/* LRH.PktLen (we need the full 16 bits due to byte swap) */
- AHG_HEADER_SET(req->ahg, diff, 3, 0, 16,
+ AHG_HEADER_SET(ahg, diff, 3, 0, 16,
cpu_to_be16(lrhlen >> 2));
}
@@ -1449,13 +1417,12 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
val32 |= 1UL << 31;
- AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
- AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
+ AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
+ AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
/* KDETH.Offset */
- AHG_HEADER_SET(req->ahg, diff, 15, 0, 16,
+ AHG_HEADER_SET(ahg, diff, 15, 0, 16,
cpu_to_le16(req->koffset & 0xffff));
- AHG_HEADER_SET(req->ahg, diff, 15, 16, 16,
- cpu_to_le16(req->koffset >> 16));
+ AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
if (req_opcode(req->info.ctrl) == EXPECTED) {
__le16 val;
@@ -1473,9 +1440,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
* we have to check again.
*/
if (++req->tididx > req->n_tids - 1 ||
- !req->tids[req->tididx]) {
+ !req->tids[req->tididx])
return -EINVAL;
- }
tidval = req->tids[req->tididx];
}
omfactor = ((EXP_TID_GET(tidval, LEN) *
@@ -1483,7 +1449,7 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
KDETH_OM_SMALL_SHIFT;
/* KDETH.OM and KDETH.OFFSET (TID) */
- AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
+ AHG_HEADER_SET(ahg, diff, 7, 0, 16,
((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
((req->tidoffset >> omfactor)
& 0x7fff)));
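
A worked example of the omfactor selection above; the numbers follow from the 15-bit KDETH offset field and the KDETH_OM_* constants removed earlier (the helper and values below are illustrative):

#include <stdio.h>

#define KDETH_OM_SMALL_SHIFT	2		/* OM = 4 bytes */
#define KDETH_OM_LARGE_SHIFT	6		/* OM = 64 bytes */
#define KDETH_OM_MAX_SIZE	(128 * 1024)	/* 2^15 * 4: small OM's reach */

static unsigned int om_shift(unsigned long tid_buf_bytes)
{
	/* buffers the small multiplier cannot address use the large one */
	return tid_buf_bytes >= KDETH_OM_MAX_SIZE ?
	       KDETH_OM_LARGE_SHIFT : KDETH_OM_SMALL_SHIFT;
}

int main(void)
{
	unsigned long offset = 200 * 1024;		/* byte offset into the buffer */
	unsigned int shift = om_shift(256 * 1024);	/* 256 KiB TID buffer */

	/* with OM = 64, the 15-bit field covers 2^15 * 64 = 2 MiB */
	printf("OM shift %u, KDETH.OFFSET field 0x%lx\n",
	       shift, (offset >> shift) & 0x7fff);
	return 0;
}
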
@@ -1503,12 +1469,20 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
AHG_KDETH_INTR_SHIFT));
}
- AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
+ AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
}
+ if (diff < 0)
+ return diff;
trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
req->info.comp_idx, req->sde->this_idx,
- req->ahg_idx, req->ahg, diff, tidval);
+ req->ahg_idx, ahg, diff, tidval);
+ sdma_txinit_ahg(&tx->txreq,
+ SDMA_TXREQ_F_USE_AHG,
+ datalen, req->ahg_idx, diff,
+ ahg, sizeof(req->hdr),
+ user_sdma_txreq_cb);
+
return diff;
}
@@ -1537,7 +1511,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
if (status != SDMA_TXREQ_S_OK) {
SDMA_DBG(req, "SDMA completion with error %d",
status);
- set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
+ WRITE_ONCE(req->has_error, 1);
}
req->seqcomp = tx->seqnum;
@@ -1556,8 +1530,8 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
if (status != SDMA_TXREQ_S_OK)
req->status = status;
if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
- (test_bit(SDMA_REQ_SEND_DONE, &req->flags) ||
- test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) {
+ (READ_ONCE(req->done) ||
+ READ_ONCE(req->has_error))) {
user_sdma_free_request(req, false);
pq_update(pq);
set_comp_state(pq, cq, idx, ERROR, req->status);