Diffstat (limited to 'fs/cifs/smbdirect.c')
-rw-r--r--   fs/cifs/smbdirect.c   121
1 file changed, 75 insertions(+), 46 deletions(-)
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index c62f7c9..e459c97 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -17,6 +17,7 @@
#include <linux/highmem.h>
#include "smbdirect.h"
#include "cifs_debug.h"
+#include "cifsproto.h"
static struct smbd_response *get_empty_queue_buffer(
struct smbd_connection *info);
@@ -2003,10 +2004,12 @@ read_rfc1002_done:
* return value: actual data read
*/
static int smbd_recv_page(struct smbd_connection *info,
- struct page *page, unsigned int to_read)
+ struct page *page, unsigned int page_offset,
+ unsigned int to_read)
{
int ret;
char *to_address;
+ void *page_address;
/* make sure we have the page ready for read */
ret = wait_event_interruptible(
@@ -2014,16 +2017,17 @@ static int smbd_recv_page(struct smbd_connection *info,
info->reassembly_data_length >= to_read ||
info->transport_status != SMBD_CONNECTED);
if (ret)
- return 0;
+ return ret;
/* now we can read from reassembly queue and not sleep */
- to_address = kmap_atomic(page);
+ page_address = kmap_atomic(page);
+ to_address = (char *) page_address + page_offset;
log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
page, to_address, to_read);
ret = smbd_recv_buf(info, to_address, to_read);
- kunmap_atomic(to_address);
+ kunmap_atomic(page_address);
return ret;
}
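Note for readers of the hunk above: the subtle part is the unmap. kmap_atomic() returns the base address of the temporary mapping, and that same base address is what the code now hands back to kunmap_atomic(), while the copy destination is the base plus page_offset. A minimal sketch of the pattern (hypothetical helper, not part of the CIFS code; it relies on <linux/highmem.h>, which this file already includes):

    /* Sketch: copy len bytes into a page starting at offset. */
    static void copy_to_page_at(struct page *page, unsigned int offset,
                                const void *src, unsigned int len)
    {
            void *base = kmap_atomic(page);     /* map the whole page */
            char *dst = (char *)base + offset;  /* destination inside the page */

            memcpy(dst, src, len);
            kunmap_atomic(base);                /* unmap using the mapping base */
    }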
@@ -2037,7 +2041,7 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
{
char *buf;
struct page *page;
- unsigned int to_read;
+ unsigned int to_read, page_offset;
int rc;
info->smbd_recv_pending++;
@@ -2051,15 +2055,16 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
case READ | ITER_BVEC:
page = msg->msg_iter.bvec->bv_page;
+ page_offset = msg->msg_iter.bvec->bv_offset;
to_read = msg->msg_iter.bvec->bv_len;
- rc = smbd_recv_page(info, page, to_read);
+ rc = smbd_recv_page(info, page, page_offset, to_read);
break;
default:
/* It's a bug in upper layer to get there */
cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
msg->msg_iter.type);
- rc = -EIO;
+ rc = -EINVAL;
}
info->smbd_recv_pending--;
@@ -2082,7 +2087,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
struct kvec vec;
int nvecs;
int size;
- int buflen = 0, remaining_data_length;
+ unsigned int buflen = 0, remaining_data_length;
int start, i, j;
int max_iov_size =
info->max_send_size - sizeof(struct smbd_data_transfer);
@@ -2113,10 +2118,17 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
buflen += iov[i].iov_len;
}
- /* add in the page array if there is one */
+ /*
+ * Add in the page array if there is one. The caller needs to set
+ * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
+ * ends at page boundary
+ */
if (rqst->rq_npages) {
- buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
- buflen += rqst->rq_tailsz;
+ if (rqst->rq_npages == 1)
+ buflen += rqst->rq_tailsz;
+ else
+ buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
+ rqst->rq_offset + rqst->rq_tailsz;
}
if (buflen + sizeof(struct smbd_data_transfer) >
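A quick worked example of the new length math (illustrative numbers only): with rq_npages = 3, rq_pagesz = 4096, rq_offset = 512 and rq_tailsz = 2048, the page array contributes 4096 * 2 - 512 + 2048 = 9728 bytes, i.e. (4096 - 512) from the first page, a full 4096 from the middle page and 2048 from the tail page. The single-page case is just rq_tailsz, with rq_offset only determining where the data starts within that page.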
@@ -2213,8 +2225,9 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
/* now sending pages if there are any */
for (i = 0; i < rqst->rq_npages; i++) {
- buflen = (i == rqst->rq_npages-1) ?
- rqst->rq_tailsz : rqst->rq_pagesz;
+ unsigned int offset;
+
+ rqst_page_get_length(rqst, i, &buflen, &offset);
nvecs = (buflen + max_iov_size - 1) / max_iov_size;
log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
buflen, nvecs);
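rqst_page_get_length() is declared via the cifsproto.h include added at the top of this patch and defined outside this file, so its body is not visible in this diff. Its job is to report where page i's data starts and how long it is, consistent with the buffer-length math above. A sketch of the expected semantics (illustrative only, not the actual definition):

    /* Sketch: per-page offset/length for an smb_rqst page array. */
    static void rqst_page_get_length_sketch(const struct smb_rqst *rqst,
                                            unsigned int page,
                                            unsigned int *len,
                                            unsigned int *offset)
    {
            *offset = (page == 0) ? rqst->rq_offset : 0;

            if (page == rqst->rq_npages - 1)
                    *len = rqst->rq_tailsz;                   /* last (or only) page */
            else if (page == 0)
                    *len = rqst->rq_pagesz - rqst->rq_offset; /* first of several    */
            else
                    *len = rqst->rq_pagesz;                   /* full middle page    */
    }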
@@ -2225,9 +2238,11 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
remaining_data_length -= size;
log_write(INFO, "sending pages i=%d offset=%d size=%d"
" remaining_data_length=%d\n",
- i, j*max_iov_size, size, remaining_data_length);
+ i, j*max_iov_size+offset, size,
+ remaining_data_length);
rc = smbd_post_send_page(
- info, rqst->rq_pages[i], j*max_iov_size,
+ info, rqst->rq_pages[i],
+ j*max_iov_size + offset,
size, remaining_data_length);
if (rc)
goto done;
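To make the chunked send concrete (illustrative numbers): with max_iov_size = 1340 and a first-page fragment of buflen = 3584 starting at offset = 512 (a 4096-byte page minus the head offset), nvecs = 3 and the loop posts sends of 1340, 1340 and 904 bytes from page offsets 512, 1852 and 3192, i.e. offset + j * max_iov_size for j = 0, 1, 2.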
@@ -2284,37 +2299,37 @@ static void smbd_mr_recovery_work(struct work_struct *work)
if (smbdirect_mr->state == MR_INVALIDATED ||
smbdirect_mr->state == MR_ERROR) {
- if (smbdirect_mr->state == MR_INVALIDATED) {
+ /* recover this MR entry */
+ rc = ib_dereg_mr(smbdirect_mr->mr);
+ if (rc) {
+ log_rdma_mr(ERR,
+ "ib_dereg_mr failed rc=%x\n",
+ rc);
+ smbd_disconnect_rdma_connection(info);
+ continue;
+ }
+
+ smbdirect_mr->mr = ib_alloc_mr(
+ info->pd, info->mr_type,
+ info->max_frmr_depth);
+ if (IS_ERR(smbdirect_mr->mr)) {
+ log_rdma_mr(ERR,
+ "ib_alloc_mr failed mr_type=%x "
+ "max_frmr_depth=%x\n",
+ info->mr_type,
+ info->max_frmr_depth);
+ smbd_disconnect_rdma_connection(info);
+ continue;
+ }
+
+ if (smbdirect_mr->state == MR_INVALIDATED)
ib_dma_unmap_sg(
info->id->device, smbdirect_mr->sgl,
smbdirect_mr->sgl_count,
smbdirect_mr->dir);
- smbdirect_mr->state = MR_READY;
- } else if (smbdirect_mr->state == MR_ERROR) {
-
- /* recover this MR entry */
- rc = ib_dereg_mr(smbdirect_mr->mr);
- if (rc) {
- log_rdma_mr(ERR,
- "ib_dereg_mr failed rc=%x\n",
- rc);
- smbd_disconnect_rdma_connection(info);
- }
- smbdirect_mr->mr = ib_alloc_mr(
- info->pd, info->mr_type,
- info->max_frmr_depth);
- if (IS_ERR(smbdirect_mr->mr)) {
- log_rdma_mr(ERR,
- "ib_alloc_mr failed mr_type=%x "
- "max_frmr_depth=%x\n",
- info->mr_type,
- info->max_frmr_depth);
- smbd_disconnect_rdma_connection(info);
- }
+ smbdirect_mr->state = MR_READY;
- smbdirect_mr->state = MR_READY;
- }
/* smbdirect_mr->state is updated by this function
* and is read and updated by I/O issuing CPUs trying
* to get a MR, the call to atomic_inc_return
@@ -2460,7 +2475,7 @@ again:
*/
struct smbd_mr *smbd_register_mr(
struct smbd_connection *info, struct page *pages[], int num_pages,
- int tailsz, bool writing, bool need_invalidate)
+ int offset, int tailsz, bool writing, bool need_invalidate)
{
struct smbd_mr *smbdirect_mr;
int rc, i;
@@ -2483,17 +2498,31 @@ struct smbd_mr *smbd_register_mr(
smbdirect_mr->sgl_count = num_pages;
sg_init_table(smbdirect_mr->sgl, num_pages);
- for (i = 0; i < num_pages - 1; i++)
- sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
+ log_rdma_mr(INFO, "num_pages=0x%x offset=0x%x tailsz=0x%x\n",
+ num_pages, offset, tailsz);
+
+ if (num_pages == 1) {
+ sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset);
+ goto skip_multiple_pages;
+ }
+ /* We have at least two pages to register */
+ sg_set_page(
+ &smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset);
+ i = 1;
+ while (i < num_pages - 1) {
+ sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
+ i++;
+ }
sg_set_page(&smbdirect_mr->sgl[i], pages[i],
tailsz ? tailsz : PAGE_SIZE, 0);
+skip_multiple_pages:
dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
smbdirect_mr->dir = dir;
rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
if (!rc) {
- log_rdma_mr(INFO, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
+ log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
num_pages, dir, rc);
goto dma_map_error;
}
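For example (illustrative values), registering num_pages = 3 with offset = 512 and tailsz = 2048 on 4096-byte pages produces scatterlist entries of (pages[0], length 3584, offset 512), (pages[1], length 4096, offset 0) and (pages[2], length 2048, offset 0), for a total of PAGE_SIZE * 2 - offset + tailsz = 9728 bytes, the same arithmetic as the buffer-length calculation in smbd_send() earlier in this patch.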
@@ -2501,8 +2530,8 @@ struct smbd_mr *smbd_register_mr(
rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
NULL, PAGE_SIZE);
if (rc != num_pages) {
- log_rdma_mr(INFO,
- "ib_map_mr_sg failed rc = %x num_pages = %x\n",
+ log_rdma_mr(ERR,
+ "ib_map_mr_sg failed rc = %d num_pages = %x\n",
rc, num_pages);
goto map_mr_error;
}