From 710027a48ede75428cc68eaa8ae2269b1e356e2c Mon Sep 17 00:00:00 2001
From: Randy Dunlap
Date: Tue, 19 Aug 2008 20:13:11 +0200
Subject: Add some block/ source files to the kernel-api docbook

Fix kernel-doc notation in them as needed. Fix changed function parameter
names. Fix typos/spellos. In comments, change REQ_SPECIAL to
REQ_TYPE_SPECIAL and REQ_BLOCK_PC to REQ_TYPE_BLOCK_PC.

Signed-off-by: Randy Dunlap
Signed-off-by: Jens Axboe
---
 block/blk-map.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index af37e4a..ea1bf53 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -85,17 +85,17 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 }
 
 /**
- * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
  * @rq:		request structure to fill
  * @ubuf:	the user buffer
  * @len:	length of user data
  *
  * Description:
- *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
  *    a kernel bounce buffer is used.
  *
- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  *    still in process context.
  *
  *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -154,7 +154,7 @@ unmap_rq:
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
- * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
  * @rq:		request to map data to
  * @iov:	pointer to the iovec
@@ -162,10 +162,10 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * @len:	I/O byte count
  *
  * Description:
- *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
  *    a kernel bounce buffer is used.
  *
- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  *    still in process context.
  *
  *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -224,7 +224,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
  * Description:
  *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
  *    supply the original rq->bio from the blk_rq_map_user() return, since
- *    the io completion may have changed rq->bio.
+ *    the I/O completion may have changed rq->bio.
  */
 int blk_rq_unmap_user(struct bio *bio)
 {
@@ -250,7 +250,7 @@ int blk_rq_unmap_user(struct bio *bio)
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q:		request queue where request should be inserted
  * @rq:		request to fill
  * @kbuf:	the kernel buffer
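The calling convention this kernel-doc describes -- map the user buffer, save
rq->bio, execute the request, then unmap in process context -- looks roughly
as follows. This is an illustrative sketch, not part of the patch: it uses
blk_rq_map_user()'s signature as it stands at this point in the series, and
the queue, disk, and buffer arguments are assumed to be supplied by the
caller (a real caller would also fill in rq->cmd[] and related fields).

#include <linux/blkdev.h>

/* Illustrative sketch: issue a REQ_TYPE_BLOCK_PC READ mapped from user space. */
static int example_pc_read(struct request_queue *q, struct gendisk *disk,
			   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_user(q, rq, ubuf, len);	/* may use a bounce buffer */
	if (ret)
		goto out;

	bio = rq->bio;			/* save: I/O completion may change rq->bio */
	blk_execute_rq(q, disk, rq, 0);	/* execute and wait for completion */

	ret = blk_rq_unmap_user(bio);	/* must be issued in process context */
out:
	blk_put_request(rq);
	return ret;
}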
From a3bce90edd8f6cafe3f63b1a943800792e830178 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Thu, 28 Aug 2008 16:17:05 +0900
Subject: block: add gfp_mask argument to blk_rq_map_user and blk_rq_map_user_iov

Currently, blk_rq_map_user and blk_rq_map_user_iov always do GFP_KERNEL
allocation. This adds a gfp_mask argument to blk_rq_map_user and
blk_rq_map_user_iov so that sg can use it (sg always does GFP_ATOMIC
allocation).

Signed-off-by: FUJITA Tomonori
Signed-off-by: Douglas Gilbert
Cc: Mike Christie
Cc: James Bottomley
Signed-off-by: Jens Axboe
---
 block/blk-map.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index ea1bf53..ac21b73 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -41,7 +41,8 @@ static int __blk_rq_unmap_user(struct bio *bio)
 }
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-			     void __user *ubuf, unsigned int len)
+			     void __user *ubuf, unsigned int len,
+			     gfp_t gfp_mask)
 {
 	unsigned long uaddr;
 	unsigned int alignment;
@@ -57,9 +58,9 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	uaddr = (unsigned long) ubuf;
 	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
 	if (!(uaddr & alignment) && !(len & alignment))
-		bio = bio_map_user(q, NULL, uaddr, len, reading);
+		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
 	else
-		bio = bio_copy_user(q, uaddr, len, reading);
+		bio = bio_copy_user(q, uaddr, len, reading, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -90,6 +91,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  * @rq:		request structure to fill
  * @ubuf:	the user buffer
  * @len:	length of user data
+ * @gfp_mask:	memory allocation flags
  *
  * Description:
  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
@@ -105,7 +107,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  *    unmapping.
  */
 int blk_rq_map_user(struct request_queue *q, struct request *rq,
-		    void __user *ubuf, unsigned long len)
+		    void __user *ubuf, unsigned long len, gfp_t gfp_mask)
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
@@ -132,7 +134,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		if (end - start > BIO_MAX_PAGES)
 			map_len -= PAGE_SIZE;
 
-		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+		ret = __blk_rq_map_user(q, rq, ubuf, map_len, gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)
@@ -160,6 +162,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * @iov:	pointer to the iovec
  * @iov_count:	number of elements in the iovec
  * @len:	I/O byte count
+ * @gfp_mask:	memory allocation flags
  *
  * Description:
  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
@@ -175,7 +178,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-			struct sg_iovec *iov, int iov_count, unsigned int len)
+			struct sg_iovec *iov, int iov_count, unsigned int len,
+			gfp_t gfp_mask)
 {
 	struct bio *bio;
 	int i, read = rq_data_dir(rq) == READ;
@@ -194,9 +198,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	}
 
 	if (unaligned || (q->dma_pad_mask & len))
-		bio = bio_copy_user_iov(q, iov, iov_count, read);
+		bio = bio_copy_user_iov(q, iov, iov_count, read, gfp_mask);
 	else
-		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
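The practical effect for callers is sketched below (illustrative, not part of
the patch; q, rq, ubuf, and len are assumed to exist): a process-context
caller keeps passing GFP_KERNEL, while a caller that must not sleep, such as
sg, can now pass GFP_ATOMIC.

	/* Process context; allocations may sleep: */
	ret = blk_rq_map_user(q, rq, ubuf, len, GFP_KERNEL);

	/* Cannot sleep (the sg case); allocations must not block: */
	ret = blk_rq_map_user(q, rq, ubuf, len, GFP_ATOMIC);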
From 152e283fdfea0cd11e297d982378b55937842dde Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Thu, 28 Aug 2008 16:17:06 +0900
Subject: block: introduce struct rq_map_data to use reserved pages

This patch introduces struct rq_map_data to enable bio_copy_user_iov()
to use reserved pages.

Currently, bio_copy_user_iov allocates bounce pages, but drivers/scsi/sg.c
wants to allocate pages by itself and use them. struct rq_map_data can be
used to pass allocated pages to bio_copy_user_iov.

The current users of bio_copy_user_iov simply pass NULL (they don't want
to use pre-allocated pages).

Signed-off-by: FUJITA Tomonori
Cc: Jens Axboe
Cc: Douglas Gilbert
Cc: Mike Christie
Cc: James Bottomley
Signed-off-by: Jens Axboe
---
 block/blk-map.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index ac21b73..dad6a29 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -41,8 +41,8 @@ static int __blk_rq_unmap_user(struct bio *bio)
 }
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-			     void __user *ubuf, unsigned int len,
-			     gfp_t gfp_mask)
+			     struct rq_map_data *map_data, void __user *ubuf,
+			     unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long uaddr;
 	unsigned int alignment;
@@ -57,10 +57,10 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 */
 	uaddr = (unsigned long) ubuf;
 	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	if (!(uaddr & alignment) && !(len & alignment))
+	if (!(uaddr & alignment) && !(len & alignment) && !map_data)
 		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
 	else
-		bio = bio_copy_user(q, uaddr, len, reading, gfp_mask);
+		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -89,6 +89,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
+ * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
@@ -107,7 +108,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  *    unmapping.
  */
 int blk_rq_map_user(struct request_queue *q, struct request *rq,
-		    void __user *ubuf, unsigned long len, gfp_t gfp_mask)
+		    struct rq_map_data *map_data, void __user *ubuf,
+		    unsigned long len, gfp_t gfp_mask)
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
@@ -134,7 +136,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		if (end - start > BIO_MAX_PAGES)
 			map_len -= PAGE_SIZE;
 
-		ret = __blk_rq_map_user(q, rq, ubuf, map_len, gfp_mask);
+		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
+					gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)
@@ -159,6 +162,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
+ * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
@@ -178,8 +182,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-			struct sg_iovec *iov, int iov_count, unsigned int len,
-			gfp_t gfp_mask)
+			struct rq_map_data *map_data, struct sg_iovec *iov,
+			int iov_count, unsigned int len, gfp_t gfp_mask)
 {
 	struct bio *bio;
 	int i, read = rq_data_dir(rq) == READ;
@@ -197,8 +201,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		}
 	}
 
-	if (unaligned || (q->dma_pad_mask & len))
-		bio = bio_copy_user_iov(q, iov, iov_count, read, gfp_mask);
+	if (unaligned || (q->dma_pad_mask & len) || map_data)
+		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
+					gfp_mask);
 	else
 		bio = bio_map_user_iov(q, NULL, iov, iov_count, read,
 				       gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -220,6 +225,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
+EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
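struct rq_map_data itself is defined in include/linux/blkdev.h, outside the
blk-map.c excerpt shown here. As an illustration only -- assuming the
structure carries a page array, a per-entry allocation order, and an entry
count, with reserved_pages and nr_pages standing in for hypothetical
driver-owned state -- a driver like sg might wire up its reserved pages
like this:

	struct rq_map_data map_data = {
		.pages		= reserved_pages,	/* pages the driver allocated itself */
		.page_order	= 0,			/* each entry is a single page */
		.nr_entries	= nr_pages,
	};

	/* A non-NULL map_data forces the copy path, which draws from those pages. */
	ret = blk_rq_map_user(q, rq, &map_data, ubuf, len, GFP_ATOMIC);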
From 879040742cf09f2360a9ac41846288707e4e567c Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Thu, 28 Aug 2008 15:05:58 +0900
Subject: block: add blk_rq_aligned helper function

This adds a blk_rq_aligned helper function to check whether the alignment
and padding requirements are satisfied for a DMA transfer. It also converts
blk_rq_map_kern and __blk_rq_map_user to use the helper function.

Signed-off-by: FUJITA Tomonori
Cc: Jens Axboe
Signed-off-by: Jens Axboe
---
 block/blk-map.c | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index dad6a29..572140c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -45,7 +45,6 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long uaddr;
-	unsigned int alignment;
 	struct bio *bio, *orig_bio;
 	int reading, ret;
@@ -56,8 +55,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	if (!(uaddr & alignment) && !(len & alignment) && !map_data)
+	if (blk_rq_aligned(q, ubuf, len) && !map_data)
 		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
 	else
 		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
@@ -274,8 +272,6 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
 {
-	unsigned long kaddr;
-	unsigned int alignment;
 	int reading = rq_data_dir(rq) == READ;
 	int do_copy = 0;
 	struct bio *bio;
@@ -285,11 +281,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	kaddr = (unsigned long)kbuf;
-	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	do_copy = ((kaddr & alignment) || (len & alignment) ||
-		   object_is_on_stack(kbuf));
-
+	do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
 	if (do_copy)
 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
 	else
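The helper itself is added to the headers, outside this excerpt.
Reconstructing it from the open-coded checks the patch removes, it is
presumably equivalent to something like:

	static inline int blk_rq_aligned(struct request_queue *q, void *addr,
					 unsigned int len)
	{
		unsigned long alignment = queue_dma_alignment(q) | q->dma_pad_mask;

		/* Both the buffer address and the length must satisfy the mask. */
		return !((unsigned long)addr & alignment) && !(len & alignment);
	}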
From 818827669d85b84241696ffef2de485db46b0b5e Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Tue, 2 Sep 2008 16:20:19 +0900
Subject: block: make blk_rq_map_user take a NULL user-space buffer

This patch changes blk_rq_map_user to accept a NULL user-space buffer
with a READ command if rq_map_data is not NULL. Thus, a caller can pass
page frames to blk_rq_map_user just to set up a request and bios with
page frames properly. bio_uncopy_user (called via blk_rq_unmap_user)
doesn't copy data to user space with such a request.

Signed-off-by: FUJITA Tomonori
Signed-off-by: Jens Axboe
---
 block/blk-map.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/block/blk-map.c b/block/blk-map.c
index 572140c..4849fa3 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -42,7 +42,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     struct rq_map_data *map_data, void __user *ubuf,
-			     unsigned int len, gfp_t gfp_mask)
+			     unsigned int len, int null_mapped, gfp_t gfp_mask)
 {
 	unsigned long uaddr;
 	struct bio *bio, *orig_bio;
@@ -63,6 +63,9 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
+	if (null_mapped)
+		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
@@ -111,12 +114,17 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
-	int ret;
+	int ret, null_mapped = 0;
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
-	if (!len || !ubuf)
+	if (!len)
 		return -EINVAL;
+	if (!ubuf) {
+		if (!map_data || rq_data_dir(rq) != READ)
+			return -EINVAL;
+		null_mapped = 1;
+	}
 
 	while (bytes_read != len) {
 		unsigned long map_len, end, start;
@@ -135,7 +143,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 			map_len -= PAGE_SIZE;
 
 		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
-					gfp_mask);
+					null_mapped, gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)
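Together with the rq_map_data support above, this lets a driver build a READ
request backed entirely by its own pages, with no copy-out to user space on
unmap. An illustrative fragment, not from the patch; reserved_pages and
nr_pages are hypothetical driver state, and bio is the rq->bio saved before
execution:

	struct rq_map_data map_data = {
		.pages		= reserved_pages,
		.page_order	= 0,
		.nr_entries	= nr_pages,
	};

	/* A NULL user buffer is now accepted: READ direction plus non-NULL map_data. */
	ret = blk_rq_map_user(q, rq, &map_data, NULL, len, GFP_ATOMIC);

	/* After completion: bio_uncopy_user skips the copy-out because the
	 * bio was flagged BIO_NULL_MAPPED at setup time. */
	ret = blk_rq_unmap_user(bio);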