Diffstat (limited to 'block')

 -rw-r--r--  block/blk-core.c     |  4
 -rw-r--r--  block/blk-ioc.c      | 37
 -rw-r--r--  block/blk-map.c      | 20
 -rw-r--r--  block/blk-merge.c    |  6
 -rw-r--r--  block/blk-settings.c |  9
 -rw-r--r--  block/bsg.c          |  8
 -rw-r--r--  block/cfq-iosched.c  | 38
 -rw-r--r--  block/elevator.c     | 15
 -rw-r--r--  block/scsi_ioctl.c   |  3

 9 files changed, 83 insertions(+), 57 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index e9754dc..775c851 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -38,7 +38,7 @@ static int __make_request(struct request_queue *q, struct bio *bio);
 /*
  * For the allocated request tables
  */
-struct kmem_cache *request_cachep;
+static struct kmem_cache *request_cachep;
 
 /*
  * For queue allocation
@@ -127,6 +127,7 @@ void rq_init(struct request_queue *q, struct request *rq)
 	rq->nr_hw_segments = 0;
 	rq->ioprio = 0;
 	rq->special = NULL;
+	rq->raw_data_len = 0;
 	rq->buffer = NULL;
 	rq->tag = -1;
 	rq->errors = 0;
@@ -2015,6 +2016,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->buffer = bio_data(bio);
+	rq->raw_data_len = bio->bi_size;
 	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 80245dc..e34df7c 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -17,17 +17,13 @@ static struct kmem_cache *iocontext_cachep;
 
 static void cfq_dtor(struct io_context *ioc)
 {
-	struct cfq_io_context *cic[1];
-	int r;
+	if (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic;
 
-	/*
-	 * We don't have a specific key to lookup with, so use the gang
-	 * lookup to just retrieve the first item stored. The cfq exit
-	 * function will iterate the full tree, so any member will do.
-	 */
-	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-	if (r > 0)
-		cic[0]->dtor(ioc);
+		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+								cic_list);
+		cic->dtor(ioc);
+	}
 }
 
 /*
@@ -57,18 +53,16 @@ EXPORT_SYMBOL(put_io_context);
 
 static void cfq_exit(struct io_context *ioc)
 {
-	struct cfq_io_context *cic[1];
-	int r;
-
 	rcu_read_lock();
-	/*
-	 * See comment for cfq_dtor()
-	 */
-	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-	rcu_read_unlock();
 
-	if (r > 0)
-		cic[0]->exit(ioc);
+	if (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic;
+
+		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+								cic_list);
+		cic->exit(ioc);
+	}
+	rcu_read_unlock();
 }
 
 /* Called by the exitting task */
@@ -105,6 +99,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
 		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
+		INIT_HLIST_HEAD(&ret->cic_list);
 		ret->ioc_data = NULL;
 	}
 
@@ -176,7 +171,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
 }
 EXPORT_SYMBOL(copy_io_context);
 
-int __init blk_ioc_init(void)
+static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
diff --git a/block/blk-map.c b/block/blk-map.c
index 955d75c..09f7fd0 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -19,6 +19,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
+		rq->raw_data_len += bio->bi_size;
 		rq->data_len += bio->bi_size;
 	}
 	return 0;
@@ -139,10 +140,29 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		ubuf += ret;
 	}
 
+	/*
+	 * __blk_rq_map_user() copies the buffers if starting address
+	 * or length isn't aligned.  As the copied buffer is always
+	 * page aligned, we know that there's enough room for padding.
+	 * Extend the last bio and update rq->data_len accordingly.
+	 *
+	 * On unmap, bio_uncopy_user() will use unmodified
+	 * bio_map_data pointed to by bio->bi_private.
+	 */
+	if (len & queue_dma_alignment(q)) {
+		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
+		struct bio *bio = rq->biotail;
+
+		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
+		bio->bi_size += pad_len;
+		rq->data_len += pad_len;
+	}
+
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
 	blk_rq_unmap_user(bio);
+	rq->bio = NULL;
 	return ret;
 }
 EXPORT_SYMBOL(blk_rq_map_user);
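A note on the pad_len arithmetic above: queue_dma_alignment() returns an alignment mask (0x3 for 4-byte DMA alignment, for example), and for an unaligned len the expression (mask & ~len) + 1 is exactly the number of bytes from len up to the next aligned boundary, which is why the patch only evaluates it inside the if. A minimal standalone sketch of the computation, with illustrative values rather than kernel code:

#include <assert.h>

/* bytes of padding needed to bring len up to the (mask + 1) boundary */
static unsigned int pad_to_alignment(unsigned int len, unsigned int mask)
{
	if (!(len & mask))
		return 0;			/* already aligned */
	return (mask & ~len) + 1;	/* same expression as the hunk above */
}

int main(void)
{
	assert(pad_to_alignment(5, 0x3) == 3);	/* 5 + 3 = 8 */
	assert(pad_to_alignment(6, 0x3) == 2);	/* 6 + 2 = 8 */
	assert(pad_to_alignment(8, 0x3) == 0);	/* already a multiple of 4 */
	return 0;
}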
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d3b84bb..7506c4f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -220,7 +220,10 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
-	if (q->dma_drain_size) {
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
+		if (rq->cmd_flags & REQ_RW)
+			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
+
 		sg->page_link &= ~0x02;
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
@@ -228,6 +231,7 @@ new_segment:
 			    ((unsigned long)q->dma_drain_buffer) &
 			    (PAGE_SIZE - 1));
 		nsegs++;
+		rq->data_len += q->dma_drain_size;
 	}
 
 	if (sg)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c8d0c57..9a8ffdd 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -296,6 +296,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
 * blk_queue_dma_drain -	Set up a drain buffer for excess dma.
 *
 * @q:  the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
@@ -315,14 +316,16 @@
 *	device can support otherwise there won't be room for the drain
 *	buffer.
 */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-		unsigned int size)
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size)
 {
 	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
 		return -EINVAL;
 
 	/* make room for appending the drain */
 	--q->max_hw_segments;
 	--q->max_phys_segments;
+	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
@@ -386,7 +389,7 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
-int __init blk_settings_init(void)
+static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
 	blk_max_pfn = max_pfn - 1;
diff --git a/block/bsg.c b/block/bsg.c
index 8917c51..7f3c095 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -437,14 +437,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 	}
 
 	if (rq->next_rq) {
-		hdr->dout_resid = rq->data_len;
-		hdr->din_resid = rq->next_rq->data_len;
+		hdr->dout_resid = rq->raw_data_len;
+		hdr->din_resid = rq->next_rq->raw_data_len;
 		blk_rq_unmap_user(bidi_bio);
 		blk_put_request(rq->next_rq);
 	} else if (rq_data_dir(rq) == READ)
-		hdr->din_resid = rq->data_len;
+		hdr->din_resid = rq->raw_data_len;
 	else
-		hdr->dout_resid = rq->data_len;
+		hdr->dout_resid = rq->raw_data_len;
 
 	/*
 	 * If the request generated a negative error number, return it
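The blk-settings.c and blk-merge.c hunks above work as a pair: a driver registers a dma_drain_needed callback together with the drain buffer, and blk_rq_map_sg() then appends the buffer as an extra scatterlist segment (zeroed first for writes) whenever the callback returns non-zero. A sketch of how a driver might call the new API; the callback name and the condition it tests are hypothetical, only the blk_queue_dma_drain() signature comes from the patch:

#include <linux/blkdev.h>

/* hypothetical policy: drain only packet commands with unaligned length */
static int my_drain_needed(struct request *rq)
{
	return blk_pc_request(rq) && (rq->data_len & 3);
}

static int my_driver_init_queue(struct request_queue *q, void *drain_buf,
				unsigned int drain_size)
{
	/* reserves one phys/hw segment and stores the callback and buffer */
	return blk_queue_dma_drain(q, my_drain_needed, drain_buf, drain_size);
}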
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ca198e6..0f962ec 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1145,38 +1145,19 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 /*
  * Call func for each cic attached to this ioc. Returns number of cic's seen.
  */
-#define CIC_GANG_NR	16
 static unsigned int
 call_for_each_cic(struct io_context *ioc,
 		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
-	struct cfq_io_context *cics[CIC_GANG_NR];
-	unsigned long index = 0;
-	unsigned int called = 0;
-	int nr;
+	struct cfq_io_context *cic;
+	struct hlist_node *n;
+	int called = 0;
 
 	rcu_read_lock();
-
-	do {
-		int i;
-
-		/*
-		 * Perhaps there's a better way - this just gang lookups from
-		 * 0 to the end, restarting after each CIC_GANG_NR from the
-		 * last key + 1.
-		 */
-		nr = radix_tree_gang_lookup(&ioc->radix_root, (void **) cics,
-						index, CIC_GANG_NR);
-		if (!nr)
-			break;
-
-		called += nr;
-		index = 1 + (unsigned long) cics[nr - 1]->key;
-
-		for (i = 0; i < nr; i++)
-			func(ioc, cics[i]);
-	} while (nr == CIC_GANG_NR);
-
+	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) {
+		func(ioc, cic);
+		called++;
+	}
 	rcu_read_unlock();
 
 	return called;
@@ -1190,6 +1171,7 @@ static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 
 	spin_lock_irqsave(&ioc->lock, flags);
 	radix_tree_delete(&ioc->radix_root, cic->dead_key);
+	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
 	kmem_cache_free(cfq_ioc_pool, cic);
@@ -1280,6 +1262,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	if (cic) {
 		cic->last_end_request = jiffies;
 		INIT_LIST_HEAD(&cic->queue_list);
+		INIT_HLIST_NODE(&cic->cic_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
 		elv_ioc_count_inc(ioc_count);
@@ -1501,6 +1484,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
 		rcu_assign_pointer(ioc->ioc_data, NULL);
 
 	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
+	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
 	cfq_cic_free(cic);
@@ -1561,6 +1545,8 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 		spin_lock_irqsave(&ioc->lock, flags);
 		ret = radix_tree_insert(&ioc->radix_root,
 						(unsigned long) cfqd, cic);
+		if (!ret)
+			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
 		spin_unlock_irqrestore(&ioc->lock, flags);
 
 		radix_tree_preload_end();
diff --git a/block/elevator.c b/block/elevator.c
index bafbae0..88318c3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -134,6 +134,21 @@ static struct elevator_type *elevator_get(const char *name)
 	spin_lock(&elv_list_lock);
 
 	e = elevator_find(name);
+	if (!e) {
+		char elv[ELV_NAME_MAX + strlen("-iosched")];
+
+		spin_unlock(&elv_list_lock);
+
+		if (!strcmp(name, "anticipatory"))
+			sprintf(elv, "as-iosched");
+		else
+			sprintf(elv, "%s-iosched", name);
+
+		request_module(elv);
+		spin_lock(&elv_list_lock);
+		e = elevator_find(name);
+	}
+
 	if (e && !try_module_get(e->elevator_owner))
 		e = NULL;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 9675b34..e993cac 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -266,7 +266,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 	hdr->info = 0;
 	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
 		hdr->info |= SG_INFO_CHECK;
-	hdr->resid = rq->data_len;
+	hdr->resid = rq->raw_data_len;
 	hdr->sb_len_wr = 0;
 
 	if (rq->sense_len && hdr->sbp) {
@@ -528,6 +528,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	rq = blk_get_request(q, WRITE, __GFP_WAIT);
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->data = NULL;
+	rq->raw_data_len = 0;
 	rq->data_len = 0;
 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 	memset(rq->cmd, 0, sizeof(rq->cmd));
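Taken together, the cfq-iosched.c hunks above keep the radix tree for keyed cic lookup but add a parallel RCU hlist so that visiting every cic no longer needs gang lookups: writers update both structures under ioc->lock, and readers traverse the hlist under rcu_read_lock(). Condensed from the hunks themselves, not a drop-in function:

/* insertion: both indexes updated under the lock (cfq_cic_link) */
spin_lock_irqsave(&ioc->lock, flags);
ret = radix_tree_insert(&ioc->radix_root, (unsigned long) cfqd, cic);
if (!ret)
	hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
spin_unlock_irqrestore(&ioc->lock, flags);

/* removal mirrors it (cic_free_func, cfq_drop_dead_cic) */
spin_lock_irqsave(&ioc->lock, flags);
radix_tree_delete(&ioc->radix_root, cic->dead_key);
hlist_del_rcu(&cic->cic_list);
spin_unlock_irqrestore(&ioc->lock, flags);

/* lockless traversal (call_for_each_cic) */
rcu_read_lock();
hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
	func(ioc, cic);
rcu_read_unlock();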