From 484fc254b88257a2d8b3759aa062e8e8b35e0988 Mon Sep 17 00:00:00 2001 From: Wang Sheng-Hui Date: Thu, 8 Sep 2011 12:32:14 +0200 Subject: elevator: use ELV_NAME_MAX instead of magic number 16 for chosen_elevator We have ELV_NAME_MAX defined to 16, and hence we should use it instead of the magic nubmer 16 for elevator's name string. Signed-off-by: Wang Sheng-Hui Signed-off-by: Jens Axboe --- block/elevator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/elevator.c b/block/elevator.c index a3b64bc..cb332cb 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -182,7 +182,7 @@ static void elevator_attach(struct request_queue *q, struct elevator_queue *eq, eq->elevator_data = data; } -static char chosen_elevator[16]; +static char chosen_elevator[ELV_NAME_MAX]; static int __init elevator_setup(char *str) { -- cgit v1.1 From 166e1f901b01872e8b70733a3f2e2c6980389cf8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 12 Sep 2011 12:08:27 +0200 Subject: block: export __make_request Avoid the hacks need for request based device mappers currently by simply exporting the symbol instead of trying to get it through the back door. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index b627558..56ef387 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -38,8 +38,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); -static int __make_request(struct request_queue *q, struct bio *bio); - /* * For the allocated request tables */ @@ -1213,7 +1211,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) blk_rq_bio_prep(req->q, req, bio); } -static int __make_request(struct request_queue *q, struct bio *bio) +int __make_request(struct request_queue *q, struct bio *bio) { const bool sync = !!(bio->bi_rw & REQ_SYNC); struct blk_plug *plug; @@ -1317,6 +1315,7 @@ out_unlock: out: return 0; } +EXPORT_SYMBOL_GPL(__make_request); /* for device mapper only */ /* * If bio->bi_dev is a partition, remap the location -- cgit v1.1 From c20e8de27fef9f59869c81c288ad6cf28200e00c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 12 Sep 2011 12:03:37 +0200 Subject: block: rename __make_request() to blk_queue_bio() Now that it's exported, lets put it in a more sane namespace. 
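For context, a minimal sketch of how a request-based stacking driver might feed a bio into the exported helper from its own ->make_request_fn once this rename lands; the my_* names and the queuedata layout are hypothetical and not part of this series, and the int return matches the interface at this point (a later patch in the series makes it void):

static int my_stacked_make_request(struct request_queue *q, struct bio *bio)
{
	struct my_stacked_dev *md = q->queuedata;	/* hypothetical per-device data */

	if (my_bio_needs_remap(md, bio)) {		/* hypothetical policy check */
		/* bio-based path: redirect at the backing device and resubmit */
		bio->bi_bdev = md->backing_bdev;
		generic_make_request(bio);
		return 0;
	}

	/* request-based path: let the normal block layer build requests */
	return blk_queue_bio(q, bio);
}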
Signed-off-by: Jens Axboe --- block/blk-core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 56ef387..ab673f0 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -540,7 +540,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn, /* * This also sets hw/phys segments, boundary and size */ - blk_queue_make_request(q, __make_request); + blk_queue_make_request(q, blk_queue_bio); q->sg_reserved_size = INT_MAX; @@ -1211,7 +1211,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) blk_rq_bio_prep(req->q, req, bio); } -int __make_request(struct request_queue *q, struct bio *bio) +int blk_queue_bio(struct request_queue *q, struct bio *bio) { const bool sync = !!(bio->bi_rw & REQ_SYNC); struct blk_plug *plug; @@ -1315,7 +1315,7 @@ out_unlock: out: return 0; } -EXPORT_SYMBOL_GPL(__make_request); /* for device mapper only */ +EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */ /* * If bio->bi_dev is a partition, remap the location -- cgit v1.1 From 5a7bbad27a410350e64a2d7f5ec18fc73836c14f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 12 Sep 2011 12:12:01 +0200 Subject: block: remove support for bio remapping from ->make_request There is very little benefit in allowing to let a ->make_request instance update the bios device and sector and loop around it in __generic_make_request when we can archive the same through calling generic_make_request from the driver and letting the loop in generic_make_request handle it. Note that various drivers got the return value from ->make_request and returned non-zero values for errors. Signed-off-by: Christoph Hellwig Acked-by: NeilBrown Signed-off-by: Jens Axboe --- block/blk-core.c | 153 ++++++++++++++++++++++--------------------------------- 1 file changed, 62 insertions(+), 91 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index ab673f0..f58e019 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1211,7 +1211,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) blk_rq_bio_prep(req->q, req, bio); } -int blk_queue_bio(struct request_queue *q, struct bio *bio) +void blk_queue_bio(struct request_queue *q, struct bio *bio) { const bool sync = !!(bio->bi_rw & REQ_SYNC); struct blk_plug *plug; @@ -1236,7 +1236,7 @@ int blk_queue_bio(struct request_queue *q, struct bio *bio) * any locks. */ if (attempt_plug_merge(current, q, bio)) - goto out; + return; spin_lock_irq(q->queue_lock); @@ -1312,8 +1312,6 @@ get_rq: out_unlock: spin_unlock_irq(q->queue_lock); } -out: - return 0; } EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */ @@ -1441,112 +1439,85 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) static inline void __generic_make_request(struct bio *bio) { struct request_queue *q; - sector_t old_sector; - int ret, nr_sectors = bio_sectors(bio); - dev_t old_dev; + int nr_sectors = bio_sectors(bio); int err = -EIO; + char b[BDEVNAME_SIZE]; + struct hd_struct *part; might_sleep(); if (bio_check_eod(bio, nr_sectors)) goto end_io; - /* - * Resolve the mapping until finished. (drivers are - * still free to implement/resolve their own stacking - * by explicitly returning 0) - * - * NOTE: we don't repeat the blk_size check for each new device. - * Stacking drivers are expected to know what they are doing. 
- */ - old_sector = -1; - old_dev = 0; - do { - char b[BDEVNAME_SIZE]; - struct hd_struct *part; - - q = bdev_get_queue(bio->bi_bdev); - if (unlikely(!q)) { - printk(KERN_ERR - "generic_make_request: Trying to access " - "nonexistent block-device %s (%Lu)\n", - bdevname(bio->bi_bdev, b), - (long long) bio->bi_sector); - goto end_io; - } - - if (unlikely(!(bio->bi_rw & REQ_DISCARD) && - nr_sectors > queue_max_hw_sectors(q))) { - printk(KERN_ERR "bio too big device %s (%u > %u)\n", - bdevname(bio->bi_bdev, b), - bio_sectors(bio), - queue_max_hw_sectors(q)); - goto end_io; - } - - if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) - goto end_io; - - part = bio->bi_bdev->bd_part; - if (should_fail_request(part, bio->bi_size) || - should_fail_request(&part_to_disk(part)->part0, - bio->bi_size)) - goto end_io; + q = bdev_get_queue(bio->bi_bdev); + if (unlikely(!q)) { + printk(KERN_ERR + "generic_make_request: Trying to access " + "nonexistent block-device %s (%Lu)\n", + bdevname(bio->bi_bdev, b), + (long long) bio->bi_sector); + goto end_io; + } - /* - * If this device has partitions, remap block n - * of partition p to block n+start(p) of the disk. - */ - blk_partition_remap(bio); + if (unlikely(!(bio->bi_rw & REQ_DISCARD) && + nr_sectors > queue_max_hw_sectors(q))) { + printk(KERN_ERR "bio too big device %s (%u > %u)\n", + bdevname(bio->bi_bdev, b), + bio_sectors(bio), + queue_max_hw_sectors(q)); + goto end_io; + } - if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) - goto end_io; + if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) + goto end_io; - if (old_sector != -1) - trace_block_bio_remap(q, bio, old_dev, old_sector); + part = bio->bi_bdev->bd_part; + if (should_fail_request(part, bio->bi_size) || + should_fail_request(&part_to_disk(part)->part0, + bio->bi_size)) + goto end_io; - old_sector = bio->bi_sector; - old_dev = bio->bi_bdev->bd_dev; + /* + * If this device has partitions, remap block n + * of partition p to block n+start(p) of the disk. + */ + blk_partition_remap(bio); - if (bio_check_eod(bio, nr_sectors)) - goto end_io; + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) + goto end_io; - /* - * Filter flush bio's early so that make_request based - * drivers without flush support don't have to worry - * about them. - */ - if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { - bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); - if (!nr_sectors) { - err = 0; - goto end_io; - } - } + if (bio_check_eod(bio, nr_sectors)) + goto end_io; - if ((bio->bi_rw & REQ_DISCARD) && - (!blk_queue_discard(q) || - ((bio->bi_rw & REQ_SECURE) && - !blk_queue_secdiscard(q)))) { - err = -EOPNOTSUPP; + /* + * Filter flush bio's early so that make_request based + * drivers without flush support don't have to worry + * about them. + */ + if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { + bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); + if (!nr_sectors) { + err = 0; goto end_io; } + } - if (blk_throtl_bio(q, &bio)) - goto end_io; - - /* - * If bio = NULL, bio has been throttled and will be submitted - * later. - */ - if (!bio) - break; - - trace_block_bio_queue(q, bio); + if ((bio->bi_rw & REQ_DISCARD) && + (!blk_queue_discard(q) || + ((bio->bi_rw & REQ_SECURE) && + !blk_queue_secdiscard(q)))) { + err = -EOPNOTSUPP; + goto end_io; + } - ret = q->make_request_fn(q, bio); - } while (ret); + if (blk_throtl_bio(q, &bio)) + goto end_io; + /* if bio = NULL, bio has been throttled and will be submitted later. 
*/ + if (!bio) + return; + trace_block_bio_queue(q, bio); + q->make_request_fn(q, bio); return; end_io: -- cgit v1.1 From 27a84d54c02591e815d291ae0ee4bfb9cfd21065 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 15 Sep 2011 14:01:40 +0200 Subject: block: refactor generic_make_request Move all the checks performed on a bio into a new helper, and call it as soon as bio is submitted even if it is a re-submission from ->make_request. We explicitly mark the new helper as beeing non-inlined as the stack usage for printing the block device name in the failure case is quite high and this a patch where we have to be extremely conservative about stack usage. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 95 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 49 insertions(+), 46 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index f58e019..684d7eb 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1412,31 +1412,8 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) return 0; } -/** - * generic_make_request - hand a buffer to its device driver for I/O - * @bio: The bio describing the location in memory and on the device. - * - * generic_make_request() is used to make I/O requests of block - * devices. It is passed a &struct bio, which describes the I/O that needs - * to be done. - * - * generic_make_request() does not return any status. The - * success/failure status of the request, along with notification of - * completion, is delivered asynchronously through the bio->bi_end_io - * function described (one day) else where. - * - * The caller of generic_make_request must make sure that bi_io_vec - * are set to describe the memory buffer, and that bi_dev and bi_sector are - * set to describe the device address, and the - * bi_end_io and optionally bi_private are set to describe how - * completion notification should be signaled. - * - * generic_make_request and the drivers it calls may use bi_next if this - * bio happens to be merged with someone else, and may change bi_dev and - * bi_sector for remaps as it sees fit. So the values of these fields - * should NOT be depended on after the call to generic_make_request. - */ -static inline void __generic_make_request(struct bio *bio) +static noinline_for_stack bool +generic_make_request_checks(struct bio *bio) { struct request_queue *q; int nr_sectors = bio_sectors(bio); @@ -1515,35 +1492,62 @@ static inline void __generic_make_request(struct bio *bio) /* if bio = NULL, bio has been throttled and will be submitted later. */ if (!bio) - return; + return false; + trace_block_bio_queue(q, bio); - q->make_request_fn(q, bio); - return; + return true; end_io: bio_endio(bio, err); + return false; } -/* - * We only want one ->make_request_fn to be active at a time, - * else stack usage with stacked devices could be a problem. - * So use current->bio_list to keep a list of requests - * submited by a make_request_fn function. - * current->bio_list is also used as a flag to say if - * generic_make_request is currently active in this task or not. - * If it is NULL, then no make_request is active. If it is non-NULL, - * then a make_request is active, and new requests should be added - * at the tail +/** + * generic_make_request - hand a buffer to its device driver for I/O + * @bio: The bio describing the location in memory and on the device. + * + * generic_make_request() is used to make I/O requests of block + * devices. 
It is passed a &struct bio, which describes the I/O that needs + * to be done. + * + * generic_make_request() does not return any status. The + * success/failure status of the request, along with notification of + * completion, is delivered asynchronously through the bio->bi_end_io + * function described (one day) else where. + * + * The caller of generic_make_request must make sure that bi_io_vec + * are set to describe the memory buffer, and that bi_dev and bi_sector are + * set to describe the device address, and the + * bi_end_io and optionally bi_private are set to describe how + * completion notification should be signaled. + * + * generic_make_request and the drivers it calls may use bi_next if this + * bio happens to be merged with someone else, and may resubmit the bio to + * a lower device by calling into generic_make_request recursively, which + * means the bio should NOT be touched after the call to ->make_request_fn. */ void generic_make_request(struct bio *bio) { struct bio_list bio_list_on_stack; + if (!generic_make_request_checks(bio)) + return; + + /* + * We only want one ->make_request_fn to be active at a time, else + * stack usage with stacked devices could be a problem. So use + * current->bio_list to keep a list of requests submited by a + * make_request_fn function. current->bio_list is also used as a + * flag to say if generic_make_request is currently active in this + * task or not. If it is NULL, then no make_request is active. If + * it is non-NULL, then a make_request is active, and new requests + * should be added at the tail + */ if (current->bio_list) { - /* make_request is active */ bio_list_add(current->bio_list, bio); return; } + /* following loop may be a bit non-obvious, and so deserves some * explanation. * Before entering the loop, bio->bi_next is NULL (as all callers @@ -1551,22 +1555,21 @@ void generic_make_request(struct bio *bio) * We pretend that we have just taken it off a longer list, so * we assign bio_list to a pointer to the bio_list_on_stack, * thus initialising the bio_list of new bios to be - * added. __generic_make_request may indeed add some more bios + * added. ->make_request() may indeed add some more bios * through a recursive call to generic_make_request. If it * did, we find a non-NULL value in bio_list and re-enter the loop * from the top. In this case we really did just take the bio * of the top of the list (no pretending) and so remove it from - * bio_list, and call into __generic_make_request again. - * - * The loop was structured like this to make only one call to - * __generic_make_request (which is important as it is large and - * inlined) and to keep the structure simple. + * bio_list, and call into ->make_request() again. */ BUG_ON(bio->bi_next); bio_list_init(&bio_list_on_stack); current->bio_list = &bio_list_on_stack; do { - __generic_make_request(bio); + struct request_queue *q = bdev_get_queue(bio->bi_bdev); + + q->make_request_fn(q, bio); + bio = bio_list_pop(current->bio_list); } while (bio); current->bio_list = NULL; /* deactivate */ -- cgit v1.1 From 75df713627f28f88b901b329c8857747545fd4ab Mon Sep 17 00:00:00 2001 From: Suresh Jayaraman Date: Wed, 21 Sep 2011 10:00:16 +0200 Subject: block: document blk-plug Thus spake Andrew Morton: "And I have the usual maintainability whine. If someone comes up to vmscan.c and sees it calling blk_start_plug(), how are they supposed to work out why that call is there? They go look at the blk_start_plug() definition and it is undocumented. I think we can do better than this?" 
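The call pattern the new kerneldoc describes looks roughly like this (a hypothetical caller that has already built two bios; not code taken from the patch):

static void my_submit_batch(struct bio *bio1, struct bio *bio2)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	/* bios submitted while plugged are held on the per-task plug list */
	submit_bio(READ, bio1);
	submit_bio(READ, bio2);
	/* requests are flushed to the driver here, or earlier if the task blocks */
	blk_finish_plug(&plug);
}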
Adapted from the LWN article - http://lwn.net/Articles/438256/ by Jens Axboe and from an earlier attempt by Shaohua Li to document blk-plug. [akpm@linux-foundation.org: grammatical and spelling tweaks] Signed-off-by: Suresh Jayaraman Cc: Shaohua Li Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Jens Axboe --- block/blk-core.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 684d7eb..97e9e54 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2595,6 +2595,20 @@ EXPORT_SYMBOL(kblockd_schedule_delayed_work); #define PLUG_MAGIC 0x91827364 +/** + * blk_start_plug - initialize blk_plug and track it inside the task_struct + * @plug: The &struct blk_plug that needs to be initialized + * + * Description: + * Tracking blk_plug inside the task_struct will help with auto-flushing the + * pending I/O should the task end up blocking between blk_start_plug() and + * blk_finish_plug(). This is important from a performance perspective, but + * also ensures that we don't deadlock. For instance, if the task is blocking + * for a memory allocation, memory reclaim could end up wanting to free a + * page belonging to that request that is currently residing in our private + * plug. By flushing the pending I/O when the process goes to sleep, we avoid + * this kind of deadlock. + */ void blk_start_plug(struct blk_plug *plug) { struct task_struct *tsk = current; -- cgit v1.1 From 499337bb6511e665a236a6a947f819d98ea340c6 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 21 Sep 2011 10:01:22 +0200 Subject: block/blk-sysfs.c: fix kerneldoc references The kerneldoc for blk_release_queue() is referring to blk_cleanup_queue(). Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Jens Axboe --- block/blk-sysfs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 0ee17b5..adc923e 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -455,11 +455,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, } /** - * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed - * @kobj: the kobj belonging of the request queue to be released + * blk_release_queue: - release a &struct request_queue when it is no longer needed + * @kobj: the kobj belonging to the request queue to be released * * Description: - * blk_cleanup_queue is the pair to blk_init_queue() or + * blk_release_queue is the pair to blk_init_queue() or * blk_queue_make_request(). It should be called when a request queue is * being released; typically when a block device is being de-registered. * Currently, its primary task it to free all the &struct request -- cgit v1.1 From 523e1d399ce0e23bec562abe2b2f8d297af81161 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Oct 2011 14:31:07 +0200 Subject: block: make gendisk hold a reference to its queue The following command sequence triggers an oops. # mount /dev/sdb1 /mnt # echo 1 > /sys/class/scsi_device/0\:0\:1\:0/device/delete # umount /mnt general protection fault: 0000 [#1] PREEMPT SMP CPU 2 Modules linked in: Pid: 791, comm: umount Not tainted 3.1.0-rc3-work+ #8 Bochs Bochs RIP: 0010:[] [] __lock_acquire+0x389/0x1d60 ... 
Call Trace: [] lock_acquire+0x95/0x140 [] _raw_spin_lock+0x3b/0x50 [] bdi_lock_two+0x5c/0x70 [] bdev_inode_switch_bdi+0x4c/0xf0 [] __blkdev_put+0x11b/0x1d0 [] __blkdev_put+0x160/0x1d0 [] blkdev_put+0x5f/0x190 [] kill_block_super+0x4d/0x80 [] deactivate_locked_super+0x45/0x70 [] deactivate_super+0x4a/0x70 [] mntput_no_expire+0xed/0x130 [] sys_umount+0x7e/0x3a0 [] system_call_fastpath+0x16/0x1b This is because bdev holds on to disk but disk doesn't pin the associated queue. If a SCSI device is removed while the device is still open, the sdev puts the base reference to the queue on release. When the bdev is finally released, the associated queue is already gone along with the bdi and bdev_inode_switch_bdi() ends up dereferencing already freed bdi. Even if it were not for this bug, disk not holding onto the associated queue is very unusual and error-prone. Fix it by making add_disk() take an extra reference to its queue and put it on disk_release() and ensuring that disk and its fops owner are put in that order after all accesses to the disk and queue are complete. Signed-off-by: Tejun Heo Cc: stable@kernel.org Signed-off-by: Jens Axboe --- block/genhd.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'block') diff --git a/block/genhd.c b/block/genhd.c index e2f6790..d261b73 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -611,6 +611,12 @@ void add_disk(struct gendisk *disk) register_disk(disk); blk_register_queue(disk); + /* + * Take an extra ref on queue which will be put on disk_release() + * so that it sticks around as long as @disk is there. + */ + WARN_ON_ONCE(blk_get_queue(disk->queue)); + retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj, "bdi"); WARN_ON(retval); @@ -1095,6 +1101,8 @@ static void disk_release(struct device *dev) disk_replace_part_tbl(disk, NULL); free_part_stats(&disk->part0); free_part_info(&disk->part0); + if (disk->queue) + blk_put_queue(disk->queue); kfree(disk); } struct class block_class = { -- cgit v1.1 From ece84241b93c4693bfe0a8ec9a043a16d216d0cd Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Oct 2011 14:31:15 +0200 Subject: block: fix genhd refcounting in blkio_policy_parse_and_set() blkio_policy_parse_and_set() calls blkio_check_dev_num() to check whether the given dev_t is valid. blkio_check_dev_num() uses get_gendisk() for verification but never puts the returned genhd leaking the reference. This patch collapses blkio_check_dev_num() into its caller and updates it such that the genhd is put before returning. 
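The resulting get/put pairing is the standard one; condensed as a standalone sketch (the helper name is hypothetical, and note that put_disk() accepts a NULL disk, so a single exit path is enough):

static int my_dev_num_is_valid(dev_t dev)
{
	struct gendisk *disk;
	int part, ret = 0;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		ret = -ENODEV;
	put_disk(disk);		/* balances get_gendisk(); safe even when disk is NULL */
	return ret;
}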
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 56 ++++++++++++++++++++++-------------------------------- 1 file changed, 23 insertions(+), 33 deletions(-) (limited to 'block') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index b596e54..d61ec56 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -768,25 +768,14 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg, return disk_total; } -static int blkio_check_dev_num(dev_t dev) -{ - int part = 0; - struct gendisk *disk; - - disk = get_gendisk(dev, &part); - if (!disk || part) - return -ENODEV; - - return 0; -} - static int blkio_policy_parse_and_set(char *buf, struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid) { + struct gendisk *disk = NULL; char *s[4], *p, *major_s = NULL, *minor_s = NULL; - int ret; unsigned long major, minor; - int i = 0; + int i = 0, ret = -EINVAL; + int part; dev_t dev; u64 temp; @@ -804,37 +793,36 @@ static int blkio_policy_parse_and_set(char *buf, } if (i != 2) - return -EINVAL; + goto out; p = strsep(&s[0], ":"); if (p != NULL) major_s = p; else - return -EINVAL; + goto out; minor_s = s[0]; if (!minor_s) - return -EINVAL; + goto out; - ret = strict_strtoul(major_s, 10, &major); - if (ret) - return -EINVAL; + if (strict_strtoul(major_s, 10, &major)) + goto out; - ret = strict_strtoul(minor_s, 10, &minor); - if (ret) - return -EINVAL; + if (strict_strtoul(minor_s, 10, &minor)) + goto out; dev = MKDEV(major, minor); - ret = strict_strtoull(s[1], 10, &temp); - if (ret) - return -EINVAL; + if (strict_strtoull(s[1], 10, &temp)) + goto out; /* For rule removal, do not check for device presence. */ if (temp) { - ret = blkio_check_dev_num(dev); - if (ret) - return ret; + disk = get_gendisk(dev, &part); + if (!disk || part) { + ret = -ENODEV; + goto out; + } } newpn->dev = dev; @@ -843,7 +831,7 @@ static int blkio_policy_parse_and_set(char *buf, case BLKIO_POLICY_PROP: if ((temp < BLKIO_WEIGHT_MIN && temp > 0) || temp > BLKIO_WEIGHT_MAX) - return -EINVAL; + goto out; newpn->plid = plid; newpn->fileid = fileid; @@ -860,7 +848,7 @@ static int blkio_policy_parse_and_set(char *buf, case BLKIO_THROTL_read_iops_device: case BLKIO_THROTL_write_iops_device: if (temp > THROTL_IOPS_MAX) - return -EINVAL; + goto out; newpn->plid = plid; newpn->fileid = fileid; @@ -871,8 +859,10 @@ static int blkio_policy_parse_and_set(char *buf, default: BUG(); } - - return 0; + ret = 0; +out: + put_disk(disk); + return ret; } unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg, -- cgit v1.1 From bc9fcbf9cb8ec76d340da16fbf48a9a316e14c52 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Oct 2011 14:31:18 +0200 Subject: block: move blk_throtl prototypes to block/blk.h blk_throtl interface is block internal and there's no reason to have them in linux/blkdev.h. Move them to block/blk.h. This patch doesn't introduce any functional change. 
Signed-off-by: Tejun Heo Cc: Vivek Goyal Signed-off-by: Jens Axboe --- block/blk-throttle.c | 1 + block/blk.h | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a19f58c..f3f495e 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -10,6 +10,7 @@ #include #include #include "blk-cgroup.h" +#include "blk.h" /* Max dispatch from a group in 1 round */ static int throtl_grp_quantum = 8; diff --git a/block/blk.h b/block/blk.h index 20b900a..da247ba 100644 --- a/block/blk.h +++ b/block/blk.h @@ -188,4 +188,17 @@ static inline int blk_do_io_stat(struct request *rq) (rq->cmd_flags & REQ_DISCARD)); } -#endif +#ifdef CONFIG_BLK_DEV_THROTTLING +extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); +extern int blk_throtl_init(struct request_queue *q); +extern void blk_throtl_exit(struct request_queue *q); +#else /* CONFIG_BLK_DEV_THROTTLING */ +static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) +{ + return 0; +} +static inline int blk_throtl_init(struct request_queue *q) { return 0; } +static inline void blk_throtl_exit(struct request_queue *q) { } +#endif /* CONFIG_BLK_DEV_THROTTLING */ + +#endif /* BLK_INTERNAL_H */ -- cgit v1.1 From 75eb6c372d41d6d140b893873f6687d78c987a44 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Oct 2011 14:31:22 +0200 Subject: block: pass around REQ_* flags instead of broken down booleans during request alloc/free blk_alloc_request() and freed_request() take different combinations of REQ_* @flags, @priv and @is_sync when @flags is superset of the latter two. Make them take @flags only. This cleans up the code a bit and will ease updating allocation related REQ_* flags. This patch doesn't introduce any functional difference. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-core.c | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 79e41a7..a3d2fdc 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -574,7 +574,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq) } static struct request * -blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) +blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask) { struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); @@ -585,12 +585,10 @@ blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) rq->cmd_flags = flags | REQ_ALLOCED; - if (priv) { - if (unlikely(elv_set_request(q, rq, gfp_mask))) { - mempool_free(rq, q->rq.rq_pool); - return NULL; - } - rq->cmd_flags |= REQ_ELVPRIV; + if ((flags & REQ_ELVPRIV) && + unlikely(elv_set_request(q, rq, gfp_mask))) { + mempool_free(rq, q->rq.rq_pool); + return NULL; } return rq; @@ -649,12 +647,13 @@ static void __freed_request(struct request_queue *q, int sync) * A request has just been released. Account for it, update the full and * congestion status, wake up any waiters. Called under q->queue_lock. 
*/ -static void freed_request(struct request_queue *q, int sync, int priv) +static void freed_request(struct request_queue *q, unsigned int flags) { struct request_list *rl = &q->rq; + int sync = rw_is_sync(flags); rl->count[sync]--; - if (priv) + if (flags & REQ_ELVPRIV) rl->elvpriv--; __freed_request(q, sync); @@ -694,7 +693,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags, struct request_list *rl = &q->rq; struct io_context *ioc = NULL; const bool is_sync = rw_is_sync(rw_flags) != 0; - int may_queue, priv = 0; + int may_queue; may_queue = elv_may_queue(q, rw_flags); if (may_queue == ELV_MQUEUE_NO) @@ -738,17 +737,17 @@ static struct request *get_request(struct request_queue *q, int rw_flags, rl->count[is_sync]++; rl->starved[is_sync] = 0; - if (blk_rq_should_init_elevator(bio)) { - priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); - if (priv) - rl->elvpriv++; + if (blk_rq_should_init_elevator(bio) && + !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) { + rw_flags |= REQ_ELVPRIV; + rl->elvpriv++; } if (blk_queue_io_stat(q)) rw_flags |= REQ_IO_STAT; spin_unlock_irq(q->queue_lock); - rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); + rq = blk_alloc_request(q, rw_flags, gfp_mask); if (unlikely(!rq)) { /* * Allocation failed presumably due to memory. Undo anything @@ -758,7 +757,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags, * wait queue, but this is pretty rare. */ spin_lock_irq(q->queue_lock); - freed_request(q, is_sync, priv); + freed_request(q, rw_flags); /* * in the very unlikely event that allocation failed and no @@ -1050,14 +1049,13 @@ void __blk_put_request(struct request_queue *q, struct request *req) * it didn't come out of our reserved rq pools */ if (req->cmd_flags & REQ_ALLOCED) { - int is_sync = rq_is_sync(req) != 0; - int priv = req->cmd_flags & REQ_ELVPRIV; + unsigned int flags = req->cmd_flags; BUG_ON(!list_empty(&req->queuelist)); BUG_ON(!hlist_unhashed(&req->hash)); blk_free_request(q, req); - freed_request(q, is_sync, priv); + freed_request(q, flags); } } EXPORT_SYMBOL_GPL(__blk_put_request); -- cgit v1.1 From 315fceee81155ef2aeed9316ca72aeea9347db5c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Oct 2011 14:31:25 +0200 Subject: block: drop unnecessary blk_get/put_queue() in scsi_cmd_ioctl() and blk_get_tg() blk_get/put_queue() in scsi_cmd_ioctl() and throtl_get_tg() are completely bogus. The caller must have a reference to the queue on entry and taking an extra reference doesn't change anything. For scsi_cmd_ioctl(), the only effect is that it ends up checking QUEUE_FLAG_DEAD on entry; however, this is bogus as queue can die right after blk_get_queue(). Dead queue should be and is handled in request issue path (it's somewhat broken now but that's a separate problem and doesn't affect this one much). throtl_get_tg() incorrectly assumes that q is rcu freed. Also, it doesn't check return value of blk_get_queue(). If the queue is already dead, it ends up doing an extra put. Drop them. Signed-off-by: Tejun Heo Cc: Vivek Goyal Signed-off-by: Jens Axboe --- block/blk-throttle.c | 8 +------- block/scsi_ioctl.c | 3 +-- 2 files changed, 2 insertions(+), 9 deletions(-) (limited to 'block') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index f3f495e..ecba5fc 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -324,12 +324,8 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td) /* * Need to allocate a group. 
Allocation of group also needs allocation * of per cpu stats which in-turn takes a mutex() and can block. Hence - * we need to drop rcu lock and queue_lock before we call alloc - * - * Take the request queue reference to make sure queue does not - * go away once we return from allocation. + * we need to drop rcu lock and queue_lock before we call alloc. */ - blk_get_queue(q); rcu_read_unlock(); spin_unlock_irq(q->queue_lock); @@ -339,13 +335,11 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td) * dead */ if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { - blk_put_queue(q); if (tg) kfree(tg); return ERR_PTR(-ENODEV); } - blk_put_queue(q); /* Group allocated and queue is still alive. take the lock */ spin_lock_irq(q->queue_lock); diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 4f4230b..fbdf0d8 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -565,7 +565,7 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod { int err; - if (!q || blk_get_queue(q)) + if (!q) return -ENXIO; switch (cmd) { @@ -686,7 +686,6 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod err = -ENOTTY; } - blk_put_queue(q); return err; } EXPORT_SYMBOL(scsi_cmd_ioctl); -- cgit v1.1 From e3c78ca524d230bc145e902625e88c392a58ddf3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Oct 2011 14:32:38 +0200 Subject: block: reorganize queue draining Reorganize queue draining related code in preparation of queue exit changes. * Factor out actual draining from elv_quiesce_start() to blk_drain_queue(). * Make elv_quiesce_start/end() responsible for their own locking. * Replace open-coded ELVSWITCH clearing in elevator_switch() with elv_quiesce_end(). This patch doesn't cause any visible functional difference. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-core.c | 28 ++++++++++++++++++++++++++++ block/blk.h | 1 + block/elevator.c | 37 +++++++++++-------------------------- 3 files changed, 40 insertions(+), 26 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index a3d2fdc..149149d 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -28,6 +28,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -345,6 +346,33 @@ void blk_put_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_put_queue); +/** + * blk_drain_queue - drain requests from request_queue + * @q: queue to drain + * + * Drain ELV_PRIV requests from @q. The caller is responsible for ensuring + * that no new requests which need to be drained are queued. + */ +void blk_drain_queue(struct request_queue *q) +{ + while (true) { + int nr_rqs; + + spin_lock_irq(q->queue_lock); + + elv_drain_elevator(q); + + __blk_run_queue(q); + nr_rqs = q->rq.elvpriv; + + spin_unlock_irq(q->queue_lock); + + if (!nr_rqs) + break; + msleep(10); + } +} + /* * Note: If a driver supplied the queue lock, it is disconnected * by this function. 
The actual state of the lock doesn't matter diff --git a/block/blk.h b/block/blk.h index da247ba..2b66dc21 100644 --- a/block/blk.h +++ b/block/blk.h @@ -15,6 +15,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio); int blk_rq_append_bio(struct request_queue *q, struct request *rq, struct bio *bio); +void blk_drain_queue(struct request_queue *q); void blk_dequeue_request(struct request *rq); void __blk_queue_free_tags(struct request_queue *q); bool __blk_end_bidi_request(struct request *rq, int error, diff --git a/block/elevator.c b/block/elevator.c index cb332cb..74a277f 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -606,43 +605,35 @@ void elv_requeue_request(struct request_queue *q, struct request *rq) void elv_drain_elevator(struct request_queue *q) { static int printed; + + lockdep_assert_held(q->queue_lock); + while (q->elevator->ops->elevator_dispatch_fn(q, 1)) ; - if (q->nr_sorted == 0) - return; - if (printed++ < 10) { + if (q->nr_sorted && printed++ < 10) { printk(KERN_ERR "%s: forced dispatching is broken " "(nr_sorted=%u), please report this\n", q->elevator->elevator_type->elevator_name, q->nr_sorted); } } -/* - * Call with queue lock held, interrupts disabled - */ void elv_quiesce_start(struct request_queue *q) { if (!q->elevator) return; + spin_lock_irq(q->queue_lock); queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); + spin_unlock_irq(q->queue_lock); - /* - * make sure we don't have any requests in flight - */ - elv_drain_elevator(q); - while (q->rq.elvpriv) { - __blk_run_queue(q); - spin_unlock_irq(q->queue_lock); - msleep(10); - spin_lock_irq(q->queue_lock); - elv_drain_elevator(q); - } + blk_drain_queue(q); } void elv_quiesce_end(struct request_queue *q) { + spin_lock_irq(q->queue_lock); queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); + spin_unlock_irq(q->queue_lock); } void __elv_add_request(struct request_queue *q, struct request *rq, int where) @@ -972,7 +963,6 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) /* * Turn on BYPASS and drain all requests w/ elevator private data */ - spin_lock_irq(q->queue_lock); elv_quiesce_start(q); /* @@ -983,8 +973,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) /* * attach and start new elevator */ + spin_lock_irq(q->queue_lock); elevator_attach(q, e, data); - spin_unlock_irq(q->queue_lock); if (old_elevator->registered) { @@ -999,9 +989,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) * finally exit old elevator and turn off BYPASS. */ elevator_exit(old_elevator); - spin_lock_irq(q->queue_lock); elv_quiesce_end(q); - spin_unlock_irq(q->queue_lock); blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); @@ -1015,10 +1003,7 @@ fail_register: elevator_exit(e); q->elevator = old_elevator; elv_register_queue(q); - - spin_lock_irq(q->queue_lock); - queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); - spin_unlock_irq(q->queue_lock); + elv_quiesce_end(q); return err; } -- cgit v1.1 From bc16a4f933bc5ed50826b20561e4c3515061998b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Oct 2011 14:33:01 +0200 Subject: block: reorganize throtl_get_tg() and blk_throtl_bio() blk_throtl_bio() and throtl_get_tg() have rather unusual interface. * throtl_get_tg() returns pointer to a valid tg or ERR_PTR(-ENODEV), and drops queue_lock in the latter case. 
Different locking context depending on return value is error-prone and DEAD state is scheduled to be protected by queue_lock anyway. Move DEAD check inside queue_lock and return valid tg or NULL. * blk_throtl_bio() indicates return status both with its return value and in/out param **@bio. The former is used to indicate whether queue is found to be dead during throtl processing. The latter whether the bio is throttled. There's no point in returning DEAD check result from blk_throtl_bio(). The queue can die after blk_throtl_bio() is finished but before make_request_fn() grabs queue lock. Make it take *@bio instead and return boolean result indicating whether the request is throttled or not. This patch doesn't cause any visible functional difference. Signed-off-by: Tejun Heo Cc: Vivek Goyal Signed-off-by: Jens Axboe --- block/blk-core.c | 8 ++------ block/blk-throttle.c | 49 +++++++++++++++++-------------------------------- block/blk.h | 6 +++--- 3 files changed, 22 insertions(+), 41 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 149149d..6c491f2 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1515,12 +1515,8 @@ generic_make_request_checks(struct bio *bio) goto end_io; } - if (blk_throtl_bio(q, &bio)) - goto end_io; - - /* if bio = NULL, bio has been throttled and will be submitted later. */ - if (!bio) - return false; + if (blk_throtl_bio(q, bio)) + return false; /* throttled, will be resubmitted later */ trace_block_bio_queue(q, bio); return true; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index ecba5fc..900a0c9 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -303,10 +303,6 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg) return tg; } -/* - * This function returns with queue lock unlocked in case of error, like - * request queue is no more - */ static struct throtl_grp * throtl_get_tg(struct throtl_data *td) { struct throtl_grp *tg = NULL, *__tg = NULL; @@ -330,20 +326,16 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td) spin_unlock_irq(q->queue_lock); tg = throtl_alloc_tg(td); - /* - * We might have slept in group allocation. Make sure queue is not - * dead - */ - if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { - if (tg) - kfree(tg); - - return ERR_PTR(-ENODEV); - } /* Group allocated and queue is still alive. take the lock */ spin_lock_irq(q->queue_lock); + /* Make sure @q is still alive */ + if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { + kfree(tg); + return NULL; + } + /* * Initialize the new group. After sleeping, read the blkcg again. 
*/ @@ -1118,17 +1110,17 @@ static struct blkio_policy_type blkio_policy_throtl = { .plid = BLKIO_POLICY_THROTL, }; -int blk_throtl_bio(struct request_queue *q, struct bio **biop) +bool blk_throtl_bio(struct request_queue *q, struct bio *bio) { struct throtl_data *td = q->td; struct throtl_grp *tg; - struct bio *bio = *biop; bool rw = bio_data_dir(bio), update_disptime = true; struct blkio_cgroup *blkcg; + bool throttled = false; if (bio->bi_rw & REQ_THROTTLED) { bio->bi_rw &= ~REQ_THROTTLED; - return 0; + goto out; } /* @@ -1147,7 +1139,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop) blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, rw_is_sync(bio->bi_rw)); rcu_read_unlock(); - return 0; + goto out; } } rcu_read_unlock(); @@ -1156,18 +1148,10 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop) * Either group has not been allocated yet or it is not an unlimited * IO group */ - spin_lock_irq(q->queue_lock); tg = throtl_get_tg(td); - - if (IS_ERR(tg)) { - if (PTR_ERR(tg) == -ENODEV) { - /* - * Queue is gone. No queue lock held here. - */ - return -ENODEV; - } - } + if (unlikely(!tg)) + goto out_unlock; if (tg->nr_queued[rw]) { /* @@ -1195,7 +1179,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop) * So keep on trimming slice even if bio is not queued. */ throtl_trim_slice(td, tg, rw); - goto out; + goto out_unlock; } queue_bio: @@ -1207,16 +1191,17 @@ queue_bio: tg->nr_queued[READ], tg->nr_queued[WRITE]); throtl_add_bio_tg(q->td, tg, bio); - *biop = NULL; + throttled = true; if (update_disptime) { tg_update_disptime(td, tg); throtl_schedule_next_dispatch(td); } -out: +out_unlock: spin_unlock_irq(q->queue_lock); - return 0; +out: + return throttled; } int blk_throtl_init(struct request_queue *q) diff --git a/block/blk.h b/block/blk.h index 2b66dc21..c018dba 100644 --- a/block/blk.h +++ b/block/blk.h @@ -190,13 +190,13 @@ static inline int blk_do_io_stat(struct request *rq) } #ifdef CONFIG_BLK_DEV_THROTTLING -extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); +extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio); extern int blk_throtl_init(struct request_queue *q); extern void blk_throtl_exit(struct request_queue *q); #else /* CONFIG_BLK_DEV_THROTTLING */ -static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) +static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio) { - return 0; + return false; } static inline int blk_throtl_init(struct request_queue *q) { return 0; } static inline void blk_throtl_exit(struct request_queue *q) { } -- cgit v1.1 From da8303c63b8de73619884382d6e573d44aae0810 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Oct 2011 14:33:05 +0200 Subject: block: make get_request[_wait]() fail if queue is dead Currently get_request[_wait]() allocates request whether queue is dead or not. This patch makes get_request[_wait]() return NULL if @q is dead. blk_queue_bio() is updated to fail the submitted bio if request allocation fails. While at it, add docbook comments for get_request[_wait](). Note that the current code has rather unclear (there are spurious DEAD tests scattered around) assumption that the owner of a queue guarantees that no request travels block layer if the queue is dead and this patch in itself doesn't change much; however, this will allow fixing the broken assumption in the next patch. 
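Seen from a caller, the new contract looks roughly like this (hypothetical caller, not from the patch):

static int my_issue_rq(struct request_queue *q)
{
	struct request *rq;

	/*
	 * Even a waiting allocation (GFP_KERNEL includes __GFP_WAIT) must
	 * now be checked: if @q has been marked dead, blk_get_request()
	 * returns NULL.
	 */
	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENODEV;

	/*
	 * ... set up and use rq here; a request that ends up unused is
	 * handed back with blk_put_request() ...
	 */
	blk_put_request(rq);
	return 0;
}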
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-core.c | 54 ++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 16 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 6c491f2..3508751 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -709,10 +709,19 @@ static bool blk_rq_should_init_elevator(struct bio *bio) return true; } -/* - * Get a free request, queue_lock must be held. - * Returns NULL on failure, with queue_lock held. - * Returns !NULL on success, with queue_lock *not held*. +/** + * get_request - get a free request + * @q: request_queue to allocate request from + * @rw_flags: RW and SYNC flags + * @bio: bio to allocate request for (can be %NULL) + * @gfp_mask: allocation mask + * + * Get a free request from @q. This function may fail under memory + * pressure or if @q is dead. + * + * Must be callled with @q->queue_lock held and, + * Returns %NULL on failure, with @q->queue_lock held. + * Returns !%NULL on success, with @q->queue_lock *not held*. */ static struct request *get_request(struct request_queue *q, int rw_flags, struct bio *bio, gfp_t gfp_mask) @@ -723,6 +732,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags, const bool is_sync = rw_is_sync(rw_flags) != 0; int may_queue; + if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) + return NULL; + may_queue = elv_may_queue(q, rw_flags); if (may_queue == ELV_MQUEUE_NO) goto rq_starved; @@ -815,11 +827,18 @@ out: return rq; } -/* - * No available requests for this queue, wait for some requests to become - * available. +/** + * get_request_wait - get a free request with retry + * @q: request_queue to allocate request from + * @rw_flags: RW and SYNC flags + * @bio: bio to allocate request for (can be %NULL) * - * Called with q->queue_lock held, and returns with it unlocked. + * Get a free request from @q. This function keeps retrying under memory + * pressure and fails iff @q is dead. + * + * Must be callled with @q->queue_lock held and, + * Returns %NULL on failure, with @q->queue_lock held. + * Returns !%NULL on success, with @q->queue_lock *not held*. */ static struct request *get_request_wait(struct request_queue *q, int rw_flags, struct bio *bio) @@ -833,6 +852,9 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags, struct io_context *ioc; struct request_list *rl = &q->rq; + if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) + return NULL; + prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, TASK_UNINTERRUPTIBLE); @@ -863,19 +885,15 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) { struct request *rq; - if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) - return NULL; - BUG_ON(rw != READ && rw != WRITE); spin_lock_irq(q->queue_lock); - if (gfp_mask & __GFP_WAIT) { + if (gfp_mask & __GFP_WAIT) rq = get_request_wait(q, rw, NULL); - } else { + else rq = get_request(q, rw, NULL, gfp_mask); - if (!rq) - spin_unlock_irq(q->queue_lock); - } + if (!rq) + spin_unlock_irq(q->queue_lock); /* q->queue_lock is unlocked at this point */ return rq; @@ -1299,6 +1317,10 @@ get_rq: * Returns with the queue unlocked. 
*/ req = get_request_wait(q, rw_flags, bio); + if (unlikely(!req)) { + bio_endio(bio, -ENODEV); /* @q is dead */ + goto out_unlock; + } /* * After dropping the lock and possibly sleeping here, our request -- cgit v1.1 From bd87b5898a72b1aef6acf3705c61c9f6372adf0c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Oct 2011 14:33:08 +0200 Subject: block: drop @tsk from attempt_plug_merge() and explain sync rules attempt_plug_merge() accesses elevator without holding queue_lock and may call into ->elevator_bio_merge_fn(). The elvator is guaranteed to be valid because it's accessed iff the plugged list has requests and elevator is never exited with live requests, so as long as the elevator method can deal with unlocked access, this is safe. Explain the sync rules around attempt_plug_merge() and drop the unnecessary @tsk parameter. This patch doesn't introduce any functional change. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-core.c | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 3508751..034cbb2 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1203,18 +1203,32 @@ static bool bio_attempt_front_merge(struct request_queue *q, return true; } -/* - * Attempts to merge with the plugged list in the current process. Returns - * true if merge was successful, otherwise false. +/** + * attempt_plug_merge - try to merge with %current's plugged list + * @q: request_queue new bio is being queued at + * @bio: new bio being queued + * @request_count: out parameter for number of traversed plugged requests + * + * Determine whether @bio being queued on @q can be merged with a request + * on %current's plugged list. Returns %true if merge was successful, + * otherwise %false. + * + * This function is called without @q->queue_lock; however, elevator is + * accessed iff there already are requests on the plugged list which in + * turn guarantees validity of the elevator. + * + * Note that, on successful merge, elevator operation + * elevator_bio_merged_fn() will be called without queue lock. Elevator + * must be ready for this. */ -static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, - struct bio *bio, unsigned int *request_count) +static bool attempt_plug_merge(struct request_queue *q, struct bio *bio, + unsigned int *request_count) { struct blk_plug *plug; struct request *rq; bool ret = false; - plug = tsk->plug; + plug = current->plug; if (!plug) goto out; *request_count = 0; @@ -1282,7 +1296,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio) * Check if we can merge with the plugged list before grabbing * any locks. */ - if (attempt_plug_merge(current, q, bio, &request_count)) + if (attempt_plug_merge(q, bio, &request_count)) return; spin_lock_irq(q->queue_lock); -- cgit v1.1 From c9a929dde3913780b5c416f4bb9d9ed804f509ce Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Oct 2011 14:42:16 +0200 Subject: block: fix request_queue lifetime handling by making blk_queue_cleanup() properly shutdown request_queue is refcounted but actually depdends on lifetime management from the queue owner - on blk_cleanup_queue(), block layer expects that there's no request passing through request_queue and no new one will. This is fundamentally broken. The queue owner (e.g. SCSI layer) doesn't have a way to know whether there are other active users before calling blk_cleanup_queue() and other users (e.g. 
bsg) don't have any guarantee that the queue is and would stay valid while it's holding a reference. With delay added in blk_queue_bio() before queue_lock is grabbed, the following oops can be easily triggered when a device is removed with in-flight IOs. sd 0:0:1:0: [sdb] Stopping disk ata1.01: disabled general protection fault: 0000 [#1] PREEMPT SMP CPU 2 Modules linked in: Pid: 648, comm: test_rawio Not tainted 3.1.0-rc3-work+ #56 Bochs Bochs RIP: 0010:[] [] elv_rqhash_find+0x61/0x100 ... Process test_rawio (pid: 648, threadinfo ffff880019efa000, task ffff880019ef8a80) ... Call Trace: [] elv_merge+0x84/0xe0 [] blk_queue_bio+0xf4/0x400 [] generic_make_request+0xca/0x100 [] submit_bio+0x74/0x100 [] dio_bio_submit+0xbc/0xc0 [] __blockdev_direct_IO+0x92e/0xb40 [] blkdev_direct_IO+0x57/0x60 [] generic_file_aio_read+0x6d5/0x760 [] do_sync_read+0xda/0x120 [] vfs_read+0xc5/0x180 [] sys_pread64+0x9a/0xb0 [] system_call_fastpath+0x16/0x1b This happens because blk_queue_cleanup() destroys the queue and elevator whether IOs are in progress or not and DEAD tests are sprinkled in the request processing path without proper synchronization. Similar problem exists for blk-throtl. On queue cleanup, blk-throtl is shutdown whether it has requests in it or not. Depending on timing, it either oopses or throttled bios are lost putting tasks which are waiting for bio completion into eternal D state. The way it should work is having the usual clear distinction between shutdown and release. Shutdown drains all currently pending requests, marks the queue dead, and performs partial teardown of the now unnecessary part of the queue. Even after shutdown is complete, reference holders are still allowed to issue requests to the queue although they will be immmediately failed. The rest of teardown happens on release. This patch makes the following changes to make blk_queue_cleanup() behave as proper shutdown. * QUEUE_FLAG_DEAD is now set while holding both q->exit_mutex and queue_lock. * Unsynchronized DEAD check in generic_make_request_checks() removed. This couldn't make any meaningful difference as the queue could die after the check. * blk_drain_queue() updated such that it can drain all requests and is now called during cleanup. * blk_throtl updated such that it checks DEAD on grabbing queue_lock, drains all throttled bios during cleanup and free td when queue is released. Signed-off-by: Tejun Heo Cc: Vivek Goyal Signed-off-by: Jens Axboe --- block/blk-core.c | 57 +++++++++++++++++++++++++++++++++------------------- block/blk-sysfs.c | 1 + block/blk-throttle.c | 50 +++++++++++++++++++++++++++++++++++++++------ block/blk.h | 6 +++++- block/elevator.c | 2 +- 5 files changed, 87 insertions(+), 29 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 034cbb2..7e15235 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -349,11 +349,13 @@ EXPORT_SYMBOL(blk_put_queue); /** * blk_drain_queue - drain requests from request_queue * @q: queue to drain + * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV * - * Drain ELV_PRIV requests from @q. The caller is responsible for ensuring - * that no new requests which need to be drained are queued. + * Drain requests from @q. If @drain_all is set, all requests are drained. + * If not, only ELVPRIV requests are drained. The caller is responsible + * for ensuring that no new requests which need to be drained are queued. 
*/ -void blk_drain_queue(struct request_queue *q) +void blk_drain_queue(struct request_queue *q, bool drain_all) { while (true) { int nr_rqs; @@ -361,9 +363,15 @@ void blk_drain_queue(struct request_queue *q) spin_lock_irq(q->queue_lock); elv_drain_elevator(q); + if (drain_all) + blk_throtl_drain(q); __blk_run_queue(q); - nr_rqs = q->rq.elvpriv; + + if (drain_all) + nr_rqs = q->rq.count[0] + q->rq.count[1]; + else + nr_rqs = q->rq.elvpriv; spin_unlock_irq(q->queue_lock); @@ -373,30 +381,40 @@ void blk_drain_queue(struct request_queue *q) } } -/* - * Note: If a driver supplied the queue lock, it is disconnected - * by this function. The actual state of the lock doesn't matter - * here as the request_queue isn't accessible after this point - * (QUEUE_FLAG_DEAD is set) and no other requests will be queued. +/** + * blk_cleanup_queue - shutdown a request queue + * @q: request queue to shutdown + * + * Mark @q DEAD, drain all pending requests, destroy and put it. All + * future requests will be failed immediately with -ENODEV. */ void blk_cleanup_queue(struct request_queue *q) { - /* - * We know we have process context here, so we can be a little - * cautious and ensure that pending block actions on this device - * are done before moving on. Going into this function, we should - * not have processes doing IO to this device. - */ - blk_sync_queue(q); + spinlock_t *lock = q->queue_lock; - del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); + /* mark @q DEAD, no new request or merges will be allowed afterwards */ mutex_lock(&q->sysfs_lock); queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); - mutex_unlock(&q->sysfs_lock); + + spin_lock_irq(lock); + queue_flag_set(QUEUE_FLAG_NOMERGES, q); + queue_flag_set(QUEUE_FLAG_NOXMERGES, q); + queue_flag_set(QUEUE_FLAG_DEAD, q); if (q->queue_lock != &q->__queue_lock) q->queue_lock = &q->__queue_lock; + spin_unlock_irq(lock); + mutex_unlock(&q->sysfs_lock); + + /* drain all requests queued before DEAD marking */ + blk_drain_queue(q, true); + + /* @q won't process any more request, flush async actions */ + del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); + blk_sync_queue(q); + + /* @q is and will stay empty, shutdown and put */ blk_put_queue(q); } EXPORT_SYMBOL(blk_cleanup_queue); @@ -1509,9 +1527,6 @@ generic_make_request_checks(struct bio *bio) goto end_io; } - if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) - goto end_io; - part = bio->bi_bdev->bd_part; if (should_fail_request(part, bio->bi_size) || should_fail_request(&part_to_disk(part)->part0, diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index a8eff5f..e7f9f65 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -490,6 +490,7 @@ static void blk_release_queue(struct kobject *kobj) if (q->queue_tags) __blk_queue_free_tags(q); + blk_throtl_release(q); blk_trace_shutdown(q); bdi_destroy(&q->backing_dev_info); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 900a0c9..8edb949 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -309,6 +309,10 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td) struct blkio_cgroup *blkcg; struct request_queue *q = td->queue; + /* no throttling for dead queue */ + if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) + return NULL; + rcu_read_lock(); blkcg = task_blkio_cgroup(current); tg = throtl_find_tg(td, blkcg); @@ -1001,11 +1005,6 @@ static void throtl_release_tgs(struct throtl_data *td) } } -static void throtl_td_free(struct throtl_data *td) -{ - kfree(td); -} - /* * Blk cgroup controller 
notification saying that blkio_group object is being * delinked as associated cgroup object is going away. That also means that @@ -1204,6 +1203,41 @@ out: return throttled; } +/** + * blk_throtl_drain - drain throttled bios + * @q: request_queue to drain throttled bios for + * + * Dispatch all currently throttled bios on @q through ->make_request_fn(). + */ +void blk_throtl_drain(struct request_queue *q) + __releases(q->queue_lock) __acquires(q->queue_lock) +{ + struct throtl_data *td = q->td; + struct throtl_rb_root *st = &td->tg_service_tree; + struct throtl_grp *tg; + struct bio_list bl; + struct bio *bio; + + lockdep_is_held(q->queue_lock); + + bio_list_init(&bl); + + while ((tg = throtl_rb_first(st))) { + throtl_dequeue_tg(td, tg); + + while ((bio = bio_list_peek(&tg->bio_lists[READ]))) + tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl); + while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))) + tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl); + } + spin_unlock_irq(q->queue_lock); + + while ((bio = bio_list_pop(&bl))) + generic_make_request(bio); + + spin_lock_irq(q->queue_lock); +} + int blk_throtl_init(struct request_queue *q) { struct throtl_data *td; @@ -1276,7 +1310,11 @@ void blk_throtl_exit(struct request_queue *q) * it. */ throtl_shutdown_wq(q); - throtl_td_free(td); +} + +void blk_throtl_release(struct request_queue *q) +{ + kfree(q->td); } static int __init throtl_init(void) diff --git a/block/blk.h b/block/blk.h index c018dba..3f6551b 100644 --- a/block/blk.h +++ b/block/blk.h @@ -15,7 +15,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio); int blk_rq_append_bio(struct request_queue *q, struct request *rq, struct bio *bio); -void blk_drain_queue(struct request_queue *q); +void blk_drain_queue(struct request_queue *q, bool drain_all); void blk_dequeue_request(struct request *rq); void __blk_queue_free_tags(struct request_queue *q); bool __blk_end_bidi_request(struct request *rq, int error, @@ -191,15 +191,19 @@ static inline int blk_do_io_stat(struct request *rq) #ifdef CONFIG_BLK_DEV_THROTTLING extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio); +extern void blk_throtl_drain(struct request_queue *q); extern int blk_throtl_init(struct request_queue *q); extern void blk_throtl_exit(struct request_queue *q); +extern void blk_throtl_release(struct request_queue *q); #else /* CONFIG_BLK_DEV_THROTTLING */ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio) { return false; } +static inline void blk_throtl_drain(struct request_queue *q) { } static inline int blk_throtl_init(struct request_queue *q) { return 0; } static inline void blk_throtl_exit(struct request_queue *q) { } +static inline void blk_throtl_release(struct request_queue *q) { } #endif /* CONFIG_BLK_DEV_THROTTLING */ #endif /* BLK_INTERNAL_H */ diff --git a/block/elevator.c b/block/elevator.c index 74a277f..66343d6 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -626,7 +626,7 @@ void elv_quiesce_start(struct request_queue *q) queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); spin_unlock_irq(q->queue_lock); - blk_drain_queue(q); + blk_drain_queue(q, false); } void elv_quiesce_end(struct request_queue *q) -- cgit v1.1 From e890413af4c2dfebf5432ef30cc70cb11dad3213 Mon Sep 17 00:00:00 2001 From: Jie Liu Date: Mon, 24 Oct 2011 16:08:38 +0200 Subject: block: fix a typo in the blk-cgroup.h file byptes -> bytes. 
Signed-off-by: Jie Liu Signed-off-by: Jens Axboe --- block/blk-cgroup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index a71d290..6f3ace7 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -188,7 +188,7 @@ struct blkio_policy_node { union { unsigned int weight; /* - * Rate read/write in terms of byptes per second + * Rate read/write in terms of bytes per second * Whether this rate represents read or write is determined * by file type "fileid". */ -- cgit v1.1 From 9562ad9ab36df7ccef920d119f3b5100025db95f Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Mon, 24 Oct 2011 16:11:30 +0200 Subject: block: Remove the control of complete cpu from bio. bio originally has the functionality to set the completion CPU, but it is broken. Christoph said that "This code is unused, and from the all the discussions lately pretty obviously broken. The only thing keeping it serves is creating more confusion and possibly more bugs." And Jens replied with "We can kill bio_set_completion_cpu(). I'm fine with leaving cpu control to the request based drivers, they are the only ones that can toggle the setting anyway". So this patch tries to remove all the work of controlling the completion CPU from a bio. Cc: Shaohua Li Cc: Christoph Hellwig Signed-off-by: Tao Ma Signed-off-by: Jens Axboe --- block/blk-core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 7e15235..da69793 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1276,7 +1276,6 @@ out: void init_request_from_bio(struct request *req, struct bio *bio) { - req->cpu = bio->bi_comp_cpu; req->cmd_type = REQ_TYPE_FS; req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; @@ -1362,8 +1361,7 @@ get_rq: */ init_request_from_bio(req, bio); - if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || - bio_flagged(bio, BIO_CPU_AFFINE)) + if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) req->cpu = raw_smp_processor_id(); plug = current->plug; -- cgit v1.1 From 834f9f61a525d2f6d3d0c93894e26326c8d3ceed Mon Sep 17 00:00:00 2001 From: Jeff Moyer Date: Mon, 17 Oct 2011 12:57:22 +0200 Subject: blk-flush: fix invalid BUG_ON in blk_insert_flush A user reported a regression due to commit 4853abaae7e4a2af938115ce9071ef8684fb7af4 (block: fix flush machinery for stacking drivers with differring flush flags). Part of the problem is that blk_insert_flush required that a single bio be attached to the request. In reality, having no attached bio is also a valid case, as can be observed with an empty flush.
[1] http://www.redhat.com/archives/dm-devel/2011-September/msg00154.html Reported-by: Christophe Saout Signed-off-by: Jeff Moyer Stable note: 3.1 Cc: stable@vger.kernel.org Signed-off-by: Jens Axboe --- block/blk-flush.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-flush.c b/block/blk-flush.c index 491eb30..89ae3b9 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -320,7 +320,7 @@ void blk_insert_flush(struct request *rq) return; } - BUG_ON(!rq->bio || rq->bio != rq->biotail); + BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ /* * If there's data but flush is not necessary, the request can be -- cgit v1.1 From e67b77c791ca2778198c9e7088f3266ed2da7a55 Mon Sep 17 00:00:00 2001 From: Jeff Moyer Date: Mon, 17 Oct 2011 12:57:23 +0200 Subject: blk-flush: move the queue kick into A dm-multipath user reported[1] a problem when trying to boot a kernel with commit 4853abaae7e4a2af938115ce9071ef8684fb7af4 (block: fix flush machinery for stacking drivers with differring flush flags) applied. It turns out that an empty flush request can be sent into blk_insert_flush. When the BUG_ON was fixed to allow for this, I/O on the underlying device would stall. The reason is that blk_insert_cloned_request does not kick the queue. In the aforementioned commit, I had added a special case to kick the queue if data was sent down but the queue flags did not require a flush. A better solution is to push the queue kick up into blk_insert_cloned_request. This patch, along with a follow-on which fixes the BUG_ON, fixes the issue reported. [1] http://www.redhat.com/archives/dm-devel/2011-September/msg00154.html Reported-by: Christophe Saout Signed-off-by: Jeff Moyer Acked-by: Tejun Heo Stable note: 3.1 Cc: stable@vger.kernel.org Signed-off-by: Jens Axboe --- block/blk-core.c | 2 ++ block/blk-flush.c | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index d34433a..795154e 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1725,6 +1725,8 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) where = ELEVATOR_INSERT_FLUSH; add_acct_request(q, rq, where); + if (where == ELEVATOR_INSERT_FLUSH) + __blk_run_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); return 0; diff --git a/block/blk-flush.c b/block/blk-flush.c index 89ae3b9..720ad60 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -330,7 +330,6 @@ void blk_insert_flush(struct request *rq) if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { list_add_tail(&rq->queuelist, &q->queue_head); - blk_run_queue_async(q); return; } -- cgit v1.1 From f992ae801a7dec34a4ed99a6598bbbbfb82af4fb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 17 Oct 2011 13:42:43 +0200 Subject: block: make gendisk hold a reference to its queue The following command sequence triggers an oops. # mount /dev/sdb1 /mnt # echo 1 > /sys/class/scsi_device/0\:0\:1\:0/device/delete # umount /mnt general protection fault: 0000 [#1] PREEMPT SMP CPU 2 Modules linked in: Pid: 791, comm: umount Not tainted 3.1.0-rc3-work+ #8 Bochs Bochs RIP: 0010:[] [] __lock_acquire+0x389/0x1d60 ... 
Call Trace: [] lock_acquire+0x95/0x140 [] _raw_spin_lock+0x3b/0x50 [] bdi_lock_two+0x5c/0x70 [] bdev_inode_switch_bdi+0x4c/0xf0 [] __blkdev_put+0x11b/0x1d0 [] __blkdev_put+0x160/0x1d0 [] blkdev_put+0x5f/0x190 [] kill_block_super+0x4d/0x80 [] deactivate_locked_super+0x45/0x70 [] deactivate_super+0x4a/0x70 [] mntput_no_expire+0xed/0x130 [] sys_umount+0x7e/0x3a0 [] system_call_fastpath+0x16/0x1b This is because bdev holds on to disk but disk doesn't pin the associated queue. If a SCSI device is removed while the device is still open, the sdev puts the base reference to the queue on release. When the bdev is finally released, the associated queue is already gone along with the bdi, and bdev_inode_switch_bdi() ends up dereferencing an already freed bdi. Even if it were not for this bug, disk not holding onto the associated queue is very unusual and error-prone. Fix it by making add_disk() take an extra reference to its queue and put it on disk_release(), and by ensuring that the disk and its fops owner are put in that order after all accesses to the disk and queue are complete. Signed-off-by: Tejun Heo Cc: Jens Axboe Cc: stable@kernel.org Signed-off-by: Jens Axboe --- block/genhd.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'block') diff --git a/block/genhd.c b/block/genhd.c index e2f6790..d261b73 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -611,6 +611,12 @@ void add_disk(struct gendisk *disk) register_disk(disk); blk_register_queue(disk); + /* + * Take an extra ref on queue which will be put on disk_release() + * so that it sticks around as long as @disk is there. + */ + WARN_ON_ONCE(blk_get_queue(disk->queue)); + retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj, "bdi"); WARN_ON(retval); @@ -1095,6 +1101,8 @@ static void disk_release(struct device *dev) disk_replace_part_tbl(disk, NULL); free_part_stats(&disk->part0); free_part_info(&disk->part0); + if (disk->queue) + blk_put_queue(disk->queue); kfree(disk); } struct class block_class = { -- cgit v1.1 From 5e08159197b5b98a6648a172008de23f420e6c11 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Tue, 25 Oct 2011 10:20:05 +0200 Subject: block: warn if tag is greater than real_max_depth. When the tag depth is reduced, it is max_depth that shrinks, not real_max_depth. So we should still allow a request with tag >= max_depth, but a tag >= real_max_depth really does indicate a problem. Signed-off-by: Tao Ma Signed-off-by: Jens Axboe --- block/blk-tag.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-tag.c b/block/blk-tag.c index ece65fc..e74d6d1 100644 --- a/block/blk-tag.c +++ b/block/blk-tag.c @@ -286,12 +286,14 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq) BUG_ON(tag == -1); - if (unlikely(tag >= bqt->real_max_depth)) + if (unlikely(tag >= bqt->max_depth)) { /* * This can happen after tag depth has been reduced. - * FIXME: how about a warning or info message here? + * But tag shouldn't be larger than real_max_depth. */ + WARN_ON(tag >= bqt->real_max_depth); return; + } list_del_init(&rq->queuelist); rq->cmd_flags &= ~REQ_QUEUED; -- cgit v1.1 From e060f00beee23568fe2c4faf1e88ff22edefd7b2 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Tue, 25 Oct 2011 15:48:12 +0200 Subject: blk-throttle: Free up policy node associated with deleted rule If a rule is being deleted, free up the associated policy node. Otherwise that memory is leaked.
Signed-off-by: Vivek Goyal Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 1 + 1 file changed, 1 insertion(+) (limited to 'block') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index d61ec56..6155367 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1075,6 +1075,7 @@ static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft, if (blkio_delete_rule_command(newpn)) { blkio_policy_delete_node(pn); + kfree(pn); spin_unlock_irq(&blkcg->lock); goto update_io_group; } -- cgit v1.1 From a38eb630fa224d6fba8c14a4063174bc5e0f63bb Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Tue, 25 Oct 2011 15:48:12 +0200 Subject: blk-throttle: Take blkcg->lock while traversing blkcg->policy_list blkcg->policy_list is protected by blkcg->lock. It is not an RCU-protected list, so even readers need to take blkcg->lock. There are a few functions which were reading the list without taking the lock. Fix them. Signed-off-by: Vivek Goyal Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 54 ++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 40 insertions(+), 14 deletions(-) (limited to 'block') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 6155367..8f630ce 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -869,60 +869,86 @@ unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg, dev_t dev) { struct blkio_policy_node *pn; + unsigned long flags; + unsigned int weight; + + spin_lock_irqsave(&blkcg->lock, flags); pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP, BLKIO_PROP_weight_device); if (pn) - return pn->val.weight; + weight = pn->val.weight; else - return blkcg->weight; + weight = blkcg->weight; + + spin_unlock_irqrestore(&blkcg->lock, flags); + + return weight; } EXPORT_SYMBOL_GPL(blkcg_get_weight); uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev) { struct blkio_policy_node *pn; + unsigned long flags; + uint64_t bps = -1; + spin_lock_irqsave(&blkcg->lock, flags); pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device); if (pn) - return pn->val.bps; - else - return -1; + bps = pn->val.bps; + spin_unlock_irqrestore(&blkcg->lock, flags); + + return bps; } uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev) { struct blkio_policy_node *pn; + unsigned long flags; + uint64_t bps = -1; + + spin_lock_irqsave(&blkcg->lock, flags); pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL, BLKIO_THROTL_write_bps_device); if (pn) - return pn->val.bps; - else - return -1; + bps = pn->val.bps; + spin_unlock_irqrestore(&blkcg->lock, flags); + + return bps; } unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev) { struct blkio_policy_node *pn; + unsigned long flags; + unsigned int iops = -1; + spin_lock_irqsave(&blkcg->lock, flags); pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL, BLKIO_THROTL_read_iops_device); if (pn) - return pn->val.iops; - else - return -1; + iops = pn->val.iops; + spin_unlock_irqrestore(&blkcg->lock, flags); + + return iops; } unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev) { struct blkio_policy_node *pn; + unsigned long flags; + unsigned int iops = -1; + + spin_lock_irqsave(&blkcg->lock, flags); pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL, BLKIO_THROTL_write_iops_device); if (pn) - return pn->val.iops; - else - return -1; + iops = pn->val.iops; + spin_unlock_irqrestore(&blkcg->lock, flags); + + return iops; } /* Checks whether user asked
for deleting a policy rule */ -- cgit v1.1 From 334c2b0b8b2ab186fa198413386cba41fffcb4f2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 25 Oct 2011 15:51:48 +0200 Subject: blk-throttle: use queue_is_locked() instead of lockdep_is_held() We can't use the latter if !CONFIG_LOCKDEP. Reported-by: Sedat Dilek Signed-off-by: Jens Axboe --- block/blk-throttle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8edb949..4553245 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1218,7 +1218,7 @@ void blk_throtl_drain(struct request_queue *q) struct bio_list bl; struct bio *bio; - lockdep_is_held(q->queue_lock); + WARN_ON_ONCE(!queue_is_locked(q)); bio_list_init(&bl); -- cgit v1.1 From 6dd9ad7df2019b1e33a372a501907db293ebcd0d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 3 Nov 2011 18:52:11 +0100 Subject: block: don't call blk_drain_queue() if elevator is not up blk_cleanup_queue() may be called before the elevator is set up on a queue, which triggers the following oops. BUG: unable to handle kernel NULL pointer dereference at (null) IP: [] elv_drain_elevator+0x1c/0x70 ... Pid: 830, comm: kworker/0:2 Not tainted 3.1.0-next-20111025_64+ #1590 Bochs Bochs RIP: 0010:[] [] elv_drain_elevator+0x1c/0x70 ... Call Trace: [] blk_drain_queue+0x42/0x70 [] blk_cleanup_queue+0xd0/0x1c0 [] md_free+0x50/0x70 [] kobject_release+0x8b/0x1d0 [] kref_put+0x36/0xa0 [] kobject_put+0x27/0x60 [] mddev_delayed_delete+0x2f/0x40 [] process_one_work+0x100/0x3b0 [] worker_thread+0x15f/0x3a0 [] kthread+0x87/0x90 [] kernel_thread_helper+0x4/0x10 Fix it by making blk_cleanup_queue() check whether q->elevator is set up before invoking blk_drain_queue(). Signed-off-by: Tejun Heo Reported-and-tested-by: Jiri Slaby Signed-off-by: Jens Axboe --- block/blk-core.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index f658711..f43c8a5 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -407,8 +407,13 @@ void blk_cleanup_queue(struct request_queue *q) spin_unlock_irq(lock); mutex_unlock(&q->sysfs_lock); - /* drain all requests queued before DEAD marking */ - blk_drain_queue(q, true); + /* + * Drain all requests queued before DEAD marking. The caller might + * be trying to tear down @q before its elevator is initialized, in + * which case we don't want to call into draining. + */ + if (q->elevator) + blk_drain_queue(q, true); /* @q won't process any more request, flush async actions */ del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); -- cgit v1.1
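Editor's note: the queue-shutdown and gendisk-reference patches above all converge on one lifetime pattern: shutdown (mark dead, drain) is kept separate from release (free on last reference), and every long-lived user holds its own reference. The stand-alone user-space C sketch below only illustrates that pattern under simplified, assumed semantics; the names (queue_cleanup, queue_put, and so on) are hypothetical and are not the kernel APIs touched by these patches.

/*
 * Stand-alone user-space sketch (not kernel code) of the shutdown vs.
 * release distinction: cleanup marks the object dead and drains it, while
 * the memory is freed only when the last reference holder drops its
 * reference. All names are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	int refcount;		/* every long-lived user holds a reference  */
	bool dead;		/* analogous to QUEUE_FLAG_DEAD             */
	int in_flight;		/* requests queued before the DEAD marking  */
};

static struct queue *queue_alloc(void)
{
	struct queue *q = calloc(1, sizeof(*q));

	q->refcount = 1;	/* base reference owned by the queue owner  */
	return q;
}

static void queue_get(struct queue *q)
{
	q->refcount++;		/* e.g. a disk pinning its queue            */
}

static void queue_put(struct queue *q)
{
	if (--q->refcount == 0) {	/* release: final teardown and free */
		printf("queue released\n");
		free(q);
	}
}

static void queue_submit(struct queue *q)
{
	if (q->dead) {		/* a dead queue fails new requests...       */
		printf("request failed: queue is dead\n");
		return;		/* ...instead of touching freed state       */
	}
	q->in_flight++;
}

static void queue_cleanup(struct queue *q)
{
	q->dead = true;		/* shutdown: no new work accepted           */
	while (q->in_flight)	/* drain everything already queued          */
		q->in_flight--;
	queue_put(q);		/* drop the owner's base reference          */
}

int main(void)
{
	struct queue *q = queue_alloc();

	queue_get(q);		/* a second user, think gendisk or bdev     */
	queue_submit(q);	/* accepted: queue is still alive            */
	queue_cleanup(q);	/* owner shuts the queue down                */
	queue_submit(q);	/* late submission is failed, not oopsed     */
	queue_put(q);		/* last reference dropped, freed here        */
	return 0;
}

The ordering in main() mirrors the design choice in the patches: teardown never races with reference holders because shutdown only disables the object, and the final free waits for the last put.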