author    Jiri Kosina <jkosina@suse.cz>    2014-02-20 14:54:28 +0100
committer Jiri Kosina <jkosina@suse.cz>    2014-02-20 14:54:28 +0100
commit    d4263348f796f29546f90802177865dd4379dd0a (patch)
tree      adcbdaebae584eee2f32fab95e826e8e49eef385    /block/blk-core.c
parent    be873ac782f5ff5ee6675f83929f4fe6737eead2 (diff)
parent    6d0abeca3242a88cab8232e4acd7e2bf088f3bc2 (diff)
Merge branch 'master' into for-next
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  |  81
1 file changed, 55 insertions(+), 26 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index cd01581..4db2b32 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -38,6 +38,7 @@
#include "blk.h"
#include "blk-cgroup.h"
+#include "blk-mq.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -130,7 +131,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
- if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+ if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
bio_endio(bio, error);
}
@@ -245,7 +246,16 @@ EXPORT_SYMBOL(blk_stop_queue);
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
- cancel_delayed_work_sync(&q->delay_work);
+
+ if (q->mq_ops) {
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ cancel_delayed_work_sync(&hctx->delayed_work);
+ } else {
+ cancel_delayed_work_sync(&q->delay_work);
+ }
}
EXPORT_SYMBOL(blk_sync_queue);
@@ -497,8 +507,13 @@ void blk_cleanup_queue(struct request_queue *q)
* Drain all requests queued before DYING marking. Set DEAD flag to
* prevent that q->request_fn() gets invoked after draining finished.
*/
- spin_lock_irq(lock);
- __blk_drain_queue(q, true);
+ if (q->mq_ops) {
+ blk_mq_drain_queue(q);
+ spin_lock_irq(lock);
+ } else {
+ spin_lock_irq(lock);
+ __blk_drain_queue(q, true);
+ }
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
@@ -678,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
+ uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!uninit_q->flush_rq)
+ goto out_cleanup_queue;
+
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
- blk_cleanup_queue(uninit_q);
-
+ goto out_free_flush_rq;
return q;
+
+out_free_flush_rq:
+ kfree(uninit_q->flush_rq);
+out_cleanup_queue:
+ blk_cleanup_queue(uninit_q);
+ return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);
@@ -1112,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
if (q->mq_ops)
- return blk_mq_alloc_request(q, rw, gfp_mask, false);
+ return blk_mq_alloc_request(q, rw, gfp_mask);
else
return blk_old_get_request(q, rw, gfp_mask);
}
@@ -1263,6 +1287,11 @@ void __blk_put_request(struct request_queue *q, struct request *req)
if (unlikely(!q))
return;
+ if (q->mq_ops) {
+ blk_mq_free_request(req);
+ return;
+ }
+
blk_pm_put_request(req);
elv_completed_request(q, req);
@@ -1326,7 +1355,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
bio->bi_io_vec->bv_offset = 0;
bio->bi_io_vec->bv_len = len;
- bio->bi_size = len;
+ bio->bi_iter.bi_size = len;
bio->bi_vcnt = 1;
bio->bi_phys_segments = 1;
@@ -1351,7 +1380,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
req->biotail->bi_next = bio;
req->biotail = bio;
- req->__data_len += bio->bi_size;
+ req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
@@ -1380,8 +1409,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
* not touch req->buffer either...
*/
req->buffer = bio_data(bio);
- req->__sector = bio->bi_sector;
- req->__data_len += bio->bi_size;
+ req->__sector = bio->bi_iter.bi_sector;
+ req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
@@ -1459,7 +1488,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_flags |= REQ_FAILFAST_MASK;
req->errors = 0;
- req->__sector = bio->bi_sector;
+ req->__sector = bio->bi_iter.bi_sector;
req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
@@ -1583,12 +1612,12 @@ static inline void blk_partition_remap(struct bio *bio)
if (bio_sectors(bio) && bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
- bio->bi_sector += p->start_sect;
+ bio->bi_iter.bi_sector += p->start_sect;
bio->bi_bdev = bdev->bd_contains;
trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
bdev->bd_dev,
- bio->bi_sector - p->start_sect);
+ bio->bi_iter.bi_sector - p->start_sect);
}
}
@@ -1654,7 +1683,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
/* Test device or partition size, when known. */
maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
if (maxsector) {
- sector_t sector = bio->bi_sector;
+ sector_t sector = bio->bi_iter.bi_sector;
if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
/*
@@ -1690,7 +1719,7 @@ generic_make_request_checks(struct bio *bio)
"generic_make_request: Trying to access "
"nonexistent block-device %s (%Lu)\n",
bdevname(bio->bi_bdev, b),
- (long long) bio->bi_sector);
+ (long long) bio->bi_iter.bi_sector);
goto end_io;
}
@@ -1704,9 +1733,9 @@ generic_make_request_checks(struct bio *bio)
}
part = bio->bi_bdev->bd_part;
- if (should_fail_request(part, bio->bi_size) ||
+ if (should_fail_request(part, bio->bi_iter.bi_size) ||
should_fail_request(&part_to_disk(part)->part0,
- bio->bi_size))
+ bio->bi_iter.bi_size))
goto end_io;
/*
@@ -1865,7 +1894,7 @@ void submit_bio(int rw, struct bio *bio)
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
- task_io_account_read(bio->bi_size);
+ task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
}
@@ -1874,7 +1903,7 @@ void submit_bio(int rw, struct bio *bio)
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
current->comm, task_pid_nr(current),
(rw & WRITE) ? "WRITE" : "READ",
- (unsigned long long)bio->bi_sector,
+ (unsigned long long)bio->bi_iter.bi_sector,
bdevname(bio->bi_bdev, b),
count);
}
@@ -2007,7 +2036,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
for (bio = rq->bio; bio; bio = bio->bi_next) {
if ((bio->bi_rw & ff) != ff)
break;
- bytes += bio->bi_size;
+ bytes += bio->bi_iter.bi_size;
}
/* this could lead to infinite loop */
@@ -2378,9 +2407,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
total_bytes = 0;
while (req->bio) {
struct bio *bio = req->bio;
- unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+ unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
- if (bio_bytes == bio->bi_size)
+ if (bio_bytes == bio->bi_iter.bi_size)
req->bio = bio->bi_next;
req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2757,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->buffer = bio_data(bio);
}
- rq->__data_len = bio->bi_size;
+ rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
if (bio->bi_bdev)
@@ -2746,10 +2775,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
void rq_flush_dcache_pages(struct request *rq)
{
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
rq_for_each_segment(bvec, rq, iter)
- flush_dcache_page(bvec->bv_page);
+ flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif
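
Most of the hunks above are mechanical fallout from the immutable-biovec conversion: the per-bio cursor fields (bi_sector, bi_size, bi_idx) moved into an embedded struct bvec_iter, so call sites now read bio->bi_iter.bi_sector and bio->bi_iter.bi_size, and rq_for_each_segment() hands back a struct bio_vec by value instead of a pointer. As a rough reference, here is a minimal userspace sketch of that layout, assuming the 3.14-era bvec_iter field names; struct bio_sketch, main(), and the sample values are illustrative only, not kernel code.

/* Minimal userspace sketch (NOT kernel code) of the bvec_iter layout
 * that the bi_sector/bi_size hunks in this diff convert to. */
#include <stdio.h>

typedef unsigned long long sector_t;	/* stand-in for the kernel typedef */

struct bvec_iter {
	sector_t	bi_sector;	/* device address in 512-byte sectors */
	unsigned int	bi_size;	/* residual I/O count, in bytes */
	unsigned int	bi_idx;		/* current index into the bio's bvec array */
	unsigned int	bi_bvec_done;	/* bytes completed in the current bvec */
};

struct bio_sketch {			/* hypothetical stand-in for struct bio */
	struct bvec_iter bi_iter;	/* replaces the old bi_sector/bi_size/bi_idx members */
};

int main(void)
{
	struct bio_sketch bio = {
		.bi_iter = { .bi_sector = 2048, .bi_size = 4096 },
	};

	/* post-conversion accessor style used throughout the diff */
	printf("sector=%llu size=%u\n",
	       (unsigned long long)bio.bi_iter.bi_sector, bio.bi_iter.bi_size);
	return 0;
}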