Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 117
1 file changed, 82 insertions(+), 35 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index f0640d7..7da630e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -184,7 +184,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
- if (blk_pc_request(rq)) {
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
printk(KERN_INFO " cdb: ");
for (bit = 0; bit < BLK_MAX_CDB; bit++)
printk("%02x ", rq->cmd[bit]);
@@ -608,6 +608,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
q->request_fn = rfn;
q->prep_rq_fn = NULL;
+ q->unprep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
q->queue_flags = QUEUE_FLAG_DEFAULT;
q->queue_lock = lock;
@@ -1135,30 +1136,46 @@ void blk_put_request(struct request *req)
}
EXPORT_SYMBOL(blk_put_request);
+/**
+ * blk_add_request_payload - add a payload to a request
+ * @rq: request to update
+ * @page: page backing the payload
+ * @len: length of the payload.
+ *
+ * This allows a block driver to later add a payload to an already
+ * submitted request. The driver must take care of freeing the
+ * payload itself.
+ *
+ * Note that this is a quite horrible hack and nothing but handling of
+ * discard requests should ever use it.
+ */
+void blk_add_request_payload(struct request *rq, struct page *page,
+ unsigned int len)
+{
+ struct bio *bio = rq->bio;
+
+ bio->bi_io_vec->bv_page = page;
+ bio->bi_io_vec->bv_offset = 0;
+ bio->bi_io_vec->bv_len = len;
+
+ bio->bi_size = len;
+ bio->bi_vcnt = 1;
+ bio->bi_phys_segments = 1;
+
+ rq->__data_len = rq->resid_len = len;
+ rq->nr_phys_segments = 1;
+ rq->buffer = bio_data(bio);
+}
+EXPORT_SYMBOL_GPL(blk_add_request_payload);
+
void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cpu = bio->bi_comp_cpu;
req->cmd_type = REQ_TYPE_FS;
- /*
- * Inherit FAILFAST from bio (for read-ahead, and explicit
- * FAILFAST). FAILFAST flags are identical for req and bio.
- */
- if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+ req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+ if (bio->bi_rw & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
- else
- req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
-
- if (bio_rw_flagged(bio, BIO_RW_DISCARD))
- req->cmd_flags |= REQ_DISCARD;
- if (bio_rw_flagged(bio, BIO_RW_BARRIER))
- req->cmd_flags |= REQ_HARDBARRIER;
- if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
- req->cmd_flags |= REQ_RW_SYNC;
- if (bio_rw_flagged(bio, BIO_RW_META))
- req->cmd_flags |= REQ_RW_META;
- if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
- req->cmd_flags |= REQ_NOIDLE;
req->errors = 0;
req->__sector = bio->bi_sector;
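
The blk_add_request_payload() helper added in this hunk lets a driver attach a single driver-owned page as the data payload of a request that was submitted without one, which the comment restricts to discard handling. Below is a hedged sketch of how a driver's prep_rq_fn might use it when a discard has to be turned into a command that needs a parameter buffer; the function name my_prep_discard(), the GFP flags and the payload length are illustrative assumptions, not part of this patch:

#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Hypothetical prep_rq_fn: attach a zeroed payload page to a discard. */
static int my_prep_discard(struct request_queue *q, struct request *rq)
{
        struct page *page;

        if (!(rq->cmd_flags & REQ_DISCARD))
                return BLKPREP_OK;

        /* Discard bios carry no data; the hardware command the driver
         * builds (e.g. a SCSI UNMAP) needs a buffer, so attach one. */
        page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
        if (!page)
                return BLKPREP_DEFER;

        blk_add_request_payload(rq, page, PAGE_SIZE);

        /* ...the driver would build the actual command here... */

        /* Mark the request prepared so the unprep hook (see the sketch
         * after the blk_unprep_request() hunk) can free the page. */
        rq->cmd_flags |= REQ_DONTPREP;
        return BLKPREP_OK;
}
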
@@ -1181,12 +1198,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
int el_ret;
unsigned int bytes = bio->bi_size;
const unsigned short prio = bio_prio(bio);
- const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
- const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+ const bool sync = (bio->bi_rw & REQ_SYNC);
+ const bool unplug = (bio->bi_rw & REQ_UNPLUG);
const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
int rw_flags;
- if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
+ if ((bio->bi_rw & REQ_HARDBARRIER) &&
(q->next_ordered == QUEUE_ORDERED_NONE)) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
@@ -1200,7 +1217,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
spin_lock_irq(q->queue_lock);
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
+ if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
goto get_rq;
el_ret = elv_merge(q, &req, bio);
@@ -1275,7 +1292,7 @@ get_rq:
*/
rw_flags = bio_data_dir(bio);
if (sync)
- rw_flags |= REQ_RW_SYNC;
+ rw_flags |= REQ_SYNC;
/*
* Grab a free request. This might sleep but can not fail.
@@ -1464,7 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
goto end_io;
}
- if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+ if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
@@ -1497,8 +1514,7 @@ static inline void __generic_make_request(struct bio *bio)
if (bio_check_eod(bio, nr_sectors))
goto end_io;
- if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
- !blk_queue_discard(q)) {
+ if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
err = -EOPNOTSUPP;
goto end_io;
}
@@ -1583,7 +1599,7 @@ void submit_bio(int rw, struct bio *bio)
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
- if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
+ if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
@@ -1628,6 +1644,9 @@ EXPORT_SYMBOL(submit_bio);
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
+ if (rq->cmd_flags & REQ_DISCARD)
+ return 0;
+
if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
@@ -1796,7 +1815,7 @@ struct request *blk_peek_request(struct request_queue *q)
* sees this request (possibly after
* requeueing). Notify IO scheduler.
*/
- if (blk_sorted_rq(rq))
+ if (rq->cmd_flags & REQ_SORTED)
elv_activate_rq(q, rq);
/*
@@ -1984,10 +2003,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
* TODO: tj: This is too subtle. It would be better to let
* low level drivers do what they see fit.
*/
- if (blk_fs_request(req))
+ if (req->cmd_type == REQ_TYPE_FS)
req->errors = 0;
- if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+ if (error && req->cmd_type == REQ_TYPE_FS &&
+ !(req->cmd_flags & REQ_QUIET)) {
printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
req->rq_disk ? req->rq_disk->disk_name : "?",
(unsigned long long)blk_rq_pos(req));
@@ -2074,7 +2094,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->buffer = bio_data(req->bio);
/* update sector only for requests with clear definition of sector */
- if (blk_fs_request(req) || blk_discard_rq(req))
+ if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
req->__sector += total_bytes >> 9;
/* mixed attributes always follow the first bio */
@@ -2111,11 +2131,32 @@ static bool blk_update_bidi_request(struct request *rq, int error,
blk_update_request(rq->next_rq, error, bidi_bytes))
return true;
- add_disk_randomness(rq->rq_disk);
+ if (blk_queue_add_random(rq->q))
+ add_disk_randomness(rq->rq_disk);
return false;
}
+/**
+ * blk_unprep_request - unprepare a request
+ * @req: the request
+ *
+ * This function makes a request ready for complete resubmission (or
+ * completion). It happens only after all error handling is complete,
+ * so represents the appropriate moment to deallocate any resources
+ * that were allocated to the request in the prep_rq_fn. The queue
+ * lock is held when calling this.
+ */
+void blk_unprep_request(struct request *req)
+{
+ struct request_queue *q = req->q;
+
+ req->cmd_flags &= ~REQ_DONTPREP;
+ if (q->unprep_rq_fn)
+ q->unprep_rq_fn(q, req);
+}
+EXPORT_SYMBOL_GPL(blk_unprep_request);
+
/*
* queue lock must be held
*/
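
blk_finish_request() (next hunk) now calls blk_unprep_request() for any completed request still carrying REQ_DONTPREP, giving drivers a hook that mirrors prep_rq_fn once error handling is over. A hedged sketch of the unprep side matching the hypothetical my_prep_discard() above, assuming the blk_queue_unprep_rq() setter that accompanies the new unprep_rq_fn field; names other than the REQ_* flags and queue fields are illustrative:

#include <linux/blkdev.h>
#include <linux/mm.h>

/* Hypothetical unprep_rq_fn: release the payload page attached in
 * my_prep_discard(). By the time this runs the request has fully
 * completed and rq->bio is gone, but rq->buffer still points at the
 * payload data set up by blk_add_request_payload(). */
static void my_unprep_discard(struct request_queue *q, struct request *rq)
{
        if (rq->cmd_flags & REQ_DISCARD)
                __free_page(virt_to_page(rq->buffer));
}

/* During queue setup, alongside blk_queue_prep_rq(): */
static void my_init_queue(struct request_queue *q)
{
        blk_queue_unprep_rq(q, my_unprep_discard);
}
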
@@ -2126,11 +2167,15 @@ static void blk_finish_request(struct request *req, int error)
BUG_ON(blk_queued_rq(req));
- if (unlikely(laptop_mode) && blk_fs_request(req))
+ if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
laptop_io_completion(&req->q->backing_dev_info);
blk_delete_timer(req);
+ if (req->cmd_flags & REQ_DONTPREP)
+ blk_unprep_request(req);
+
+
blk_account_io_done(req);
if (req->end_io)
@@ -2363,7 +2408,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
- rq->cmd_flags |= bio->bi_rw & REQ_RW;
+ rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
if (bio_has_data(bio)) {
rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -2450,6 +2495,8 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
+ if (src->cmd_flags & REQ_DISCARD)
+ dst->cmd_flags |= REQ_DISCARD;
dst->cmd_type = src->cmd_type;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);