author		shaohua.li@intel.com <shaohua.li@intel.com>	2011-05-06 11:34:32 -0600
committer	Jens Axboe <jaxboe@fusionio.com>		2011-05-06 11:36:25 -0600
commit		f3876930952390a31c3a7fd68dd621464a36eb80 (patch)
tree		7fe2306a8dc2022ac7724b2d47629e38e3fb354b
parent		490b94be0282c3b67f56453628ff0aaae827a670 (diff)
download	op-kernel-dev-f3876930952390a31c3a7fd68dd621464a36eb80.zip
		op-kernel-dev-f3876930952390a31c3a7fd68dd621464a36eb80.tar.gz
block: add a non-queueable flush flag
A flush request isn't queueable on some drives. Add a flag that lets the driver notify the block layer about this; the block layer can then use that knowledge to optimize flush performance.

Stable: 2.6.39 only

Cc: stable@kernel.org
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
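As a usage illustration (not part of this commit), a driver for a device whose cache flush cannot be queued behind other commands would call the new helper while configuring its request queue. A minimal sketch, with a hypothetical setup function name:

#include <linux/blkdev.h>

/* Hypothetical queue setup: advertise flush support for a drive with
 * a write cache, then tell the block layer (via the helper added by
 * this patch) that its flush command is not queueable. */
static void example_setup_flush(struct request_queue *q)
{
	blk_queue_flush(q, REQ_FLUSH);		/* device has a write cache */
	blk_queue_flush_queueable(q, false);	/* flush cannot be queued */
}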
-rw-r--r--	block/blk-settings.c	6
-rw-r--r--	include/linux/blkdev.h	7
2 files changed, 13 insertions, 0 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 1fa7692..cd3c428 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -790,6 +790,12 @@ void blk_queue_flush(struct request_queue *q, unsigned int flush)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush);
 
+void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
+{
+	q->flush_not_queueable = !queueable;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cbbfd98..8bd2a27 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -364,6 +364,7 @@ struct request_queue
 	 * for flush operations
 	 */
 	unsigned int		flush_flags;
+	unsigned int		flush_not_queueable:1;
 	unsigned int		flush_pending_idx:1;
 	unsigned int		flush_running_idx:1;
 	unsigned long		flush_pending_since;
@@ -843,6 +844,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
+extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
@@ -1111,6 +1113,11 @@ static inline unsigned int block_size(struct block_device *bdev)
 	return bdev->bd_block_size;
 }
 
+static inline bool queue_flush_queueable(struct request_queue *q)
+{
+	return !q->flush_not_queueable;
+}
+
 typedef struct {struct page *v;} Sector;
 
 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
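The inline helper gives the flush machinery a cheap way to query the flag. As a sketch of the kind of optimization the commit message alludes to (a hypothetical check, not code from this patch), dispatch logic could hold back other requests while a flush is in flight on a non-queueable drive, using the flush_pending_idx/flush_running_idx fields shown above to detect an in-flight flush:

/* Hypothetical dispatch-side check: the pending and running indices
 * differing indicates a flush is in flight; if the drive cannot queue
 * a flush, other requests should be held until it completes. */
static inline bool example_hold_queue_for_flush(struct request_queue *q)
{
	return !queue_flush_queueable(q) &&
	       q->flush_pending_idx != q->flush_running_idx;
}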