author    Jan Kara <jack@suse.cz>    2015-06-18 17:19:14 +0200
committer Jan Kara <jack@suse.com>   2015-07-23 20:59:40 +0200
commit    a3ad0a9da863fa554fc17fa8345a07adcdd27d3c (patch)
tree      a37306c7997b48d3613391f93e77df59e6233b8c /block/bounce.c
parent    c290ea01abb7907fde602f3ba55905ef10a37477 (diff)
block: Remove forced page bouncing under IO
The JBD layer wrote back data buffers without setting the PageWriteback bit. Thus the standard mechanism for guaranteeing stable pages under IO did not work for those writes, and the block layer had to be able to force-bounce them instead. Since JBD is gone now and there is no other user of the functionality, just remove it.

Acked-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jan Kara <jack@suse.cz>
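For context (not part of the commit): the "standard mechanism" the message refers to is the PageWriteback-based stable-pages protocol. Below is a minimal kernel-style sketch of that protocol, assuming the mm helpers of this era (set_page_writeback(), wait_for_stable_page(), bdi_cap_stable_pages_required()); the two wrapper functions are hypothetical names used only for illustration.

#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

/* Hypothetical wrapper: the writeback side of the stable-pages protocol. */
static void submit_page_writeback(struct page *page)
{
	set_page_writeback(page);	/* mark the page as visibly under IO */
	/* ... build and submit a bio for this page ... */
}

/* Hypothetical wrapper: the writer side. */
static void before_modifying_page(struct page *page)
{
	/*
	 * wait_for_stable_page() blocks until writeback completes when
	 * the backing device requires stable pages (compare
	 * bdi_cap_stable_pages_required() in the removed code below);
	 * otherwise it returns immediately.
	 */
	wait_for_stable_page(page);
	/* ... now it is safe to modify or redirty the page ... */
}

Because JBD submitted buffer writeback without the set_page_writeback() step, writers could race with the IO, which is why its writes needed the forced bouncing this patch removes.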
Diffstat (limited to 'block/bounce.c')
-rw-r--r--  block/bounce.c | 31
 1 file changed, 4 insertions(+), 27 deletions(-)
diff --git a/block/bounce.c b/block/bounce.c
index b173112..31cad13 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -176,26 +176,8 @@ static void bounce_end_io_read_isa(struct bio *bio, int err)
 	__bounce_end_io_read(bio, isa_page_pool, err);
 }
 
-#ifdef CONFIG_NEED_BOUNCE_POOL
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	if (bio_data_dir(bio) != WRITE)
-		return 0;
-
-	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
-		return 0;
-
-	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
-}
-#else
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	return 0;
-}
-#endif /* CONFIG_NEED_BOUNCE_POOL */
-
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
-			       mempool_t *pool, int force)
+			       mempool_t *pool)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -203,8 +185,6 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	struct bvec_iter iter;
 	unsigned i;
 
-	if (force)
-		goto bounce;
 	bio_for_each_segment(from, *bio_orig, iter)
 		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
 			goto bounce;
@@ -216,7 +196,7 @@ bounce:
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;
 
-		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
+		if (page_to_pfn(page) <= queue_bounce_pfn(q))
 			continue;
 
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
@@ -254,7 +234,6 @@ bounce:
 
 void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
-	int must_bounce;
 	mempool_t *pool;
 
 	/*
@@ -263,15 +242,13 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	if (!bio_has_data(*bio_orig))
 		return;
 
-	must_bounce = must_snapshot_stable_pages(q, *bio_orig);
-
 	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
 	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
+		if (queue_bounce_pfn(q) >= blk_max_pfn)
 			return;
 		pool = page_pool;
 	} else {
@@ -282,7 +259,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	/*
 	 * slow path
 	 */
-	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
+	__blk_queue_bounce(q, bio_orig, pool);
 }
 
 EXPORT_SYMBOL(blk_queue_bounce);
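Illustrative follow-up (not code from the tree): with the force path removed, the per-segment bounce decision above reduces to the highmem pfn test alone. A condensed restatement using the same helpers as the patched code; segment_needs_bounce() is a hypothetical name.

#include <linux/blkdev.h>
#include <linux/mm.h>

/*
 * Hypothetical restatement of the post-patch check: a segment is
 * bounced only when its page lies above the queue's bounce pfn limit;
 * the BIO_SNAP_STABLE "force" trigger no longer exists.
 */
static bool segment_needs_bounce(struct request_queue *q,
				 const struct bio_vec *bv)
{
	return page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
}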