author		Mike Snitzer <snitzer@redhat.com>	2009-07-23 20:30:40 +0100
committer	Alasdair G Kergon <agk@redhat.com>	2009-07-23 20:30:40 +0100
commit		a732c207d19e899845ae47139708af898daaf9fd (patch)
tree		aed98221e373868b4fe0fbba9c4fcf5ac8ede128
parent		69885683d22d8c05910fd808c01fdce1322739b4 (diff)
dm: remove queue next_ordered workaround for barriers
This patch removes DM's bio-based vs request-based conditional setting
of next_ordered. For bio-based DM the next_ordered check is no longer a
concern (as that check is now in the __make_request path). For
request-based DM the default of QUEUE_ORDERED_NONE is now appropriate.

bio-based DM was changed to work-around the previously misplaced
next_ordered check with this commit:
99360b4c18f7675b50d283301d46d755affe75fd

request-based DM does not yet support barriers but reacted to the above
bio-based DM change with this commit:
5d67aa2366ccb8257d103d0b43df855605c3c086

The above changes are no longer needed given Neil Brown's recent fix to
put the next_ordered check in the __make_request path:
db64f680ba4b5c56c4be59f0698000df89ff0281

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Cc: NeilBrown <neilb@suse.de>
Acked-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Acked-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
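[Editor's note: for context, the check that makes the removed workaround unnecessary is the one Neil Brown moved into __make_request() in commit db64f680ba4b5c56c4be59f0698000df89ff0281, referenced above. A rough sketch of that check, reconstructed from the references in this commit message rather than copied from the block layer source (exact variable names and error paths in __make_request() may differ):

	/* Sketch: in __make_request(), a barrier bio submitted to a queue
	 * that has not advertised any ordered mode (next_ordered ==
	 * QUEUE_ORDERED_NONE) is failed up front, so bio-based DM no longer
	 * has to pre-set an ordered mode on its queue just to pass this
	 * check, and request-based DM can keep the QUEUE_ORDERED_NONE
	 * default until it actually supports barriers. */
	if (bio_barrier(bio) && bio_has_data(bio) &&
	    q->next_ordered == QUEUE_ORDERED_NONE) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}
]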
-rw-r--r--	drivers/md/dm-table.c	 5
-rw-r--r--	drivers/md/dm.c		10
-rw-r--r--	drivers/md/dm.h		 1
3 files changed, 0 insertions, 16 deletions
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2cba557..5d16d06 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -830,11 +830,6 @@ unsigned dm_table_get_type(struct dm_table *t)
 	return t->type;
 }
 
-bool dm_table_bio_based(struct dm_table *t)
-{
-	return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
-}
-
 bool dm_table_request_based(struct dm_table *t)
 {
 	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9acd54a..8a311ea 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2203,16 +2203,6 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 		goto out;
 	}
 
-	/*
-	 * It is enought that blk_queue_ordered() is called only once when
-	 * the first bio-based table is bound.
-	 *
-	 * This setting should be moved to alloc_dev() when request-based dm
-	 * supports barrier.
-	 */
-	if (!md->map && dm_table_bio_based(table))
-		blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
-
 	__unbind(md);
 	r = __bind(md, table, &limits);
 
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 23278ae..a7663eb 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -61,7 +61,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 int dm_table_any_busy_target(struct dm_table *t);
 int dm_table_set_type(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
-bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 int dm_table_alloc_md_mempools(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
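[Editor's note: with the dm_table_bio_based() helper gone, any caller that still needs to distinguish bio-based tables can test the table type directly. A minimal sketch using only the symbols kept by this patch (dm_table_get_type() and the DM_TYPE_* constants shown in the diff):

	/* Open-coded equivalent of the removed dm_table_bio_based() helper. */
	if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
		/* table maps bios rather than requests */
	}
]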