Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Kconfig               |   2
-rw-r--r--  drivers/md/dm-bio-list.h         |   3
-rw-r--r--  drivers/md/dm-crypt.c            |  23
-rw-r--r--  drivers/md/dm-emc.c              |  15
-rw-r--r--  drivers/md/dm-exception-store.c  |   1
-rw-r--r--  drivers/md/dm-io.c               |   8
-rw-r--r--  drivers/md/dm-mpath-rdac.c       |   2
-rw-r--r--  drivers/md/dm-mpath.c            |   4
-rw-r--r--  drivers/md/dm-raid1.c            |   7
-rw-r--r--  drivers/md/dm-snap.c             |   2
-rw-r--r--  drivers/md/dm-table.c            |   8
-rw-r--r--  drivers/md/dm-zero.c             |   2
-rw-r--r--  drivers/md/dm.c                  |  32
-rw-r--r--  drivers/md/faulty.c              |  12
-rw-r--r--  drivers/md/linear.c              |  18
-rw-r--r--  drivers/md/md.c                  |  30
-rw-r--r--  drivers/md/multipath.c           |  25
-rw-r--r--  drivers/md/raid0.c               |  18
-rw-r--r--  drivers/md/raid1.c               |  67
-rw-r--r--  drivers/md/raid10.c              |  55
-rw-r--r--  drivers/md/raid5.c               | 108
21 files changed, 188 insertions(+), 254 deletions(-)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 531d4d1..34a8c60 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -263,7 +263,7 @@ config DM_MULTIPATH_EMC
config DM_MULTIPATH_RDAC
tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)"
- depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
+ depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
---help---
Multipath support for LSI/Engenio RDAC.
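
The new SCSI dependency is not cosmetic: the RDAC hardware handler issues SCSI commands, so building it into a kernel without the SCSI core fails at link time. A minimal sketch of the same pattern, with a hypothetical handler name:

config DM_MULTIPATH_EXAMPLE
	tristate "Example SCSI-based hardware handler (EXPERIMENTAL)"
	depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
	---help---
	  A handler that sends SCSI commands must state the SCSI
	  dependency, or configurations without the SCSI core break.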
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
index 16ee3b0..3f7b827 100644
--- a/drivers/md/dm-bio-list.h
+++ b/drivers/md/dm-bio-list.h
@@ -9,6 +9,8 @@
#include <linux/bio.h>
+#ifdef CONFIG_BLOCK
+
struct bio_list {
struct bio *head;
struct bio *tail;
@@ -106,4 +108,5 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
return bio;
}
+#endif /* CONFIG_BLOCK */
#endif
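
Guarding the header body with CONFIG_BLOCK lets code that is compiled even on block-less configurations include dm-bio-list.h safely, since struct bio itself only exists when the block layer is built. A minimal sketch of such a consumer, assuming the guard above:

#include "dm-bio-list.h"

#ifdef CONFIG_BLOCK
/* Only compiled when the block layer exists; without the header's
 * guard, merely including dm-bio-list.h here would not build on
 * CONFIG_BLOCK=n configurations. */
static void drain_bios(struct bio_list *bl)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bl)))
		bio_endio(bio, 0);
}
#endif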
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ba952a0..8216a6f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -489,7 +489,7 @@ static void dec_pending(struct dm_crypt_io *io, int error)
if (!atomic_dec_and_test(&io->pending))
return;
- bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
+ bio_endio(io->base_bio, io->error);
mempool_free(io, cc->io_pool);
}
@@ -509,25 +509,19 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
queue_work(_kcryptd_workqueue, &io->work);
}
-static int crypt_endio(struct bio *clone, unsigned int done, int error)
+static void crypt_endio(struct bio *clone, int error)
{
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->target->private;
unsigned read_io = bio_data_dir(clone) == READ;
/*
- * free the processed pages, even if
- * it's only a partially completed write
+ * free the processed pages
*/
- if (!read_io)
- crypt_free_buffer_pages(cc, clone, done);
-
- /* keep going - not finished yet */
- if (unlikely(clone->bi_size))
- return 1;
-
- if (!read_io)
+ if (!read_io) {
+ crypt_free_buffer_pages(cc, clone, clone->bi_size);
goto out;
+ }
if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
error = -EIO;
@@ -537,12 +531,11 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
bio_put(clone);
io->post_process = 1;
kcryptd_queue_io(io);
- return 0;
+ return;
out:
bio_put(clone);
dec_pending(io, error);
- return error;
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
@@ -920,6 +913,8 @@ static void crypt_dtr(struct dm_target *ti)
{
struct crypt_config *cc = (struct crypt_config *) ti->private;
+ flush_workqueue(_kcryptd_workqueue);
+
bioset_free(cc->bs);
mempool_destroy(cc->page_pool);
mempool_destroy(cc->io_pool);
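
The crypt_endio() rewrite is the template for most of this diff: bi_end_io handlers are now invoked exactly once, when the bio has fully completed, so the old partial-completion protocol (check bi_size, return 1 to keep going) disappears and the handler returns void. A side-by-side sketch with illustrative names:

/* old interface: may be called repeatedly as parts complete */
static int old_style_endio(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)		/* not finished yet */
		return 1;		/* ask to be called again */
	/* ... real completion work ... */
	return 0;
}

/* new interface: called once, on full completion */
static void new_style_endio(struct bio *bio, int error)
{
	/* ... real completion work ... */
}

Separately, the crypt_dtr() hunk flushes the kcryptd workqueue before the mempools are destroyed, so no still-queued work can run against freed memory.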
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 265c467..a2191a4 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -38,13 +38,10 @@ static inline void free_bio(struct bio *bio)
bio_put(bio);
}
-static int emc_endio(struct bio *bio, unsigned int bytes_done, int error)
+static void emc_endio(struct bio *bio, int error)
{
struct dm_path *path = bio->bi_private;
- if (bio->bi_size)
- return 1;
-
/* We also need to look at the sense keys here whether or not to
* switch to the next PG etc.
*
@@ -109,15 +106,7 @@ static struct request *get_failover_req(struct emc_handler *h,
return NULL;
}
- rq->bio = rq->biotail = bio;
- blk_rq_bio_prep(q, rq, bio);
-
- rq->rq_disk = bdev->bd_contains->bd_disk;
-
- /* bio backed don't set data */
- rq->buffer = rq->data = NULL;
- /* rq data_len used for pc cmd's request_bufflen */
- rq->data_len = bio->bi_size;
+ blk_rq_append_bio(q, rq, bio);
rq->sense = h->sense;
memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
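
blk_rq_append_bio() replaces the hand-rolled request setup that get_failover_req() carried (rq->bio, rq->biotail, rq->buffer, rq->data_len and so on), keeping that bookkeeping in one place in the block layer. A hedged sketch of building a BLOCK_PC request this way:

struct request *rq = blk_get_request(q, WRITE, GFP_NOIO);

if (rq) {
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	blk_rq_append_bio(q, rq, bio);	/* wires up bio, biotail, data_len */
}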
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 3d65917..8fe81e1 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -623,6 +623,7 @@ int dm_create_persistent(struct exception_store *store)
ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
if (!ps->metadata_wq) {
+ kfree(ps);
DMERR("couldn't start header metadata update thread");
return -ENOMEM;
}
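
The added kfree(ps) closes a leak: when workqueue creation failed, the store context allocated earlier in dm_create_persistent() was simply abandoned. The shape of the fix, with the earlier allocation shown only for illustration:

ps = kmalloc(sizeof(*ps), GFP_KERNEL);	/* earlier step, illustrative */
if (!ps)
	return -ENOMEM;

ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
if (!ps->metadata_wq) {
	kfree(ps);		/* undo the allocation on this error path */
	DMERR("couldn't start header metadata update thread");
	return -ENOMEM;
}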
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index f3a7724..b8e342f 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -124,15 +124,11 @@ static void dec_count(struct io *io, unsigned int region, int error)
}
}
-static int endio(struct bio *bio, unsigned int done, int error)
+static void endio(struct bio *bio, int error)
{
struct io *io;
unsigned region;
- /* keep going until we've finished */
- if (bio->bi_size)
- return 1;
-
if (error && bio_data_dir(bio) == READ)
zero_fill_bio(bio);
@@ -146,8 +142,6 @@ static int endio(struct bio *bio, unsigned int done, int error)
bio_put(bio);
dec_count(io, region, error);
-
- return 0;
}
/*-----------------------------------------------------------------
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
index 8b776b8..16b1613 100644
--- a/drivers/md/dm-mpath-rdac.c
+++ b/drivers/md/dm-mpath-rdac.c
@@ -292,7 +292,7 @@ static struct request *get_rdac_req(struct rdac_handler *h,
rq->end_io_data = h;
rq->timeout = h->timeout;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->cmd_flags = REQ_FAILFAST | REQ_NOMERGE;
+ rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
return rq;
}
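
The switch from '=' to '|=' matters because blk_get_request() has already seeded rq->cmd_flags (including the data direction), and plain assignment silently wiped those bits:

rq->cmd_flags  = REQ_FAILFAST | REQ_NOMERGE;	/* clobbers existing flags */
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;	/* adds to them */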
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d6ca9d0..31056ab 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -390,11 +390,11 @@ static void dispatch_queued_ios(struct multipath *m)
r = map_io(m, bio, mpio, 1);
if (r < 0)
- bio_endio(bio, bio->bi_size, r);
+ bio_endio(bio, r);
else if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
else if (r == DM_MAPIO_REQUEUE)
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
bio = next;
}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 1a876f9..d09ff15 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -820,7 +820,7 @@ static void write_callback(unsigned long error, void *context)
break;
}
}
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
@@ -900,7 +900,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
*/
if (unlikely(ms->log_failure))
while ((bio = bio_list_pop(&sync)))
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
else while ((bio = bio_list_pop(&sync)))
do_write(ms, bio);
@@ -951,13 +951,12 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
- ms = kmalloc(len, GFP_KERNEL);
+ ms = kzalloc(len, GFP_KERNEL);
if (!ms) {
ti->error = "Cannot allocate mirror context";
return NULL;
}
- memset(ms, 0, len);
spin_lock_init(&ms->lock);
ms->ti = ti;
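
kzalloc() is kmalloc() plus zeroing, so the separate memset() and the chance of forgetting it on some path both go away:

ms = kmalloc(len, GFP_KERNEL);
if (ms)
	memset(ms, 0, len);	/* before: clear by hand */

ms = kzalloc(len, GFP_KERNEL);	/* after: allocated pre-zeroed */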
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 83ddbfe..98a633f 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -636,7 +636,7 @@ static void error_bios(struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
bio = n;
}
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2fc199b..2bcde57 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -526,7 +526,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
struct io_restrictions *rs = &ti->limits;
/*
@@ -979,7 +979,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
devices = dm_table_get_devices(t);
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
r |= bdi_congested(&q->backing_dev_info, bdi_bits);
}
@@ -992,7 +992,7 @@ void dm_table_unplug_all(struct dm_table *t)
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
if (q->unplug_fn)
q->unplug_fn(q);
@@ -1011,7 +1011,7 @@ int dm_table_flush_all(struct dm_table *t)
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
int err;
if (!q->issue_flush_fn)
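
The request_queue_t edits here, and in every driver below, retire a typedef: kernel style prefers spelled-out struct types, and the typedef was removed from the block layer, so all users convert in lockstep:

request_queue_t *q = bdev_get_queue(bdev);		/* before: typedef */
struct request_queue *q = bdev_get_queue(bdev);		/* after: explicit */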
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index f314d7d..bdec206 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -43,7 +43,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio,
break;
}
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
/* accepted bio, don't make new request */
return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 846614e..167765c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -80,7 +80,7 @@ struct mapped_device {
unsigned long flags;
- request_queue_t *queue;
+ struct request_queue *queue;
struct gendisk *disk;
char name[16];
@@ -484,23 +484,20 @@ static void dec_pending(struct dm_io *io, int error)
blk_add_trace_bio(io->md->queue, io->bio,
BLK_TA_COMPLETE);
- bio_endio(io->bio, io->bio->bi_size, io->error);
+ bio_endio(io->bio, io->error);
}
free_io(io->md, io);
}
}
-static int clone_endio(struct bio *bio, unsigned int done, int error)
+static void clone_endio(struct bio *bio, int error)
{
int r = 0;
struct dm_target_io *tio = bio->bi_private;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
- if (bio->bi_size)
- return 1;
-
if (!bio_flagged(bio, BIO_UPTODATE) && !error)
error = -EIO;
@@ -514,7 +511,7 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
error = r;
else if (r == DM_ENDIO_INCOMPLETE)
/* The target will handle the io */
- return 1;
+ return;
else if (r) {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
@@ -530,7 +527,6 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
bio_put(bio);
free_tio(md, tio);
- return r;
}
static sector_t max_io_len(struct mapped_device *md,
@@ -580,8 +576,8 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
/* the bio has been remapped so dispatch it */
blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
- tio->io->bio->bi_bdev->bd_dev, sector,
- clone->bi_sector);
+ tio->io->bio->bi_bdev->bd_dev,
+ clone->bi_sector, sector);
generic_make_request(clone);
} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
@@ -761,7 +757,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
ci.map = dm_get_table(md);
if (!ci.map) {
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return;
}
@@ -792,7 +788,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static int dm_request(request_queue_t *q, struct bio *bio)
+static int dm_request(struct request_queue *q, struct bio *bio)
{
int r;
int rw = bio_data_dir(bio);
@@ -803,7 +799,7 @@ static int dm_request(request_queue_t *q, struct bio *bio)
* guarantee it is (or can be) handled by the targets correctly.
*/
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -820,13 +816,13 @@ static int dm_request(request_queue_t *q, struct bio *bio)
up_read(&md->io_lock);
if (bio_rw(bio) == READA) {
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
r = queue_io(md, bio);
if (r < 0) {
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
} else if (r == 0)
@@ -844,7 +840,7 @@ static int dm_request(request_queue_t *q, struct bio *bio)
return 0;
}
-static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
+static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
struct mapped_device *md = q->queuedata;
@@ -859,7 +855,7 @@ static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
return ret;
}
-static void dm_unplug_all(request_queue_t *q)
+static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_table(md);
@@ -1110,7 +1106,7 @@ static void __set_size(struct mapped_device *md, sector_t size)
static int __bind(struct mapped_device *md, struct dm_table *t)
{
- request_queue_t *q = md->queue;
+ struct request_queue *q = md->queue;
sector_t size;
size = dm_table_get_size(t);
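
Alongside the mechanical conversions, the __map_bio() hunk fixes the remap tracepoint: the last two arguments of blk_add_trace_remap() are the two sectors of the remap, and dm had been passing them in swapped order, so blktrace recorded remaps backwards. The corrected call, as in the hunk above (the argument-order reading here is inferred from the fix itself):

blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
		    tio->io->bio->bi_bdev->bd_dev,
		    clone->bi_sector, sector);	/* remapped sector, then original */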
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 4ebd0f2..cf2ddce 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -65,18 +65,16 @@
#include <linux/raid/md.h>
-static int faulty_fail(struct bio *bio, unsigned int bytes_done, int error)
+static void faulty_fail(struct bio *bio, int error)
{
struct bio *b = bio->bi_private;
b->bi_size = bio->bi_size;
b->bi_sector = bio->bi_sector;
- if (bio->bi_size == 0)
- bio_put(bio);
+ bio_put(bio);
- clear_bit(BIO_UPTODATE, &b->bi_flags);
- return (b->bi_end_io)(b, bytes_done, -EIO);
+ bio_io_error(b);
}
typedef struct faulty_conf {
@@ -167,7 +165,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
conf->nfaults = n+1;
}
-static int make_request(request_queue_t *q, struct bio *bio)
+static int make_request(struct request_queue *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = (conf_t*)mddev->private;
@@ -179,7 +177,7 @@ static int make_request(request_queue_t *q, struct bio *bio)
/* special case - don't decrement, don't generic_make_request,
* just fail immediately
*/
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
return 0;
}
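
faulty_fail() no longer clears BIO_UPTODATE and tail-calls b->bi_end_io itself; under the new interface, failing a bio is a one-liner, approximately:

#define bio_io_error(bio)	bio_endio((bio), -EIO)

bio_endio() clears BIO_UPTODATE on its own when error is set, so the manual flag handling becomes redundant.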
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 1927410..5501487 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -55,7 +55,7 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
*
* Return amount of bytes we can take at this offset
*/
-static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
dev_info_t *dev0;
@@ -79,20 +79,20 @@ static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
return maxsectors << 9;
}
-static void linear_unplug(request_queue_t *q)
+static void linear_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
linear_conf_t *conf = mddev_to_conf(mddev);
int i;
for (i=0; i < mddev->raid_disks; i++) {
- request_queue_t *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
if (r_queue->unplug_fn)
r_queue->unplug_fn(r_queue);
}
}
-static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -101,7 +101,7 @@ static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
for (i=0; i < mddev->raid_disks && ret == 0; i++) {
struct block_device *bdev = conf->disks[i].rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -118,7 +118,7 @@ static int linear_congested(void *data, int bits)
int i, ret = 0;
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
- request_queue_t *q = bdev_get_queue(conf->disks[i].rdev->bdev);
+ struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
return ret;
@@ -330,7 +330,7 @@ static int linear_stop (mddev_t *mddev)
return 0;
}
-static int linear_make_request (request_queue_t *q, struct bio *bio)
+static int linear_make_request (struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
@@ -338,7 +338,7 @@ static int linear_make_request (request_queue_t *q, struct bio *bio)
sector_t block;
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -358,7 +358,7 @@ static int linear_make_request (request_queue_t *q, struct bio *bio)
bdevname(tmp_dev->rdev->bdev, b),
(unsigned long long)tmp_dev->size,
(unsigned long long)tmp_dev->offset);
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 65ddc88..acf1b81 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -211,9 +211,9 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
)
-static int md_fail_request (request_queue_t *q, struct bio *bio)
+static int md_fail_request (struct request_queue *q, struct bio *bio)
{
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
@@ -384,12 +384,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
}
-static int super_written(struct bio *bio, unsigned int bytes_done, int error)
+static void super_written(struct bio *bio, int error)
{
mdk_rdev_t *rdev = bio->bi_private;
mddev_t *mddev = rdev->mddev;
- if (bio->bi_size)
- return 1;
if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk("md: super_written gets error=%d, uptodate=%d\n",
@@ -401,16 +399,13 @@ static int super_written(struct bio *bio, unsigned int bytes_done, int error)
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
bio_put(bio);
- return 0;
}
-static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
+static void super_written_barrier(struct bio *bio, int error)
{
struct bio *bio2 = bio->bi_private;
mdk_rdev_t *rdev = bio2->bi_private;
mddev_t *mddev = rdev->mddev;
- if (bio->bi_size)
- return 1;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
error == -EOPNOTSUPP) {
@@ -424,11 +419,11 @@ static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
spin_unlock_irqrestore(&mddev->write_lock, flags);
wake_up(&mddev->sb_wait);
bio_put(bio);
- return 0;
+ } else {
+ bio_put(bio2);
+ bio->bi_private = rdev;
+ super_written(bio, error);
}
- bio_put(bio2);
- bio->bi_private = rdev;
- return super_written(bio, bytes_done, error);
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
@@ -489,13 +484,9 @@ void md_super_wait(mddev_t *mddev)
finish_wait(&mddev->sb_wait, &wq);
}
-static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
+static void bi_complete(struct bio *bio, int error)
{
- if (bio->bi_size)
- return 1;
-
complete((struct completion*)bio->bi_private);
- return 0;
}
int sync_page_io(struct block_device *bdev, sector_t sector, int size,
@@ -3085,8 +3076,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
mddev->gendisk = disk;
mutex_unlock(&disks_mutex);
mddev->kobj.parent = &disk->kobj;
- mddev->kobj.k_name = NULL;
- snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
+ kobject_set_name(&mddev->kobj, "%s", "md");
mddev->kobj.ktype = &md_ktype;
if (kobject_register(&mddev->kobj))
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
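
Beyond the endio conversion, md.c stops poking kobject internals in md_probe(): writing kobj.name and k_name directly bypassed the kobject core's own name handling, so the accessor is used instead:

mddev->kobj.k_name = NULL;				/* before: direct */
snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");

kobject_set_name(&mddev->kobj, "%s", "md");		/* after: accessor */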
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 14da37f..f2a63f3 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -82,21 +82,17 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
struct bio *bio = mp_bh->master_bio;
multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
- bio_endio(bio, bio->bi_size, err);
+ bio_endio(bio, err);
mempool_free(mp_bh, conf->pool);
}
-static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
- int error)
+static void multipath_end_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
- if (bio->bi_size)
- return 1;
-
if (uptodate)
multipath_end_bh_io(mp_bh, 0);
else if (!bio_rw_ahead(bio)) {
@@ -112,7 +108,6 @@ static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
} else
multipath_end_bh_io(mp_bh, error);
rdev_dec_pending(rdev, conf->mddev);
- return 0;
}
static void unplug_slaves(mddev_t *mddev)
@@ -125,7 +120,7 @@ static void unplug_slaves(mddev_t *mddev)
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)
&& atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -140,13 +135,13 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void multipath_unplug(request_queue_t *q)
+static void multipath_unplug(struct request_queue *q)
{
unplug_slaves(q->queuedata);
}
-static int multipath_make_request (request_queue_t *q, struct bio * bio)
+static int multipath_make_request (struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
multipath_conf_t *conf = mddev_to_conf(mddev);
@@ -155,7 +150,7 @@ static int multipath_make_request (request_queue_t *q, struct bio * bio)
const int rw = bio_data_dir(bio);
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -169,7 +164,7 @@ static int multipath_make_request (request_queue_t *q, struct bio * bio)
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
mempool_free(mp_bh, conf->pool);
return 0;
}
@@ -199,7 +194,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
seq_printf (seq, "]");
}
-static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -211,7 +206,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -238,7 +233,7 @@ static int multipath_congested(void *data, int bits)
for (i = 0; i < mddev->raid_disks ; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
/* Just like multipath_map, we just check the
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2c404f7..ef0da2d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,7 +25,7 @@
#define MD_DRIVER
#define MD_PERSONALITY
-static void raid0_unplug(request_queue_t *q)
+static void raid0_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid0_conf_t *conf = mddev_to_conf(mddev);
@@ -33,14 +33,14 @@ static void raid0_unplug(request_queue_t *q)
int i;
for (i=0; i<mddev->raid_disks; i++) {
- request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);
+ struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
if (r_queue->unplug_fn)
r_queue->unplug_fn(r_queue);
}
}
-static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -50,7 +50,7 @@ static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
struct block_device *bdev = devlist[i]->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -68,7 +68,7 @@ static int raid0_congested(void *data, int bits)
int i, ret = 0;
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
- request_queue_t *q = bdev_get_queue(devlist[i]->bdev);
+ struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
@@ -268,7 +268,7 @@ static int create_strip_zones (mddev_t *mddev)
*
* Return amount of bytes we can accept at this offset
*/
-static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -408,7 +408,7 @@ static int raid0_stop (mddev_t *mddev)
return 0;
}
-static int raid0_make_request (request_queue_t *q, struct bio *bio)
+static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
@@ -420,7 +420,7 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
const int rw = bio_data_dir(bio);
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -490,7 +490,7 @@ bad_map:
" or bigger than %dk %llu %d\n", chunk_size,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 00c78b7..6d03bea 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -238,7 +238,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
(unsigned long long) bio->bi_sector +
(bio->bi_size >> 9) - 1);
- bio_endio(bio, bio->bi_size,
+ bio_endio(bio,
test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
}
free_r1bio(r1_bio);
@@ -255,16 +255,13 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
r1_bio->sector + (r1_bio->sectors);
}
-static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid1_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int mirror;
conf_t *conf = mddev_to_conf(r1_bio->mddev);
- if (bio->bi_size)
- return 1;
-
mirror = r1_bio->read_disk;
/*
* this branch is our 'one mirror IO has finished' event handler:
@@ -301,10 +298,9 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
}
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
- return 0;
}
-static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid1_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -312,8 +308,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
conf_t *conf = mddev_to_conf(r1_bio->mddev);
struct bio *to_put = NULL;
- if (bio->bi_size)
- return 1;
for (mirror = 0; mirror < conf->raid_disks; mirror++)
if (r1_bio->bios[mirror] == bio)
@@ -366,7 +360,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
(unsigned long long) mbio->bi_sector,
(unsigned long long) mbio->bi_sector +
(mbio->bi_size >> 9) - 1);
- bio_endio(mbio, mbio->bi_size, 0);
+ bio_endio(mbio, 0);
}
}
}
@@ -400,8 +394,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
if (to_put)
bio_put(to_put);
-
- return 0;
}
@@ -552,7 +544,7 @@ static void unplug_slaves(mddev_t *mddev)
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -567,7 +559,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid1_unplug(request_queue_t *q)
+static void raid1_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
@@ -575,7 +567,7 @@ static void raid1_unplug(request_queue_t *q)
md_wakeup_thread(mddev->thread);
}
-static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -587,7 +579,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -615,7 +607,7 @@ static int raid1_congested(void *data, int bits)
for (i = 0; i < mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
/* Note the '|| 1' - when read_balance prefers
* non-congested targets, it can be removed
@@ -765,7 +757,7 @@ do_sync_io:
return NULL;
}
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = mddev_to_conf(mddev);
@@ -796,7 +788,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
if (rw == WRITE)
md_write_end(mddev);
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -1137,14 +1129,11 @@ abort:
}
-static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_read(struct bio *bio, int error)
{
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int i;
- if (bio->bi_size)
- return 1;
-
for (i=r1_bio->mddev->raid_disks; i--; )
if (r1_bio->bios[i] == bio)
break;
@@ -1160,10 +1149,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
if (atomic_dec_and_test(&r1_bio->remaining))
reschedule_retry(r1_bio);
- return 0;
}
-static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -1172,9 +1160,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
int i;
int mirror=0;
- if (bio->bi_size)
- return 1;
-
for (i = 0; i < conf->raid_disks; i++)
if (r1_bio->bios[i] == bio) {
mirror = i;
@@ -1200,7 +1185,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
md_done_sync(mddev, r1_bio->sectors, uptodate);
put_buf(r1_bio);
}
- return 0;
}
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
@@ -1972,7 +1956,8 @@ static int run(mddev_t *mddev)
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
mddev->degraded++;
- conf->fullsync = 1;
+ if (disk->rdev)
+ conf->fullsync = 1;
}
}
if (mddev->degraded == conf->raid_disks) {
@@ -2153,11 +2138,25 @@ static int raid1_reshape(mddev_t *mddev)
oldpool = conf->r1bio_pool;
conf->r1bio_pool = newpool;
- for (d=d2=0; d < conf->raid_disks; d++)
- if (conf->mirrors[d].rdev) {
- conf->mirrors[d].rdev->raid_disk = d2;
- newmirrors[d2++].rdev = conf->mirrors[d].rdev;
+ for (d = d2 = 0; d < conf->raid_disks; d++) {
+ mdk_rdev_t *rdev = conf->mirrors[d].rdev;
+ if (rdev && rdev->raid_disk != d2) {
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ rdev->raid_disk = d2;
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ if (sysfs_create_link(&mddev->kobj,
+ &rdev->kobj, nm))
+ printk(KERN_WARNING
+ "md/raid1: cannot register "
+ "%s for %s\n",
+ nm, mdname(mddev));
}
+ if (rdev)
+ newmirrors[d2++].rdev = rdev;
+ }
kfree(conf->mirrors);
conf->mirrors = newmirrors;
kfree(conf->poolinfo);
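
The raid1_reshape() rewrite is also a sysfs fix: each member disk is exposed as an rd%d symlink under the array's kobject, so when a disk changes index during reshape the link has to be renamed. The sequence drops the link under the old name, drops any stale link already occupying the new name, then creates the new link and warns on failure. In sketch form, following the hunk above:

char nm[20];

sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);	/* old name */
rdev->raid_disk = d2;
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);	/* stale link at the new name */
if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
	printk(KERN_WARNING "md/raid1: cannot register %s for %s\n",
	       nm, mdname(mddev));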
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a95ada1..25a96c4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -227,7 +227,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
{
struct bio *bio = r10_bio->master_bio;
- bio_endio(bio, bio->bi_size,
+ bio_endio(bio,
test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
free_r10bio(r10_bio);
}
@@ -243,15 +243,13 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
r10_bio->devs[slot].addr + (r10_bio->sectors);
}
-static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid10_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev;
conf_t *conf = mddev_to_conf(r10_bio->mddev);
- if (bio->bi_size)
- return 1;
slot = r10_bio->read_slot;
dev = r10_bio->devs[slot].devnum;
@@ -284,19 +282,15 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int error)
}
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
- return 0;
}
-static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
+static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev;
conf_t *conf = mddev_to_conf(r10_bio->mddev);
- if (bio->bi_size)
- return 1;
-
for (slot = 0; slot < conf->copies; slot++)
if (r10_bio->devs[slot].bio == bio)
break;
@@ -339,7 +333,6 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, int error)
}
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
- return 0;
}
@@ -453,7 +446,7 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
* If near_copies == raid_disk, there are no striping issues,
* but in that case, the function isn't called at all.
*/
-static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio,
+static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
struct bio_vec *bio_vec)
{
mddev_t *mddev = q->queuedata;
@@ -595,7 +588,7 @@ static void unplug_slaves(mddev_t *mddev)
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -610,7 +603,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid10_unplug(request_queue_t *q)
+static void raid10_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
@@ -618,7 +611,7 @@ static void raid10_unplug(request_queue_t *q)
md_wakeup_thread(mddev->thread);
}
-static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -630,7 +623,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -658,7 +651,7 @@ static int raid10_congested(void *data, int bits)
for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
@@ -772,7 +765,7 @@ static void unfreeze_array(conf_t *conf)
spin_unlock_irq(&conf->resync_lock);
}
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = mddev_to_conf(mddev);
@@ -787,7 +780,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
unsigned long flags;
if (unlikely(bio_barrier(bio))) {
- bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ bio_endio(bio, -EOPNOTSUPP);
return 0;
}
@@ -819,7 +812,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
" or bigger than %dk %llu %d\n", chunk_sects/2,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
return 0;
}
@@ -917,6 +910,13 @@ static int make_request(request_queue_t *q, struct bio * bio)
bio_list_add(&bl, mbio);
}
+ if (unlikely(!atomic_read(&r10_bio->remaining))) {
+ /* the array is dead */
+ md_write_end(mddev);
+ raid_end_bio_io(r10_bio);
+ return 0;
+ }
+
bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_merge(&conf->pending_bio_list, &bl);
@@ -1148,15 +1148,12 @@ abort:
}
-static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_read(struct bio *bio, int error)
{
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
conf_t *conf = mddev_to_conf(r10_bio->mddev);
int i,d;
- if (bio->bi_size)
- return 1;
-
for (i=0; i<conf->copies; i++)
if (r10_bio->devs[i].bio == bio)
break;
@@ -1185,10 +1182,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
reschedule_retry(r10_bio);
}
rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
- return 0;
}
-static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
+static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
@@ -1196,9 +1192,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
conf_t *conf = mddev_to_conf(mddev);
int i,d;
- if (bio->bi_size)
- return 1;
-
for (i = 0; i < conf->copies; i++)
if (r10_bio->devs[i].bio == bio)
break;
@@ -1221,7 +1214,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
}
}
rdev_dec_pending(conf->mirrors[d].rdev, mddev);
- return 0;
}
/*
@@ -1367,7 +1359,7 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
if (test_bit(R10BIO_Uptodate, &r10_bio->state))
generic_make_request(wbio);
else
- bio_endio(wbio, wbio->bi_size, -EIO);
+ bio_endio(wbio, -EIO);
}
@@ -1557,7 +1549,6 @@ static void raid10d(mddev_t *mddev)
bio = r10_bio->devs[r10_bio->read_slot].bio;
r10_bio->devs[r10_bio->read_slot].bio =
mddev->ro ? IO_BLOCKED : NULL;
- bio_put(bio);
mirror = read_balance(conf, r10_bio);
if (mirror == -1) {
printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
@@ -1565,8 +1556,10 @@ static void raid10d(mddev_t *mddev)
bdevname(bio->bi_bdev,b),
(unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
+ bio_put(bio);
} else {
const int do_sync = bio_sync(r10_bio->master_bio);
+ bio_put(bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
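
Two substantive raid10 fixes ride along with the interface conversion. In make_request(), a write that attached to no device at all (remaining still zero) is now completed immediately rather than left pending forever. In raid10d(), bio_put() used to run before the error path's bdevname(bio->bi_bdev, b), a use-after-free if that put dropped the last reference; the put now follows the last use in each branch. Schematically:

bio_put(bio);					/* may free the bio ... */
printk("%s", bdevname(bio->bi_bdev, b));	/* ... then dereference it */

printk("%s", bdevname(bio->bi_bdev, b));	/* fixed: use first ... */
bio_put(bio);					/* ... put last */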
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0b66afe..caaca9e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -108,12 +108,11 @@ static void return_io(struct bio *return_bi)
{
struct bio *bi = return_bi;
while (bi) {
- int bytes = bi->bi_size;
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
- bi->bi_end_io(bi, bytes,
+ bi->bi_end_io(bi,
test_bit(BIO_UPTODATE, &bi->bi_flags)
? 0 : -EIO);
bi = return_bi;
@@ -289,7 +288,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
}
static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(request_queue_t *q);
+static void raid5_unplug_device(struct request_queue *q);
static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
int pd_idx, int noblock)
@@ -382,10 +381,10 @@ static unsigned long get_stripe_work(struct stripe_head *sh)
return pending;
}
-static int
-raid5_end_read_request(struct bio *bi, unsigned int bytes_done, int error);
-static int
-raid5_end_write_request (struct bio *bi, unsigned int bytes_done, int error);
+static void
+raid5_end_read_request(struct bio *bi, int error);
+static void
+raid5_end_write_request(struct bio *bi, int error);
static void ops_run_io(struct stripe_head *sh)
{
@@ -493,12 +492,12 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
if (frombio)
tx = async_memcpy(page, bio_page, page_offset,
b_offset, clen,
- ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_SRC,
+ ASYNC_TX_DEP_ACK,
tx, NULL, NULL);
else
tx = async_memcpy(bio_page, page, b_offset,
page_offset, clen,
- ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_DST,
+ ASYNC_TX_DEP_ACK,
tx, NULL, NULL);
}
if (clen < len) /* hit end of page */
@@ -514,7 +513,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
struct stripe_head *sh = stripe_head_ref;
struct bio *return_bi = NULL;
raid5_conf_t *conf = sh->raid_conf;
- int i, more_to_read = 0;
+ int i;
pr_debug("%s: stripe %llu\n", __FUNCTION__,
(unsigned long long)sh->sector);
@@ -522,16 +521,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
/* clear completed biofills */
for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- /* check if this stripe has new incoming reads */
- if (dev->toread)
- more_to_read++;
/* acknowledge completion of a biofill operation */
- /* and check if we need to reply to a read request
- */
- if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
+ /* and check if we need to reply to a read request,
+ * new R5_Wantfill requests are held off until
+ * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+ */
+ if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
struct bio *rbi, *rbi2;
- clear_bit(R5_Wantfill, &dev->flags);
/* The access to dev->read is outside of the
* spin_lock_irq(&conf->device_lock), but is protected
@@ -558,8 +555,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
return_io(return_bi);
- if (more_to_read)
- set_bit(STRIPE_HANDLE, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
}
@@ -951,7 +947,7 @@ static int grow_stripes(raid5_conf_t *conf, int num)
conf->active_name = 0;
sc = kmem_cache_create(conf->cache_name[conf->active_name],
sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sc)
return 1;
conf->slab_cache = sc;
@@ -1003,7 +999,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
/* Step 1 */
sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sc)
return -ENOMEM;
@@ -1113,8 +1109,7 @@ static void shrink_stripes(raid5_conf_t *conf)
conf->slab_cache = NULL;
}
-static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
- int error)
+static void raid5_end_read_request(struct bio * bi, int error)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
@@ -1123,8 +1118,6 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
char b[BDEVNAME_SIZE];
mdk_rdev_t *rdev;
- if (bi->bi_size)
- return 1;
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
@@ -1135,7 +1128,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
uptodate);
if (i == disks) {
BUG();
- return 0;
+ return;
}
if (uptodate) {
@@ -1188,20 +1181,15 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
- return 0;
}
-static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
- int error)
+static void raid5_end_write_request (struct bio *bi, int error)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
- if (bi->bi_size)
- return 1;
-
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
break;
@@ -1211,7 +1199,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
uptodate);
if (i == disks) {
BUG();
- return 0;
+ return;
}
if (!uptodate)
@@ -1222,7 +1210,6 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
- return 0;
}
@@ -2541,7 +2528,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
struct dma_async_tx_descriptor *tx = NULL;
clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
for (i = 0; i < sh->disks; i++)
- if (i != sh->pd_idx && (r6s && i != r6s->qd_idx)) {
+ if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
int dd_idx, pd_idx, j;
struct stripe_head *sh2;
@@ -2574,7 +2561,8 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
for (j = 0; j < conf->raid_disks; j++)
if (j != sh2->pd_idx &&
- (r6s && j != r6s->qd_idx) &&
+ (!r6s || j != raid6_next_disk(sh2->pd_idx,
+ sh2->disks)) &&
!test_bit(R5_Expanded, &sh2->dev[j].flags))
break;
if (j == conf->raid_disks) {
@@ -2583,12 +2571,12 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
}
release_stripe(sh2);
- /* done submitting copies, wait for them to complete */
- if (i + 1 >= sh->disks) {
- async_tx_ack(tx);
- dma_wait_for_async_tx(tx);
- }
}
+ /* done submitting copies, wait for them to complete */
+ if (tx) {
+ async_tx_ack(tx);
+ dma_wait_for_async_tx(tx);
+ }
}
/*
@@ -2855,7 +2843,7 @@ static void handle_stripe5(struct stripe_head *sh)
sh->disks = conf->raid_disks;
sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
conf->raid_disks);
- s.locked += handle_write_operations5(sh, 0, 1);
+ s.locked += handle_write_operations5(sh, 1, 1);
} else if (s.expanded &&
!test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
clear_bit(STRIPE_EXPAND_READY, &sh->state);
@@ -3182,7 +3170,7 @@ static void unplug_slaves(mddev_t *mddev)
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -3197,7 +3185,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid5_unplug_device(request_queue_t *q)
+static void raid5_unplug_device(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3216,7 +3204,7 @@ static void raid5_unplug_device(request_queue_t *q)
unplug_slaves(mddev);
}
-static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -3228,7 +3216,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -3267,7 +3255,7 @@ static int raid5_congested(void *data, int bits)
/* We want read requests to align with chunks where possible,
* but write requests don't need to.
*/
-static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -3342,7 +3330,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
* first).
* If the read failed..
*/
-static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
+static void raid5_align_endio(struct bio *bi, int error)
{
struct bio* raid_bi = bi->bi_private;
mddev_t *mddev;
@@ -3350,8 +3338,6 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
mdk_rdev_t *rdev;
- if (bi->bi_size)
- return 1;
bio_put(bi);
mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
@@ -3362,22 +3348,21 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
- bio_endio(raid_bi, bytes, 0);
+ bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
- return 0;
+ return;
}
pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
add_bio_to_retry(raid_bi, conf);
- return 0;
}
static int bio_fits_rdev(struct bio *bi)
{
- request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+ struct request_queue *q = bdev_get_queue(bi->bi_bdev);
if ((bi->bi_size>>9) > q->max_sectors)
return 0;
@@ -3396,7 +3381,7 @@ static int bio_fits_rdev(struct bio *bi)
}
-static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
+static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3466,7 +3451,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
}
-static int make_request(request_queue_t *q, struct bio * bi)
+static int make_request(struct request_queue *q, struct bio * bi)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3478,7 +3463,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
int remaining;
if (unlikely(bio_barrier(bi))) {
- bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
+ bio_endio(bi, -EOPNOTSUPP);
return 0;
}
@@ -3594,12 +3579,11 @@ static int make_request(request_queue_t *q, struct bio * bi)
remaining = --bi->bi_phys_segments;
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
- int bytes = bi->bi_size;
if ( rw == WRITE )
md_write_end(mddev);
- bi->bi_size = 0;
- bi->bi_end_io(bi, bytes,
+
+ bi->bi_end_io(bi,
test_bit(BIO_UPTODATE, &bi->bi_flags)
? 0 : -EIO);
}
@@ -3877,10 +3861,8 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
remaining = --raid_bio->bi_phys_segments;
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
- int bytes = raid_bio->bi_size;
- raid_bio->bi_size = 0;
- raid_bio->bi_end_io(raid_bio, bytes,
+ raid_bio->bi_end_io(raid_bio,
test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
? 0 : -EIO);
}
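
raid5.c carries the largest share of the conversion plus several standalone fixes: ops_complete_biofill() now uses test_and_clear_bit(R5_Wantfill, ...) and unconditionally sets STRIPE_HANDLE; handle_stripe_expansion() repairs an inverted test ((r6s && i != r6s->qd_idx) is always false on raid5, where r6s is NULL, so every disk was skipped, while (!r6s || ...) is the intended logic) and acks/waits on the final DMA descriptor once, after the loop. The kmem_cache_create() calls also lose an argument, reflecting the removal of slab destructors from the kernel:

sc = kmem_cache_create(name, size, 0, 0, NULL, NULL);	/* before: ctor, dtor */
sc = kmem_cache_create(name, size, 0, 0, NULL);		/* after: dtor is gone */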