Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bitmap.c      |  34
-rw-r--r--  drivers/md/md.c          | 636
-rw-r--r--  drivers/md/multipath.c   |  31
-rw-r--r--  drivers/md/raid1.c       | 216
-rw-r--r--  drivers/md/raid10.c      |  63
-rw-r--r--  drivers/md/raid5.c       | 261
-rw-r--r--  drivers/md/raid6main.c   |  40
7 files changed, 952 insertions(+), 329 deletions(-)
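
The dominant pattern across this diff is mechanical: the separate rdev->faulty and rdev->in_sync fields are replaced by named bits (Faulty, In_sync, WriteMostly, BarriersNotsupp) in a single rdev->flags word, tested and updated with the kernel bitops. A minimal userspace sketch of that pattern follows; the non-atomic bitop stand-ins, the cut-down struct rdev, and main() are illustrative assumptions, not kernel code, and the enum values are arbitrary.

/* Userspace illustration of the rdev->flags conversion seen throughout
 * this diff.  The kernel's set_bit()/clear_bit()/test_bit() are atomic;
 * these stand-ins are not. */
#include <stdio.h>

enum rdev_flag_bits { Faulty, In_sync, WriteMostly, BarriersNotsupp };

struct rdev { unsigned long flags; };

static void set_bit(int nr, unsigned long *addr)   { *addr |=  (1UL << nr); }
static void clear_bit(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static int  test_bit(int nr, const unsigned long *addr) { return (*addr >> nr) & 1; }

int main(void)
{
	struct rdev rdev = { .flags = 0 };

	set_bit(In_sync, &rdev.flags);    /* was: rdev->in_sync = 1; */
	set_bit(Faulty, &rdev.flags);     /* was: rdev->faulty = 1;  */
	clear_bit(In_sync, &rdev.flags);  /* was: rdev->in_sync = 0; */

	printf("faulty=%d in_sync=%d\n",
	       test_bit(Faulty, &rdev.flags),
	       test_bit(In_sync, &rdev.flags));
	return 0;
}
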
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 01654fc..5131530 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -21,7 +21,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -272,7 +271,8 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
return ERR_PTR(-ENOMEM);
ITERATE_RDEV(mddev, rdev, tmp) {
- if (! rdev->in_sync || rdev->faulty)
+ if (! test_bit(In_sync, &rdev->flags)
+ || test_bit(Faulty, &rdev->flags))
continue;
target = (rdev->sb_offset << 1) + offset + index * (PAGE_SIZE/512);
@@ -292,7 +292,8 @@ static int write_sb_page(mddev_t *mddev, long offset, struct page *page, int wai
struct list_head *tmp;
ITERATE_RDEV(mddev, rdev, tmp)
- if (rdev->in_sync && !rdev->faulty)
+ if (test_bit(In_sync, &rdev->flags)
+ && !test_bit(Faulty, &rdev->flags))
md_super_write(mddev, rdev,
(rdev->sb_offset<<1) + offset
+ page->index * (PAGE_SIZE/512),
@@ -300,7 +301,7 @@ static int write_sb_page(mddev_t *mddev, long offset, struct page *page, int wai
page);
if (wait)
- wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+ md_super_wait(mddev);
return 0;
}
@@ -481,7 +482,8 @@ static int bitmap_read_sb(struct bitmap *bitmap)
/* verify that the bitmap-specific fields are valid */
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
reason = "bad magic";
- else if (sb->version != cpu_to_le32(BITMAP_MAJOR))
+ else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
+ le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
reason = "unrecognized superblock version";
else if (chunksize < 512 || chunksize > (1024 * 1024 * 4))
reason = "bitmap chunksize out of range (512B - 4MB)";
@@ -526,6 +528,8 @@ success:
bitmap->daemon_lastrun = jiffies;
bitmap->max_write_behind = write_behind;
bitmap->flags |= sb->state;
+ if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
+ bitmap->flags |= BITMAP_HOSTENDIAN;
bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
if (sb->state & BITMAP_STALE)
bitmap->events_cleared = bitmap->mddev->events;
@@ -763,7 +767,10 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
/* set the bit */
kaddr = kmap_atomic(page, KM_USER0);
- set_bit(bit, kaddr);
+ if (bitmap->flags & BITMAP_HOSTENDIAN)
+ set_bit(bit, kaddr);
+ else
+ ext2_set_bit(bit, kaddr);
kunmap_atomic(kaddr, KM_USER0);
PRINTK("set file bit %lu page %lu\n", bit, page->index);
@@ -821,8 +828,7 @@ int bitmap_unplug(struct bitmap *bitmap)
wake_up_process(bitmap->writeback_daemon->tsk));
spin_unlock_irq(&bitmap->write_lock);
} else
- wait_event(bitmap->mddev->sb_wait,
- atomic_read(&bitmap->mddev->pending_writes)==0);
+ md_super_wait(bitmap->mddev);
}
return 0;
}
@@ -890,6 +896,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
oldindex = ~0L;
for (i = 0; i < chunks; i++) {
+ int b;
index = file_page_index(i);
bit = file_page_offset(i);
if (index != oldindex) { /* this is a new page, read it in */
@@ -938,7 +945,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
bitmap->filemap[bitmap->file_pages++] = page;
}
- if (test_bit(bit, page_address(page))) {
+ if (bitmap->flags & BITMAP_HOSTENDIAN)
+ b = test_bit(bit, page_address(page));
+ else
+ b = ext2_test_bit(bit, page_address(page));
+ if (b) {
/* if the disk bit is set, set the memory bit */
bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap),
((i+1) << (CHUNK_BLOCK_SHIFT(bitmap)) >= start)
@@ -1096,7 +1107,10 @@ int bitmap_daemon_work(struct bitmap *bitmap)
-1);
/* clear the bit */
- clear_bit(file_page_offset(j), page_address(page));
+ if (bitmap->flags & BITMAP_HOSTENDIAN)
+ clear_bit(file_page_offset(j), page_address(page));
+ else
+ ext2_clear_bit(file_page_offset(j), page_address(page));
}
}
spin_unlock_irqrestore(&bitmap->lock, flags);
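
The bitmap.c hunks above introduce a choice between host-endian bitmaps (BITMAP_HOSTENDIAN, plain set_bit/test_bit/clear_bit on native longs) and the default ext2-style little-endian bit layout, which keeps the on-disk bitmap portable between big- and little-endian hosts. A short sketch of the little-endian addressing follows, under the assumption that ext2_set_bit treats bit N as bit N%8 of byte N/8; le_set_bit and main() are illustrative, not the kernel implementation.

/* Why ext2-style bit numbering is endian-neutral: it addresses bytes,
 * never native words, so the same bit lands in the same on-disk byte on
 * every architecture. */
#include <stdio.h>
#include <string.h>

static void le_set_bit(int nr, unsigned char *addr)
{
	addr[nr / 8] |= 1u << (nr % 8);   /* byte-granular, CPU-independent */
}

int main(void)
{
	unsigned char page[8];

	memset(page, 0, sizeof(page));
	le_set_bit(9, page);                  /* byte 1, bit 1 on any host */
	printf("byte 1 = 0x%02x\n", page[1]); /* prints 0x02 */
	return 0;
}
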
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9ecf51e..adf960d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -131,6 +131,8 @@ static ctl_table raid_root_table[] = {
static struct block_device_operations md_fops;
+static int start_readonly;
+
/*
* Enables iteration over all existing md arrays
* all_mddevs_lock protects this list.
@@ -181,7 +183,7 @@ static void mddev_put(mddev_t *mddev)
if (!mddev->raid_disks && list_empty(&mddev->disks)) {
list_del(&mddev->all_mddevs);
blk_put_queue(mddev->queue);
- kfree(mddev);
+ kobject_unregister(&mddev->kobj);
}
spin_unlock(&all_mddevs_lock);
}
@@ -330,18 +332,46 @@ static void free_disk_sb(mdk_rdev_t * rdev)
static int super_written(struct bio *bio, unsigned int bytes_done, int error)
{
mdk_rdev_t *rdev = bio->bi_private;
+ mddev_t *mddev = rdev->mddev;
if (bio->bi_size)
return 1;
if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
- md_error(rdev->mddev, rdev);
+ md_error(mddev, rdev);
- if (atomic_dec_and_test(&rdev->mddev->pending_writes))
- wake_up(&rdev->mddev->sb_wait);
+ if (atomic_dec_and_test(&mddev->pending_writes))
+ wake_up(&mddev->sb_wait);
bio_put(bio);
return 0;
}
+static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
+{
+ struct bio *bio2 = bio->bi_private;
+ mdk_rdev_t *rdev = bio2->bi_private;
+ mddev_t *mddev = rdev->mddev;
+ if (bio->bi_size)
+ return 1;
+
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+ error == -EOPNOTSUPP) {
+ unsigned long flags;
+ /* barriers don't appear to be supported :-( */
+ set_bit(BarriersNotsupp, &rdev->flags);
+ mddev->barriers_work = 0;
+ spin_lock_irqsave(&mddev->write_lock, flags);
+ bio2->bi_next = mddev->biolist;
+ mddev->biolist = bio2;
+ spin_unlock_irqrestore(&mddev->write_lock, flags);
+ wake_up(&mddev->sb_wait);
+ bio_put(bio);
+ return 0;
+ }
+ bio_put(bio2);
+ bio->bi_private = rdev;
+ return super_written(bio, bytes_done, error);
+}
+
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page)
{
@@ -350,16 +380,54 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
* and decrement it on completion, waking up sb_wait
* if zero is reached.
* If an error occurred, call md_error
+ *
+ * As we might need to resubmit the request if BIO_RW_BARRIER
+ * causes ENOTSUPP, we allocate a spare bio...
*/
struct bio *bio = bio_alloc(GFP_NOIO, 1);
+ int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
bio->bi_bdev = rdev->bdev;
bio->bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
+ bio->bi_rw = rw;
+
atomic_inc(&mddev->pending_writes);
- submit_bio((1<<BIO_RW)|(1<<BIO_RW_SYNC), bio);
+ if (!test_bit(BarriersNotsupp, &rdev->flags)) {
+ struct bio *rbio;
+ rw |= (1<<BIO_RW_BARRIER);
+ rbio = bio_clone(bio, GFP_NOIO);
+ rbio->bi_private = bio;
+ rbio->bi_end_io = super_written_barrier;
+ submit_bio(rw, rbio);
+ } else
+ submit_bio(rw, bio);
+}
+
+void md_super_wait(mddev_t *mddev)
+{
+ /* wait for all superblock writes that were scheduled to complete.
+ * if any had to be retried (due to BARRIER problems), retry them
+ */
+ DEFINE_WAIT(wq);
+ for(;;) {
+ prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&mddev->pending_writes)==0)
+ break;
+ while (mddev->biolist) {
+ struct bio *bio;
+ spin_lock_irq(&mddev->write_lock);
+ bio = mddev->biolist;
+ mddev->biolist = bio->bi_next ;
+ bio->bi_next = NULL;
+ spin_unlock_irq(&mddev->write_lock);
+ submit_bio(bio->bi_rw, bio);
+ }
+ schedule();
+ }
+ finish_wait(&mddev->sb_wait, &wq);
}
static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
@@ -610,7 +678,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
rdev->raid_disk = -1;
- rdev->in_sync = 0;
+ rdev->flags = 0;
if (mddev->raid_disks == 0) {
mddev->major_version = 0;
mddev->minor_version = sb->minor_version;
@@ -671,21 +739,19 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
return 0;
if (mddev->level != LEVEL_MULTIPATH) {
- rdev->faulty = 0;
- rdev->flags = 0;
desc = sb->disks + rdev->desc_nr;
if (desc->state & (1<<MD_DISK_FAULTY))
- rdev->faulty = 1;
+ set_bit(Faulty, &rdev->flags);
else if (desc->state & (1<<MD_DISK_SYNC) &&
desc->raid_disk < mddev->raid_disks) {
- rdev->in_sync = 1;
+ set_bit(In_sync, &rdev->flags);
rdev->raid_disk = desc->raid_disk;
}
if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
} else /* MULTIPATH are always insync */
- rdev->in_sync = 1;
+ set_bit(In_sync, &rdev->flags);
return 0;
}
@@ -699,6 +765,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
mdk_rdev_t *rdev2;
int next_spare = mddev->raid_disks;
+
/* make rdev->sb match mddev data..
*
* 1/ zero out disks
@@ -758,23 +825,27 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->disks[0].state = (1<<MD_DISK_REMOVED);
ITERATE_RDEV(mddev,rdev2,tmp) {
mdp_disk_t *d;
- if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
- rdev2->desc_nr = rdev2->raid_disk;
+ int desc_nr;
+ if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
+ && !test_bit(Faulty, &rdev2->flags))
+ desc_nr = rdev2->raid_disk;
else
- rdev2->desc_nr = next_spare++;
+ desc_nr = next_spare++;
+ rdev2->desc_nr = desc_nr;
d = &sb->disks[rdev2->desc_nr];
nr_disks++;
d->number = rdev2->desc_nr;
d->major = MAJOR(rdev2->bdev->bd_dev);
d->minor = MINOR(rdev2->bdev->bd_dev);
- if (rdev2->raid_disk >= 0 && rdev->in_sync && !rdev2->faulty)
+ if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
+ && !test_bit(Faulty, &rdev2->flags))
d->raid_disk = rdev2->raid_disk;
else
d->raid_disk = rdev2->desc_nr; /* compatibility */
- if (rdev2->faulty) {
+ if (test_bit(Faulty, &rdev2->flags)) {
d->state = (1<<MD_DISK_FAULTY);
failed++;
- } else if (rdev2->in_sync) {
+ } else if (test_bit(In_sync, &rdev2->flags)) {
d->state = (1<<MD_DISK_ACTIVE);
d->state |= (1<<MD_DISK_SYNC);
active++;
@@ -787,7 +858,6 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
if (test_bit(WriteMostly, &rdev2->flags))
d->state |= (1<<MD_DISK_WRITEMOSTLY);
}
-
/* now set the "removed" and "faulty" bits on any missing devices */
for (i=0 ; i < mddev->raid_disks ; i++) {
mdp_disk_t *d = &sb->disks[i];
@@ -944,7 +1014,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
rdev->raid_disk = -1;
- rdev->in_sync = 0;
+ rdev->flags = 0;
if (mddev->raid_disks == 0) {
mddev->major_version = 1;
mddev->patch_version = 0;
@@ -996,22 +1066,19 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
switch(role) {
case 0xffff: /* spare */
- rdev->faulty = 0;
break;
case 0xfffe: /* faulty */
- rdev->faulty = 1;
+ set_bit(Faulty, &rdev->flags);
break;
default:
- rdev->in_sync = 1;
- rdev->faulty = 0;
+ set_bit(In_sync, &rdev->flags);
rdev->raid_disk = role;
break;
}
- rdev->flags = 0;
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
} else /* MULTIPATH are always insync */
- rdev->in_sync = 1;
+ set_bit(In_sync, &rdev->flags);
return 0;
}
@@ -1055,9 +1122,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
ITERATE_RDEV(mddev,rdev2,tmp) {
i = rdev2->desc_nr;
- if (rdev2->faulty)
+ if (test_bit(Faulty, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(0xfffe);
- else if (rdev2->in_sync)
+ else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
sb->dev_roles[i] = cpu_to_le16(0xffff);
@@ -1115,6 +1182,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
mdk_rdev_t *same_pdev;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+ struct kobject *ko;
if (rdev->mddev) {
MD_BUG();
@@ -1143,10 +1211,22 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
if (find_rdev_nr(mddev, rdev->desc_nr))
return -EBUSY;
}
+ bdevname(rdev->bdev,b);
+ if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
+ return -ENOMEM;
list_add(&rdev->same_set, &mddev->disks);
rdev->mddev = mddev;
- printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b));
+ printk(KERN_INFO "md: bind<%s>\n", b);
+
+ rdev->kobj.parent = &mddev->kobj;
+ kobject_add(&rdev->kobj);
+
+ if (rdev->bdev->bd_part)
+ ko = &rdev->bdev->bd_part->kobj;
+ else
+ ko = &rdev->bdev->bd_disk->kobj;
+ sysfs_create_link(&rdev->kobj, ko, "block");
return 0;
}
@@ -1160,6 +1240,8 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
list_del_init(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
+ sysfs_remove_link(&rdev->kobj, "block");
+ kobject_del(&rdev->kobj);
}
/*
@@ -1215,7 +1297,7 @@ static void export_rdev(mdk_rdev_t * rdev)
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
unlock_rdev(rdev);
- kfree(rdev);
+ kobject_put(&rdev->kobj);
}
static void kick_rdev_from_array(mdk_rdev_t * rdev)
@@ -1287,7 +1369,8 @@ static void print_rdev(mdk_rdev_t *rdev)
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
- rdev->faulty, rdev->in_sync, rdev->desc_nr);
+ test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
+ rdev->desc_nr);
if (rdev->sb_loaded) {
printk(KERN_INFO "md: rdev superblock:\n");
print_sb((mdp_super_t*)page_address(rdev->sb_page));
@@ -1344,7 +1427,7 @@ static void md_update_sb(mddev_t * mddev)
int sync_req;
repeat:
- spin_lock(&mddev->write_lock);
+ spin_lock_irq(&mddev->write_lock);
sync_req = mddev->in_sync;
mddev->utime = get_seconds();
mddev->events ++;
@@ -1367,11 +1450,11 @@ repeat:
*/
if (!mddev->persistent) {
mddev->sb_dirty = 0;
- spin_unlock(&mddev->write_lock);
+ spin_unlock_irq(&mddev->write_lock);
wake_up(&mddev->sb_wait);
return;
}
- spin_unlock(&mddev->write_lock);
+ spin_unlock_irq(&mddev->write_lock);
dprintk(KERN_INFO
"md: updating %s RAID superblock on device (in sync %d)\n",
@@ -1381,11 +1464,11 @@ repeat:
ITERATE_RDEV(mddev,rdev,tmp) {
char b[BDEVNAME_SIZE];
dprintk(KERN_INFO "md: ");
- if (rdev->faulty)
+ if (test_bit(Faulty, &rdev->flags))
dprintk("(skipping faulty ");
dprintk("%s ", bdevname(rdev->bdev,b));
- if (!rdev->faulty) {
+ if (!test_bit(Faulty, &rdev->flags)) {
md_super_write(mddev,rdev,
rdev->sb_offset<<1, rdev->sb_size,
rdev->sb_page);
@@ -1399,21 +1482,106 @@ repeat:
/* only need to write one superblock... */
break;
}
- wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+ md_super_wait(mddev);
/* if there was a failure, sb_dirty was set to 1, and we re-write super */
- spin_lock(&mddev->write_lock);
+ spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync != sync_req|| mddev->sb_dirty == 1) {
/* have to write it out again */
- spin_unlock(&mddev->write_lock);
+ spin_unlock_irq(&mddev->write_lock);
goto repeat;
}
mddev->sb_dirty = 0;
- spin_unlock(&mddev->write_lock);
+ spin_unlock_irq(&mddev->write_lock);
wake_up(&mddev->sb_wait);
}
+struct rdev_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(mdk_rdev_t *, char *);
+ ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
+};
+
+static ssize_t
+state_show(mdk_rdev_t *rdev, char *page)
+{
+ char *sep = "";
+ int len=0;
+
+ if (test_bit(Faulty, &rdev->flags)) {
+ len+= sprintf(page+len, "%sfaulty",sep);
+ sep = ",";
+ }
+ if (test_bit(In_sync, &rdev->flags)) {
+ len += sprintf(page+len, "%sin_sync",sep);
+ sep = ",";
+ }
+ if (!test_bit(Faulty, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags)) {
+ len += sprintf(page+len, "%sspare", sep);
+ sep = ",";
+ }
+ return len+sprintf(page+len, "\n");
+}
+
+static struct rdev_sysfs_entry
+rdev_state = __ATTR_RO(state);
+
+static ssize_t
+super_show(mdk_rdev_t *rdev, char *page)
+{
+ if (rdev->sb_loaded && rdev->sb_size) {
+ memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
+ return rdev->sb_size;
+ } else
+ return 0;
+}
+static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
+
+static struct attribute *rdev_default_attrs[] = {
+ &rdev_state.attr,
+ &rdev_super.attr,
+ NULL,
+};
+static ssize_t
+rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+ struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
+ mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
+
+ if (!entry->show)
+ return -EIO;
+ return entry->show(rdev, page);
+}
+
+static ssize_t
+rdev_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
+ mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
+
+ if (!entry->store)
+ return -EIO;
+ return entry->store(rdev, page, length);
+}
+
+static void rdev_free(struct kobject *ko)
+{
+ mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
+ kfree(rdev);
+}
+static struct sysfs_ops rdev_sysfs_ops = {
+ .show = rdev_attr_show,
+ .store = rdev_attr_store,
+};
+static struct kobj_type rdev_ktype = {
+ .release = rdev_free,
+ .sysfs_ops = &rdev_sysfs_ops,
+ .default_attrs = rdev_default_attrs,
+};
+
/*
* Import a device. If 'super_format' >= 0, then sanity check the superblock
*
@@ -1445,11 +1613,15 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
if (err)
goto abort_free;
+ rdev->kobj.parent = NULL;
+ rdev->kobj.ktype = &rdev_ktype;
+ kobject_init(&rdev->kobj);
+
rdev->desc_nr = -1;
- rdev->faulty = 0;
- rdev->in_sync = 0;
+ rdev->flags = 0;
rdev->data_offset = 0;
atomic_set(&rdev->nr_pending, 0);
+ atomic_set(&rdev->read_errors, 0);
size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
if (!size) {
@@ -1537,7 +1709,7 @@ static void analyze_sbs(mddev_t * mddev)
if (mddev->level == LEVEL_MULTIPATH) {
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
- rdev->in_sync = 1;
+ set_bit(In_sync, &rdev->flags);
}
}
@@ -1551,6 +1723,162 @@ static void analyze_sbs(mddev_t * mddev)
}
+static ssize_t
+level_show(mddev_t *mddev, char *page)
+{
+ mdk_personality_t *p = mddev->pers;
+ if (p == NULL && mddev->raid_disks == 0)
+ return 0;
+ if (mddev->level >= 0)
+ return sprintf(page, "RAID-%d\n", mddev->level);
+ else
+ return sprintf(page, "%s\n", p->name);
+}
+
+static struct md_sysfs_entry md_level = __ATTR_RO(level);
+
+static ssize_t
+raid_disks_show(mddev_t *mddev, char *page)
+{
+ if (mddev->raid_disks == 0)
+ return 0;
+ return sprintf(page, "%d\n", mddev->raid_disks);
+}
+
+static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks);
+
+static ssize_t
+action_show(mddev_t *mddev, char *page)
+{
+ char *type = "idle";
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ type = "resync";
+ else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ type = "check";
+ else
+ type = "repair";
+ } else
+ type = "recover";
+ }
+ return sprintf(page, "%s\n", type);
+}
+
+static ssize_t
+action_store(mddev_t *mddev, const char *page, size_t len)
+{
+ if (!mddev->pers || !mddev->pers->sync_request)
+ return -EINVAL;
+
+ if (strcmp(page, "idle")==0 || strcmp(page, "idle\n")==0) {
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(mddev->sync_thread);
+ mddev->sync_thread = NULL;
+ mddev->recovery = 0;
+ }
+ return len;
+ }
+
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ return -EBUSY;
+ if (strcmp(page, "resync")==0 || strcmp(page, "resync\n")==0 ||
+ strcmp(page, "recover")==0 || strcmp(page, "recover\n")==0)
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ else {
+ if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0)
+ set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0)
+ return -EINVAL;
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ }
+ md_wakeup_thread(mddev->thread);
+ return len;
+}
+
+static ssize_t
+mismatch_cnt_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%llu\n",
+ (unsigned long long) mddev->resync_mismatches);
+}
+
+static struct md_sysfs_entry
+md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
+
+
+static struct md_sysfs_entry
+md_mismatches = __ATTR_RO(mismatch_cnt);
+
+static struct attribute *md_default_attrs[] = {
+ &md_level.attr,
+ &md_raid_disks.attr,
+ NULL,
+};
+
+static struct attribute *md_redundancy_attrs[] = {
+ &md_scan_mode.attr,
+ &md_mismatches.attr,
+ NULL,
+};
+static struct attribute_group md_redundancy_group = {
+ .name = NULL,
+ .attrs = md_redundancy_attrs,
+};
+
+
+static ssize_t
+md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+ struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
+ mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
+ ssize_t rv;
+
+ if (!entry->show)
+ return -EIO;
+ mddev_lock(mddev);
+ rv = entry->show(mddev, page);
+ mddev_unlock(mddev);
+ return rv;
+}
+
+static ssize_t
+md_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
+ mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
+ ssize_t rv;
+
+ if (!entry->store)
+ return -EIO;
+ mddev_lock(mddev);
+ rv = entry->store(mddev, page, length);
+ mddev_unlock(mddev);
+ return rv;
+}
+
+static void md_free(struct kobject *ko)
+{
+ mddev_t *mddev = container_of(ko, mddev_t, kobj);
+ kfree(mddev);
+}
+
+static struct sysfs_ops md_sysfs_ops = {
+ .show = md_attr_show,
+ .store = md_attr_store,
+};
+static struct kobj_type md_ktype = {
+ .release = md_free,
+ .sysfs_ops = &md_sysfs_ops,
+ .default_attrs = md_default_attrs,
+};
+
int mdp_major = 0;
static struct kobject *md_probe(dev_t dev, int *part, void *data)
@@ -1592,6 +1920,11 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
add_disk(disk);
mddev->gendisk = disk;
up(&disks_sem);
+ mddev->kobj.parent = &disk->kobj;
+ mddev->kobj.k_name = NULL;
+ snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
+ mddev->kobj.ktype = &md_ktype;
+ kobject_register(&mddev->kobj);
return NULL;
}
@@ -1663,7 +1996,7 @@ static int do_md_run(mddev_t * mddev)
/* devices must have minimum size of one chunk */
ITERATE_RDEV(mddev,rdev,tmp) {
- if (rdev->faulty)
+ if (test_bit(Faulty, &rdev->flags))
continue;
if (rdev->size < chunk_size / 1024) {
printk(KERN_WARNING
@@ -1691,7 +2024,7 @@ static int do_md_run(mddev_t * mddev)
* Also find largest hardsector size
*/
ITERATE_RDEV(mddev,rdev,tmp) {
- if (rdev->faulty)
+ if (test_bit(Faulty, &rdev->flags))
continue;
sync_blockdev(rdev->bdev);
invalidate_bdev(rdev->bdev, 0);
@@ -1715,6 +2048,10 @@ static int do_md_run(mddev_t * mddev)
mddev->recovery = 0;
mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
+ mddev->barriers_work = 1;
+
+ if (start_readonly)
+ mddev->ro = 2; /* read-only, but switch on first write */
/* before we start the array running, initialise the bitmap */
err = bitmap_create(mddev);
@@ -1730,12 +2067,24 @@ static int do_md_run(mddev_t * mddev)
bitmap_destroy(mddev);
return err;
}
+ if (mddev->pers->sync_request)
+ sysfs_create_group(&mddev->kobj, &md_redundancy_group);
+ else if (mddev->ro == 2) /* auto-readonly not meaningful */
+ mddev->ro = 0;
+
atomic_set(&mddev->writes_pending,0);
mddev->safemode = 0;
mddev->safemode_timer.function = md_safemode_timeout;
mddev->safemode_timer.data = (unsigned long) mddev;
mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
mddev->in_sync = 1;
+
+ ITERATE_RDEV(mddev,rdev,tmp)
+ if (rdev->raid_disk >= 0) {
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+ }
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -1821,16 +2170,19 @@ static int do_md_stop(mddev_t * mddev, int ro)
if (ro) {
err = -ENXIO;
- if (mddev->ro)
+ if (mddev->ro==1)
goto out;
mddev->ro = 1;
} else {
bitmap_flush(mddev);
- wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+ md_super_wait(mddev);
if (mddev->ro)
set_disk_ro(disk, 0);
blk_queue_make_request(mddev->queue, md_fail_request);
mddev->pers->stop(mddev);
+ if (mddev->pers->sync_request)
+ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+
module_put(mddev->pers->owner);
mddev->pers = NULL;
if (mddev->ro)
@@ -1857,9 +2209,18 @@ static int do_md_stop(mddev_t * mddev, int ro)
* Free resources if final stop
*/
if (!ro) {
+ mdk_rdev_t *rdev;
+ struct list_head *tmp;
struct gendisk *disk;
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
+ ITERATE_RDEV(mddev,rdev,tmp)
+ if (rdev->raid_disk >= 0) {
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ }
+
export_array(mddev);
mddev->array_size = 0;
@@ -2012,7 +2373,7 @@ static int autostart_array(dev_t startdev)
return err;
}
- if (start_rdev->faulty) {
+ if (test_bit(Faulty, &start_rdev->flags)) {
printk(KERN_WARNING
"md: can not autostart based on faulty %s!\n",
bdevname(start_rdev->bdev,b));
@@ -2071,11 +2432,11 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
nr=working=active=failed=spare=0;
ITERATE_RDEV(mddev,rdev,tmp) {
nr++;
- if (rdev->faulty)
+ if (test_bit(Faulty, &rdev->flags))
failed++;
else {
working++;
- if (rdev->in_sync)
+ if (test_bit(In_sync, &rdev->flags))
active++;
else
spare++;
@@ -2166,9 +2527,9 @@ static int get_disk_info(mddev_t * mddev, void __user * arg)
info.minor = MINOR(rdev->bdev->bd_dev);
info.raid_disk = rdev->raid_disk;
info.state = 0;
- if (rdev->faulty)
+ if (test_bit(Faulty, &rdev->flags))
info.state |= (1<<MD_DISK_FAULTY);
- else if (rdev->in_sync) {
+ else if (test_bit(In_sync, &rdev->flags)) {
info.state |= (1<<MD_DISK_ACTIVE);
info.state |= (1<<MD_DISK_SYNC);
}
@@ -2261,7 +2622,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
validate_super(mddev, rdev);
rdev->saved_raid_disk = rdev->raid_disk;
- rdev->in_sync = 0; /* just to be sure */
+ clear_bit(In_sync, &rdev->flags); /* just to be sure */
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
@@ -2299,11 +2660,11 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
else
rdev->raid_disk = -1;
- rdev->faulty = 0;
+ rdev->flags = 0;
+
if (rdev->raid_disk < mddev->raid_disks)
- rdev->in_sync = (info->state & (1<<MD_DISK_SYNC));
- else
- rdev->in_sync = 0;
+ if (info->state & (1<<MD_DISK_SYNC))
+ set_bit(In_sync, &rdev->flags);
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
@@ -2402,14 +2763,14 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
goto abort_export;
}
- if (rdev->faulty) {
+ if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
"md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
goto abort_export;
}
- rdev->in_sync = 0;
+ clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
bind_rdev_to_array(rdev, mddev);
@@ -2929,12 +3290,22 @@ static int md_ioctl(struct inode *inode, struct file *file,
/*
* The remaining ioctls are changing the state of the
- * superblock, so we do not allow read-only arrays
- * here:
+ * superblock, so we do not allow them on read-only arrays.
+ * However non-MD ioctls (e.g. get-size) will still come through
+ * here and hit the 'default' below, so only disallow
+ * 'md' ioctls, and switch to rw mode if started auto-readonly.
*/
- if (mddev->ro) {
- err = -EROFS;
- goto abort_unlock;
+ if (_IOC_TYPE(cmd) == MD_MAJOR &&
+ mddev->ro && mddev->pers) {
+ if (mddev->ro == 2) {
+ mddev->ro = 0;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+
+ } else {
+ err = -EROFS;
+ goto abort_unlock;
+ }
}
switch (cmd)
@@ -3064,21 +3435,17 @@ static int md_thread(void * arg)
*/
allow_signal(SIGKILL);
- complete(thread->event);
while (!kthread_should_stop()) {
- void (*run)(mddev_t *);
- wait_event_interruptible_timeout(thread->wqueue,
- test_bit(THREAD_WAKEUP, &thread->flags)
- || kthread_should_stop(),
- thread->timeout);
+ wait_event_timeout(thread->wqueue,
+ test_bit(THREAD_WAKEUP, &thread->flags)
+ || kthread_should_stop(),
+ thread->timeout);
try_to_freeze();
clear_bit(THREAD_WAKEUP, &thread->flags);
- run = thread->run;
- if (run)
- run(thread->mddev);
+ thread->run(thread->mddev);
}
return 0;
@@ -3097,7 +3464,6 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
const char *name)
{
mdk_thread_t *thread;
- struct completion event;
thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL);
if (!thread)
@@ -3106,18 +3472,14 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
memset(thread, 0, sizeof(mdk_thread_t));
init_waitqueue_head(&thread->wqueue);
- init_completion(&event);
- thread->event = &event;
thread->run = run;
thread->mddev = mddev;
- thread->name = name;
thread->timeout = MAX_SCHEDULE_TIMEOUT;
thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
}
- wait_for_completion(&event);
return thread;
}
@@ -3136,7 +3498,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
return;
}
- if (!rdev || rdev->faulty)
+ if (!rdev || test_bit(Faulty, &rdev->flags))
return;
/*
dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
@@ -3322,8 +3684,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%s : %sactive", mdname(mddev),
mddev->pers ? "" : "in");
if (mddev->pers) {
- if (mddev->ro)
+ if (mddev->ro==1)
seq_printf(seq, " (read-only)");
+ if (mddev->ro==2)
+ seq_printf(seq, "(auto-read-only)");
seq_printf(seq, " %s", mddev->pers->name);
}
@@ -3334,7 +3698,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
bdevname(rdev->bdev,b), rdev->desc_nr);
if (test_bit(WriteMostly, &rdev->flags))
seq_printf(seq, "(W)");
- if (rdev->faulty) {
+ if (test_bit(Faulty, &rdev->flags)) {
seq_printf(seq, "(F)");
continue;
} else if (rdev->raid_disk < 0)
@@ -3363,11 +3727,15 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (mddev->pers) {
mddev->pers->status (seq, mddev);
seq_printf(seq, "\n ");
- if (mddev->curr_resync > 2) {
- status_resync (seq, mddev);
- seq_printf(seq, "\n ");
- } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
- seq_printf(seq, " resync=DELAYED\n ");
+ if (mddev->pers->sync_request) {
+ if (mddev->curr_resync > 2) {
+ status_resync (seq, mddev);
+ seq_printf(seq, "\n ");
+ } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
+ seq_printf(seq, "\tresync=DELAYED\n ");
+ else if (mddev->recovery_cp < MaxSector)
+ seq_printf(seq, "\tresync=PENDING\n ");
+ }
} else
seq_printf(seq, "\n ");
@@ -3504,15 +3872,22 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
if (bio_data_dir(bi) != WRITE)
return;
+ BUG_ON(mddev->ro == 1);
+ if (mddev->ro == 2) {
+ /* need to switch to read/write */
+ mddev->ro = 0;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
atomic_inc(&mddev->writes_pending);
if (mddev->in_sync) {
- spin_lock(&mddev->write_lock);
+ spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
mddev->sb_dirty = 1;
md_wakeup_thread(mddev->thread);
}
- spin_unlock(&mddev->write_lock);
+ spin_unlock_irq(&mddev->write_lock);
}
wait_event(mddev->sb_wait, mddev->sb_dirty==0);
}
@@ -3568,9 +3943,7 @@ static void md_do_sync(mddev_t *mddev)
mddev->curr_resync = 2;
try_again:
- if (signal_pending(current) ||
- kthread_should_stop()) {
- flush_signals(current);
+ if (kthread_should_stop()) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
goto skip;
}
@@ -3590,9 +3963,8 @@ static void md_do_sync(mddev_t *mddev)
* time 'round when curr_resync == 2
*/
continue;
- prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
- if (!signal_pending(current) &&
- !kthread_should_stop() &&
+ prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
+ if (!kthread_should_stop() &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying resync of %s"
" until %s has finished resync (they"
@@ -3608,12 +3980,13 @@ static void md_do_sync(mddev_t *mddev)
}
} while (mddev->curr_resync < 2);
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* resync follows the size requested by the personality,
* which defaults to physical size, but can be virtual size
*/
max_sectors = mddev->resync_max_sectors;
- else
+ mddev->resync_mismatches = 0;
+ } else
/* recovery follows the physical size of devices */
max_sectors = mddev->size << 1;
@@ -3626,7 +3999,8 @@ static void md_do_sync(mddev_t *mddev)
is_mddev_idle(mddev); /* this also initializes IO event counters */
/* we don't use the checkpoint if there's a bitmap */
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap)
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap
+ && ! test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
j = mddev->recovery_cp;
else
j = 0;
@@ -3699,13 +4073,12 @@ static void md_do_sync(mddev_t *mddev)
}
- if (signal_pending(current) || kthread_should_stop()) {
+ if (kthread_should_stop()) {
/*
* got a signal, exit.
*/
printk(KERN_INFO
"md: md_do_sync() got signal ... exiting\n");
- flush_signals(current);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
goto out;
}
@@ -3727,7 +4100,7 @@ static void md_do_sync(mddev_t *mddev)
if (currspeed > sysctl_speed_limit_min) {
if ((currspeed > sysctl_speed_limit_max) ||
!is_mddev_idle(mddev)) {
- msleep_interruptible(250);
+ msleep(250);
goto repeat;
}
}
@@ -3820,7 +4193,7 @@ void md_check_recovery(mddev_t *mddev)
if (mddev_trylock(mddev)==0) {
int spares =0;
- spin_lock(&mddev->write_lock);
+ spin_lock_irq(&mddev->write_lock);
if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
!mddev->in_sync && mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
@@ -3828,7 +4201,7 @@ void md_check_recovery(mddev_t *mddev)
}
if (mddev->safemode == 1)
mddev->safemode = 0;
- spin_unlock(&mddev->write_lock);
+ spin_unlock_irq(&mddev->write_lock);
if (mddev->sb_dirty)
md_update_sb(mddev);
@@ -3864,9 +4237,13 @@ void md_check_recovery(mddev_t *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
- if (mddev->recovery)
- /* probably just the RECOVERY_NEEDED flag */
- mddev->recovery = 0;
+ /* Clear some bits that don't mean anything, but
+ * might be left set
+ */
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
+ clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
/* no recovery is running.
* remove any failed drives, then
@@ -3876,31 +4253,41 @@ void md_check_recovery(mddev_t *mddev)
*/
ITERATE_RDEV(mddev,rdev,rtmp)
if (rdev->raid_disk >= 0 &&
- (rdev->faulty || ! rdev->in_sync) &&
+ (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) {
- if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0)
+ if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
+ char nm[20];
+ sprintf(nm,"rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
rdev->raid_disk = -1;
+ }
}
if (mddev->degraded) {
ITERATE_RDEV(mddev,rdev,rtmp)
if (rdev->raid_disk < 0
- && !rdev->faulty) {
- if (mddev->pers->hot_add_disk(mddev,rdev))
+ && !test_bit(Faulty, &rdev->flags)) {
+ if (mddev->pers->hot_add_disk(mddev,rdev)) {
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
spares++;
- else
+ } else
break;
}
}
- if (!spares && (mddev->recovery_cp == MaxSector )) {
- /* nothing we can do ... */
+ if (spares) {
+ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ } else if (mddev->recovery_cp < MaxSector) {
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+ /* nothing to be done ... */
goto unlock;
- }
+
if (mddev->pers->sync_request) {
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- if (!spares)
- set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
if (spares && mddev->bitmap && ! mddev->bitmap->file) {
/* We are adding a device or devices to an array
* which has the bitmap stored on all devices.
@@ -3975,7 +4362,7 @@ static int __init md_init(void)
" MD_SB_DISKS=%d\n",
MD_MAJOR_VERSION, MD_MINOR_VERSION,
MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
- printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR,
+ printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
BITMAP_MINOR);
if (register_blkdev(MAJOR_NR, "md"))
@@ -4039,7 +4426,7 @@ static void autostart_arrays(int part)
if (IS_ERR(rdev))
continue;
- if (rdev->faulty) {
+ if (test_bit(Faulty, &rdev->flags)) {
MD_BUG();
continue;
}
@@ -4086,6 +4473,23 @@ static __exit void md_exit(void)
module_init(md_init)
module_exit(md_exit)
+static int get_ro(char *buffer, struct kernel_param *kp)
+{
+ return sprintf(buffer, "%d", start_readonly);
+}
+static int set_ro(const char *val, struct kernel_param *kp)
+{
+ char *e;
+ int num = simple_strtoul(val, &e, 10);
+ if (*val && (*e == '\0' || *e == '\n')) {
+ start_readonly = num;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
+
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
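
Beyond the flag conversion, the md.c changes add barrier-aware superblock writes (md_super_write/md_super_wait), the start_ro module parameter with the auto-read-only mddev->ro == 2 state, and a sysfs hierarchy in which each mddev and rdev embeds a kobject whose attributes dispatch through container_of(). The sketch below reduces that sysfs dispatch to a runnable userspace form; the simplified struct definitions and int return types are assumptions made for brevity.

/* Userspace sketch of the md_attr_show() pattern added above: recover the
 * enclosing objects from the embedded struct attribute and struct kobject. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute { const char *name; };
struct kobject { const char *name; };

struct mddev {
	int level;
	struct kobject kobj;
};

struct md_sysfs_entry {
	struct attribute attr;
	int (*show)(struct mddev *, char *);
};

static int level_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "RAID-%d\n", mddev->level);
}

static int md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry =
		container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	return entry->show(mddev, page);
}

int main(void)
{
	struct mddev md = { .level = 1, .kobj = { "md0" } };
	struct md_sysfs_entry md_level = { { "level" }, level_show };
	char buf[32];

	md_attr_show(&md.kobj, &md_level.attr, buf);
	fputs(buf, stdout);               /* prints "RAID-1" */
	return 0;
}
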
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index c06f447..145cdc5 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -63,8 +63,8 @@ static int multipath_map (multipath_conf_t *conf)
rcu_read_lock();
for (i = 0; i < disks; i++) {
- mdk_rdev_t *rdev = conf->multipaths[i].rdev;
- if (rdev && rdev->in_sync) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
+ if (rdev && test_bit(In_sync, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
return i;
@@ -139,8 +139,9 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = conf->multipaths[i].rdev;
- if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags)
+ && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
@@ -211,7 +212,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s",
conf->multipaths[i].rdev &&
- conf->multipaths[i].rdev->in_sync ? "U" : "_");
+ test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
seq_printf (seq, "]");
}
@@ -224,8 +225,8 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = conf->multipaths[i].rdev;
- if (rdev && !rdev->faulty) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -265,10 +266,10 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
/*
* Mark disk as unusable
*/
- if (!rdev->faulty) {
+ if (!test_bit(Faulty, &rdev->flags)) {
char b[BDEVNAME_SIZE];
- rdev->in_sync = 0;
- rdev->faulty = 1;
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Faulty, &rdev->flags);
mddev->sb_dirty = 1;
conf->working_disks--;
printk(KERN_ALERT "multipath: IO failure on %s,"
@@ -298,7 +299,7 @@ static void print_multipath_conf (multipath_conf_t *conf)
tmp = conf->multipaths + i;
if (tmp->rdev)
printk(" disk%d, o:%d, dev:%s\n",
- i,!tmp->rdev->faulty,
+ i,!test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b));
}
}
@@ -330,8 +331,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
conf->working_disks++;
rdev->raid_disk = path;
- rdev->in_sync = 1;
- p->rdev = rdev;
+ set_bit(In_sync, &rdev->flags);
+ rcu_assign_pointer(p->rdev, rdev);
found = 1;
}
@@ -350,7 +351,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
rdev = p->rdev;
if (rdev) {
- if (rdev->in_sync ||
+ if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
printk(KERN_ERR "hot-remove-disk, slot %d is identified" " but is still operational!\n", number);
err = -EBUSY;
@@ -482,7 +483,7 @@ static int multipath_run (mddev_t *mddev)
mddev->queue->max_sectors > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
- if (!rdev->faulty)
+ if (!test_bit(Faulty, &rdev->flags))
conf->working_disks++;
}
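
The multipath.c changes pair each read of a multipath rdev pointer with rcu_dereference() inside an rcu_read_lock() section, and publish new paths with rcu_assign_pointer(), so hot-remove can free an rdev safely once readers drain. A single-threaded sketch of the access pattern follows; the no-op RCU macros compress the kernel primitives (which add memory barriers and grace-period tracking) down to plain operations, so this only illustrates the calling convention.

/* Reader/writer pattern from multipath_map()/multipath_add_disk(),
 * with stand-in RCU macros (no real concurrency here). */
#include <stdio.h>

#define rcu_read_lock()           do { } while (0)
#define rcu_read_unlock()         do { } while (0)
#define rcu_dereference(p)        (p)         /* kernel: read with barrier    */
#define rcu_assign_pointer(p, v)  ((p) = (v)) /* kernel: publish with barrier */

struct rdev { int in_sync; };
struct multipath { struct rdev *rdev; };

static int multipath_pick(struct multipath *paths, int n)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < n; i++) {
		struct rdev *rdev = rcu_dereference(paths[i].rdev);
		if (rdev && rdev->in_sync) {
			rcu_read_unlock();
			return i;          /* found a usable path */
		}
	}
	rcu_read_unlock();
	return -1;
}

int main(void)
{
	struct rdev good = { .in_sync = 1 };
	struct multipath paths[2] = { { NULL }, { NULL } };

	rcu_assign_pointer(paths[1].rdev, &good);  /* hot-add publishes it */
	printf("chose path %d\n", multipath_pick(paths, 2));
	return 0;
}
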
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e16f473..2da9d3b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -301,7 +301,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
- int mirror, behind;
+ int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
conf_t *conf = mddev_to_conf(r1_bio->mddev);
if (bio->bi_size)
@@ -311,47 +311,54 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
if (r1_bio->bios[mirror] == bio)
break;
- /*
- * this branch is our 'one mirror IO has finished' event handler:
- */
- if (!uptodate) {
- md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
- /* an I/O failed, we can't clear the bitmap */
- set_bit(R1BIO_Degraded, &r1_bio->state);
- } else
+ if (error == -ENOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
+ set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
+ set_bit(R1BIO_BarrierRetry, &r1_bio->state);
+ r1_bio->mddev->barriers_work = 0;
+ } else {
/*
- * Set R1BIO_Uptodate in our master bio, so that
- * we will return a good error code for to the higher
- * levels even if IO on some other mirrored buffer fails.
- *
- * The 'master' represents the composite IO operation to
- * user-side. So if something waits for IO, then it will
- * wait for the 'master' bio.
+ * this branch is our 'one mirror IO has finished' event handler:
*/
- set_bit(R1BIO_Uptodate, &r1_bio->state);
-
- update_head_pos(mirror, r1_bio);
-
- behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
- if (behind) {
- if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
- atomic_dec(&r1_bio->behind_remaining);
-
- /* In behind mode, we ACK the master bio once the I/O has safely
- * reached all non-writemostly disks. Setting the Returned bit
- * ensures that this gets done only once -- we don't ever want to
- * return -EIO here, instead we'll wait */
-
- if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
- test_bit(R1BIO_Uptodate, &r1_bio->state)) {
- /* Maybe we can return now */
- if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
- struct bio *mbio = r1_bio->master_bio;
- PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
- (unsigned long long) mbio->bi_sector,
- (unsigned long long) mbio->bi_sector +
- (mbio->bi_size >> 9) - 1);
- bio_endio(mbio, mbio->bi_size, 0);
+ r1_bio->bios[mirror] = NULL;
+ bio_put(bio);
+ if (!uptodate) {
+ md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
+ /* an I/O failed, we can't clear the bitmap */
+ set_bit(R1BIO_Degraded, &r1_bio->state);
+ } else
+ /*
+ * Set R1BIO_Uptodate in our master bio, so that
+ * we will return a good error code to the higher
+ * levels even if IO on some other mirrored buffer fails.
+ *
+ * The 'master' represents the composite IO operation to
+ * user-side. So if something waits for IO, then it will
+ * wait for the 'master' bio.
+ */
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
+
+ update_head_pos(mirror, r1_bio);
+
+ if (behind) {
+ if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
+ atomic_dec(&r1_bio->behind_remaining);
+
+ /* In behind mode, we ACK the master bio once the I/O has safely
+ * reached all non-writemostly disks. Setting the Returned bit
+ * ensures that this gets done only once -- we don't ever want to
+ * return -EIO here, instead we'll wait */
+
+ if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
+ test_bit(R1BIO_Uptodate, &r1_bio->state)) {
+ /* Maybe we can return now */
+ if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
+ struct bio *mbio = r1_bio->master_bio;
+ PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
+ (unsigned long long) mbio->bi_sector,
+ (unsigned long long) mbio->bi_sector +
+ (mbio->bi_size >> 9) - 1);
+ bio_endio(mbio, mbio->bi_size, 0);
+ }
}
}
}
@@ -361,8 +368,16 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
* already.
*/
if (atomic_dec_and_test(&r1_bio->remaining)) {
+ if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
+ reschedule_retry(r1_bio);
+ /* Don't dec_pending yet, we want to hold
+ * the reference over the retry
+ */
+ return 0;
+ }
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
/* free extra copy of the data pages */
+/* FIXME bio has been freed!!! */
int i = bio->bi_vcnt;
while (i--)
__free_page(bio->bi_io_vec[i].bv_page);
@@ -416,12 +431,12 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
/* Choose the first operational device, for consistency */
new_disk = 0;
- for (rdev = conf->mirrors[new_disk].rdev;
- !rdev || !rdev->in_sync
+ for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
+ !rdev || !test_bit(In_sync, &rdev->flags)
|| test_bit(WriteMostly, &rdev->flags);
- rdev = conf->mirrors[++new_disk].rdev) {
+ rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {
- if (rdev && rdev->in_sync)
+ if (rdev && test_bit(In_sync, &rdev->flags))
wonly_disk = new_disk;
if (new_disk == conf->raid_disks - 1) {
@@ -434,12 +449,12 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
/* make sure the disk is operational */
- for (rdev = conf->mirrors[new_disk].rdev;
- !rdev || !rdev->in_sync ||
+ for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
+ !rdev || !test_bit(In_sync, &rdev->flags) ||
test_bit(WriteMostly, &rdev->flags);
- rdev = conf->mirrors[new_disk].rdev) {
+ rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {
- if (rdev && rdev->in_sync)
+ if (rdev && test_bit(In_sync, &rdev->flags))
wonly_disk = new_disk;
if (new_disk <= 0)
@@ -474,10 +489,10 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
disk = conf->raid_disks;
disk--;
- rdev = conf->mirrors[disk].rdev;
+ rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (!rdev ||
- !rdev->in_sync ||
+ !test_bit(In_sync, &rdev->flags) ||
test_bit(WriteMostly, &rdev->flags))
continue;
@@ -496,11 +511,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
if (new_disk >= 0) {
- rdev = conf->mirrors[new_disk].rdev;
+ rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
if (!rdev)
goto retry;
atomic_inc(&rdev->nr_pending);
- if (!rdev->in_sync) {
+ if (!test_bit(In_sync, &rdev->flags)) {
/* cannot risk returning a device that failed
* before we inc'ed nr_pending
*/
@@ -522,8 +537,8 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = conf->mirrors[i].rdev;
- if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
@@ -556,8 +571,8 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = conf->mirrors[i].rdev;
- if (rdev && !rdev->faulty) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -648,8 +663,9 @@ static int make_request(request_queue_t *q, struct bio * bio)
struct bio_list bl;
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
+ int do_barriers;
- if (unlikely(bio_barrier(bio))) {
+ if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
return 0;
}
@@ -728,10 +744,10 @@ static int make_request(request_queue_t *q, struct bio * bio)
#endif
rcu_read_lock();
for (i = 0; i < disks; i++) {
- if ((rdev=conf->mirrors[i].rdev) != NULL &&
- !rdev->faulty) {
+ if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
+ !test_bit(Faulty, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
- if (rdev->faulty) {
+ if (test_bit(Faulty, &rdev->flags)) {
atomic_dec(&rdev->nr_pending);
r1_bio->bios[i] = NULL;
} else
@@ -759,6 +775,10 @@ static int make_request(request_queue_t *q, struct bio * bio)
atomic_set(&r1_bio->remaining, 0);
atomic_set(&r1_bio->behind_remaining, 0);
+ do_barriers = bio->bi_rw & BIO_RW_BARRIER;
+ if (do_barriers)
+ set_bit(R1BIO_Barrier, &r1_bio->state);
+
bio_list_init(&bl);
for (i = 0; i < disks; i++) {
struct bio *mbio;
@@ -771,7 +791,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw = WRITE;
+ mbio->bi_rw = WRITE | do_barriers;
mbio->bi_private = r1_bio;
if (behind_pages) {
@@ -824,7 +844,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
for (i = 0; i < conf->raid_disks; i++)
seq_printf(seq, "%s",
conf->mirrors[i].rdev &&
- conf->mirrors[i].rdev->in_sync ? "U" : "_");
+ test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
seq_printf(seq, "]");
}
@@ -840,14 +860,14 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
* next level up know.
* else mark the drive as failed
*/
- if (rdev->in_sync
+ if (test_bit(In_sync, &rdev->flags)
&& conf->working_disks == 1)
/*
* Don't fail the drive, act as though we were just a
* normal single drive
*/
return;
- if (rdev->in_sync) {
+ if (test_bit(In_sync, &rdev->flags)) {
mddev->degraded++;
conf->working_disks--;
/*
@@ -855,8 +875,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
*/
set_bit(MD_RECOVERY_ERR, &mddev->recovery);
}
- rdev->in_sync = 0;
- rdev->faulty = 1;
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Faulty, &rdev->flags);
mddev->sb_dirty = 1;
printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
" Operation continuing on %d devices\n",
@@ -881,7 +901,7 @@ static void print_conf(conf_t *conf)
tmp = conf->mirrors + i;
if (tmp->rdev)
printk(" disk %d, wo:%d, o:%d, dev:%s\n",
- i, !tmp->rdev->in_sync, !tmp->rdev->faulty,
+ i, !test_bit(In_sync, &tmp->rdev->flags), !test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b));
}
}
@@ -913,11 +933,11 @@ static int raid1_spare_active(mddev_t *mddev)
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->mirrors + i;
if (tmp->rdev
- && !tmp->rdev->faulty
- && !tmp->rdev->in_sync) {
+ && !test_bit(Faulty, &tmp->rdev->flags)
+ && !test_bit(In_sync, &tmp->rdev->flags)) {
conf->working_disks++;
mddev->degraded--;
- tmp->rdev->in_sync = 1;
+ set_bit(In_sync, &tmp->rdev->flags);
}
}
@@ -954,7 +974,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
found = 1;
if (rdev->saved_raid_disk != mirror)
conf->fullsync = 1;
- p->rdev = rdev;
+ rcu_assign_pointer(p->rdev, rdev);
break;
}
@@ -972,7 +992,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
print_conf(conf);
rdev = p->rdev;
if (rdev) {
- if (rdev->in_sync ||
+ if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
err = -EBUSY;
goto abort;
@@ -1153,6 +1173,36 @@ static void raid1d(mddev_t *mddev)
if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
sync_request_write(mddev, r1_bio);
unplug = 1;
+ } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
+ /* some requests in the r1bio were BIO_RW_BARRIER
+ * requests which failed with -ENOTSUPP. Hohumm..
+ * Better resubmit without the barrier.
+ * We know which devices to resubmit for, because
+ * all others have had their bios[] entry cleared.
+ */
+ int i;
+ clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
+ clear_bit(R1BIO_Barrier, &r1_bio->state);
+ for (i=0; i < conf->raid_disks; i++)
+ if (r1_bio->bios[i]) {
+ struct bio_vec *bvec;
+ int j;
+
+ bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
+ /* copy pages from the failed bio, as
+ * this might be a write-behind device */
+ __bio_for_each_segment(bvec, bio, j, 0)
+ bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
+ bio_put(r1_bio->bios[i]);
+ bio->bi_sector = r1_bio->sector +
+ conf->mirrors[i].rdev->data_offset;
+ bio->bi_bdev = conf->mirrors[i].rdev->bdev;
+ bio->bi_end_io = raid1_end_write_request;
+ bio->bi_rw = WRITE;
+ bio->bi_private = r1_bio;
+ r1_bio->bios[i] = bio;
+ generic_make_request(bio);
+ }
} else {
int disk;
bio = r1_bio->bios[r1_bio->read_disk];
@@ -1260,7 +1310,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
* This call the bitmap_start_sync doesn't actually record anything
*/
if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
- !conf->fullsync) {
+ !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */
*skipped = 1;
return sync_blocks;
@@ -1282,11 +1332,11 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
/* make sure disk is operational */
wonly = disk;
while (conf->mirrors[disk].rdev == NULL ||
- !conf->mirrors[disk].rdev->in_sync ||
+ !test_bit(In_sync, &conf->mirrors[disk].rdev->flags) ||
test_bit(WriteMostly, &conf->mirrors[disk].rdev->flags)
) {
if (conf->mirrors[disk].rdev &&
- conf->mirrors[disk].rdev->in_sync)
+ test_bit(In_sync, &conf->mirrors[disk].rdev->flags))
wonly = disk;
if (disk <= 0)
disk = conf->raid_disks;
@@ -1333,11 +1383,12 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
bio->bi_rw = READ;
bio->bi_end_io = end_sync_read;
} else if (conf->mirrors[i].rdev == NULL ||
- conf->mirrors[i].rdev->faulty) {
+ test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
still_degraded = 1;
continue;
- } else if (!conf->mirrors[i].rdev->in_sync ||
- sector_nr + RESYNC_SECTORS > mddev->recovery_cp) {
+ } else if (!test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
+ sector_nr + RESYNC_SECTORS > mddev->recovery_cp ||
+ test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
bio->bi_rw = WRITE;
bio->bi_end_io = end_sync_write;
write_targets ++;
@@ -1371,8 +1422,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
break;
if (sync_blocks == 0) {
if (!bitmap_start_sync(mddev->bitmap, sector_nr,
- &sync_blocks, still_degraded) &&
- !conf->fullsync)
+ &sync_blocks, still_degraded) &&
+ !conf->fullsync &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break;
if (sync_blocks < (PAGE_SIZE>>9))
BUG();
@@ -1478,7 +1530,7 @@ static int run(mddev_t *mddev)
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
- if (!rdev->faulty && rdev->in_sync)
+ if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags))
conf->working_disks++;
}
conf->raid_disks = mddev->raid_disks;
@@ -1518,7 +1570,7 @@ static int run(mddev_t *mddev)
*/
for (j = 0; j < conf->raid_disks &&
(!conf->mirrors[j].rdev ||
- !conf->mirrors[j].rdev->in_sync) ; j++)
+ !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++)
/* nothing */;
conf->last_used = j;
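
The raid1.c hunks above teach the write path about barriers: writes go out with BIO_RW_BARRIER set, and if a device completes one with -ENOTSUPP, the r1bio is flagged R1BIO_BarrierRetry and raid1d() resubmits the surviving bios without the barrier. The control flow reduces to the retry loop sketched below; submit_write() is a made-up stand-in for a device lacking barrier support, not an md function, and userspace errno.h's EOPNOTSUPP stands in for the kernel's ENOTSUPP.

/* Sketch of the barrier fallback: try once with a barrier, and on
 * failure retry the same write as a plain request. */
#include <errno.h>
#include <stdio.h>

static int submit_write(int barrier)
{
	/* Pretend device: rejects barrier writes, accepts plain ones. */
	return barrier ? -EOPNOTSUPP : 0;
}

int main(void)
{
	int err = submit_write(1);

	if (err == -EOPNOTSUPP) {
		/* analogous to the R1BIO_BarrierRetry path in raid1d() */
		printf("barrier unsupported, retrying as plain write\n");
		err = submit_write(0);
	}
	printf("final status: %d\n", err);
	return 0;
}
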
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index bbe40e9..867f06a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -496,6 +496,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
int disk, slot, nslot;
const int sectors = r10_bio->sectors;
sector_t new_distance, current_distance;
+ mdk_rdev_t *rdev;
raid10_find_phys(conf, r10_bio);
rcu_read_lock();
@@ -510,8 +511,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
slot = 0;
disk = r10_bio->devs[slot].devnum;
- while (!conf->mirrors[disk].rdev ||
- !conf->mirrors[disk].rdev->in_sync) {
+ while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
+ !test_bit(In_sync, &rdev->flags)) {
slot++;
if (slot == conf->copies) {
slot = 0;
@@ -527,8 +528,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
/* make sure the disk is operational */
slot = 0;
disk = r10_bio->devs[slot].devnum;
- while (!conf->mirrors[disk].rdev ||
- !conf->mirrors[disk].rdev->in_sync) {
+ while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
+ !test_bit(In_sync, &rdev->flags)) {
slot ++;
if (slot == conf->copies) {
disk = -1;
@@ -547,11 +548,11 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
int ndisk = r10_bio->devs[nslot].devnum;
- if (!conf->mirrors[ndisk].rdev ||
- !conf->mirrors[ndisk].rdev->in_sync)
+ if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
+ !test_bit(In_sync, &rdev->flags))
continue;
- if (!atomic_read(&conf->mirrors[ndisk].rdev->nr_pending)) {
+ if (!atomic_read(&rdev->nr_pending)) {
disk = ndisk;
slot = nslot;
break;
@@ -569,7 +570,7 @@ rb_out:
r10_bio->read_slot = slot;
/* conf->next_seq_sect = this_sector + sectors;*/
- if (disk >= 0 && conf->mirrors[disk].rdev)
+ if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
rcu_read_unlock();
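
read_balance() now loads conf->mirrors[disk].rdev exactly once into a local rdev under rcu_read_lock() and tests only that snapshot; the old code re-read the shared pointer between the NULL check and the in_sync check, so a concurrent hot-remove could clear it in between. A sketch of the reader side, with a C11 consume load standing in for rcu_dereference() (not kernel code):

#include <stdatomic.h>
#include <stddef.h>

struct rdev   { unsigned long flags; };
struct mirror { _Atomic(struct rdev *) rdev; };

#define IN_SYNC_BIT 1   /* illustrative */

/* was: if (!conf->mirrors[d].rdev || !conf->mirrors[d].rdev->in_sync)
 *      -- two independent reads of a pointer the writer may clear in between */
int disk_usable(struct mirror *m)
{
        struct rdev *rdev = atomic_load_explicit(&m->rdev, memory_order_consume);

        if (rdev == NULL)               /* every later test uses the snapshot */
                return 0;
        return (rdev->flags >> IN_SYNC_BIT) & 1UL;
}
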
@@ -583,8 +584,8 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = conf->mirrors[i].rdev;
- if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
@@ -614,8 +615,8 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = conf->mirrors[i].rdev;
- if (rdev && !rdev->faulty) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -768,9 +769,10 @@ static int make_request(request_queue_t *q, struct bio * bio)
rcu_read_lock();
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
- if (conf->mirrors[d].rdev &&
- !conf->mirrors[d].rdev->faulty) {
- atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
+ if (rdev &&
+ !test_bit(Faulty, &rdev->flags)) {
+ atomic_inc(&rdev->nr_pending);
r10_bio->devs[i].bio = bio;
} else
r10_bio->devs[i].bio = NULL;
@@ -824,7 +826,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
for (i = 0; i < conf->raid_disks; i++)
seq_printf(seq, "%s",
conf->mirrors[i].rdev &&
- conf->mirrors[i].rdev->in_sync ? "U" : "_");
+ test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
seq_printf(seq, "]");
}
@@ -839,7 +841,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
* next level up know.
* else mark the drive as failed
*/
- if (rdev->in_sync
+ if (test_bit(In_sync, &rdev->flags)
&& conf->working_disks == 1)
/*
* Don't fail the drive, just return an IO error.
@@ -849,7 +851,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
* really dead" tests...
*/
return;
- if (rdev->in_sync) {
+ if (test_bit(In_sync, &rdev->flags)) {
mddev->degraded++;
conf->working_disks--;
/*
@@ -857,8 +859,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
*/
set_bit(MD_RECOVERY_ERR, &mddev->recovery);
}
- rdev->in_sync = 0;
- rdev->faulty = 1;
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Faulty, &rdev->flags);
mddev->sb_dirty = 1;
printk(KERN_ALERT "raid10: Disk failure on %s, disabling device. \n"
" Operation continuing on %d devices\n",
@@ -883,7 +885,8 @@ static void print_conf(conf_t *conf)
tmp = conf->mirrors + i;
if (tmp->rdev)
printk(" disk %d, wo:%d, o:%d, dev:%s\n",
- i, !tmp->rdev->in_sync, !tmp->rdev->faulty,
+ i, !test_bit(In_sync, &tmp->rdev->flags),
+ !test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b));
}
}
@@ -936,11 +939,11 @@ static int raid10_spare_active(mddev_t *mddev)
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->mirrors + i;
if (tmp->rdev
- && !tmp->rdev->faulty
- && !tmp->rdev->in_sync) {
+ && !test_bit(Faulty, &tmp->rdev->flags)
+ && !test_bit(In_sync, &tmp->rdev->flags)) {
conf->working_disks++;
mddev->degraded--;
- tmp->rdev->in_sync = 1;
+ set_bit(In_sync, &tmp->rdev->flags);
}
}
@@ -980,7 +983,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
p->head_position = 0;
rdev->raid_disk = mirror;
found = 1;
- p->rdev = rdev;
+ rcu_assign_pointer(p->rdev, rdev);
break;
}
@@ -998,7 +1001,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
print_conf(conf);
rdev = p->rdev;
if (rdev) {
- if (rdev->in_sync ||
+ if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
err = -EBUSY;
goto abort;
@@ -1414,7 +1417,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
for (i=0 ; i<conf->raid_disks; i++)
if (conf->mirrors[i].rdev &&
- !conf->mirrors[i].rdev->in_sync) {
+ !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
/* want to reconstruct this device */
r10bio_t *rb2 = r10_bio;
@@ -1435,7 +1438,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
for (j=0; j<conf->copies;j++) {
int d = r10_bio->devs[j].devnum;
if (conf->mirrors[d].rdev &&
- conf->mirrors[d].rdev->in_sync) {
+ test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
/* This is where we read from */
bio = r10_bio->devs[0].bio;
bio->bi_next = biolist;
@@ -1511,7 +1514,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
bio = r10_bio->devs[i].bio;
bio->bi_end_io = NULL;
if (conf->mirrors[d].rdev == NULL ||
- conf->mirrors[d].rdev->faulty)
+ test_bit(Faulty, &conf->mirrors[d].rdev->flags))
continue;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
@@ -1697,7 +1700,7 @@ static int run(mddev_t *mddev)
mddev->queue->max_sectors = (PAGE_SIZE>>9);
disk->head_position = 0;
- if (!rdev->faulty && rdev->in_sync)
+ if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags))
conf->working_disks++;
}
conf->raid_disks = mddev->raid_disks;
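
The matching writer side is raid10_add_disk(), where the plain assignment p->rdev = rdev becomes rcu_assign_pointer(p->rdev, rdev): the barrier orders the initialization of *rdev before the store that publishes it, so the readers sketched above can never observe a half-constructed device. Roughly, in C11 terms:

#include <stdatomic.h>
#include <stdlib.h>

struct rdev   { unsigned long flags; int raid_disk; };
struct mirror { _Atomic(struct rdev *) rdev; };

/* was: p->rdev = rdev;  -- a plain store, with no ordering against the
 * initialization of *rdev that a concurrent reader might then see half-done */
void publish_rdev(struct mirror *p, int mirror_no)
{
        struct rdev *rdev = calloc(1, sizeof(*rdev));

        if (!rdev)
                return;
        rdev->raid_disk = mirror_no;                    /* init first ...   */
        atomic_store_explicit(&p->rdev, rdev,
                              memory_order_release);    /* ... then publish */
}
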
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1223e98..e2a4028 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -293,9 +293,31 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
return sh;
}
-static int grow_stripes(raid5_conf_t *conf, int num)
+static int grow_one_stripe(raid5_conf_t *conf)
{
struct stripe_head *sh;
+ sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
+ if (!sh)
+ return 0;
+ memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
+ sh->raid_conf = conf;
+ spin_lock_init(&sh->lock);
+
+ if (grow_buffers(sh, conf->raid_disks)) {
+ shrink_buffers(sh, conf->raid_disks);
+ kmem_cache_free(conf->slab_cache, sh);
+ return 0;
+ }
+ /* we just created an active stripe so... */
+ atomic_set(&sh->count, 1);
+ atomic_inc(&conf->active_stripes);
+ INIT_LIST_HEAD(&sh->lru);
+ release_stripe(sh);
+ return 1;
+}
+
+static int grow_stripes(raid5_conf_t *conf, int num)
+{
kmem_cache_t *sc;
int devs = conf->raid_disks;
@@ -308,48 +330,39 @@ static int grow_stripes(raid5_conf_t *conf, int num)
return 1;
conf->slab_cache = sc;
while (num--) {
- sh = kmem_cache_alloc(sc, GFP_KERNEL);
- if (!sh)
- return 1;
- memset(sh, 0, sizeof(*sh) + (devs-1)*sizeof(struct r5dev));
- sh->raid_conf = conf;
- spin_lock_init(&sh->lock);
-
- if (grow_buffers(sh, conf->raid_disks)) {
- shrink_buffers(sh, conf->raid_disks);
- kmem_cache_free(sc, sh);
+ if (!grow_one_stripe(conf))
return 1;
- }
- /* we just created an active stripe so... */
- atomic_set(&sh->count, 1);
- atomic_inc(&conf->active_stripes);
- INIT_LIST_HEAD(&sh->lru);
- release_stripe(sh);
}
return 0;
}
-static void shrink_stripes(raid5_conf_t *conf)
+static int drop_one_stripe(raid5_conf_t *conf)
{
struct stripe_head *sh;
- while (1) {
- spin_lock_irq(&conf->device_lock);
- sh = get_free_stripe(conf);
- spin_unlock_irq(&conf->device_lock);
- if (!sh)
- break;
- if (atomic_read(&sh->count))
- BUG();
- shrink_buffers(sh, conf->raid_disks);
- kmem_cache_free(conf->slab_cache, sh);
- atomic_dec(&conf->active_stripes);
- }
+ spin_lock_irq(&conf->device_lock);
+ sh = get_free_stripe(conf);
+ spin_unlock_irq(&conf->device_lock);
+ if (!sh)
+ return 0;
+ if (atomic_read(&sh->count))
+ BUG();
+ shrink_buffers(sh, conf->raid_disks);
+ kmem_cache_free(conf->slab_cache, sh);
+ atomic_dec(&conf->active_stripes);
+ return 1;
+}
+
+static void shrink_stripes(raid5_conf_t *conf)
+{
+ while (drop_one_stripe(conf))
+ ;
+
kmem_cache_destroy(conf->slab_cache);
conf->slab_cache = NULL;
}
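
grow_stripes() and shrink_stripes() are rebuilt around single-step helpers, grow_one_stripe() and drop_one_stripe(), so the stripe cache can be resized incrementally at runtime; the stripe_cache_size handler added further down drives exactly these helpers in a loop. The shape of the refactor as a self-contained sketch, with a plain counter standing in for the slab-backed cache:

#include <stdio.h>

struct cache { int size; int limit; };

/* each helper performs exactly one grow/shrink step and reports success */
int grow_one(struct cache *c) { return c->size < c->limit ? (c->size++, 1) : 0; }
int drop_one(struct cache *c) { return c->size > 0        ? (c->size--, 1) : 0; }

/* resizing is then two loops, the same loops the
 * stripe_cache_size store handler uses on max_nr_stripes */
void resize(struct cache *c, int target)
{
        while (c->size < target && grow_one(c))
                ;
        while (c->size > target && drop_one(c))
                ;
}

int main(void)
{
        struct cache c = { .size = 256, .limit = 32768 };
        resize(&c, 1024);
        printf("cache size now %d\n", c.size);
        return 0;
}
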
-static int raid5_end_read_request (struct bio * bi, unsigned int bytes_done,
+static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
int error)
{
struct stripe_head *sh = bi->bi_private;
@@ -401,10 +414,35 @@ static int raid5_end_read_request (struct bio * bi, unsigned int bytes_done,
}
#else
set_bit(R5_UPTODATE, &sh->dev[i].flags);
-#endif
+#endif
+ if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
+ printk("R5: read error corrected!!\n");
+ clear_bit(R5_ReadError, &sh->dev[i].flags);
+ clear_bit(R5_ReWrite, &sh->dev[i].flags);
+ }
+ if (atomic_read(&conf->disks[i].rdev->read_errors))
+ atomic_set(&conf->disks[i].rdev->read_errors, 0);
} else {
- md_error(conf->mddev, conf->disks[i].rdev);
+ int retry = 0;
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
+ atomic_inc(&conf->disks[i].rdev->read_errors);
+ if (conf->mddev->degraded)
+ printk("R5: read error not correctable.\n");
+ else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+ /* Oh, no!!! */
+ printk("R5: read error NOT corrected!!\n");
+ else if (atomic_read(&conf->disks[i].rdev->read_errors)
+ > conf->max_nr_stripes)
+ printk("raid5: Too many read errors, failing device.\n");
+ else
+ retry = 1;
+ if (retry)
+ set_bit(R5_ReadError, &sh->dev[i].flags);
+ else {
+ clear_bit(R5_ReadError, &sh->dev[i].flags);
+ clear_bit(R5_ReWrite, &sh->dev[i].flags);
+ md_error(conf->mddev, conf->disks[i].rdev);
+ }
}
rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
#if 0
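
The new failure path in raid5_end_read_request() no longer fails the whole device on the first bad read: it marks the stripe R5_ReadError and retries, unless a retry cannot help because the array is already degraded, a rewrite was already attempted, or the device has accumulated more read errors than max_nr_stripes. The decision, pulled out as a sketch (the parameter names paraphrase the hunk above):

/* returns 1 if the read should be retried via the ReadError path,
 * 0 if the device should be failed with md_error() */
int should_retry_read(int array_degraded, int already_rewritten,
                      int read_errors, int max_nr_stripes)
{
        if (array_degraded)                     /* no redundancy left to rebuild from */
                return 0;
        if (already_rewritten)                  /* rewrite + re-read already failed once */
                return 0;
        if (read_errors > max_nr_stripes)       /* device is clearly going bad */
                return 0;
        return 1;
}
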
@@ -487,19 +525,19 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
PRINTK("raid5: error called\n");
- if (!rdev->faulty) {
+ if (!test_bit(Faulty, &rdev->flags)) {
mddev->sb_dirty = 1;
- if (rdev->in_sync) {
+ if (test_bit(In_sync, &rdev->flags)) {
conf->working_disks--;
mddev->degraded++;
conf->failed_disks++;
- rdev->in_sync = 0;
+ clear_bit(In_sync, &rdev->flags);
/*
* if recovery was running, make sure it aborts.
*/
set_bit(MD_RECOVERY_ERR, &mddev->recovery);
}
- rdev->faulty = 1;
+ set_bit(Faulty, &rdev->flags);
printk (KERN_ALERT
"raid5: Disk failure on %s, disabling device."
" Operation continuing on %d devices\n",
@@ -965,7 +1003,13 @@ static void handle_stripe(struct stripe_head *sh)
}
if (dev->written) written++;
rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */
- if (!rdev || !rdev->in_sync) {
+ if (!rdev || !test_bit(In_sync, &rdev->flags)) {
+ /* The ReadError flag will just be confusing now */
+ clear_bit(R5_ReadError, &dev->flags);
+ clear_bit(R5_ReWrite, &dev->flags);
+ }
+ if (!rdev || !test_bit(In_sync, &rdev->flags)
+ || test_bit(R5_ReadError, &dev->flags)) {
failed++;
failed_num = i;
} else
@@ -980,6 +1024,14 @@ static void handle_stripe(struct stripe_head *sh)
if (failed > 1 && to_read+to_write+written) {
for (i=disks; i--; ) {
int bitmap_end = 0;
+
+ if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
+ mdk_rdev_t *rdev = conf->disks[i].rdev;
+ if (rdev && test_bit(In_sync, &rdev->flags))
+ /* multiple read failures in one stripe */
+ md_error(conf->mddev, rdev);
+ }
+
spin_lock_irq(&conf->device_lock);
/* fail all writes first */
bi = sh->dev[i].towrite;
@@ -1015,7 +1067,8 @@ static void handle_stripe(struct stripe_head *sh)
}
/* fail any reads if this device is non-operational */
- if (!test_bit(R5_Insync, &sh->dev[i].flags)) {
+ if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
+ test_bit(R5_ReadError, &sh->dev[i].flags)) {
bi = sh->dev[i].toread;
sh->dev[i].toread = NULL;
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
@@ -1247,6 +1300,11 @@ static void handle_stripe(struct stripe_head *sh)
!memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
/* parity is correct (on disc, not in buffer any more) */
set_bit(STRIPE_INSYNC, &sh->state);
+ } else {
+ conf->mddev->resync_mismatches += STRIPE_SECTORS;
+ if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+ /* don't try to repair!! */
+ set_bit(STRIPE_INSYNC, &sh->state);
}
}
if (!test_bit(STRIPE_INSYNC, &sh->state)) {
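
On a parity mismatch the new branch always accounts the bad sectors in mddev->resync_mismatches, and if the pass was requested as a read-only check (MD_RECOVERY_CHECK) it marks the stripe in-sync anyway so no repair write is issued. Condensed into a sketch:

/* returns 1 if the stripe should be treated as in-sync (no repair write),
 * given that a parity mismatch was just detected */
int handle_mismatch(unsigned long *resync_mismatches,
                    int check_only, int stripe_sectors)
{
        *resync_mismatches += stripe_sectors;   /* always account the mismatch */
        return check_only;                      /* 'check' counts, 'repair' rewrites */
}
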
@@ -1274,7 +1332,27 @@ static void handle_stripe(struct stripe_head *sh)
md_done_sync(conf->mddev, STRIPE_SECTORS,1);
clear_bit(STRIPE_SYNCING, &sh->state);
}
-
+
+ /* If the failed drive only has a ReadError, then we may need to progress
+ * the repair/check process
+ */
+ if (failed == 1 && ! conf->mddev->ro &&
+ test_bit(R5_ReadError, &sh->dev[failed_num].flags)
+ && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
+ && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
+ ) {
+ dev = &sh->dev[failed_num];
+ if (!test_bit(R5_ReWrite, &dev->flags)) {
+ set_bit(R5_Wantwrite, &dev->flags);
+ set_bit(R5_ReWrite, &dev->flags);
+ set_bit(R5_LOCKED, &dev->flags);
+ } else {
+ /* let's read it back */
+ set_bit(R5_Wantread, &dev->flags);
+ set_bit(R5_LOCKED, &dev->flags);
+ }
+ }
+
spin_unlock(&sh->lock);
while ((bi=return_bi)) {
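
The block just added drives a two-phase repair for a device that only has a ReadError: the first pass rewrites the data recomputed from parity (R5_Wantwrite plus R5_ReWrite), the next pass reads it back (R5_Wantread); if that re-read fails too, the error path above finally calls md_error(). As a state sketch (the struct is a stand-in for the relevant sh->dev[] flags):

struct dev_state { int rewrite_attempted; int want_write; int want_read; int locked; };

/* one handle_stripe() pass over a ReadError device that is
 * otherwise up to date and unlocked */
void progress_repair(struct dev_state *d)
{
        if (!d->rewrite_attempted) {
                d->want_write = 1;              /* phase 1: rewrite good data */
                d->rewrite_attempted = 1;
        } else {
                d->want_read = 1;               /* phase 2: verify by re-reading */
        }
        d->locked = 1;
}
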
@@ -1305,8 +1383,8 @@ static void handle_stripe(struct stripe_head *sh)
bi->bi_end_io = raid5_end_read_request;
rcu_read_lock();
- rdev = conf->disks[i].rdev;
- if (rdev && rdev->faulty)
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(Faulty, &rdev->flags))
rdev = NULL;
if (rdev)
atomic_inc(&rdev->nr_pending);
@@ -1379,8 +1457,8 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = conf->disks[i].rdev;
- if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
@@ -1424,8 +1502,8 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = conf->disks[i].rdev;
- if (rdev && !rdev->faulty) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -1567,6 +1645,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
return rv;
}
if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
!conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
/* we can skip this block, and probably more */
sync_blocks /= STRIPE_SECTORS;
@@ -1663,6 +1742,74 @@ static void raid5d (mddev_t *mddev)
PRINTK("--- raid5d inactive\n");
}
+static ssize_t
+raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
+{
+ raid5_conf_t *conf = mddev_to_conf(mddev);
+ if (conf)
+ return sprintf(page, "%d\n", conf->max_nr_stripes);
+ else
+ return 0;
+}
+
+static ssize_t
+raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
+{
+ raid5_conf_t *conf = mddev_to_conf(mddev);
+ char *end;
+ int new;
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+ if (!conf)
+ return -ENODEV;
+
+ new = simple_strtoul(page, &end, 10);
+ if (!*page || (*end && *end != '\n') )
+ return -EINVAL;
+ if (new <= 16 || new > 32768)
+ return -EINVAL;
+ while (new < conf->max_nr_stripes) {
+ if (drop_one_stripe(conf))
+ conf->max_nr_stripes--;
+ else
+ break;
+ }
+ while (new > conf->max_nr_stripes) {
+ if (grow_one_stripe(conf))
+ conf->max_nr_stripes++;
+ else break;
+ }
+ return len;
+}
+
+static struct md_sysfs_entry
+raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
+ raid5_show_stripe_cache_size,
+ raid5_store_stripe_cache_size);
+
+static ssize_t
+stripe_cache_active_show(mddev_t *mddev, char *page)
+{
+ raid5_conf_t *conf = mddev_to_conf(mddev);
+ if (conf)
+ return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
+ else
+ return 0;
+}
+
+static struct md_sysfs_entry
+raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
+
+static struct attribute *raid5_attrs[] = {
+ &raid5_stripecache_size.attr,
+ &raid5_stripecache_active.attr,
+ NULL,
+};
+static struct attribute_group raid5_attrs_group = {
+ .name = NULL,
+ .attrs = raid5_attrs,
+};
+
static int run(mddev_t *mddev)
{
raid5_conf_t *conf;
@@ -1709,7 +1856,7 @@ static int run(mddev_t *mddev)
disk->rdev = rdev;
- if (rdev->in_sync) {
+ if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
printk(KERN_INFO "raid5: device %s operational as raid"
" disk %d\n", bdevname(rdev->bdev,b),
@@ -1804,6 +1951,7 @@ memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
}
/* Ok, everything is just fine now */
+ sysfs_create_group(&mddev->kobj, &raid5_attrs_group);
if (mddev->bitmap)
mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
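
With the attribute group registered, the stripe cache becomes tunable while the array runs. A hypothetical userspace use; the /sys/block/md0/md path assumes the usual placement of the md kobject, and the store handler above rejects values outside (16, 32768]:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/block/md0/md/stripe_cache_size", "w");

        if (!f) {
                perror("stripe_cache_size");
                return 1;
        }
        fprintf(f, "%d\n", 1024);       /* must satisfy 16 < n <= 32768 */
        return fclose(f) ? 1 : 0;
}
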
@@ -1828,7 +1976,7 @@ abort:
-static int stop (mddev_t *mddev)
+static int stop(mddev_t *mddev)
{
raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
@@ -1837,6 +1985,7 @@ static int stop (mddev_t *mddev)
shrink_stripes(conf);
free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+ sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
kfree(conf);
mddev->private = NULL;
return 0;
@@ -1887,7 +2036,7 @@ static void status (struct seq_file *seq, mddev_t *mddev)
for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s",
conf->disks[i].rdev &&
- conf->disks[i].rdev->in_sync ? "U" : "_");
+ test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
seq_printf (seq, "]");
#if RAID5_DEBUG
#define D(x) \
@@ -1914,7 +2063,7 @@ static void print_raid5_conf (raid5_conf_t *conf)
tmp = conf->disks + i;
if (tmp->rdev)
printk(" disk %d, o:%d, dev:%s\n",
- i, !tmp->rdev->faulty,
+ i, !test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b));
}
}
@@ -1928,12 +2077,12 @@ static int raid5_spare_active(mddev_t *mddev)
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->disks + i;
if (tmp->rdev
- && !tmp->rdev->faulty
- && !tmp->rdev->in_sync) {
+ && !test_bit(Faulty, &tmp->rdev->flags)
+ && !test_bit(In_sync, &tmp->rdev->flags)) {
mddev->degraded--;
conf->failed_disks--;
conf->working_disks++;
- tmp->rdev->in_sync = 1;
+ set_bit(In_sync, &tmp->rdev->flags);
}
}
print_raid5_conf(conf);
@@ -1950,7 +2099,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
print_raid5_conf(conf);
rdev = p->rdev;
if (rdev) {
- if (rdev->in_sync ||
+ if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
err = -EBUSY;
goto abort;
@@ -1985,12 +2134,12 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
*/
for (disk=0; disk < mddev->raid_disks; disk++)
if ((p=conf->disks + disk)->rdev == NULL) {
- rdev->in_sync = 0;
+ clear_bit(In_sync, &rdev->flags);
rdev->raid_disk = disk;
found = 1;
if (rdev->saved_raid_disk != disk)
conf->fullsync = 1;
- p->rdev = rdev;
+ rcu_assign_pointer(p->rdev, rdev);
break;
}
print_raid5_conf(conf);
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 7757869..eae5a35 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -507,19 +507,19 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
PRINTK("raid6: error called\n");
- if (!rdev->faulty) {
+ if (!test_bit(Faulty, &rdev->flags)) {
mddev->sb_dirty = 1;
- if (rdev->in_sync) {
+ if (test_bit(In_sync, &rdev->flags)) {
conf->working_disks--;
mddev->degraded++;
conf->failed_disks++;
- rdev->in_sync = 0;
+ clear_bit(In_sync, &rdev->flags);
/*
* if recovery was running, make sure it aborts.
*/
set_bit(MD_RECOVERY_ERR, &mddev->recovery);
}
- rdev->faulty = 1;
+ set_bit(Faulty, &rdev->flags);
printk (KERN_ALERT
"raid6: Disk failure on %s, disabling device."
" Operation continuing on %d devices\n",
@@ -1071,7 +1071,7 @@ static void handle_stripe(struct stripe_head *sh)
}
if (dev->written) written++;
rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */
- if (!rdev || !rdev->in_sync) {
+ if (!rdev || !test_bit(In_sync, &rdev->flags)) {
if ( failed < 2 )
failed_num[failed] = i;
failed++;
@@ -1464,8 +1464,8 @@ static void handle_stripe(struct stripe_head *sh)
bi->bi_end_io = raid6_end_read_request;
rcu_read_lock();
- rdev = conf->disks[i].rdev;
- if (rdev && rdev->faulty)
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(Faulty, &rdev->flags))
rdev = NULL;
if (rdev)
atomic_inc(&rdev->nr_pending);
@@ -1538,8 +1538,8 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = conf->disks[i].rdev;
- if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
@@ -1583,8 +1583,8 @@ static int raid6_issue_flush(request_queue_t *q, struct gendisk *disk,
rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = conf->disks[i].rdev;
- if (rdev && !rdev->faulty) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -1868,7 +1868,7 @@ static int run(mddev_t *mddev)
disk->rdev = rdev;
- if (rdev->in_sync) {
+ if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
printk(KERN_INFO "raid6: device %s operational as raid"
" disk %d\n", bdevname(rdev->bdev,b),
@@ -2052,7 +2052,7 @@ static void status (struct seq_file *seq, mddev_t *mddev)
for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s",
conf->disks[i].rdev &&
- conf->disks[i].rdev->in_sync ? "U" : "_");
+ test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
seq_printf (seq, "]");
#if RAID6_DUMPSTATE
seq_printf (seq, "\n");
@@ -2078,7 +2078,7 @@ static void print_raid6_conf (raid6_conf_t *conf)
tmp = conf->disks + i;
if (tmp->rdev)
printk(" disk %d, o:%d, dev:%s\n",
- i, !tmp->rdev->faulty,
+ i, !test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b));
}
}
@@ -2092,12 +2092,12 @@ static int raid6_spare_active(mddev_t *mddev)
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->disks + i;
if (tmp->rdev
- && !tmp->rdev->faulty
- && !tmp->rdev->in_sync) {
+ && !test_bit(Faulty, &tmp->rdev->flags)
+ && !test_bit(In_sync, &tmp->rdev->flags)) {
mddev->degraded--;
conf->failed_disks--;
conf->working_disks++;
- tmp->rdev->in_sync = 1;
+ set_bit(In_sync, &tmp->rdev->flags);
}
}
print_raid6_conf(conf);
@@ -2114,7 +2114,7 @@ static int raid6_remove_disk(mddev_t *mddev, int number)
print_raid6_conf(conf);
rdev = p->rdev;
if (rdev) {
- if (rdev->in_sync ||
+ if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
err = -EBUSY;
goto abort;
@@ -2149,12 +2149,12 @@ static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
*/
for (disk=0; disk < mddev->raid_disks; disk++)
if ((p=conf->disks + disk)->rdev == NULL) {
- rdev->in_sync = 0;
+ clear_bit(In_sync, &rdev->flags);
rdev->raid_disk = disk;
found = 1;
if (rdev->saved_raid_disk != disk)
conf->fullsync = 1;
- p->rdev = rdev;
+ rcu_assign_pointer(p->rdev, rdev);
break;
}
print_raid6_conf(conf);