Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--  drivers/md/md.c  335
1 file changed, 154 insertions, 181 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8dbab2e..21e2a7b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -39,10 +39,10 @@
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
-#include <linux/suspend.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
+#include <linux/freezer.h>
#include <linux/init.h>
@@ -389,8 +389,12 @@ static int super_written(struct bio *bio, unsigned int bytes_done, int error)
if (bio->bi_size)
return 1;
- if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
+ if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+ printk("md: super_written gets error=%d, uptodate=%d\n",
+ error, test_bit(BIO_UPTODATE, &bio->bi_flags));
+ WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
md_error(mddev, rdev);
+ }
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
@@ -970,12 +974,13 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
* version 1 superblock
*/
-static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
+static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
- unsigned int disk_csum, csum;
+ __le32 disk_csum;
+ u32 csum;
unsigned long long newcsum;
int size = 256 + le32_to_cpu(sb->max_dev)*2;
- unsigned int *isuper = (unsigned int*)sb;
+ __le32 *isuper = (__le32*)sb;
int i;
disk_csum = sb->sb_csum;
@@ -985,7 +990,7 @@ static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
newcsum += le32_to_cpu(*isuper++);
if (size == 2)
- newcsum += le16_to_cpu(*(unsigned short*) isuper);
+ newcsum += le16_to_cpu(*(__le16*) isuper);
csum = (newcsum & 0xffffffff) + (newcsum >> 32);
sb->sb_csum = disk_csum;
@@ -1102,7 +1107,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
if (le32_to_cpu(sb->chunksize))
rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
- if (le32_to_cpu(sb->size) > rdev->size*2)
+ if (le64_to_cpu(sb->size) > rdev->size*2)
return -EINVAL;
return ret;
}
@@ -1224,7 +1229,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
else
sb->resync_offset = cpu_to_le64(0);
- sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
+ sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->size<<1);
@@ -1408,7 +1413,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
struct block_device *bdev;
char b[BDEVNAME_SIZE];
- bdev = open_partition_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+ bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
if (IS_ERR(bdev)) {
printk(KERN_ERR "md: could not open %s.\n",
__bdevname(dev, b));
@@ -1418,7 +1423,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
if (err) {
printk(KERN_ERR "md: could not bd_claim %s.\n",
bdevname(bdev, b));
- blkdev_put_partition(bdev);
+ blkdev_put(bdev);
return err;
}
rdev->bdev = bdev;
@@ -1432,7 +1437,7 @@ static void unlock_rdev(mdk_rdev_t *rdev)
if (!bdev)
MD_BUG();
bd_release(bdev);
- blkdev_put_partition(bdev);
+ blkdev_put(bdev);
}
void md_autodetect_dev(dev_t dev);
@@ -1587,7 +1592,7 @@ static void sync_sbs(mddev_t * mddev, int nospares)
}
}
-void md_update_sb(mddev_t * mddev)
+static void md_update_sb(mddev_t * mddev, int force_change)
{
int err;
struct list_head *tmp;
@@ -1598,7 +1603,18 @@ void md_update_sb(mddev_t * mddev)
repeat:
spin_lock_irq(&mddev->write_lock);
- if (mddev->degraded && mddev->sb_dirty == 3)
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+ force_change = 1;
+ if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ /* just a clean<-> dirty transition, possibly leave spares alone,
+ * though if events isn't the right even/odd, we will have to do
+ * spares after all
+ */
+ nospares = 1;
+ if (force_change)
+ nospares = 0;
+ if (mddev->degraded)
/* If the array is degraded, then skipping spares is both
* dangerous and fairly pointless.
* Dangerous because a device that was removed from the array
@@ -1608,20 +1624,14 @@ repeat:
* then a recovery will happen and soon that array won't
* be degraded any more and the spare can go back to sleep then.
*/
- mddev->sb_dirty = 1;
+ nospares = 0;
sync_req = mddev->in_sync;
mddev->utime = get_seconds();
- if (mddev->sb_dirty == 3)
- /* just a clean<-> dirty transition, possibly leave spares alone,
- * though if events isn't the right even/odd, we will have to do
- * spares after all
- */
- nospares = 1;
/* If this is just a dirty<->clean transition, and the array is clean
* and 'events' is odd, we can roll back to the previous clean state */
- if (mddev->sb_dirty == 3
+ if (nospares
&& (mddev->in_sync && mddev->recovery_cp == MaxSector)
&& (mddev->events & 1))
mddev->events--;
@@ -1652,7 +1662,6 @@ repeat:
MD_BUG();
mddev->events --;
}
- mddev->sb_dirty = 2;
sync_sbs(mddev, nospares);
/*
@@ -1660,7 +1669,7 @@ repeat:
* nonpersistent superblocks
*/
if (!mddev->persistent) {
- mddev->sb_dirty = 0;
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
spin_unlock_irq(&mddev->write_lock);
wake_up(&mddev->sb_wait);
return;
@@ -1697,20 +1706,20 @@ repeat:
break;
}
md_super_wait(mddev);
- /* if there was a failure, sb_dirty was set to 1, and we re-write super */
+ /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
spin_lock_irq(&mddev->write_lock);
- if (mddev->in_sync != sync_req|| mddev->sb_dirty == 1) {
+ if (mddev->in_sync != sync_req ||
+ test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
/* have to write it out again */
spin_unlock_irq(&mddev->write_lock);
goto repeat;
}
- mddev->sb_dirty = 0;
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
spin_unlock_irq(&mddev->write_lock);
wake_up(&mddev->sb_wait);
}
-EXPORT_SYMBOL_GPL(md_update_sb);
/* words written to sysfs files may, or my not, be \n terminated.
* We want to accept with case. For this we use cmd_match.
@@ -1783,7 +1792,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
else {
mddev_t *mddev = rdev->mddev;
kick_rdev_from_array(rdev);
- md_update_sb(mddev);
+ md_update_sb(mddev, 1);
md_new_event(mddev);
err = 0;
}
@@ -1994,6 +2003,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
kobject_init(&rdev->kobj);
rdev->desc_nr = -1;
+ rdev->saved_raid_disk = -1;
rdev->flags = 0;
rdev->data_offset = 0;
rdev->sb_events = 0;
@@ -2426,7 +2436,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
spin_lock_irq(&mddev->write_lock);
if (atomic_read(&mddev->writes_pending) == 0) {
mddev->in_sync = 1;
- mddev->sb_dirty = 1;
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
spin_unlock_irq(&mddev->write_lock);
} else {
@@ -2438,7 +2448,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
case active:
if (mddev->pers) {
restart_array(mddev);
- mddev->sb_dirty = 0;
+ clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -2520,6 +2530,36 @@ static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
static ssize_t
+bitmap_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ char *end;
+ unsigned long chunk, end_chunk;
+
+ if (!mddev->bitmap)
+ goto out;
+ /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
+ while (*buf) {
+ chunk = end_chunk = simple_strtoul(buf, &end, 0);
+ if (buf == end) break;
+ if (*end == '-') { /* range */
+ buf = end + 1;
+ end_chunk = simple_strtoul(buf, &end, 0);
+ if (buf == end) break;
+ }
+ if (*end && !isspace(*end)) break;
+ bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
+ buf = end;
+ while (isspace(*buf)) buf++;
+ }
+ bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
+out:
+ return len;
+}
+
+static struct md_sysfs_entry md_bitmap =
+__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
+
+static ssize_t
size_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
@@ -2543,7 +2583,7 @@ size_store(mddev_t *mddev, const char *buf, size_t len)
if (mddev->pers) {
err = update_size(mddev, size);
- md_update_sb(mddev);
+ md_update_sb(mddev, 1);
} else {
if (mddev->size == 0 ||
mddev->size > size)
@@ -2839,6 +2879,7 @@ static struct attribute *md_redundancy_attrs[] = {
&md_sync_completed.attr,
&md_suspend_lo.attr,
&md_suspend_hi.attr,
+ &md_bitmap.attr,
NULL,
};
static struct attribute_group md_redundancy_group = {
@@ -3111,8 +3152,8 @@ static int do_md_run(mddev_t * mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (mddev->sb_dirty)
- md_update_sb(mddev);
+ if (mddev->flags)
+ md_update_sb(mddev, 0);
set_capacity(disk, mddev->array_size<<1);
@@ -3159,6 +3200,7 @@ static int do_md_run(mddev_t * mddev)
mddev->changed = 1;
md_new_event(mddev);
+ kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
return 0;
}
@@ -3272,13 +3314,17 @@ static int do_md_stop(mddev_t * mddev, int mode)
module_put(mddev->pers->owner);
mddev->pers = NULL;
+
+ set_capacity(disk, 0);
+ mddev->changed = 1;
+
if (mddev->ro)
mddev->ro = 0;
}
- if (!mddev->in_sync || mddev->sb_dirty) {
+ if (!mddev->in_sync || mddev->flags) {
/* mark array as shutdown cleanly */
mddev->in_sync = 1;
- md_update_sb(mddev);
+ md_update_sb(mddev, 1);
}
if (mode == 1)
set_disk_ro(disk, 1);
@@ -3291,7 +3337,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
if (mode == 0) {
mdk_rdev_t *rdev;
struct list_head *tmp;
- struct gendisk *disk;
+
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
@@ -3316,10 +3362,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
mddev->raid_disks = 0;
mddev->recovery_cp = 0;
- disk = mddev->gendisk;
- if (disk)
- set_capacity(disk, 0);
- mddev->changed = 1;
} else if (mddev->pers)
printk(KERN_INFO "md: %s switched to read-only mode.\n",
mdname(mddev));
@@ -3329,6 +3371,7 @@ out:
return err;
}
+#ifndef MODULE
static void autorun_array(mddev_t *mddev)
{
mdk_rdev_t *rdev;
@@ -3374,6 +3417,7 @@ static void autorun_devices(int part)
printk(KERN_INFO "md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
+ int unit;
dev_t dev;
LIST_HEAD(candidates);
rdev0 = list_entry(pending_raid_disks.next,
@@ -3393,16 +3437,19 @@ static void autorun_devices(int part)
* mostly sane superblocks. It's time to allocate the
* mddev.
*/
- if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
+ if (part) {
+ dev = MKDEV(mdp_major,
+ rdev0->preferred_minor << MdpMinorShift);
+ unit = MINOR(dev) >> MdpMinorShift;
+ } else {
+ dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
+ unit = MINOR(dev);
+ }
+ if (rdev0->preferred_minor != unit) {
printk(KERN_INFO "md: unit number in %s is bad: %d\n",
bdevname(rdev0->bdev, b), rdev0->preferred_minor);
break;
}
- if (part)
- dev = MKDEV(mdp_major,
- rdev0->preferred_minor << MdpMinorShift);
- else
- dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
md_probe(dev, NULL, NULL);
mddev = mddev_find(dev);
@@ -3439,67 +3486,7 @@ static void autorun_devices(int part)
}
printk(KERN_INFO "md: ... autorun DONE.\n");
}
-
-/*
- * import RAID devices based on one partition
- * if possible, the array gets run as well.
- */
-
-static int autostart_array(dev_t startdev)
-{
- char b[BDEVNAME_SIZE];
- int err = -EINVAL, i;
- mdp_super_t *sb = NULL;
- mdk_rdev_t *start_rdev = NULL, *rdev;
-
- start_rdev = md_import_device(startdev, 0, 0);
- if (IS_ERR(start_rdev))
- return err;
-
-
- /* NOTE: this can only work for 0.90.0 superblocks */
- sb = (mdp_super_t*)page_address(start_rdev->sb_page);
- if (sb->major_version != 0 ||
- sb->minor_version != 90 ) {
- printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
- export_rdev(start_rdev);
- return err;
- }
-
- if (test_bit(Faulty, &start_rdev->flags)) {
- printk(KERN_WARNING
- "md: can not autostart based on faulty %s!\n",
- bdevname(start_rdev->bdev,b));
- export_rdev(start_rdev);
- return err;
- }
- list_add(&start_rdev->same_set, &pending_raid_disks);
-
- for (i = 0; i < MD_SB_DISKS; i++) {
- mdp_disk_t *desc = sb->disks + i;
- dev_t dev = MKDEV(desc->major, desc->minor);
-
- if (!dev)
- continue;
- if (dev == startdev)
- continue;
- if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
- continue;
- rdev = md_import_device(dev, 0, 0);
- if (IS_ERR(rdev))
- continue;
-
- list_add(&rdev->same_set, &pending_raid_disks);
- }
-
- /*
- * possibly return codes
- */
- autorun_devices(0);
- return 0;
-
-}
-
+#endif /* !MODULE */
static int get_version(void __user * arg)
{
@@ -3737,6 +3724,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (err)
export_rdev(rdev);
+ md_update_sb(mddev, 1);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return err;
@@ -3808,7 +3796,7 @@ static int hot_remove_disk(mddev_t * mddev, dev_t dev)
goto busy;
kick_rdev_from_array(rdev);
- md_update_sb(mddev);
+ md_update_sb(mddev, 1);
md_new_event(mddev);
return 0;
@@ -3867,6 +3855,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
}
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
+ rdev->saved_raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
goto abort_export;
@@ -3885,7 +3874,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
rdev->raid_disk = -1;
- md_update_sb(mddev);
+ md_update_sb(mddev, 1);
/*
* Kick recovery, maybe this spare has to be added to the
@@ -4016,7 +4005,8 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
mddev->max_disks = MD_SB_DISKS;
- mddev->sb_dirty = 1;
+ mddev->flags = 0;
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
mddev->bitmap_offset = 0;
@@ -4059,11 +4049,8 @@ static int update_size(mddev_t *mddev, unsigned long size)
return -EBUSY;
ITERATE_RDEV(mddev,rdev,tmp) {
sector_t avail;
- if (rdev->sb_offset > rdev->data_offset)
- avail = (rdev->sb_offset*2) - rdev->data_offset;
- else
- avail = get_capacity(rdev->bdev->bd_disk)
- - rdev->data_offset;
+ avail = rdev->size * 2;
+
if (fit && (size == 0 || size > avail/2))
size = avail/2;
if (avail < ((sector_t)size << 1))
@@ -4185,7 +4172,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
mddev->bitmap_offset = 0;
}
}
- md_update_sb(mddev);
+ md_update_sb(mddev, 1);
return rv;
}
@@ -4259,27 +4246,6 @@ static int md_ioctl(struct inode *inode, struct file *file,
goto abort;
}
-
- if (cmd == START_ARRAY) {
- /* START_ARRAY doesn't need to lock the array as autostart_array
- * does the locking, and it could even be a different array
- */
- static int cnt = 3;
- if (cnt > 0 ) {
- printk(KERN_WARNING
- "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
- "This will not be supported beyond July 2006\n",
- current->comm, current->pid);
- cnt--;
- }
- err = autostart_array(new_decode_dev(arg));
- if (err) {
- printk(KERN_WARNING "md: autostart failed!\n");
- goto abort;
- }
- goto done;
- }
-
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
@@ -4460,7 +4426,7 @@ static int md_open(struct inode *inode, struct file *file)
mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
int err;
- if ((err = mddev_lock(mddev)))
+ if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
goto out;
err = 0;
@@ -4476,8 +4442,7 @@ static int md_release(struct inode *inode, struct file * file)
{
mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
- if (!mddev)
- BUG();
+ BUG_ON(!mddev);
mddev_put(mddev);
return 0;
@@ -4524,6 +4489,7 @@ static int md_thread(void * arg)
* many dirty RAID5 blocks.
*/
+ current->flags |= PF_NOFREEZE;
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
@@ -4540,7 +4506,6 @@ static int md_thread(void * arg)
test_bit(THREAD_WAKEUP, &thread->flags)
|| kthread_should_stop(),
thread->timeout);
- try_to_freeze();
clear_bit(THREAD_WAKEUP, &thread->flags);
@@ -4687,9 +4652,11 @@ static void status_resync(struct seq_file *seq, mddev_t * mddev)
seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
(test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
"reshape" :
- (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
- "resync" : "recovery")),
- per_milli/10, per_milli % 10,
+ (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
+ "check" :
+ (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
+ "resync" : "recovery"))),
+ per_milli/10, per_milli % 10,
(unsigned long long) resync,
(unsigned long long) max_blocks);
@@ -4882,8 +4849,8 @@ static int md_seq_show(struct seq_file *seq, void *v)
chunk_kb ? "KB" : "B");
if (bitmap->file) {
seq_printf(seq, ", file: ");
- seq_path(seq, bitmap->file->f_vfsmnt,
- bitmap->file->f_dentry," \t\n");
+ seq_path(seq, bitmap->file->f_path.mnt,
+ bitmap->file->f_path.dentry," \t\n");
}
seq_printf(seq, "\n");
@@ -4948,6 +4915,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
}
static struct file_operations md_seq_fops = {
+ .owner = THIS_MODULE,
.open = md_seq_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -5042,12 +5010,12 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
- mddev->sb_dirty = 3;
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
md_wakeup_thread(mddev->thread);
}
spin_unlock_irq(&mddev->write_lock);
}
- wait_event(mddev->sb_wait, mddev->sb_dirty==0);
+ wait_event(mddev->sb_wait, mddev->flags==0);
}
void md_write_end(mddev_t *mddev)
@@ -5078,6 +5046,7 @@ void md_do_sync(mddev_t *mddev)
int skipped = 0;
struct list_head *rtmp;
mdk_rdev_t *rdev;
+ char *desc;
/* just incase thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -5085,6 +5054,18 @@ void md_do_sync(mddev_t *mddev)
if (mddev->ro) /* never try to sync a read-only array */
return;
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ desc = "data-check";
+ else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ desc = "requested-resync";
+ else
+ desc = "resync";
+ } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ desc = "reshape";
+ else
+ desc = "recovery";
+
/* we overload curr_resync somewhat here.
* 0 == not engaged in resync at all
* 2 == checking that there is no conflict with another sync
@@ -5128,10 +5109,10 @@ void md_do_sync(mddev_t *mddev)
prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
if (!kthread_should_stop() &&
mddev2->curr_resync >= mddev->curr_resync) {
- printk(KERN_INFO "md: delaying resync of %s"
- " until %s has finished resync (they"
+ printk(KERN_INFO "md: delaying %s of %s"
+ " until %s has finished (they"
" share one or more physical units)\n",
- mdname(mddev), mdname(mddev2));
+ desc, mdname(mddev), mdname(mddev2));
mddev_put(mddev2);
schedule();
finish_wait(&resync_wait, &wq);
@@ -5167,12 +5148,12 @@ void md_do_sync(mddev_t *mddev)
j = rdev->recovery_offset;
}
- printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
- printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
- " %d KB/sec/disc.\n", speed_min(mddev));
+ printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
+ printk(KERN_INFO "md: minimum _guaranteed_ speed:"
+ " %d KB/sec/disk.\n", speed_min(mddev));
printk(KERN_INFO "md: using maximum available idle IO bandwidth "
- "(but not more than %d KB/sec) for reconstruction.\n",
- speed_max(mddev));
+ "(but not more than %d KB/sec) for %s.\n",
+ speed_max(mddev), desc);
is_mddev_idle(mddev); /* this also initializes IO event counters */
@@ -5198,8 +5179,8 @@ void md_do_sync(mddev_t *mddev)
if (j>2) {
printk(KERN_INFO
- "md: resuming recovery of %s from checkpoint.\n",
- mdname(mddev));
+ "md: resuming %s of %s from checkpoint.\n",
+ desc, mdname(mddev));
mddev->curr_resync = j;
}
@@ -5282,7 +5263,7 @@ void md_do_sync(mddev_t *mddev)
}
}
}
- printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
+ printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
/*
* this also signals 'finished resyncing' to md_stop
*/
@@ -5295,15 +5276,14 @@ void md_do_sync(mddev_t *mddev)
mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
- test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > 2) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
printk(KERN_INFO
- "md: checkpointing recovery of %s.\n",
- mdname(mddev));
+ "md: checkpointing %s of %s.\n",
+ desc, mdname(mddev));
mddev->recovery_cp = mddev->curr_resync;
}
} else
@@ -5317,9 +5297,9 @@ void md_do_sync(mddev_t *mddev)
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < mddev->curr_resync)
rdev->recovery_offset = mddev->curr_resync;
- mddev->sb_dirty = 1;
}
}
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
skip:
mddev->curr_resync = 0;
@@ -5374,7 +5354,7 @@ void md_check_recovery(mddev_t *mddev)
}
if ( ! (
- mddev->sb_dirty ||
+ mddev->flags ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->safemode == 1) ||
@@ -5390,14 +5370,14 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
!mddev->in_sync && mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
- mddev->sb_dirty = 3;
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
spin_unlock_irq(&mddev->write_lock);
- if (mddev->sb_dirty)
- md_update_sb(mddev);
+ if (mddev->flags)
+ md_update_sb(mddev, 0);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
@@ -5416,7 +5396,7 @@ void md_check_recovery(mddev_t *mddev)
/* activate any spares */
mddev->pers->spare_active(mddev);
}
- md_update_sb(mddev);
+ md_update_sb(mddev, 1);
/* if array is no-longer degraded, then any saved_raid_disk
* information must be scrapped
@@ -5556,22 +5536,15 @@ static void md_geninit(void)
static int __init md_init(void)
{
- printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
- " MD_SB_DISKS=%d\n",
- MD_MAJOR_VERSION, MD_MINOR_VERSION,
- MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
- printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
- BITMAP_MINOR);
-
if (register_blkdev(MAJOR_NR, "md"))
return -1;
if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
unregister_blkdev(MAJOR_NR, "md");
return -1;
}
- blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
- md_probe, NULL, NULL);
- blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
+ blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
+ md_probe, NULL, NULL);
+ blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
register_reboot_notifier(&md_notifier);
@@ -5623,15 +5596,15 @@ static void autostart_arrays(int part)
autorun_devices(part);
}
-#endif
+#endif /* !MODULE */
static __exit void md_exit(void)
{
mddev_t *mddev;
struct list_head *tmp;
- blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
- blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
+ blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
+ blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
unregister_blkdev(MAJOR_NR,"md");
unregister_blkdev(mdp_major, "mdp");
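
The core of this patch replaces the mddev->sb_dirty magic numbers (1/2/3) with the MD_CHANGE_DEVS, MD_CHANGE_CLEAN and MD_CHANGE_PENDING bit flags tested in md_update_sb() above. Below is a minimal standalone C sketch of that "nospares" decision logic; the flag values and the set_bit/test_and_clear_bit helpers are user-space stand-ins for the kernel's atomic bitops, not the real md.h definitions.

#include <stdio.h>

/* Flag bits mirrored from the patch; the numeric values here are illustrative only. */
enum { MD_CHANGE_DEVS = 0, MD_CHANGE_CLEAN = 1, MD_CHANGE_PENDING = 2 };

/* User-space stand-ins for the kernel's atomic bitops. */
static int test_and_clear_bit(int nr, unsigned long *addr)
{
	int old = (*addr >> nr) & 1;
	*addr &= ~(1UL << nr);
	return old;
}

static void set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

/* Decide whether a superblock write may leave spares alone ("nospares"),
 * following the order of checks md_update_sb() uses after this patch.
 */
static int decide_nospares(unsigned long *flags, int force_change, int degraded)
{
	int nospares = 0;

	set_bit(MD_CHANGE_PENDING, flags);              /* a superblock write is now in flight */
	if (test_and_clear_bit(MD_CHANGE_DEVS, flags))  /* the device set changed */
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, flags)) /* just a clean<->dirty transition */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (degraded)                                   /* skipping spares would be unsafe */
		nospares = 0;
	return nospares;
}

int main(void)
{
	unsigned long flags = 0;

	set_bit(MD_CHANGE_CLEAN, &flags);
	printf("clean transition, healthy array: nospares=%d\n",
	       decide_nospares(&flags, 0, 0));  /* prints 1 */

	set_bit(MD_CHANGE_DEVS, &flags);
	printf("device change: nospares=%d\n",
	       decide_nospares(&flags, 0, 0));  /* prints 0 */
	return 0;
}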