From 35b785f7691aa82c4b0b262392439cfa6f22816d Mon Sep 17 00:00:00 2001 From: Tomasz Majchrzak Date: Fri, 21 Oct 2016 16:26:57 +0200 Subject: md: add bad block support for external metadata Add a new rdev flag which an external metadata handler can use to switch bad block support on or off. If a new bad block is encountered, notify it via the rdev 'unacknowledged_bad_blocks' sysfs file. If a bad block has been cleared, notify the update via the rdev 'bad_blocks' sysfs file. When bad block support is being removed, just clear the rdev flag. It is not necessary to reset the badblocks->shift field. If bad blocks are cleared or added at the same time, it is fine for those changes to be applied to the structure. The array is in the blocked state and the drive which cannot handle bad blocks any more will be removed from the array before it is unblocked. Simplify the state_show function by adding a separator at the end of each string and overwriting the last separator with a newline. Signed-off-by: Tomasz Majchrzak Reviewed-by: Artur Paszkiewicz Signed-off-by: Shaohua Li --- drivers/md/md.c | 78 ++++++++++++++++++++++++++++----------------------------- drivers/md/md.h | 3 +++ 2 files changed, 42 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 2089d46..622ccb9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2523,51 +2523,38 @@ struct rdev_sysfs_entry { static ssize_t state_show(struct md_rdev *rdev, char *page) { - char *sep = ""; + char *sep = ","; size_t len = 0; unsigned long flags = ACCESS_ONCE(rdev->flags); if (test_bit(Faulty, &flags) || - rdev->badblocks.unacked_exist) { - len+= sprintf(page+len, "%sfaulty",sep); - sep = ","; - } - if (test_bit(In_sync, &flags)) { - len += sprintf(page+len, "%sin_sync",sep); - sep = ","; - } - if (test_bit(Journal, &flags)) { - len += sprintf(page+len, "%sjournal",sep); - sep = ","; - } - if (test_bit(WriteMostly, &flags)) { - len += sprintf(page+len, "%swrite_mostly",sep); - sep = ","; - } + rdev->badblocks.unacked_exist) + len += sprintf(page+len, "faulty%s", sep); + if (test_bit(In_sync, &flags)) + len += sprintf(page+len, "in_sync%s", sep); + if (test_bit(Journal, &flags)) + len += sprintf(page+len, "journal%s", sep); + if (test_bit(WriteMostly, &flags)) + len += sprintf(page+len, "write_mostly%s", sep); if (test_bit(Blocked, &flags) || (rdev->badblocks.unacked_exist - && !test_bit(Faulty, &flags))) { - len += sprintf(page+len, "%sblocked", sep); - sep = ","; - } + && !test_bit(Faulty, &flags))) + len += sprintf(page+len, "blocked%s", sep); if (!test_bit(Faulty, &flags) && !test_bit(Journal, &flags) && - !test_bit(In_sync, &flags)) { - len += sprintf(page+len, "%sspare", sep); - sep = ","; - } - if (test_bit(WriteErrorSeen, &flags)) { - len += sprintf(page+len, "%swrite_error", sep); - sep = ","; - } - if (test_bit(WantReplacement, &flags)) { - len += sprintf(page+len, "%swant_replacement", sep); - sep = ","; - } - if (test_bit(Replacement, &flags)) { - len += sprintf(page+len, "%sreplacement", sep); - sep = ","; - } + !test_bit(In_sync, &flags)) + len += sprintf(page+len, "spare%s", sep); + if (test_bit(WriteErrorSeen, &flags)) + len += sprintf(page+len, "write_error%s", sep); + if (test_bit(WantReplacement, &flags)) + len += sprintf(page+len, "want_replacement%s", sep); + if (test_bit(Replacement, &flags)) + len += sprintf(page+len, "replacement%s", sep); + if (test_bit(ExternalBbl, &flags)) + len += sprintf(page+len, "external_bbl%s", sep); + + if (len) + len -= strlen(sep); return len+sprintf(page+len, "\n"); } @@ -2708,6
+2695,13 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) } } else err = -EBUSY; + } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { + set_bit(ExternalBbl, &rdev->flags); + rdev->badblocks.shift = 0; + err = 0; + } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { + clear_bit(ExternalBbl, &rdev->flags); + err = 0; } if (!err) sysfs_notify_dirent_safe(rdev->sysfs_state); @@ -8614,6 +8608,9 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, rv = badblocks_set(&rdev->badblocks, s, sectors, 0); if (rv == 0) { /* Make sure they get written out promptly */ + if (test_bit(ExternalBbl, &rdev->flags)) + sysfs_notify(&rdev->kobj, NULL, + "unacknowledged_bad_blocks"); sysfs_notify_dirent_safe(rdev->sysfs_state); set_mask_bits(&mddev->flags, 0, BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING)); @@ -8627,12 +8624,15 @@ EXPORT_SYMBOL_GPL(rdev_set_badblocks); int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, int is_new) { + int rv; if (is_new) s += rdev->new_data_offset; else s += rdev->data_offset; - return badblocks_clear(&rdev->badblocks, - s, sectors); + rv = badblocks_clear(&rdev->badblocks, s, sectors); + if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags)) + sysfs_notify(&rdev->kobj, NULL, "bad_blocks"); + return rv; } EXPORT_SYMBOL_GPL(rdev_clear_badblocks); diff --git a/drivers/md/md.h b/drivers/md/md.h index 2b20417..21bd94f 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -168,6 +168,9 @@ enum flag_bits { * so it is safe to remove without * another synchronize_rcu() call. */ + ExternalBbl, /* External metadata provides bad + * block management for a disk + */ }; static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, -- cgit v1.1 From dcbcb48650ecd8de747b2e21e0ee9484b462cb74 Mon Sep 17 00:00:00 2001 From: Tomasz Majchrzak Date: Fri, 21 Oct 2016 16:27:08 +0200 Subject: md: don't fail an array if there are unacknowledged bad blocks If the external metadata handler supports bad blocks and unacknowledged bad blocks are present, don't report the disk via sysfs as faulty. Such a situation can still be handled, so the disk just has to be blocked for a moment. This makes it consistent with the kernel state, as the corresponding rdev flag is also not set. When the disk is being unblocked there are a few cases: 1. The disk has been in the blocked and faulty state; it is being unblocked but it still remains in the faulty state. The metadata handler will remove it from the array in the next call. 2. There is no bad block support in the external metadata handler and bad blocks are present - put the disk in the blocked and faulty state (see case 1). 3. There is bad block support in the external metadata handler and all bad blocks are acknowledged - clear all flags, continue. 4. There is bad block support in the external metadata handler but there are still unacknowledged bad blocks - clear all flags, continue. It is fine to clear the Blocked flag because it was probably not set anyway (if it was, this is case 1). The BlockedBadBlocks flag can also be cleared because the request waiting for it will set it again when it finds out that some bad block is still not acknowledged. Recovery is not necessary, but there are no problems if the flag is set. The sysfs rdev state is still reported as blocked (due to unacknowledged bad blocks), so the metadata handler will process the remaining bad blocks and unblock the disk again.
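A usage sketch, not part of the series: with the 'external_bbl' interface added above, a user-space metadata handler would enable per-device bad block management and then consume the notifications roughly as below. Only the attribute names and the "sector length" line format come from md; the array/member paths, file handling and the rest of the logic are illustrative assumptions.

    /* Hypothetical user-space sketch: enable external bad block management
     * for one member device and dump the unacknowledged bad block list
     * after a sysfs notification.  Paths are examples only.
     */
    #include <stdio.h>

    static int write_attr(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            char line[64];
            FILE *f;

            /* Claim bad block management for the member (external arrays only). */
            if (write_attr("/sys/block/md127/md/dev-sda/state", "external_bbl"))
                    return 1;

            /* On notification, each line of the file is "<first-sector> <count>". */
            f = fopen("/sys/block/md127/md/dev-sda/unacknowledged_bad_blocks", "r");
            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    printf("unacknowledged bad blocks: %s", line);
            fclose(f);
            return 0;
    }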
Signed-off-by: Tomasz Majchrzak Signed-off-by: Shaohua Li --- drivers/md/md.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 622ccb9..7f56d67 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2528,7 +2528,8 @@ state_show(struct md_rdev *rdev, char *page) unsigned long flags = ACCESS_ONCE(rdev->flags); if (test_bit(Faulty, &flags) || - rdev->badblocks.unacked_exist) + (!test_bit(ExternalBbl, &flags) && + rdev->badblocks.unacked_exist)) len += sprintf(page+len, "faulty%s", sep); if (test_bit(In_sync, &flags)) len += sprintf(page+len, "in_sync%s", sep); @@ -2613,6 +2614,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) err = 0; } else if (cmd_match(buf, "-blocked")) { if (!test_bit(Faulty, &rdev->flags) && + !test_bit(ExternalBbl, &rdev->flags) && rdev->badblocks.unacked_exist) { /* metadata handler doesn't understand badblocks, * so we need to fail the device -- cgit v1.1 From 91a6c4aded58cb49f99320480bad62493c288761 Mon Sep 17 00:00:00 2001 From: Tomasz Majchrzak Date: Tue, 25 Oct 2016 17:07:08 +0200 Subject: md: wake up personality thread after array state update When a raid1/raid10 array fails to write to one of the drives, the request is added to bio_end_io_list and finished by the personality thread. The thread doesn't handle it as long as the MD_CHANGE_PENDING flag is set. In the case of external metadata this flag is cleared, however the thread is not woken up. This causes the request to be blocked for a few seconds (until another action on the array wakes up the thread) or to get stuck indefinitely. Wake up the personality thread once MD_CHANGE_PENDING has been cleared. Moving the 'restart_array' call to after the flag is cleared is not a solution because in read-write mode the call doesn't wake up the thread. Signed-off-by: Tomasz Majchrzak Signed-off-by: Shaohua Li --- drivers/md/md.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 7f56d67..94f882a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3922,6 +3922,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) if (st == active) { restart_array(mddev); clear_bit(MD_CHANGE_PENDING, &mddev->flags); + md_wakeup_thread(mddev->thread); wake_up(&mddev->sb_wait); err = 0; } else /* st == clean */ { -- cgit v1.1 From 7adb072ca83d71103ecc8578bee5f73c4f1eba89 Mon Sep 17 00:00:00 2001 From: Tomasz Majchrzak Date: Wed, 26 Oct 2016 09:20:39 +0200 Subject: raid5: revert commit 11367799f3d1 Revert commit 11367799f3d1 ("md: Prevent IO hold during accessing to faulty raid5 array") as it doesn't comply with commit c3cce6cda162 ("md/raid5: ensure device failure recorded before write request returns."). That change is not required anymore, as the problem it addressed - a read request getting stuck because the array state was not reported correctly via the sysfs attribute - has been resolved by commit 16f889499a52 ("md: report 'write_pending' state when array in sync").
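For context, the reason the md_wakeup_thread() call added above matters (and why handle_stripe() can keep deferring return_bi while MD_CHANGE_PENDING is set) is that the personality thread only hands the parked bios back once the flag is clear. A simplified paraphrase of that consumer side, not part of these diffs:

    /* Simplified sketch (paraphrasing the raid5 personality thread): failed
     * writes are parked on conf->return_bi and only completed after the
     * superblock update is no longer pending, so whoever clears
     * MD_CHANGE_PENDING must also wake this thread.
     */
    static void flush_deferred_bios(struct r5conf *conf)
    {
            struct mddev *mddev = conf->mddev;
            struct bio_list tmp = BIO_EMPTY_LIST;

            if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
                    return;         /* metadata write still pending, keep waiting */

            spin_lock_irq(&conf->device_lock);
            bio_list_merge(&tmp, &conf->return_bi);
            bio_list_init(&conf->return_bi);
            spin_unlock_irq(&conf->device_lock);

            return_io(&tmp);        /* complete the parked bios */
    }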
Signed-off-by: Tomasz Majchrzak Signed-off-by: Shaohua Li --- drivers/md/raid5.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 92ac251..b6e4670 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4645,9 +4645,7 @@ finish: } if (!bio_list_empty(&s.return_bi)) { - if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) && - (s.failed <= conf->max_degraded || - conf->mddev->external == 0)) { + if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) { spin_lock_irq(&conf->device_lock); bio_list_merge(&conf->return_bi, &s.return_bi); spin_unlock_irq(&conf->device_lock); -- cgit v1.1 From cbb387323610295be9d36c51287b668c1140704f Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Mon, 31 Oct 2016 10:19:00 +0800 Subject: md/bitmap: call bitmap_file_unmap once bitmap_storage_alloc returns -ENOMEM bitmap_storage_alloc can return -ENOMEM after some members inside store, such as filemap, have already been allocated. To avoid a memory leak, we need to call bitmap_file_unmap in bitmap_resize to free those members. Reviewed-by: NeilBrown Signed-off-by: Guoqing Jiang Signed-off-by: Shaohua Li --- drivers/md/bitmap.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 2d82692..cd3a065 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -2029,8 +2029,10 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, !bitmap->mddev->bitmap_info.external, mddev_is_clustered(bitmap->mddev) ? bitmap->cluster_slot : 0); - if (ret) + if (ret) { + bitmap_file_unmap(&store); goto err; + } pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO); -- cgit v1.1 From 7f0f0d87fa172c71f74ea916d70765862ee2d53a Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 2 Nov 2016 14:16:49 +1100 Subject: md: fix some issues with alloc_disk_sb() 1/ don't print a warning if allocation fails; alloc_page() does that already. 2/ always check the return status for errors. Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/md.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 94f882a..88c1821 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -684,11 +684,8 @@ static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) static int alloc_disk_sb(struct md_rdev *rdev) { rdev->sb_page = alloc_page(GFP_KERNEL); - if (!rdev->sb_page) { - printk(KERN_ALERT "md: out of memory.\n"); + if (!rdev->sb_page) return -ENOMEM; - } - return 0; } @@ -8788,15 +8785,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) * variable in case we err in the future */ rdev->sb_page = NULL; - alloc_disk_sb(rdev); - ClearPageUptodate(rdev->sb_page); - rdev->sb_loaded = 0; - err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version); - + err = alloc_disk_sb(rdev); + if (err == 0) { + ClearPageUptodate(rdev->sb_page); + rdev->sb_loaded = 0; + err = super_types[mddev->major_version]. + load_super(rdev, NULL, mddev->minor_version); + } if (err < 0) { pr_warn("%s: %d Could not reload rdev(%d) err: %d.
Restoring old values\n", __func__, __LINE__, rdev->desc_nr, err); - put_page(rdev->sb_page); + if (rdev->sb_page) + put_page(rdev->sb_page); rdev->sb_page = swapout; rdev->sb_loaded = 1; return err; -- cgit v1.1 From 9d48739ef19aa8ad6026fd312b3ed81b560c8579 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 2 Nov 2016 14:16:49 +1100 Subject: md: change all printk() to pr_err() or pr_warn() etc. 1/ using pr_debug() for a number of messages reduces the noise of md, but still allows them to be enabled when needed. 2/ try to be consistent in the usage of pr_err() and pr_warn(), and document the intention 3/ When strings have been split onto multiple lines, rejoin into a single string. The cost of having lines > 80 chars is less than the cost of not being able to easily search for a particular message. Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/md.c | 389 +++++++++++++++++++++++++------------------------------- 1 file changed, 170 insertions(+), 219 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 88c1821..22c9efd 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -30,6 +30,18 @@ You should have received a copy of the GNU General Public License (for example /usr/src/linux/COPYING); if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + + Errors, Warnings, etc. + Please use: + pr_crit() for error conditions that risk data loss + pr_err() for error conditions that are unexpected, like an IO error + or internal inconsistency + pr_warn() for error conditions that could have been predicated, like + adding a device to an array when it has incompatible metadata + pr_info() for every interesting, very rare events, like an array starting + or stopping, or resync starting or stopping + pr_debug() for everything else. 
+ */ #include @@ -712,7 +724,7 @@ static void super_written(struct bio *bio) struct mddev *mddev = rdev->mddev; if (bio->bi_error) { - printk("md: super_written gets error=%d\n", bio->bi_error); + pr_err("md: super_written gets error=%d\n", bio->bi_error); md_error(mddev, rdev); } @@ -792,8 +804,8 @@ static int read_disk_sb(struct md_rdev *rdev, int size) return 0; fail: - printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n", - bdevname(rdev->bdev,b)); + pr_err("md: disabled device %s, could not read superblock.\n", + bdevname(rdev->bdev,b)); return -EINVAL; } @@ -815,7 +827,6 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) if (!tmp1 || !tmp2) { ret = 0; - printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n"); goto abort; } @@ -929,7 +940,7 @@ int md_check_no_bitmap(struct mddev *mddev) { if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) return 0; - printk(KERN_ERR "%s: bitmaps are not supported for %s\n", + pr_warn("%s: bitmaps are not supported for %s\n", mdname(mddev), mddev->pers->name); return 1; } @@ -953,7 +964,8 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor rdev->sb_start = calc_dev_sboffset(rdev); ret = read_disk_sb(rdev, MD_SB_BYTES); - if (ret) return ret; + if (ret) + return ret; ret = -EINVAL; @@ -961,17 +973,15 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor sb = page_address(rdev->sb_page); if (sb->md_magic != MD_SB_MAGIC) { - printk(KERN_ERR "md: invalid raid superblock magic on %s\n", - b); + pr_warn("md: invalid raid superblock magic on %s\n", b); goto abort; } if (sb->major_version != 0 || sb->minor_version < 90 || sb->minor_version > 91) { - printk(KERN_WARNING "Bad version number %d.%d on %s\n", - sb->major_version, sb->minor_version, - b); + pr_warn("Bad version number %d.%d on %s\n", + sb->major_version, sb->minor_version, b); goto abort; } @@ -979,8 +989,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor goto abort; if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) { - printk(KERN_WARNING "md: invalid superblock checksum on %s\n", - b); + pr_warn("md: invalid superblock checksum on %s\n", b); goto abort; } @@ -1001,14 +1010,13 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor __u64 ev1, ev2; mdp_super_t *refsb = page_address(refdev->sb_page); if (!uuid_equal(refsb, sb)) { - printk(KERN_WARNING "md: %s has different UUID to %s\n", + pr_warn("md: %s has different UUID to %s\n", b, bdevname(refdev->bdev,b2)); goto abort; } if (!sb_equal(refsb, sb)) { - printk(KERN_WARNING "md: %s has same UUID" - " but different superblock to %s\n", - b, bdevname(refdev->bdev, b2)); + pr_warn("md: %s has same UUID but different superblock to %s\n", + b, bdevname(refdev->bdev, b2)); goto abort; } ev1 = md_event(sb); @@ -1410,13 +1418,13 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ return -EINVAL; if (calc_sb_1_csum(sb) != sb->sb_csum) { - printk("md: invalid superblock checksum on %s\n", + pr_warn("md: invalid superblock checksum on %s\n", bdevname(rdev->bdev,b)); return -EINVAL; } if (le64_to_cpu(sb->data_size) < 10) { - printk("md: data_size too small on %s\n", - bdevname(rdev->bdev,b)); + pr_warn("md: data_size too small on %s\n", + bdevname(rdev->bdev,b)); return -EINVAL; } if (sb->pad0 || @@ -1500,8 +1508,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ sb->level != refsb->level || sb->layout != 
refsb->layout || sb->chunksize != refsb->chunksize) { - printk(KERN_WARNING "md: %s has strangely different" - " superblock to %s\n", + pr_warn("md: %s has strangely different superblock to %s\n", bdevname(rdev->bdev,b), bdevname(refdev->bdev,b2)); return -EINVAL; @@ -1643,8 +1650,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) case MD_DISK_ROLE_JOURNAL: /* journal device */ if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) { /* journal device without journal feature */ - printk(KERN_WARNING - "md: journal device provided without journal feature, ignoring the device\n"); + pr_warn("md: journal device provided without journal feature, ignoring the device\n"); return -EINVAL; } set_bit(Journal, &rdev->flags); @@ -2001,9 +2007,9 @@ int md_integrity_register(struct mddev *mddev) blk_integrity_register(mddev->gendisk, bdev_get_integrity(reference->bdev)); - printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev)); + pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) { - printk(KERN_ERR "md: failed to create integrity pool for %s\n", + pr_err("md: failed to create integrity pool for %s\n", mdname(mddev)); return -EINVAL; } @@ -2031,8 +2037,8 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) return 0; if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { - printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n", - mdname(mddev), bdevname(rdev->bdev, name)); + pr_err("%s: incompatible integrity profile for %s\n", + mdname(mddev), bdevname(rdev->bdev, name)); return -ENXIO; } @@ -2086,15 +2092,15 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) rcu_read_unlock(); if (!test_bit(Journal, &rdev->flags) && mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { - printk(KERN_WARNING "md: %s: array is limited to %d devices\n", - mdname(mddev), mddev->max_disks); + pr_warn("md: %s: array is limited to %d devices\n", + mdname(mddev), mddev->max_disks); return -EBUSY; } bdevname(rdev->bdev,b); strreplace(b, '/', '!'); rdev->mddev = mddev; - printk(KERN_INFO "md: bind<%s>\n", b); + pr_debug("md: bind<%s>\n", b); if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) goto fail; @@ -2113,8 +2119,8 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) return 0; fail: - printk(KERN_WARNING "md: failed to register dev-%s for %s\n", - b, mdname(mddev)); + pr_warn("md: failed to register dev-%s for %s\n", + b, mdname(mddev)); return err; } @@ -2131,7 +2137,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev) bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); list_del_rcu(&rdev->same_set); - printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); + pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b)); rdev->mddev = NULL; sysfs_remove_link(&rdev->kobj, "block"); sysfs_put(rdev->sysfs_state); @@ -2161,8 +2167,7 @@ static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, shared ? 
(struct md_rdev *)lock_rdev : rdev); if (IS_ERR(bdev)) { - printk(KERN_ERR "md: could not open %s.\n", - __bdevname(dev, b)); + pr_warn("md: could not open %s.\n", __bdevname(dev, b)); return PTR_ERR(bdev); } rdev->bdev = bdev; @@ -2182,8 +2187,7 @@ static void export_rdev(struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; - printk(KERN_INFO "md: export_rdev(%s)\n", - bdevname(rdev->bdev,b)); + pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b)); md_rdev_clear(rdev); #ifndef MODULE if (test_bit(AutoDetected, &rdev->flags)) @@ -3204,10 +3208,8 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe sector_t size; rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); - if (!rdev) { - printk(KERN_ERR "md: could not alloc mem for new device!\n"); + if (!rdev) return ERR_PTR(-ENOMEM); - } err = md_rdev_init(rdev); if (err) @@ -3224,8 +3226,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; if (!size) { - printk(KERN_WARNING - "md: %s has zero or unknown size, marking faulty!\n", + pr_warn("md: %s has zero or unknown size, marking faulty!\n", bdevname(rdev->bdev,b)); err = -EINVAL; goto abort_free; @@ -3235,16 +3236,13 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe err = super_types[super_format]. load_super(rdev, NULL, super_minor); if (err == -EINVAL) { - printk(KERN_WARNING - "md: %s does not have a valid v%d.%d " - "superblock, not importing!\n", + pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n", bdevname(rdev->bdev,b), - super_format, super_minor); + super_format, super_minor); goto abort_free; } if (err < 0) { - printk(KERN_WARNING - "md: could not read %s's sb, not importing!\n", + pr_warn("md: could not read %s's sb, not importing!\n", bdevname(rdev->bdev,b)); goto abort_free; } @@ -3280,9 +3278,7 @@ static void analyze_sbs(struct mddev *mddev) case 0: break; default: - printk( KERN_ERR \ - "md: fatal superblock inconsistency in %s" - " -- removing from array\n", + pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n", bdevname(rdev->bdev,b)); md_kick_rdev_from_array(rdev); } @@ -3295,18 +3291,16 @@ static void analyze_sbs(struct mddev *mddev) if (mddev->max_disks && (rdev->desc_nr >= mddev->max_disks || i > mddev->max_disks)) { - printk(KERN_WARNING - "md: %s: %s: only %d devices permitted\n", - mdname(mddev), bdevname(rdev->bdev, b), - mddev->max_disks); + pr_warn("md: %s: %s: only %d devices permitted\n", + mdname(mddev), bdevname(rdev->bdev, b), + mddev->max_disks); md_kick_rdev_from_array(rdev); continue; } if (rdev != freshest) { if (super_types[mddev->major_version]. 
validate_super(mddev, rdev)) { - printk(KERN_WARNING "md: kicking non-fresh %s" - " from array!\n", + pr_warn("md: kicking non-fresh %s from array!\n", bdevname(rdev->bdev,b)); md_kick_rdev_from_array(rdev); continue; @@ -3377,7 +3371,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) unsigned long msec; if (mddev_is_clustered(mddev)) { - pr_info("md: Safemode is disabled for clustered mode\n"); + pr_warn("md: Safemode is disabled for clustered mode\n"); return -EINVAL; } @@ -3465,8 +3459,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len) rv = -EINVAL; if (!mddev->pers->quiesce) { - printk(KERN_WARNING "md: %s: %s does not support online personality change\n", - mdname(mddev), mddev->pers->name); + pr_warn("md: %s: %s does not support online personality change\n", + mdname(mddev), mddev->pers->name); goto out_unlock; } @@ -3484,7 +3478,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) pers = find_pers(level, clevel); if (!pers || !try_module_get(pers->owner)) { spin_unlock(&pers_lock); - printk(KERN_WARNING "md: personality %s not loaded\n", clevel); + pr_warn("md: personality %s not loaded\n", clevel); rv = -EINVAL; goto out_unlock; } @@ -3498,8 +3492,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len) } if (!pers->takeover) { module_put(pers->owner); - printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", - mdname(mddev), clevel); + pr_warn("md: %s: %s does not support personality takeover\n", + mdname(mddev), clevel); rv = -EINVAL; goto out_unlock; } @@ -3519,8 +3513,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len) mddev->delta_disks = 0; mddev->reshape_backwards = 0; module_put(pers->owner); - printk(KERN_WARNING "md: %s: %s would not accept array\n", - mdname(mddev), clevel); + pr_warn("md: %s: %s would not accept array\n", + mdname(mddev), clevel); rv = PTR_ERR(priv); goto out_unlock; } @@ -3563,9 +3557,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len) pers->sync_request != NULL) { /* need to add the md_redundancy_group */ if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) - printk(KERN_WARNING - "md: cannot register extra attributes for %s\n", - mdname(mddev)); + pr_warn("md: cannot register extra attributes for %s\n", + mdname(mddev)); mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); } if (oldpers->sync_request != NULL && @@ -3596,9 +3589,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len) clear_bit(In_sync, &rdev->flags); else { if (sysfs_link_rdev(mddev, rdev)) - printk(KERN_WARNING "md: cannot register rd%d" - " for %s after level change\n", - rdev->raid_disk, mdname(mddev)); + pr_warn("md: cannot register rd%d for %s after level change\n", + rdev->raid_disk, mdname(mddev)); } } @@ -5065,13 +5057,13 @@ static int md_alloc(dev_t dev, char *name) /* This isn't possible, but as kobject_init_and_add is marked * __must_check, we must do something with the result */ - printk(KERN_WARNING "md: cannot register %s/md - name in use\n", - disk->disk_name); + pr_debug("md: cannot register %s/md - name in use\n", + disk->disk_name); error = 0; } if (mddev->kobj.sd && sysfs_create_group(&mddev->kobj, &md_bitmap_group)) - printk(KERN_DEBUG "pointless warning\n"); + pr_debug("pointless warning\n"); mutex_unlock(&mddev->open_mutex); abort: mutex_unlock(&disks_mutex); @@ -5173,15 +5165,15 @@ int md_run(struct mddev *mddev) if (mddev->dev_sectors && rdev->data_offset + mddev->dev_sectors > rdev->sb_start) { - printk("md: %s: data 
overlaps metadata\n", - mdname(mddev)); + pr_warn("md: %s: data overlaps metadata\n", + mdname(mddev)); return -EINVAL; } } else { if (rdev->sb_start + rdev->sb_size/512 > rdev->data_offset) { - printk("md: %s: metadata overlaps data\n", - mdname(mddev)); + pr_warn("md: %s: metadata overlaps data\n", + mdname(mddev)); return -EINVAL; } } @@ -5196,11 +5188,11 @@ int md_run(struct mddev *mddev) if (!pers || !try_module_get(pers->owner)) { spin_unlock(&pers_lock); if (mddev->level != LEVEL_NONE) - printk(KERN_WARNING "md: personality for level %d is not loaded!\n", - mddev->level); + pr_warn("md: personality for level %d is not loaded!\n", + mddev->level); else - printk(KERN_WARNING "md: personality for level %s is not loaded!\n", - mddev->clevel); + pr_warn("md: personality for level %s is not loaded!\n", + mddev->clevel); return -EINVAL; } spin_unlock(&pers_lock); @@ -5230,21 +5222,16 @@ int md_run(struct mddev *mddev) if (rdev < rdev2 && rdev->bdev->bd_contains == rdev2->bdev->bd_contains) { - printk(KERN_WARNING - "%s: WARNING: %s appears to be" - " on the same physical disk as" - " %s.\n", - mdname(mddev), - bdevname(rdev->bdev,b), - bdevname(rdev2->bdev,b2)); + pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n", + mdname(mddev), + bdevname(rdev->bdev,b), + bdevname(rdev2->bdev,b2)); warned = 1; } } if (warned) - printk(KERN_WARNING - "True protection against single-disk" - " failure might be compromised.\n"); + pr_warn("True protection against single-disk failure might be compromised.\n"); } mddev->recovery = 0; @@ -5258,14 +5245,14 @@ int md_run(struct mddev *mddev) err = pers->run(mddev); if (err) - printk(KERN_ERR "md: pers->run() failed ...\n"); + pr_warn("md: pers->run() failed ...\n"); else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { - WARN_ONCE(!mddev->external_size, "%s: default size too small," - " but 'external_size' not in effect?\n", __func__); - printk(KERN_ERR - "md: invalid array_size %llu > default size %llu\n", - (unsigned long long)mddev->array_sectors / 2, - (unsigned long long)pers->size(mddev, 0, 0) / 2); + WARN_ONCE(!mddev->external_size, + "%s: default size too small, but 'external_size' not in effect?\n", + __func__); + pr_warn("md: invalid array_size %llu > default size %llu\n", + (unsigned long long)mddev->array_sectors / 2, + (unsigned long long)pers->size(mddev, 0, 0) / 2); err = -EINVAL; } if (err == 0 && pers->sync_request && @@ -5275,8 +5262,8 @@ int md_run(struct mddev *mddev) bitmap = bitmap_create(mddev, -1); if (IS_ERR(bitmap)) { err = PTR_ERR(bitmap); - printk(KERN_ERR "%s: failed to create bitmap (%d)\n", - mdname(mddev), err); + pr_warn("%s: failed to create bitmap (%d)\n", + mdname(mddev), err); } else mddev->bitmap = bitmap; @@ -5312,9 +5299,8 @@ int md_run(struct mddev *mddev) if (pers->sync_request) { if (mddev->kobj.sd && sysfs_create_group(&mddev->kobj, &md_redundancy_group)) - printk(KERN_WARNING - "md: cannot register extra attributes for %s\n", - mdname(mddev)); + pr_warn("md: cannot register extra attributes for %s\n", + mdname(mddev)); mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); } else if (mddev->ro == 2) /* auto-readonly not meaningful */ mddev->ro = 0; @@ -5415,8 +5401,7 @@ static int restart_array(struct mddev *mddev) mddev->safemode = 0; mddev->ro = 0; set_disk_ro(disk, 0); - printk(KERN_INFO "md: %s switched to read-write mode.\n", - mdname(mddev)); + pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); /* Kick recovery or resync if necessary */ 
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); @@ -5510,8 +5495,8 @@ static void mddev_detach(struct mddev *mddev) struct bitmap *bitmap = mddev->bitmap; /* wait for behind writes to complete */ if (bitmap && atomic_read(&bitmap->behind_writes) > 0) { - printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n", - mdname(mddev)); + pr_debug("md:%s: behind writes in progress - waiting to stop.\n", + mdname(mddev)); /* need to kick something here to make sure I/O goes? */ wait_event(bitmap->behind_wait, atomic_read(&bitmap->behind_writes) == 0); @@ -5585,7 +5570,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || mddev->sync_thread || test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { - printk("md: %s still in use.\n",mdname(mddev)); + pr_warn("md: %s still in use.\n",mdname(mddev)); if (did_freeze) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); @@ -5647,7 +5632,7 @@ static int do_md_stop(struct mddev *mddev, int mode, mddev->sysfs_active || mddev->sync_thread || test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { - printk("md: %s still in use.\n",mdname(mddev)); + pr_warn("md: %s still in use.\n",mdname(mddev)); mutex_unlock(&mddev->open_mutex); if (did_freeze) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); @@ -5684,7 +5669,7 @@ static int do_md_stop(struct mddev *mddev, int mode, * Free resources if final stop */ if (mode == 0) { - printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); + pr_info("md: %s stopped.\n", mdname(mddev)); bitmap_destroy(mddev); if (mddev->bitmap_info.file) { @@ -5716,17 +5701,17 @@ static void autorun_array(struct mddev *mddev) if (list_empty(&mddev->disks)) return; - printk(KERN_INFO "md: running: "); + pr_info("md: running: "); rdev_for_each(rdev, mddev) { char b[BDEVNAME_SIZE]; - printk("<%s>", bdevname(rdev->bdev,b)); + pr_cont("<%s>", bdevname(rdev->bdev,b)); } - printk("\n"); + pr_cont("\n"); err = do_md_run(mddev); if (err) { - printk(KERN_WARNING "md: do_md_run() returned %d\n", err); + pr_warn("md: do_md_run() returned %d\n", err); do_md_stop(mddev, 0, NULL); } } @@ -5749,7 +5734,7 @@ static void autorun_devices(int part) struct mddev *mddev; char b[BDEVNAME_SIZE]; - printk(KERN_INFO "md: autorun ...\n"); + pr_info("md: autorun ...\n"); while (!list_empty(&pending_raid_disks)) { int unit; dev_t dev; @@ -5757,13 +5742,12 @@ static void autorun_devices(int part) rdev0 = list_entry(pending_raid_disks.next, struct md_rdev, same_set); - printk(KERN_INFO "md: considering %s ...\n", - bdevname(rdev0->bdev,b)); + pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b)); INIT_LIST_HEAD(&candidates); rdev_for_each_list(rdev, tmp, &pending_raid_disks) if (super_90_load(rdev, rdev0, 0) >= 0) { - printk(KERN_INFO "md: adding %s ...\n", - bdevname(rdev->bdev,b)); + pr_debug("md: adding %s ...\n", + bdevname(rdev->bdev,b)); list_move(&rdev->same_set, &candidates); } /* @@ -5780,8 +5764,8 @@ static void autorun_devices(int part) unit = MINOR(dev); } if (rdev0->preferred_minor != unit) { - printk(KERN_INFO "md: unit number in %s is bad: %d\n", - bdevname(rdev0->bdev, b), rdev0->preferred_minor); + pr_warn("md: unit number in %s is bad: %d\n", + bdevname(rdev0->bdev, b), rdev0->preferred_minor); break; } @@ -5790,21 +5774,17 @@ static void autorun_devices(int part) if (!mddev || !mddev->gendisk) { if (mddev) mddev_put(mddev); - printk(KERN_ERR - "md: cannot allocate memory for md 
drive.\n"); break; } if (mddev_lock(mddev)) - printk(KERN_WARNING "md: %s locked, cannot run\n", - mdname(mddev)); + pr_warn("md: %s locked, cannot run\n", mdname(mddev)); else if (mddev->raid_disks || mddev->major_version || !list_empty(&mddev->disks)) { - printk(KERN_WARNING - "md: %s already running, cannot run %s\n", + pr_warn("md: %s already running, cannot run %s\n", mdname(mddev), bdevname(rdev0->bdev,b)); mddev_unlock(mddev); } else { - printk(KERN_INFO "md: created %s\n", mdname(mddev)); + pr_debug("md: created %s\n", mdname(mddev)); mddev->persistent = 1; rdev_for_each_list(rdev, tmp, &candidates) { list_del_init(&rdev->same_set); @@ -5823,7 +5803,7 @@ static void autorun_devices(int part) } mddev_put(mddev); } - printk(KERN_INFO "md: ... autorun DONE.\n"); + pr_info("md: ... autorun DONE.\n"); } #endif /* !MODULE */ @@ -5979,8 +5959,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) if (mddev_is_clustered(mddev) && !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { - pr_err("%s: Cannot add to clustered mddev.\n", - mdname(mddev)); + pr_warn("%s: Cannot add to clustered mddev.\n", + mdname(mddev)); return -EINVAL; } @@ -5992,8 +5972,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) /* expecting a device which has a superblock */ rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); if (IS_ERR(rdev)) { - printk(KERN_WARNING - "md: md_import_device returned %ld\n", + pr_warn("md: md_import_device returned %ld\n", PTR_ERR(rdev)); return PTR_ERR(rdev); } @@ -6004,8 +5983,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) err = super_types[mddev->major_version] .load_super(rdev, rdev0, mddev->minor_version); if (err < 0) { - printk(KERN_WARNING - "md: %s has different UUID to %s\n", + pr_warn("md: %s has different UUID to %s\n", bdevname(rdev->bdev,b), bdevname(rdev0->bdev,b2)); export_rdev(rdev); @@ -6026,9 +6004,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) if (mddev->pers) { int err; if (!mddev->pers->hot_add_disk) { - printk(KERN_WARNING - "%s: personality does not support diskops!\n", - mdname(mddev)); + pr_warn("%s: personality does not support diskops!\n", + mdname(mddev)); return -EINVAL; } if (mddev->persistent) @@ -6037,8 +6014,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) else rdev = md_import_device(dev, -1, -1); if (IS_ERR(rdev)) { - printk(KERN_WARNING - "md: md_import_device returned %ld\n", + pr_warn("md: md_import_device returned %ld\n", PTR_ERR(rdev)); return PTR_ERR(rdev); } @@ -6134,8 +6110,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) * for major_version==0 superblocks */ if (mddev->major_version != 0) { - printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", - mdname(mddev)); + pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); return -EINVAL; } @@ -6143,8 +6118,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) int err; rdev = md_import_device(dev, -1, 0); if (IS_ERR(rdev)) { - printk(KERN_WARNING - "md: error, md_import_device() returned %ld\n", + pr_warn("md: error, md_import_device() returned %ld\n", PTR_ERR(rdev)); return PTR_ERR(rdev); } @@ -6162,7 +6136,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) set_bit(WriteMostly, &rdev->flags); if (!mddev->persistent) { - printk(KERN_INFO "md: nonpersistent superblock ...\n"); + pr_debug("md: nonpersistent superblock ...\n"); rdev->sb_start = 
i_size_read(rdev->bdev->bd_inode) / 512; } else rdev->sb_start = calc_dev_sboffset(rdev); @@ -6206,8 +6180,8 @@ kick_rdev: return 0; busy: - printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", - bdevname(rdev->bdev,b), mdname(mddev)); + pr_debug("md: cannot remove active disk %s from %s ...\n", + bdevname(rdev->bdev,b), mdname(mddev)); return -EBUSY; } @@ -6221,22 +6195,19 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) return -ENODEV; if (mddev->major_version != 0) { - printk(KERN_WARNING "%s: HOT_ADD may only be used with" - " version-0 superblocks.\n", + pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n", mdname(mddev)); return -EINVAL; } if (!mddev->pers->hot_add_disk) { - printk(KERN_WARNING - "%s: personality does not support diskops!\n", + pr_warn("%s: personality does not support diskops!\n", mdname(mddev)); return -EINVAL; } rdev = md_import_device(dev, -1, 0); if (IS_ERR(rdev)) { - printk(KERN_WARNING - "md: error, md_import_device() returned %ld\n", + pr_warn("md: error, md_import_device() returned %ld\n", PTR_ERR(rdev)); return -EINVAL; } @@ -6249,8 +6220,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) rdev->sectors = rdev->sb_start; if (test_bit(Faulty, &rdev->flags)) { - printk(KERN_WARNING - "md: can not hot-add faulty %s disk to %s!\n", + pr_warn("md: can not hot-add faulty %s disk to %s!\n", bdevname(rdev->bdev,b), mdname(mddev)); err = -EINVAL; goto abort_export; @@ -6306,23 +6276,23 @@ static int set_bitmap_file(struct mddev *mddev, int fd) f = fget(fd); if (f == NULL) { - printk(KERN_ERR "%s: error: failed to get bitmap file\n", - mdname(mddev)); + pr_warn("%s: error: failed to get bitmap file\n", + mdname(mddev)); return -EBADF; } inode = f->f_mapping->host; if (!S_ISREG(inode->i_mode)) { - printk(KERN_ERR "%s: error: bitmap file must be a regular file\n", - mdname(mddev)); + pr_warn("%s: error: bitmap file must be a regular file\n", + mdname(mddev)); err = -EBADF; } else if (!(f->f_mode & FMODE_WRITE)) { - printk(KERN_ERR "%s: error: bitmap file must open for write\n", - mdname(mddev)); + pr_warn("%s: error: bitmap file must open for write\n", + mdname(mddev)); err = -EBADF; } else if (atomic_read(&inode->i_writecount) != 1) { - printk(KERN_ERR "%s: error: bitmap file is already in use\n", - mdname(mddev)); + pr_warn("%s: error: bitmap file is already in use\n", + mdname(mddev)); err = -EBUSY; } if (err) { @@ -6387,8 +6357,7 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) info->major_version >= ARRAY_SIZE(super_types) || super_types[info->major_version].name == NULL) { /* maybe try to auto-load a module? 
*/ - printk(KERN_INFO - "md: superblock version %d not known\n", + pr_warn("md: superblock version %d not known\n", info->major_version); return -EINVAL; } @@ -6654,8 +6623,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) if (mddev->bitmap_info.nodes) { /* hold PW on all the bitmap lock */ if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { - printk("md: can't change bitmap to none since the" - " array is in use by more than one node\n"); + pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n"); rv = -EPERM; md_cluster_ops->unlock_all_bitmaps(mddev); goto err; @@ -6841,9 +6809,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, } err = mddev_lock(mddev); if (err) { - printk(KERN_INFO - "md: ioctl lock interrupted, reason %d, cmd %d\n", - err, cmd); + pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n", + err, cmd); goto out; } @@ -6858,30 +6825,24 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, if (mddev->pers) { err = update_array_info(mddev, &info); if (err) { - printk(KERN_WARNING "md: couldn't update" - " array info. %d\n", err); + pr_warn("md: couldn't update array info. %d\n", err); goto unlock; } goto unlock; } if (!list_empty(&mddev->disks)) { - printk(KERN_WARNING - "md: array %s already has disks!\n", - mdname(mddev)); + pr_warn("md: array %s already has disks!\n", mdname(mddev)); err = -EBUSY; goto unlock; } if (mddev->raid_disks) { - printk(KERN_WARNING - "md: array %s already initialised!\n", - mdname(mddev)); + pr_warn("md: array %s already initialised!\n", mdname(mddev)); err = -EBUSY; goto unlock; } err = set_array_info(mddev, &info); if (err) { - printk(KERN_WARNING "md: couldn't set" - " array info. %d\n", err); + pr_warn("md: couldn't set array info. 
%d\n", err); goto unlock; } goto unlock; @@ -7582,8 +7543,8 @@ static const struct file_operations md_seq_fops = { int register_md_personality(struct md_personality *p) { - printk(KERN_INFO "md: %s personality registered for level %d\n", - p->name, p->level); + pr_debug("md: %s personality registered for level %d\n", + p->name, p->level); spin_lock(&pers_lock); list_add_tail(&p->list, &pers_list); spin_unlock(&pers_lock); @@ -7593,7 +7554,7 @@ EXPORT_SYMBOL(register_md_personality); int unregister_md_personality(struct md_personality *p) { - printk(KERN_INFO "md: %s personality unregistered\n", p->name); + pr_debug("md: %s personality unregistered\n", p->name); spin_lock(&pers_lock); list_del_init(&p->list); spin_unlock(&pers_lock); @@ -7633,7 +7594,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes) spin_lock(&pers_lock); /* ensure module won't be unloaded */ if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { - pr_err("can't find md-cluster module or get it's reference.\n"); + pr_warn("can't find md-cluster module or get it's reference.\n"); spin_unlock(&pers_lock); return -ENOENT; } @@ -7908,11 +7869,9 @@ void md_do_sync(struct md_thread *thread) mddev2->curr_resync >= mddev->curr_resync) { if (mddev2_minor != mddev2->md_minor) { mddev2_minor = mddev2->md_minor; - printk(KERN_INFO "md: delaying %s of %s" - " until %s has finished (they" - " share one or more physical units)\n", - desc, mdname(mddev), - mdname(mddev2)); + pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n", + desc, mdname(mddev), + mdname(mddev2)); } mddev_put(mddev2); if (signal_pending(current)) @@ -7969,12 +7928,10 @@ void md_do_sync(struct md_thread *thread) } } - printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); - printk(KERN_INFO "md: minimum _guaranteed_ speed:" - " %d KB/sec/disk.\n", speed_min(mddev)); - printk(KERN_INFO "md: using maximum available idle IO bandwidth " - "(but not more than %d KB/sec) for %s.\n", - speed_max(mddev), desc); + pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); + pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); + pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n", + speed_max(mddev), desc); is_mddev_idle(mddev, 1); /* this initializes IO event counters */ @@ -7991,16 +7948,15 @@ void md_do_sync(struct md_thread *thread) * Tune reconstruction: */ window = 32*(PAGE_SIZE/512); - printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n", - window/2, (unsigned long long)max_sectors/2); + pr_debug("md: using %dk window, over a total of %lluk.\n", + window/2, (unsigned long long)max_sectors/2); atomic_set(&mddev->recovery_active, 0); last_check = 0; if (j>2) { - printk(KERN_INFO - "md: resuming %s of %s from checkpoint.\n", - desc, mdname(mddev)); + pr_debug("md: resuming %s of %s from checkpoint.\n", + desc, mdname(mddev)); mddev->curr_resync = j; } else mddev->curr_resync = 3; /* no longer delayed */ @@ -8127,9 +8083,9 @@ void md_do_sync(struct md_thread *thread) } } } - printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc, - test_bit(MD_RECOVERY_INTR, &mddev->recovery) - ? "interrupted" : "done"); + pr_info("md: %s: %s %s.\n",mdname(mddev), desc, + test_bit(MD_RECOVERY_INTR, &mddev->recovery) + ? 
"interrupted" : "done"); /* * this also signals 'finished resyncing' to md_stop */ @@ -8149,9 +8105,8 @@ void md_do_sync(struct md_thread *thread) if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { if (mddev->curr_resync >= mddev->recovery_cp) { - printk(KERN_INFO - "md: checkpointing %s of %s.\n", - desc, mdname(mddev)); + pr_debug("md: checkpointing %s of %s.\n", + desc, mdname(mddev)); if (test_bit(MD_RECOVERY_ERROR, &mddev->recovery)) mddev->recovery_cp = @@ -8299,8 +8254,8 @@ static void md_start_sync(struct work_struct *ws) mddev, "resync"); if (!mddev->sync_thread) { - printk(KERN_ERR "%s: could not start resync thread...\n", - mdname(mddev)); + pr_warn("%s: could not start resync thread...\n", + mdname(mddev)); /* leave the spares where they are, it shouldn't hurt */ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); @@ -8350,8 +8305,8 @@ void md_check_recovery(struct mddev *mddev) if (signal_pending(current)) { if (mddev->pers->sync_request && !mddev->external) { - printk(KERN_INFO "md: %s in immediate safe mode\n", - mdname(mddev)); + pr_debug("md: %s in immediate safe mode\n", + mdname(mddev)); mddev->safemode = 2; } flush_signals(current); @@ -8749,7 +8704,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) rdev2->saved_raid_disk = role; ret = remove_and_add_spares(mddev, rdev2); pr_info("Activated spare: %s\n", - bdevname(rdev2->bdev,b)); + bdevname(rdev2->bdev,b)); /* wakeup mddev->thread here, so array could * perform resync with the new activated disk */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); @@ -8874,9 +8829,6 @@ void md_autodetect_dev(dev_t dev) mutex_lock(&detected_devices_mutex); list_add_tail(&node_detected_dev->list, &all_detected_devices); mutex_unlock(&detected_devices_mutex); - } else { - printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed" - ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev)); } } @@ -8890,7 +8842,7 @@ static void autostart_arrays(int part) i_scanned = 0; i_passed = 0; - printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); + pr_info("md: Autodetecting RAID arrays.\n"); mutex_lock(&detected_devices_mutex); while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { @@ -8915,8 +8867,7 @@ static void autostart_arrays(int part) } mutex_unlock(&detected_devices_mutex); - printk(KERN_INFO "md: Scanned %d and added %d devices.\n", - i_scanned, i_passed); + pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed); autorun_devices(part); } -- cgit v1.1 From ec0cc226854a79c0cf2969a7a86eddea5f8e8b09 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 2 Nov 2016 14:16:49 +1100 Subject: md/bitmap: change all printk() to pr_*() Follow err/warn distinction introduced in md.c Join multi-part strings into single string. 
Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/bitmap.c | 107 ++++++++++++++++++++++++---------------------------- 1 file changed, 50 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index cd3a065..23563f5 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -405,10 +405,10 @@ static int read_page(struct file *file, unsigned long index, ret = -EIO; out: if (ret) - printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n", - (int)PAGE_SIZE, - (unsigned long long)index << PAGE_SHIFT, - ret); + pr_err("md: bitmap read error: (%dB @ %llu): %d\n", + (int)PAGE_SIZE, + (unsigned long long)index << PAGE_SHIFT, + ret); return ret; } @@ -455,24 +455,24 @@ void bitmap_print_sb(struct bitmap *bitmap) if (!bitmap || !bitmap->storage.sb_page) return; sb = kmap_atomic(bitmap->storage.sb_page); - printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap)); - printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic)); - printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version)); - printk(KERN_DEBUG " uuid: %08x.%08x.%08x.%08x\n", - *(__u32 *)(sb->uuid+0), - *(__u32 *)(sb->uuid+4), - *(__u32 *)(sb->uuid+8), - *(__u32 *)(sb->uuid+12)); - printk(KERN_DEBUG " events: %llu\n", - (unsigned long long) le64_to_cpu(sb->events)); - printk(KERN_DEBUG "events cleared: %llu\n", - (unsigned long long) le64_to_cpu(sb->events_cleared)); - printk(KERN_DEBUG " state: %08x\n", le32_to_cpu(sb->state)); - printk(KERN_DEBUG " chunksize: %d B\n", le32_to_cpu(sb->chunksize)); - printk(KERN_DEBUG " daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep)); - printk(KERN_DEBUG " sync size: %llu KB\n", - (unsigned long long)le64_to_cpu(sb->sync_size)/2); - printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind)); + pr_debug("%s: bitmap file superblock:\n", bmname(bitmap)); + pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); + pr_debug(" version: %d\n", le32_to_cpu(sb->version)); + pr_debug(" uuid: %08x.%08x.%08x.%08x\n", + *(__u32 *)(sb->uuid+0), + *(__u32 *)(sb->uuid+4), + *(__u32 *)(sb->uuid+8), + *(__u32 *)(sb->uuid+12)); + pr_debug(" events: %llu\n", + (unsigned long long) le64_to_cpu(sb->events)); + pr_debug("events cleared: %llu\n", + (unsigned long long) le64_to_cpu(sb->events_cleared)); + pr_debug(" state: %08x\n", le32_to_cpu(sb->state)); + pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize)); + pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep)); + pr_debug(" sync size: %llu KB\n", + (unsigned long long)le64_to_cpu(sb->sync_size)/2); + pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind)); kunmap_atomic(sb); } @@ -506,14 +506,14 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) BUG_ON(!chunksize); if (!is_power_of_2(chunksize)) { kunmap_atomic(sb); - printk(KERN_ERR "bitmap chunksize not a power of 2\n"); + pr_warn("bitmap chunksize not a power of 2\n"); return -EINVAL; } sb->chunksize = cpu_to_le32(chunksize); daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep; if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) { - printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n"); + pr_debug("Choosing daemon_sleep default (5 sec)\n"); daemon_sleep = 5 * HZ; } sb->daemon_sleep = cpu_to_le32(daemon_sleep); @@ -584,7 +584,7 @@ re_read: /* to 4k blocks */ bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3)); - pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, + 
pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, bitmap->cluster_slot, offset); } @@ -634,7 +634,7 @@ re_read: else if (write_behind > COUNTER_MAX) reason = "write-behind limit out of range (0 - 16383)"; if (reason) { - printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n", + pr_warn("%s: invalid bitmap file superblock: %s\n", bmname(bitmap), reason); goto out; } @@ -648,18 +648,15 @@ re_read: * bitmap's UUID and event counter to the mddev's */ if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) { - printk(KERN_INFO - "%s: bitmap superblock UUID mismatch\n", - bmname(bitmap)); + pr_warn("%s: bitmap superblock UUID mismatch\n", + bmname(bitmap)); goto out; } events = le64_to_cpu(sb->events); if (!nodes && (events < bitmap->mddev->events)) { - printk(KERN_INFO - "%s: bitmap file is out of date (%llu < %llu) " - "-- forcing full recovery\n", - bmname(bitmap), events, - (unsigned long long) bitmap->mddev->events); + pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n", + bmname(bitmap), events, + (unsigned long long) bitmap->mddev->events); set_bit(BITMAP_STALE, &bitmap->flags); } } @@ -679,8 +676,8 @@ out: if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { err = md_setup_cluster(bitmap->mddev, nodes); if (err) { - pr_err("%s: Could not setup cluster service (%d)\n", - bmname(bitmap), err); + pr_warn("%s: Could not setup cluster service (%d)\n", + bmname(bitmap), err); goto out_no_sb; } bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev); @@ -847,15 +844,13 @@ static void bitmap_file_kick(struct bitmap *bitmap) ptr = file_path(bitmap->storage.file, path, PAGE_SIZE); - printk(KERN_ALERT - "%s: kicking failed bitmap file %s from array!\n", - bmname(bitmap), IS_ERR(ptr) ? "" : ptr); + pr_warn("%s: kicking failed bitmap file %s from array!\n", + bmname(bitmap), IS_ERR(ptr) ? 
"" : ptr); kfree(path); } else - printk(KERN_ALERT - "%s: disabling internal bitmap due to errors\n", - bmname(bitmap)); + pr_warn("%s: disabling internal bitmap due to errors\n", + bmname(bitmap)); } } @@ -1056,14 +1051,13 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) outofdate = test_bit(BITMAP_STALE, &bitmap->flags); if (outofdate) - printk(KERN_INFO "%s: bitmap file is out of date, doing full " - "recovery\n", bmname(bitmap)); + pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap)); if (file && i_size_read(file->f_mapping->host) < store->bytes) { - printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n", - bmname(bitmap), - (unsigned long) i_size_read(file->f_mapping->host), - store->bytes); + pr_warn("%s: bitmap file too short %lu < %lu\n", + bmname(bitmap), + (unsigned long) i_size_read(file->f_mapping->host), + store->bytes); goto err; } @@ -1137,16 +1131,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) offset = 0; } - printk(KERN_INFO "%s: bitmap initialized from disk: " - "read %lu pages, set %lu of %lu bits\n", - bmname(bitmap), store->file_pages, - bit_cnt, chunks); + pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n", + bmname(bitmap), store->file_pages, + bit_cnt, chunks); return 0; err: - printk(KERN_INFO "%s: bitmap initialisation failed: %d\n", - bmname(bitmap), ret); + pr_warn("%s: bitmap initialisation failed: %d\n", + bmname(bitmap), ret); return ret; } @@ -1825,8 +1818,8 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot) if (err) goto error; - printk(KERN_INFO "created bitmap (%lu pages) for device %s\n", - bitmap->counts.pages, bmname(bitmap)); + pr_debug("created bitmap (%lu pages) for device %s\n", + bitmap->counts.pages, bmname(bitmap)); err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0; if (err) @@ -2091,7 +2084,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT); blocks = old_counts.chunks << old_counts.chunkshift; - pr_err("Could not pre-allocate in-memory bitmap for cluster raid\n"); + pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n"); break; } else bitmap->counts.bp[page].count += 1; -- cgit v1.1 From a2e202afa6aecd0ca0f7863deca4267bf1346e3d Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 2 Nov 2016 14:16:49 +1100 Subject: md/linear: replace printk() with pr_*() Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/linear.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 86f5d43..9c7d4f5 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -101,8 +101,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) sector_t sectors; if (j < 0 || j >= raid_disks || disk->rdev) { - printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n", - mdname(mddev)); + pr_warn("md/linear:%s: disk numbering problem. Aborting!\n", + mdname(mddev)); goto out; } @@ -123,8 +123,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) discard_supported = true; } if (cnt != raid_disks) { - printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n", - mdname(mddev)); + pr_warn("md/linear:%s: not enough drives present. 
Aborting!\n", + mdname(mddev)); goto out; } @@ -262,9 +262,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) return; out_of_bounds: - printk(KERN_ERR - "md/linear:%s: make_request: Sector %llu out of bounds on " - "dev %s: %llu sectors, offset %llu\n", + pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n", mdname(mddev), (unsigned long long)bio->bi_iter.bi_sector, bdevname(tmp_dev->rdev->bdev, b), @@ -275,7 +273,6 @@ out_of_bounds: static void linear_status (struct seq_file *seq, struct mddev *mddev) { - seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2); } -- cgit v1.1 From 7279694da4af9e99e7ed85ed8883c740efff348b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 2 Nov 2016 14:16:49 +1100 Subject: md/multipath: replace printk() with pr_*() Also remove all messages about memory allocation failure. page_alloc() reports those. Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/multipath.c | 90 ++++++++++++++++++-------------------------------- 1 file changed, 33 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 673efbd..589b807 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -52,7 +52,7 @@ static int multipath_map (struct mpconf *conf) } rcu_read_unlock(); - printk(KERN_ERR "multipath_map(): no more operational IO paths?\n"); + pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n"); return (-1); } @@ -97,9 +97,9 @@ static void multipath_end_request(struct bio *bio) */ char b[BDEVNAME_SIZE]; md_error (mp_bh->mddev, rdev); - printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", - bdevname(rdev->bdev,b), - (unsigned long long)bio->bi_iter.bi_sector); + pr_info("multipath: %s: rescheduling sector %llu\n", + bdevname(rdev->bdev,b), + (unsigned long long)bio->bi_iter.bi_sector); multipath_reschedule_retry(mp_bh); } else multipath_end_bh_io(mp_bh, bio->bi_error); @@ -194,8 +194,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev) * first check if this is a queued request for a device * which has just failed. */ - printk(KERN_ALERT - "multipath: only one IO path left and IO error.\n"); + pr_warn("multipath: only one IO path left and IO error.\n"); /* leave it active... 
it's all we have */ return; } @@ -210,10 +209,8 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev) } set_bit(Faulty, &rdev->flags); set_bit(MD_CHANGE_DEVS, &mddev->flags); - printk(KERN_ALERT "multipath: IO failure on %s," - " disabling IO path.\n" - "multipath: Operation continuing" - " on %d IO paths.\n", + pr_err("multipath: IO failure on %s, disabling IO path.\n" + "multipath: Operation continuing on %d IO paths.\n", bdevname(rdev->bdev, b), conf->raid_disks - mddev->degraded); } @@ -223,21 +220,21 @@ static void print_multipath_conf (struct mpconf *conf) int i; struct multipath_info *tmp; - printk("MULTIPATH conf printout:\n"); + pr_debug("MULTIPATH conf printout:\n"); if (!conf) { - printk("(conf==NULL)\n"); + pr_debug("(conf==NULL)\n"); return; } - printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, - conf->raid_disks); + pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, + conf->raid_disks); for (i = 0; i < conf->raid_disks; i++) { char b[BDEVNAME_SIZE]; tmp = conf->multipaths + i; if (tmp->rdev) - printk(" disk%d, o:%d, dev:%s\n", - i,!test_bit(Faulty, &tmp->rdev->flags), - bdevname(tmp->rdev->bdev,b)); + pr_debug(" disk%d, o:%d, dev:%s\n", + i,!test_bit(Faulty, &tmp->rdev->flags), + bdevname(tmp->rdev->bdev,b)); } } @@ -292,8 +289,7 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev) if (rdev == p->rdev) { if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { - printk(KERN_ERR "hot-remove-disk, slot %d is identified" - " but is still operational!\n", number); + pr_warn("hot-remove-disk, slot %d is identified but is still operational!\n", number); err = -EBUSY; goto abort; } @@ -346,16 +342,14 @@ static void multipathd(struct md_thread *thread) bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; if ((mp_bh->path = multipath_map (conf))<0) { - printk(KERN_ALERT "multipath: %s: unrecoverable IO read" - " error for block %llu\n", - bdevname(bio->bi_bdev,b), - (unsigned long long)bio->bi_iter.bi_sector); + pr_err("multipath: %s: unrecoverable IO read error for block %llu\n", + bdevname(bio->bi_bdev,b), + (unsigned long long)bio->bi_iter.bi_sector); multipath_end_bh_io(mp_bh, -EIO); } else { - printk(KERN_ERR "multipath: %s: redirecting sector %llu" - " to another IO path\n", - bdevname(bio->bi_bdev,b), - (unsigned long long)bio->bi_iter.bi_sector); + pr_err("multipath: %s: redirecting sector %llu to another IO path\n", + bdevname(bio->bi_bdev,b), + (unsigned long long)bio->bi_iter.bi_sector); *bio = *(mp_bh->master_bio); bio->bi_iter.bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; @@ -389,8 +383,8 @@ static int multipath_run (struct mddev *mddev) return -EINVAL; if (mddev->level != LEVEL_MULTIPATH) { - printk("multipath: %s: raid level not set to multipath IO (%d)\n", - mdname(mddev), mddev->level); + pr_warn("multipath: %s: raid level not set to multipath IO (%d)\n", + mdname(mddev), mddev->level); goto out; } /* @@ -401,21 +395,13 @@ static int multipath_run (struct mddev *mddev) conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL); mddev->private = conf; - if (!conf) { - printk(KERN_ERR - "multipath: couldn't allocate memory for %s\n", - mdname(mddev)); + if (!conf) goto out; - } conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks, GFP_KERNEL); - if (!conf->multipaths) { - printk(KERN_ERR - "multipath: couldn't allocate memory for %s\n", - mdname(mddev)); + if (!conf->multipaths) goto out_free_conf; - } working_disks = 0; 
rdev_for_each(rdev, mddev) { @@ -439,7 +425,7 @@ static int multipath_run (struct mddev *mddev) INIT_LIST_HEAD(&conf->retry_list); if (!working_disks) { - printk(KERN_ERR "multipath: no operational IO paths for %s\n", + pr_warn("multipath: no operational IO paths for %s\n", mdname(mddev)); goto out_free_conf; } @@ -447,27 +433,17 @@ static int multipath_run (struct mddev *mddev) conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS, sizeof(struct multipath_bh)); - if (conf->pool == NULL) { - printk(KERN_ERR - "multipath: couldn't allocate memory for %s\n", - mdname(mddev)); + if (conf->pool == NULL) goto out_free_conf; - } - { - mddev->thread = md_register_thread(multipathd, mddev, - "multipath"); - if (!mddev->thread) { - printk(KERN_ERR "multipath: couldn't allocate thread" - " for %s\n", mdname(mddev)); - goto out_free_conf; - } - } + mddev->thread = md_register_thread(multipathd, mddev, + "multipath"); + if (!mddev->thread) + goto out_free_conf; - printk(KERN_INFO - "multipath: array %s active with %d out of %d IO paths\n", + pr_info("multipath: array %s active with %d out of %d IO paths\n", mdname(mddev), conf->raid_disks - mddev->degraded, - mddev->raid_disks); + mddev->raid_disks); /* * Ok, everything is just fine now */ -- cgit v1.1 From 766038846e875740cf4c20dfc5d5b292ba47360a Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 2 Nov 2016 14:16:50 +1100 Subject: md/raid0: replace printk() with pr_*() This makes md/raid0 much less verbose as the messages about the array geometry are now pr_debug() Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid0.c | 89 +++++++++++++++++++++++++++--------------------------- 1 file changed, 44 insertions(+), 45 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 258986a..b3ba77a 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -51,20 +51,21 @@ static void dump_zones(struct mddev *mddev) char b[BDEVNAME_SIZE]; struct r0conf *conf = mddev->private; int raid_disks = conf->strip_zone[0].nb_dev; - printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n", - mdname(mddev), - conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s"); + pr_debug("md: RAID0 configuration for %s - %d zone%s\n", + mdname(mddev), + conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s"); for (j = 0; j < conf->nr_strip_zones; j++) { - printk(KERN_INFO "md: zone%d=[", j); + char line[200]; + int len = 0; + for (k = 0; k < conf->strip_zone[j].nb_dev; k++) - printk(KERN_CONT "%s%s", k?"/":"", - bdevname(conf->devlist[j*raid_disks - + k]->bdev, b)); - printk(KERN_CONT "]\n"); + len += snprintf(line+len, 200-len, "%s%s", k?"/":"", + bdevname(conf->devlist[j*raid_disks + + k]->bdev, b)); + pr_debug("md: zone%d=[%s]\n", j, line); zone_size = conf->strip_zone[j].zone_end - zone_start; - printk(KERN_INFO " zone-offset=%10lluKB, " - "device-offset=%10lluKB, size=%10lluKB\n", + pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n", (unsigned long long)zone_start>>1, (unsigned long long)conf->strip_zone[j].dev_start>>1, (unsigned long long)zone_size>>1); @@ -142,9 +143,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) * chunk size is a multiple of that sector size */ if ((mddev->chunk_sectors << 9) % blksize) { - printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n", - mdname(mddev), - mddev->chunk_sectors << 9, blksize); + pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n", + mdname(mddev), + 
mddev->chunk_sectors << 9, blksize); err = -EINVAL; goto abort; } @@ -186,19 +187,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) } if (j < 0) { - printk(KERN_ERR - "md/raid0:%s: remove inactive devices before converting to RAID0\n", - mdname(mddev)); + pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n", + mdname(mddev)); goto abort; } if (j >= mddev->raid_disks) { - printk(KERN_ERR "md/raid0:%s: bad disk number %d - " - "aborting!\n", mdname(mddev), j); + pr_warn("md/raid0:%s: bad disk number %d - aborting!\n", + mdname(mddev), j); goto abort; } if (dev[j]) { - printk(KERN_ERR "md/raid0:%s: multiple devices for %d - " - "aborting!\n", mdname(mddev), j); + pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n", + mdname(mddev), j); goto abort; } dev[j] = rdev1; @@ -208,8 +208,8 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) cnt++; } if (cnt != mddev->raid_disks) { - printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - " - "aborting!\n", mdname(mddev), cnt, mddev->raid_disks); + pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n", + mdname(mddev), cnt, mddev->raid_disks); goto abort; } zone->nb_dev = cnt; @@ -357,8 +357,7 @@ static int raid0_run(struct mddev *mddev) int ret; if (mddev->chunk_sectors == 0) { - printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n", - mdname(mddev)); + pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev)); return -EINVAL; } if (md_check_no_bitmap(mddev)) @@ -399,9 +398,9 @@ static int raid0_run(struct mddev *mddev) /* calculate array device size */ md_set_array_sectors(mddev, raid0_size(mddev, 0, 0)); - printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n", - mdname(mddev), - (unsigned long long)mddev->array_sectors); + pr_debug("md/raid0:%s: md_size is %llu sectors.\n", + mdname(mddev), + (unsigned long long)mddev->array_sectors); if (mddev->queue) { /* calculate the max read-ahead size. @@ -509,17 +508,17 @@ static void *raid0_takeover_raid45(struct mddev *mddev) struct r0conf *priv_conf; if (mddev->degraded != 1) { - printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n", - mdname(mddev), - mddev->degraded); + pr_warn("md/raid0:%s: raid5 must be degraded! 
Degraded disks: %d\n", + mdname(mddev), + mddev->degraded); return ERR_PTR(-EINVAL); } rdev_for_each(rdev, mddev) { /* check slot number for a disk */ if (rdev->raid_disk == mddev->raid_disks-1) { - printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n", - mdname(mddev)); + pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n", + mdname(mddev)); return ERR_PTR(-EINVAL); } rdev->sectors = mddev->dev_sectors; @@ -549,19 +548,19 @@ static void *raid0_takeover_raid10(struct mddev *mddev) * - all mirrors must be already degraded */ if (mddev->layout != ((1 << 8) + 2)) { - printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n", - mdname(mddev), - mddev->layout); + pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n", + mdname(mddev), + mddev->layout); return ERR_PTR(-EINVAL); } if (mddev->raid_disks & 1) { - printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n", - mdname(mddev)); + pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n", + mdname(mddev)); return ERR_PTR(-EINVAL); } if (mddev->degraded != (mddev->raid_disks>>1)) { - printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n", - mdname(mddev)); + pr_warn("md/raid0:%s: All mirrors must be already degraded!\n", + mdname(mddev)); return ERR_PTR(-EINVAL); } @@ -588,7 +587,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev) * - (N - 1) mirror drives must be already faulty */ if ((mddev->raid_disks - 1) != mddev->degraded) { - printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n", + pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n", mdname(mddev)); return ERR_PTR(-EINVAL); } @@ -631,8 +630,8 @@ static void *raid0_takeover(struct mddev *mddev) */ if (mddev->bitmap) { - printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n", - mdname(mddev)); + pr_warn("md/raid0: %s: cannot takeover array with bitmap\n", + mdname(mddev)); return ERR_PTR(-EBUSY); } if (mddev->level == 4) @@ -642,8 +641,8 @@ static void *raid0_takeover(struct mddev *mddev) if (mddev->layout == ALGORITHM_PARITY_N) return raid0_takeover_raid45(mddev); - printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n", - mdname(mddev), ALGORITHM_PARITY_N); + pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n", + mdname(mddev), ALGORITHM_PARITY_N); } if (mddev->level == 10) @@ -652,7 +651,7 @@ static void *raid0_takeover(struct mddev *mddev) if (mddev->level == 1) return raid0_takeover_raid1(mddev); - printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n", + pr_warn("Takeover from raid%i to raid0 not supported\n", mddev->level); return ERR_PTR(-EINVAL); -- cgit v1.1 From 1d41c216febe43150e99b24fde5eda9e8097dccc Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 2 Nov 2016 14:16:50 +1100 Subject: md/raid1: change printk() to pr_*() Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid1.c | 99 ++++++++++++++++++++++-------------------------------- 1 file changed, 41 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 29e2df5..15f0b55 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -347,13 +347,10 @@ static void raid1_end_read_request(struct bio *bio) * oops, read error: */ char b[BDEVNAME_SIZE]; - printk_ratelimited( - KERN_ERR "md/raid1:%s: %s: " - "rescheduling sector %llu\n", - mdname(conf->mddev), - bdevname(rdev->bdev, - b), - (unsigned long long)r1_bio->sector); + 
pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n", + mdname(conf->mddev), + bdevname(rdev->bdev, b), + (unsigned long long)r1_bio->sector); set_bit(R1BIO_ReadError, &r1_bio->state); reschedule_retry(r1_bio); /* don't drop the reference on read_disk yet */ @@ -1461,34 +1458,33 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_mask_bits(&mddev->flags, 0, BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); - printk(KERN_ALERT - "md/raid1:%s: Disk failure on %s, disabling device.\n" - "md/raid1:%s: Operation continuing on %d devices.\n", - mdname(mddev), bdevname(rdev->bdev, b), - mdname(mddev), conf->raid_disks - mddev->degraded); + pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n" + "md/raid1:%s: Operation continuing on %d devices.\n", + mdname(mddev), bdevname(rdev->bdev, b), + mdname(mddev), conf->raid_disks - mddev->degraded); } static void print_conf(struct r1conf *conf) { int i; - printk(KERN_DEBUG "RAID1 conf printout:\n"); + pr_debug("RAID1 conf printout:\n"); if (!conf) { - printk(KERN_DEBUG "(!conf)\n"); + pr_debug("(!conf)\n"); return; } - printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, - conf->raid_disks); + pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, + conf->raid_disks); rcu_read_lock(); for (i = 0; i < conf->raid_disks; i++) { char b[BDEVNAME_SIZE]; struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); if (rdev) - printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n", - i, !test_bit(In_sync, &rdev->flags), - !test_bit(Faulty, &rdev->flags), - bdevname(rdev->bdev,b)); + pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n", + i, !test_bit(In_sync, &rdev->flags), + !test_bit(Faulty, &rdev->flags), + bdevname(rdev->bdev,b)); } rcu_read_unlock(); } @@ -1825,11 +1821,10 @@ static int fix_sync_read_error(struct r1bio *r1_bio) * work just disable and interrupt the recovery. * Don't fail devices as that won't really help. 
*/ - printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error" - " for block %llu\n", - mdname(mddev), - bdevname(bio->bi_bdev, b), - (unsigned long long)r1_bio->sector); + pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", + mdname(mddev), + bdevname(bio->bi_bdev, b), + (unsigned long long)r1_bio->sector); for (d = 0; d < conf->raid_disks * 2; d++) { rdev = conf->mirrors[d].rdev; if (!rdev || test_bit(Faulty, &rdev->flags)) @@ -2122,13 +2117,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk, if (r1_sync_page_io(rdev, sect, s, conf->tmppage, READ)) { atomic_add(s, &rdev->corrected_errors); - printk(KERN_INFO - "md/raid1:%s: read error corrected " - "(%d sectors at %llu on %s)\n", - mdname(mddev), s, - (unsigned long long)(sect + - rdev->data_offset), - bdevname(rdev->bdev, b)); + pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n", + mdname(mddev), s, + (unsigned long long)(sect + + rdev->data_offset), + bdevname(rdev->bdev, b)); } rdev_dec_pending(rdev, mddev); } else @@ -2317,9 +2310,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) read_more: disk = read_balance(conf, r1_bio, &max_sectors); if (disk == -1) { - printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O" - " read error for block %llu\n", - mdname(mddev), b, (unsigned long long)r1_bio->sector); + pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", + mdname(mddev), b, (unsigned long long)r1_bio->sector); raid_end_bio_io(r1_bio); } else { const unsigned long do_sync @@ -2330,12 +2322,10 @@ read_more: max_sectors); r1_bio->bios[r1_bio->read_disk] = bio; rdev = conf->mirrors[disk].rdev; - printk_ratelimited(KERN_ERR - "md/raid1:%s: redirecting sector %llu" - " to other mirror: %s\n", - mdname(mddev), - (unsigned long long)r1_bio->sector, - bdevname(rdev->bdev, b)); + pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n", + mdname(mddev), + (unsigned long long)r1_bio->sector, + bdevname(rdev->bdev, b)); bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; bio->bi_bdev = rdev->bdev; bio->bi_end_io = raid1_end_read_request; @@ -2875,12 +2865,8 @@ static struct r1conf *setup_conf(struct mddev *mddev) err = -ENOMEM; conf->thread = md_register_thread(raid1d, mddev, "raid1"); - if (!conf->thread) { - printk(KERN_ERR - "md/raid1:%s: couldn't allocate thread\n", - mdname(mddev)); + if (!conf->thread) goto abort; - } return conf; @@ -2905,13 +2891,13 @@ static int raid1_run(struct mddev *mddev) bool discard_supported = false; if (mddev->level != 1) { - printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n", - mdname(mddev), mddev->level); + pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n", + mdname(mddev), mddev->level); return -EIO; } if (mddev->reshape_position != MaxSector) { - printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n", - mdname(mddev)); + pr_warn("md/raid1:%s: reshape_position set but not supported\n", + mdname(mddev)); return -EIO; } /* @@ -2950,11 +2936,9 @@ static int raid1_run(struct mddev *mddev) mddev->recovery_cp = MaxSector; if (mddev->recovery_cp != MaxSector) - printk(KERN_NOTICE "md/raid1:%s: not clean" - " -- starting background reconstruction\n", - mdname(mddev)); - printk(KERN_INFO - "md/raid1:%s: active with %d out of %d mirrors\n", + pr_info("md/raid1:%s: not clean -- starting background reconstruction\n", + mdname(mddev)); + pr_info("md/raid1:%s: active with %d out of %d mirrors\n", 
mdname(mddev), mddev->raid_disks - mddev->degraded, mddev->raid_disks); @@ -3107,9 +3091,8 @@ static int raid1_reshape(struct mddev *mddev) rdev->raid_disk = d2; sysfs_unlink_rdev(mddev, rdev); if (sysfs_link_rdev(mddev, rdev)) - printk(KERN_WARNING - "md/raid1:%s: cannot register rd%d\n", - mdname(mddev), rdev->raid_disk); + pr_warn("md/raid1:%s: cannot register rd%d\n", + mdname(mddev), rdev->raid_disk); } if (rdev) newmirrors[d2++].rdev = rdev; -- cgit v1.1 From 08464e0926402a95507a274d335e57ff6bc55ecf Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 2 Nov 2016 14:16:50 +1100 Subject: md/raid10: change printk() to pr_*() Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid10.c | 141 +++++++++++++++++++++------------------------------- 1 file changed, 56 insertions(+), 85 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 39fddda..25e3fd7 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -404,8 +404,7 @@ static void raid10_end_read_request(struct bio *bio) * oops, read error - keep the refcount on the rdev */ char b[BDEVNAME_SIZE]; - printk_ratelimited(KERN_ERR - "md/raid10:%s: %s: rescheduling sector %llu\n", + pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n", mdname(conf->mddev), bdevname(rdev->bdev, b), (unsigned long long)r10_bio->sector); @@ -1589,11 +1588,10 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) set_mask_bits(&mddev->flags, 0, BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); spin_unlock_irqrestore(&conf->device_lock, flags); - printk(KERN_ALERT - "md/raid10:%s: Disk failure on %s, disabling device.\n" - "md/raid10:%s: Operation continuing on %d devices.\n", - mdname(mddev), bdevname(rdev->bdev, b), - mdname(mddev), conf->geo.raid_disks - mddev->degraded); + pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n" + "md/raid10:%s: Operation continuing on %d devices.\n", + mdname(mddev), bdevname(rdev->bdev, b), + mdname(mddev), conf->geo.raid_disks - mddev->degraded); } static void print_conf(struct r10conf *conf) @@ -1601,13 +1599,13 @@ static void print_conf(struct r10conf *conf) int i; struct md_rdev *rdev; - printk(KERN_DEBUG "RAID10 conf printout:\n"); + pr_debug("RAID10 conf printout:\n"); if (!conf) { - printk(KERN_DEBUG "(!conf)\n"); + pr_debug("(!conf)\n"); return; } - printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, - conf->geo.raid_disks); + pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, + conf->geo.raid_disks); /* This is only called with ->reconfix_mutex held, so * rcu protection of rdev is not needed */ @@ -1615,10 +1613,10 @@ static void print_conf(struct r10conf *conf) char b[BDEVNAME_SIZE]; rdev = conf->mirrors[i].rdev; if (rdev) - printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n", - i, !test_bit(In_sync, &rdev->flags), - !test_bit(Faulty, &rdev->flags), - bdevname(rdev->bdev,b)); + pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n", + i, !test_bit(In_sync, &rdev->flags), + !test_bit(Faulty, &rdev->flags), + bdevname(rdev->bdev,b)); } } @@ -2109,10 +2107,8 @@ static void fix_recovery_read_error(struct r10bio *r10_bio) ok = rdev_set_badblocks(rdev2, addr, s, 0); if (!ok) { /* just abort the recovery */ - printk(KERN_NOTICE - "md/raid10:%s: recovery aborted" - " due to read error\n", - mdname(mddev)); + pr_notice("md/raid10:%s: recovery aborted due to read error\n", + mdname(mddev)); conf->mirrors[dw].recovery_disabled = mddev->recovery_disabled; @@ -2259,14 +2255,11 @@ static 
void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 char b[BDEVNAME_SIZE]; bdevname(rdev->bdev, b); - printk(KERN_NOTICE - "md/raid10:%s: %s: Raid device exceeded " - "read_error threshold [cur %d:max %d]\n", - mdname(mddev), b, - atomic_read(&rdev->read_errors), max_read_errors); - printk(KERN_NOTICE - "md/raid10:%s: %s: Failing raid device\n", - mdname(mddev), b); + pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n", + mdname(mddev), b, + atomic_read(&rdev->read_errors), max_read_errors); + pr_notice("md/raid10:%s: %s: Failing raid device\n", + mdname(mddev), b); md_error(mddev, rdev); r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; return; @@ -2356,20 +2349,16 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 s, conf->tmppage, WRITE) == 0) { /* Well, this device is dead */ - printk(KERN_NOTICE - "md/raid10:%s: read correction " - "write failed" - " (%d sectors at %llu on %s)\n", - mdname(mddev), s, - (unsigned long long)( - sect + - choose_data_offset(r10_bio, - rdev)), - bdevname(rdev->bdev, b)); - printk(KERN_NOTICE "md/raid10:%s: %s: failing " - "drive\n", - mdname(mddev), - bdevname(rdev->bdev, b)); + pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n", + mdname(mddev), s, + (unsigned long long)( + sect + + choose_data_offset(r10_bio, + rdev)), + bdevname(rdev->bdev, b)); + pr_notice("md/raid10:%s: %s: failing drive\n", + mdname(mddev), + bdevname(rdev->bdev, b)); } rdev_dec_pending(rdev, mddev); rcu_read_lock(); @@ -2397,24 +2386,18 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 READ)) { case 0: /* Well, this device is dead */ - printk(KERN_NOTICE - "md/raid10:%s: unable to read back " - "corrected sectors" - " (%d sectors at %llu on %s)\n", + pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n", mdname(mddev), s, (unsigned long long)( sect + choose_data_offset(r10_bio, rdev)), bdevname(rdev->bdev, b)); - printk(KERN_NOTICE "md/raid10:%s: %s: failing " - "drive\n", + pr_notice("md/raid10:%s: %s: failing drive\n", mdname(mddev), bdevname(rdev->bdev, b)); break; case 1: - printk(KERN_INFO - "md/raid10:%s: read error corrected" - " (%d sectors at %llu on %s)\n", + pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n", mdname(mddev), s, (unsigned long long)( sect + @@ -2529,23 +2512,19 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) read_more: rdev = read_balance(conf, r10_bio, &max_sectors); if (rdev == NULL) { - printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O" - " read error for block %llu\n", - mdname(mddev), b, - (unsigned long long)r10_bio->sector); + pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n", + mdname(mddev), b, + (unsigned long long)r10_bio->sector); raid_end_bio_io(r10_bio); return; } do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC); slot = r10_bio->read_slot; - printk_ratelimited( - KERN_ERR - "md/raid10:%s: %s: redirecting " - "sector %llu to another mirror\n", - mdname(mddev), - bdevname(rdev->bdev, b), - (unsigned long long)r10_bio->sector); + pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n", + mdname(mddev), + bdevname(rdev->bdev, b), + (unsigned long long)r10_bio->sector); bio = bio_clone_mddev(r10_bio->master_bio, GFP_NOIO, mddev); bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); @@ -3160,8 +3139,7 @@ static 
sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, if (!any_working) { if (!test_and_set_bit(MD_RECOVERY_INTR, &mddev->recovery)) - printk(KERN_INFO "md/raid10:%s: insufficient " - "working devices for recovery.\n", + pr_warn("md/raid10:%s: insufficient working devices for recovery.\n", mdname(mddev)); mirror->recovery_disabled = mddev->recovery_disabled; @@ -3489,15 +3467,14 @@ static struct r10conf *setup_conf(struct mddev *mddev) copies = setup_geo(&geo, mddev, geo_new); if (copies == -2) { - printk(KERN_ERR "md/raid10:%s: chunk size must be " - "at least PAGE_SIZE(%ld) and be a power of 2.\n", - mdname(mddev), PAGE_SIZE); + pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n", + mdname(mddev), PAGE_SIZE); goto out; } if (copies < 2 || copies > mddev->raid_disks) { - printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n", - mdname(mddev), mddev->new_layout); + pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n", + mdname(mddev), mddev->new_layout); goto out; } @@ -3557,9 +3534,6 @@ static struct r10conf *setup_conf(struct mddev *mddev) return conf; out: - if (err == -ENOMEM) - printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", - mdname(mddev)); if (conf) { mempool_destroy(conf->r10bio_pool); kfree(conf->mirrors); @@ -3656,7 +3630,7 @@ static int raid10_run(struct mddev *mddev) } /* need to check that every block has at least one working mirror */ if (!enough(conf, -1)) { - printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", + pr_err("md/raid10:%s: not enough operational mirrors.\n", mdname(mddev)); goto out_free_conf; } @@ -3698,11 +3672,9 @@ static int raid10_run(struct mddev *mddev) } if (mddev->recovery_cp != MaxSector) - printk(KERN_NOTICE "md/raid10:%s: not clean" - " -- starting background reconstruction\n", - mdname(mddev)); - printk(KERN_INFO - "md/raid10:%s: active with %d out of %d devices\n", + pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n", + mdname(mddev)); + pr_info("md/raid10:%s: active with %d out of %d devices\n", mdname(mddev), conf->geo.raid_disks - mddev->degraded, conf->geo.raid_disks); /* @@ -3739,7 +3711,7 @@ static int raid10_run(struct mddev *mddev) if (max(before_length, after_length) > min_offset_diff) { /* This cannot work */ - printk("md/raid10: offset difference not enough to continue reshape\n"); + pr_warn("md/raid10: offset difference not enough to continue reshape\n"); goto out_free_conf; } conf->offset_diff = min_offset_diff; @@ -3846,8 +3818,8 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) struct r10conf *conf; if (mddev->degraded > 0) { - printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n", - mdname(mddev)); + pr_warn("md/raid10:%s: Error: degraded raid0!\n", + mdname(mddev)); return ERR_PTR(-EINVAL); } sector_div(size, devs); @@ -3887,9 +3859,8 @@ static void *raid10_takeover(struct mddev *mddev) /* for raid0 takeover only one zone is supported */ raid0_conf = mddev->private; if (raid0_conf->nr_strip_zones > 1) { - printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0" - " with more than one zone.\n", - mdname(mddev)); + pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n", + mdname(mddev)); return ERR_PTR(-EINVAL); } return raid10_takeover_raid0(mddev, @@ -4078,8 +4049,8 @@ static int raid10_start_reshape(struct mddev *mddev) sector_t size = raid10_size(mddev, 0, 0); if (size < mddev->array_sectors) { spin_unlock_irq(&conf->device_lock); - printk(KERN_ERR 
"md/raid10:%s: array size must be reduce before number of disks\n", - mdname(mddev)); + pr_warn("md/raid10:%s: array size must be reduce before number of disks\n", + mdname(mddev)); return -EINVAL; } mddev->resync_max_sectors = size; -- cgit v1.1 From cc6167b4f3b3caabe973ca12612ac7d60aae0cfc Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 2 Nov 2016 14:16:50 +1100 Subject: md/raid5: change printk() to pr_*() Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid5.c | 207 ++++++++++++++++++++++------------------------------- 1 file changed, 86 insertions(+), 121 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index b6e4670..df88656 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -541,7 +541,7 @@ retry: if (dev->toread || dev->read || dev->towrite || dev->written || test_bit(R5_LOCKED, &dev->flags)) { - printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", + pr_err("sector=%llx i=%d %p %p %p %p %d\n", (unsigned long long)sh->sector, i, dev->toread, dev->read, dev->towrite, dev->written, test_bit(R5_LOCKED, &dev->flags)); @@ -2347,10 +2347,8 @@ static void raid5_end_read_request(struct bio * bi) * replacement device. We just fail those on * any error */ - printk_ratelimited( - KERN_INFO - "md/raid:%s: read error corrected" - " (%lu sectors at %llu on %s)\n", + pr_info_ratelimited( + "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n", mdname(conf->mddev), STRIPE_SECTORS, (unsigned long long)s, bdevname(rdev->bdev, b)); @@ -2370,36 +2368,29 @@ static void raid5_end_read_request(struct bio * bi) clear_bit(R5_UPTODATE, &sh->dev[i].flags); atomic_inc(&rdev->read_errors); if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) - printk_ratelimited( - KERN_WARNING - "md/raid:%s: read error on replacement device " - "(sector %llu on %s).\n", + pr_warn_ratelimited( + "md/raid:%s: read error on replacement device (sector %llu on %s).\n", mdname(conf->mddev), (unsigned long long)s, bdn); else if (conf->mddev->degraded >= conf->max_degraded) { set_bad = 1; - printk_ratelimited( - KERN_WARNING - "md/raid:%s: read error not correctable " - "(sector %llu on %s).\n", + pr_warn_ratelimited( + "md/raid:%s: read error not correctable (sector %llu on %s).\n", mdname(conf->mddev), (unsigned long long)s, bdn); } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { /* Oh, no!!! */ set_bad = 1; - printk_ratelimited( - KERN_WARNING - "md/raid:%s: read error NOT corrected!! " - "(sector %llu on %s).\n", + pr_warn_ratelimited( + "md/raid:%s: read error NOT corrected!! 
(sector %llu on %s).\n", mdname(conf->mddev), (unsigned long long)s, bdn); } else if (atomic_read(&rdev->read_errors) > conf->max_nr_stripes) - printk(KERN_WARNING - "md/raid:%s: Too many read errors, failing device %s.\n", + pr_warn("md/raid:%s: Too many read errors, failing device %s.\n", mdname(conf->mddev), bdn); else retry = 1; @@ -2533,13 +2524,12 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) set_bit(Faulty, &rdev->flags); set_mask_bits(&mddev->flags, 0, BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); - printk(KERN_ALERT - "md/raid:%s: Disk failure on %s, disabling device.\n" - "md/raid:%s: Operation continuing on %d devices.\n", - mdname(mddev), - bdevname(rdev->bdev, b), - mdname(mddev), - conf->raid_disks - mddev->degraded); + pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" + "md/raid:%s: Operation continuing on %d devices.\n", + mdname(mddev), + bdevname(rdev->bdev, b), + mdname(mddev), + conf->raid_disks - mddev->degraded); } /* @@ -2861,8 +2851,8 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) previous, &dummy1, &sh2); if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx || sh2.qd_idx != sh->qd_idx) { - printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n", - mdname(conf->mddev)); + pr_warn("md/raid:%s: compute_blocknr: map not correct\n", + mdname(conf->mddev)); return 0; } return r_sector; @@ -3782,7 +3772,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, case check_state_compute_run: break; default: - printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", + pr_err("%s: unknown check_state: %d sector: %llu\n", __func__, sh->check_state, (unsigned long long) sh->sector); BUG(); @@ -3946,9 +3936,9 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, case check_state_compute_run: break; default: - printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", - __func__, sh->check_state, - (unsigned long long) sh->sector); + pr_warn("%s: unknown check_state: %d sector: %llu\n", + __func__, sh->check_state, + (unsigned long long) sh->sector); BUG(); } } @@ -6385,8 +6375,8 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); if (alloc_scratch_buffer(conf, percpu)) { - pr_err("%s: failed memory allocation for cpu%u\n", - __func__, cpu); + pr_warn("%s: failed memory allocation for cpu%u\n", + __func__, cpu); return -ENOMEM; } return 0; @@ -6456,29 +6446,29 @@ static struct r5conf *setup_conf(struct mddev *mddev) if (mddev->new_level != 5 && mddev->new_level != 4 && mddev->new_level != 6) { - printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n", - mdname(mddev), mddev->new_level); + pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n", + mdname(mddev), mddev->new_level); return ERR_PTR(-EIO); } if ((mddev->new_level == 5 && !algorithm_valid_raid5(mddev->new_layout)) || (mddev->new_level == 6 && !algorithm_valid_raid6(mddev->new_layout))) { - printk(KERN_ERR "md/raid:%s: layout %d not supported\n", - mdname(mddev), mddev->new_layout); + pr_warn("md/raid:%s: layout %d not supported\n", + mdname(mddev), mddev->new_layout); return ERR_PTR(-EIO); } if (mddev->new_level == 6 && mddev->raid_disks < 4) { - printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n", - mdname(mddev), mddev->raid_disks); + pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n", + mdname(mddev), 
mddev->raid_disks); return ERR_PTR(-EINVAL); } if (!mddev->new_chunk_sectors || (mddev->new_chunk_sectors << 9) % PAGE_SIZE || !is_power_of_2(mddev->new_chunk_sectors)) { - printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n", - mdname(mddev), mddev->new_chunk_sectors << 9); + pr_warn("md/raid:%s: invalid chunk size %d\n", + mdname(mddev), mddev->new_chunk_sectors << 9); return ERR_PTR(-EINVAL); } @@ -6569,9 +6559,8 @@ static struct r5conf *setup_conf(struct mddev *mddev) if (test_bit(In_sync, &rdev->flags)) { char b[BDEVNAME_SIZE]; - printk(KERN_INFO "md/raid:%s: device %s operational as raid" - " disk %d\n", - mdname(mddev), bdevname(rdev->bdev, b), raid_disk); + pr_info("md/raid:%s: device %s operational as raid disk %d\n", + mdname(mddev), bdevname(rdev->bdev, b), raid_disk); } else if (rdev->saved_raid_disk != raid_disk) /* Cannot rely on bitmap to complete recovery */ conf->fullsync = 1; @@ -6605,21 +6594,18 @@ static struct r5conf *setup_conf(struct mddev *mddev) ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4); conf->min_nr_stripes = max(NR_STRIPES, stripes); if (conf->min_nr_stripes != NR_STRIPES) - printk(KERN_INFO - "md/raid:%s: force stripe size %d for reshape\n", + pr_info("md/raid:%s: force stripe size %d for reshape\n", mdname(mddev), conf->min_nr_stripes); } memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); if (grow_stripes(conf, conf->min_nr_stripes)) { - printk(KERN_ERR - "md/raid:%s: couldn't allocate %dkB for buffers\n", - mdname(mddev), memory); + pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n", + mdname(mddev), memory); goto abort; } else - printk(KERN_INFO "md/raid:%s: allocated %dkB\n", - mdname(mddev), memory); + pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory); /* * Losing a stripe head costs more than the time to refill it, * it reduces the queue depth and so can hurt throughput. 
@@ -6631,18 +6617,16 @@ static struct r5conf *setup_conf(struct mddev *mddev) conf->shrinker.batch = 128; conf->shrinker.flags = 0; if (register_shrinker(&conf->shrinker)) { - printk(KERN_ERR - "md/raid:%s: couldn't register shrinker.\n", - mdname(mddev)); + pr_warn("md/raid:%s: couldn't register shrinker.\n", + mdname(mddev)); goto abort; } sprintf(pers_name, "raid%d", mddev->new_level); conf->thread = md_register_thread(raid5d, mddev, pers_name); if (!conf->thread) { - printk(KERN_ERR - "md/raid:%s: couldn't allocate thread.\n", - mdname(mddev)); + pr_warn("md/raid:%s: couldn't allocate thread.\n", + mdname(mddev)); goto abort; } @@ -6695,9 +6679,8 @@ static int raid5_run(struct mddev *mddev) int first = 1; if (mddev->recovery_cp != MaxSector) - printk(KERN_NOTICE "md/raid:%s: not clean" - " -- starting background reconstruction\n", - mdname(mddev)); + pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", + mdname(mddev)); rdev_for_each(rdev, mddev) { long long diff; @@ -6740,15 +6723,14 @@ static int raid5_run(struct mddev *mddev) int new_data_disks; if (journal_dev) { - printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n", - mdname(mddev)); + pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n", + mdname(mddev)); return -EINVAL; } if (mddev->new_level != mddev->level) { - printk(KERN_ERR "md/raid:%s: unsupported reshape " - "required - aborting.\n", - mdname(mddev)); + pr_warn("md/raid:%s: unsupported reshape required - aborting.\n", + mdname(mddev)); return -EINVAL; } old_disks = mddev->raid_disks - mddev->delta_disks; @@ -6763,8 +6745,8 @@ static int raid5_run(struct mddev *mddev) chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); new_data_disks = mddev->raid_disks - max_degraded; if (sector_div(here_new, chunk_sectors * new_data_disks)) { - printk(KERN_ERR "md/raid:%s: reshape_position not " - "on a stripe boundary\n", mdname(mddev)); + pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n", + mdname(mddev)); return -EINVAL; } reshape_offset = here_new * chunk_sectors; @@ -6785,10 +6767,8 @@ static int raid5_run(struct mddev *mddev) abs(min_offset_diff) >= mddev->new_chunk_sectors) /* not really in-place - so OK */; else if (mddev->ro == 0) { - printk(KERN_ERR "md/raid:%s: in-place reshape " - "must be started in read-only mode " - "- aborting\n", - mdname(mddev)); + pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n", + mdname(mddev)); return -EINVAL; } } else if (mddev->reshape_backwards @@ -6797,13 +6777,11 @@ static int raid5_run(struct mddev *mddev) : (here_new * chunk_sectors >= here_old * chunk_sectors + (-min_offset_diff))) { /* Reading from the same stripe as writing to - bad */ - printk(KERN_ERR "md/raid:%s: reshape_position too early for " - "auto-recovery - aborting.\n", - mdname(mddev)); + pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n", + mdname(mddev)); return -EINVAL; } - printk(KERN_INFO "md/raid:%s: reshape will continue\n", - mdname(mddev)); + pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev)); /* OK, we should be able to continue; */ } else { BUG_ON(mddev->level != mddev->new_level); @@ -6822,8 +6800,8 @@ static int raid5_run(struct mddev *mddev) if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { if (!journal_dev) { - pr_err("md/raid:%s: journal disk is missing, force array readonly\n", - mdname(mddev)); + pr_warn("md/raid:%s: journal disk is missing, force array readonly\n", + mdname(mddev)); 
mddev->ro = 1; set_disk_ro(mddev->gendisk, 1); } else if (mddev->recovery_cp == MaxSector) @@ -6850,8 +6828,7 @@ static int raid5_run(struct mddev *mddev) if (conf->disks[i].replacement && conf->reshape_progress != MaxSector) { /* replacements and reshape simply do not mix. */ - printk(KERN_ERR "md: cannot handle concurrent " - "replacement and reshape.\n"); + pr_warn("md: cannot handle concurrent replacement and reshape.\n"); goto abort; } if (test_bit(In_sync, &rdev->flags)) { @@ -6893,8 +6870,7 @@ static int raid5_run(struct mddev *mddev) mddev->degraded = calc_degraded(conf); if (has_failed(conf)) { - printk(KERN_ERR "md/raid:%s: not enough operational devices" - " (%d/%d failed)\n", + pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", mdname(mddev), mddev->degraded, conf->raid_disks); goto abort; } @@ -6906,29 +6882,19 @@ static int raid5_run(struct mddev *mddev) if (mddev->degraded > dirty_parity_disks && mddev->recovery_cp != MaxSector) { if (mddev->ok_start_degraded) - printk(KERN_WARNING - "md/raid:%s: starting dirty degraded array" - " - data corruption possible.\n", - mdname(mddev)); + pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n", + mdname(mddev)); else { - printk(KERN_ERR - "md/raid:%s: cannot start dirty degraded array.\n", - mdname(mddev)); + pr_crit("md/raid:%s: cannot start dirty degraded array.\n", + mdname(mddev)); goto abort; } } - if (mddev->degraded == 0) - printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d" - " devices, algorithm %d\n", mdname(mddev), conf->level, - mddev->raid_disks-mddev->degraded, mddev->raid_disks, - mddev->new_layout); - else - printk(KERN_ALERT "md/raid:%s: raid level %d active with %d" - " out of %d devices, algorithm %d\n", - mdname(mddev), conf->level, - mddev->raid_disks - mddev->degraded, - mddev->raid_disks, mddev->new_layout); + pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n", + mdname(mddev), conf->level, + mddev->raid_disks-mddev->degraded, mddev->raid_disks, + mddev->new_layout); print_raid5_conf(conf); @@ -6948,9 +6914,8 @@ static int raid5_run(struct mddev *mddev) mddev->to_remove = NULL; else if (mddev->kobj.sd && sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) - printk(KERN_WARNING - "raid5: failed to create sysfs attributes for %s\n", - mdname(mddev)); + pr_warn("raid5: failed to create sysfs attributes for %s\n", + mdname(mddev)); md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); if (mddev->queue) { @@ -7038,8 +7003,8 @@ static int raid5_run(struct mddev *mddev) if (journal_dev) { char b[BDEVNAME_SIZE]; - printk(KERN_INFO"md/raid:%s: using device %s as journal\n", - mdname(mddev), bdevname(journal_dev->bdev, b)); + pr_debug("md/raid:%s: using device %s as journal\n", + mdname(mddev), bdevname(journal_dev->bdev, b)); r5l_init_log(conf, journal_dev); } @@ -7049,7 +7014,7 @@ abort: print_raid5_conf(conf); free_conf(conf); mddev->private = NULL; - printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); + pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); return -EIO; } @@ -7083,12 +7048,12 @@ static void print_raid5_conf (struct r5conf *conf) int i; struct disk_info *tmp; - printk(KERN_DEBUG "RAID conf printout:\n"); + pr_debug("RAID conf printout:\n"); if (!conf) { - printk("(conf==NULL)\n"); + pr_debug("(conf==NULL)\n"); return; } - printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level, + pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level, conf->raid_disks, 
conf->raid_disks - conf->mddev->degraded); @@ -7096,7 +7061,7 @@ static void print_raid5_conf (struct r5conf *conf) char b[BDEVNAME_SIZE]; tmp = conf->disks + i; if (tmp->rdev) - printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n", + pr_debug(" disk %d, o:%d, dev:%s\n", i, !test_bit(Faulty, &tmp->rdev->flags), bdevname(tmp->rdev->bdev, b)); } @@ -7244,8 +7209,8 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) * write requests running. We should be safe */ r5l_init_log(conf, rdev); - printk(KERN_INFO"md/raid:%s: using device %s as journal\n", - mdname(mddev), bdevname(rdev->bdev, b)); + pr_debug("md/raid:%s: using device %s as journal\n", + mdname(mddev), bdevname(rdev->bdev, b)); return 0; } if (mddev->recovery_disabled == conf->recovery_disabled) @@ -7349,10 +7314,10 @@ static int check_stripe_cache(struct mddev *mddev) > conf->min_nr_stripes || ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 > conf->min_nr_stripes) { - printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n", - mdname(mddev), - ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) - / STRIPE_SIZE)*4); + pr_warn("md/raid:%s: reshape: not enough stripes. Needed %lu\n", + mdname(mddev), + ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) + / STRIPE_SIZE)*4); return 0; } return 1; @@ -7433,8 +7398,8 @@ static int raid5_start_reshape(struct mddev *mddev) */ if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) < mddev->array_sectors) { - printk(KERN_ERR "md/raid:%s: array size must be reduced " - "before number of disks\n", mdname(mddev)); + pr_warn("md/raid:%s: array size must be reduced before number of disks\n", + mdname(mddev)); return -EINVAL; } @@ -7652,8 +7617,8 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level) /* for raid0 takeover only one zone is supported */ if (raid0_conf->nr_strip_zones > 1) { - printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n", - mdname(mddev)); + pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n", + mdname(mddev)); return ERR_PTR(-EINVAL); } -- cgit v1.1 From 3fd880af4174d724d209a890101962b00d5d33d4 Mon Sep 17 00:00:00 2001 From: JackieLiu Date: Wed, 2 Nov 2016 17:02:39 +0800 Subject: raid5-cache: restrict the use area of the log_offset variable We can calculate this offset by using ctx->meta_total_blocks, without passing in from the function Signed-off-by: JackieLiu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index a227a9f..2e270e6 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -895,7 +895,7 @@ static int r5l_read_meta_block(struct r5l_log *log, static int r5l_recovery_flush_one_stripe(struct r5l_log *log, struct r5l_recovery_ctx *ctx, sector_t stripe_sect, - int *offset, sector_t *log_offset) + int *offset) { struct r5conf *conf = log->rdev->mddev->private; struct stripe_head *sh; @@ -904,6 +904,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0); while (1) { + sector_t log_offset = r5l_ring_add(log, ctx->pos, + ctx->meta_total_blocks); payload = page_address(ctx->meta_page) + *offset; if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { @@ -911,16 +913,15 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, le64_to_cpu(payload->location), 0, &disk_index, sh); - 
sync_page_io(log->rdev, *log_offset, PAGE_SIZE, + sync_page_io(log->rdev, log_offset, PAGE_SIZE, sh->dev[disk_index].page, REQ_OP_READ, 0, false); sh->dev[disk_index].log_checksum = le32_to_cpu(payload->checksum[0]); set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); - ctx->meta_total_blocks += BLOCK_SECTORS; } else { disk_index = sh->pd_idx; - sync_page_io(log->rdev, *log_offset, PAGE_SIZE, + sync_page_io(log->rdev, log_offset, PAGE_SIZE, sh->dev[disk_index].page, REQ_OP_READ, 0, false); sh->dev[disk_index].log_checksum = @@ -930,7 +931,7 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, if (sh->qd_idx >= 0) { disk_index = sh->qd_idx; sync_page_io(log->rdev, - r5l_ring_add(log, *log_offset, BLOCK_SECTORS), + r5l_ring_add(log, log_offset, BLOCK_SECTORS), PAGE_SIZE, sh->dev[disk_index].page, REQ_OP_READ, 0, false); sh->dev[disk_index].log_checksum = @@ -938,11 +939,9 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); } - ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded; } - *log_offset = r5l_ring_add(log, *log_offset, - le32_to_cpu(payload->size)); + ctx->meta_total_blocks += le32_to_cpu(payload->size); *offset += sizeof(struct r5l_payload_data_parity) + sizeof(__le32) * (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); @@ -999,12 +998,10 @@ static int r5l_recovery_flush_one_meta(struct r5l_log *log, struct r5l_payload_data_parity *payload; struct r5l_meta_block *mb; int offset; - sector_t log_offset; sector_t stripe_sector; mb = page_address(ctx->meta_page); offset = sizeof(struct r5l_meta_block); - log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); while (offset < le32_to_cpu(mb->meta_size)) { int dd; @@ -1013,7 +1010,7 @@ static int r5l_recovery_flush_one_meta(struct r5l_log *log, stripe_sector = raid5_compute_sector(conf, le64_to_cpu(payload->location), 0, &dd, NULL); if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector, - &offset, &log_offset)) + &offset)) return -EINVAL; } return 0; -- cgit v1.1 From 060b0689f5df7e87641c820a605c779149da33ef Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 4 Nov 2016 16:46:03 +1100 Subject: md: perform async updates for metadata where possible. When adding devices to, or removing device from, an array we need to update the metadata. However we don't need to do it synchronously as data integrity doesn't depend on these changes being recorded instantly. So avoid the synchronous call to md_update_sb and just set a flag so that the thread will do it. This can reduce the number of updates performed when lots of devices are being added or removed. 
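As a rough sketch, the deferred-update pattern applied by the hunks below looks like this (names taken from the diff; not a literal hunk, and error handling is omitted):

    /* old: write the superblock out synchronously before returning */
    md_update_sb(mddev, 1);

    /* new: mark the metadata dirty and let the array's thread write it
     * out later, so several device additions/removals can be folded
     * into a single superblock update; fall back to a synchronous
     * write only when there is no thread to defer to.
     */
    set_bit(MD_CHANGE_DEVS, &mddev->flags);
    if (mddev->thread)
            md_wakeup_thread(mddev->thread);
    else
            md_update_sb(mddev, 1);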
Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/md.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 22c9efd..f389d8a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2599,8 +2599,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) if (err == 0) { md_kick_rdev_from_array(rdev); - if (mddev->pers) - md_update_sb(mddev, 1); + if (mddev->pers) { + set_bit(MD_CHANGE_DEVS, &mddev->flags); + md_wakeup_thread(mddev->thread); + } md_new_event(mddev); } } @@ -6175,7 +6177,11 @@ kick_rdev: md_cluster_ops->remove_disk(mddev, rdev); md_kick_rdev_from_array(rdev); - md_update_sb(mddev, 1); + set_bit(MD_CHANGE_DEVS, &mddev->flags); + if (mddev->thread) + md_wakeup_thread(mddev->thread); + else + md_update_sb(mddev, 1); md_new_event(mddev); return 0; @@ -6240,7 +6246,9 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) rdev->raid_disk = -1; - md_update_sb(mddev, 1); + set_bit(MD_CHANGE_DEVS, &mddev->flags); + if (!mddev->thread) + md_update_sb(mddev, 1); /* * Kick recovery, maybe this spare has to be added to the * array immediately. -- cgit v1.1 From 5e2c7a3611977b69ae0531e8fbdeab5dad17925a Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 4 Nov 2016 16:46:03 +1100 Subject: md/raid1: abort delayed writes when device fails. When writing to an array with a bitmap enabled, the writes are grouped in batches which are preceded by an update to the bitmap. It is quite likely that, if a drive develops a problem which is not media related, the bitmap write will be the first to report an error and cause the device to be marked faulty (as the bitmap write is at the start of a batch). In this case, there is no point submitting the subsequent writes to the failed device - that just wastes time. So re-check the Faulty state of a device before submitting a delayed write. This requires that we keep the 'rdev', rather than the 'bdev', in the bio, then swap in the bdev just before final submission.
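Condensed from the flush_pending_writes()/raid1_unplug() hunks below, the late check looks roughly like this (illustrative sketch only; the rdev pointer was stashed in bi_bdev when the write was queued, and the discard special case is omitted):

    struct md_rdev *rdev = (void *)bio->bi_bdev;   /* recover the stashed rdev */

    bio->bi_bdev = rdev->bdev;                     /* put the real bdev back */
    if (test_bit(Faulty, &rdev->flags)) {
            bio->bi_error = -EIO;                  /* device already failed: */
            bio_endio(bio);                        /* complete the bio with an error */
    } else {
            generic_make_request(bio);             /* healthy device: submit as before */
    }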
Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid1.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 15f0b55..aac2a05 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -742,9 +742,14 @@ static void flush_pending_writes(struct r1conf *conf) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; + struct md_rdev *rdev = (void*)bio->bi_bdev; bio->bi_next = NULL; - if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) + bio->bi_bdev = rdev->bdev; + if (test_bit(Faulty, &rdev->flags)) { + bio->bi_error = -EIO; + bio_endio(bio); + } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && + !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ bio_endio(bio); else @@ -1016,9 +1021,14 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; + struct md_rdev *rdev = (void*)bio->bi_bdev; bio->bi_next = NULL; - if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) + bio->bi_bdev = rdev->bdev; + if (test_bit(Faulty, &rdev->flags)) { + bio->bi_error = -EIO; + bio_endio(bio); + } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && + !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ bio_endio(bio); else @@ -1357,7 +1367,7 @@ read_again: mbio->bi_iter.bi_sector = (r1_bio->sector + conf->mirrors[i].rdev->data_offset); - mbio->bi_bdev = conf->mirrors[i].rdev->bdev; + mbio->bi_bdev = (void*)conf->mirrors[i].rdev; mbio->bi_end_io = raid1_end_write_request; bio_set_op_attrs(mbio, op, do_flush_fua | do_sync); mbio->bi_private = r1_bio; -- cgit v1.1 From a9ae93c8cc0b63d8283f335604362f903d2244e2 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 4 Nov 2016 16:46:03 +1100 Subject: md/raid10: abort delayed writes when device fails. When writing to an array with a bitmap enabled, the writes are grouped in batches which are preceded by an update to the bitmap. It is quite likely that, if a drive develops a problem which is not media related, the bitmap write will be the first to report an error and cause the device to be marked faulty (as the bitmap write is at the start of a batch). In this case, there is no point submitting the subsequent writes to the failed device - that just wastes time. So re-check the Faulty state of a device before submitting a delayed write. This requires that we keep the 'rdev', rather than the 'bdev', in the bio, then swap in the bdev just before final submission.
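The raid10 hunks below apply the same idea; shown here only to illustrate the queueing side, the rdev pointer is stored where the bdev normally lives and is only resolved back at final submission:

    /* when delaying the write: remember the rdev itself, not its bdev */
    mbio->bi_bdev = (void *)rdev;
    mbio->bi_end_io = raid10_end_write_request;

    /* later, in flush_pending_writes()/raid10_unplug():
     *     struct md_rdev *rdev = (void *)bio->bi_bdev;
     *     bio->bi_bdev = rdev->bdev;
     * and the Faulty bit is re-checked before the bio is submitted.
     */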
Reported-by: Hannes Reinecke Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid10.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 25e3fd7..5290be3 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -858,9 +858,14 @@ static void flush_pending_writes(struct r10conf *conf) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; + struct md_rdev *rdev = (void*)bio->bi_bdev; bio->bi_next = NULL; - if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) + bio->bi_bdev = rdev->bdev; + if (test_bit(Faulty, &rdev->flags)) { + bio->bi_error = -EIO; + bio_endio(bio); + } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && + !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ bio_endio(bio); else @@ -1036,9 +1041,14 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; + struct md_rdev *rdev = (void*)bio->bi_bdev; bio->bi_next = NULL; - if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) + bio->bi_bdev = rdev->bdev; + if (test_bit(Faulty, &rdev->flags)) { + bio->bi_error = -EIO; + bio_endio(bio); + } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && + !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) /* Just ignore it */ bio_endio(bio); else @@ -1357,7 +1367,7 @@ retry_write: mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ choose_data_offset(r10_bio, rdev)); - mbio->bi_bdev = rdev->bdev; + mbio->bi_bdev = (void*)rdev; mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); mbio->bi_private = r10_bio; @@ -1399,7 +1409,7 @@ retry_write: mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + choose_data_offset( r10_bio, rdev)); - mbio->bi_bdev = rdev->bdev; + mbio->bi_bdev = (void*)rdev; mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); mbio->bi_private = r10_bio; -- cgit v1.1 From 85c9ccd4f026aad8e91ec1c182206e807cff932d Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 4 Nov 2016 16:46:03 +1100 Subject: md/bitmap: Don't write bitmap while earlier writes might be in-flight As we don't wait for writes to complete in bitmap_daemon_work, they could still be in-flight when bitmap_unplug writes again. Or when bitmap_daemon_work tries to write again. This can be confusing and could risk the wrong data being written last. So make sure we wait for old writes to complete before new writes start. Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/bitmap.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 23563f5..1a7f402 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -416,6 +416,21 @@ out: * bitmap file superblock operations */ +/* + * bitmap_wait_writes() should be called before writing any bitmap + * blocks, to ensure previous writes, particularly from + * bitmap_daemon_work(), have completed. 
+ */ +static void bitmap_wait_writes(struct bitmap *bitmap) +{ + if (bitmap->storage.file) + wait_event(bitmap->write_wait, + atomic_read(&bitmap->pending_writes)==0); + else + md_super_wait(bitmap->mddev); +} + + /* update the event counter and sync the superblock to disk */ void bitmap_update_sb(struct bitmap *bitmap) { @@ -978,6 +993,7 @@ void bitmap_unplug(struct bitmap *bitmap) { unsigned long i; int dirty, need_write; + int writing = 0; if (!bitmap || !bitmap->storage.filemap || test_bit(BITMAP_STALE, &bitmap->flags)) @@ -992,15 +1008,15 @@ void bitmap_unplug(struct bitmap *bitmap) need_write = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); if (dirty || need_write) { + if (!writing) + bitmap_wait_writes(bitmap); clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING); write_page(bitmap, bitmap->storage.filemap[i], 0); + writing = 1; } } - if (bitmap->storage.file) - wait_event(bitmap->write_wait, - atomic_read(&bitmap->pending_writes)==0); - else - md_super_wait(bitmap->mddev); + if (writing) + bitmap_wait_writes(bitmap); if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) bitmap_file_kick(bitmap); @@ -1282,6 +1298,7 @@ void bitmap_daemon_work(struct mddev *mddev) } spin_unlock_irq(&counts->lock); + bitmap_wait_writes(bitmap); /* Now start writeout on any page in NEEDWRITE that isn't DIRTY. * DIRTY pages need to be written by bitmap_unplug so it can wait * for them. -- cgit v1.1 From f2c771a655046f21bb70d5813aa94979d2bd49c9 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 9 Nov 2016 10:21:32 +1100 Subject: md/raid1: fix: IO can block resync indefinitely While performing a resync/recovery, raid1 divides the array space into three regions: - before the resync - at or shortly after the resync point - much further ahead of the resync point. Write requests to the first or third do not need to wait. Write requests to the middle region do need to wait if resync requests are pending. If there are any active write requests in the middle region, resync will wait for them. Due to an accounting error, there is a small range of addresses, between conf->next_resync and conf->start_next_window, where write requests will *not* be blocked, but *will* be counted in the middle region. This can effectively block resync indefinitely if filesystem writes happen repeatedly to this region. As ->next_window_requests is incremented when the sector is after conf->start_next_window + NEXT_NORMALIO_DISTANCE the same boundary should be used for determining when write requests should wait. Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index aac2a05..9ac61cd 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -834,7 +834,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) else if (conf->barrier && bio_data_dir(bio) == WRITE) { if ((conf->mddev->curr_resync_completed >= bio_end_sector(bio)) || - (conf->next_resync + NEXT_NORMALIO_DISTANCE + (conf->start_next_window + NEXT_NORMALIO_DISTANCE <= bio->bi_iter.bi_sector)) wait = false; else -- cgit v1.1 From be306c2989804ca5b90388df66fd3cf28ec74967 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 9 Nov 2016 10:21:33 +1100 Subject: md: define mddev flags, recovery flags and r1bio state bits using enums This is less error prone than using individual #defines. 
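As a small illustration of the change (using flag names that appear in the hunks below), the conversion is simply:

	/* before: bit numbers maintained by hand */
	#define MD_CHANGE_DEVS		0
	#define MD_CHANGE_CLEAN		1
	#define MD_CHANGE_PENDING	2

	/* after: the compiler numbers the bits, so inserting a new flag
	 * cannot silently collide with an existing value */
	enum mddev_flags {
		MD_CHANGE_DEVS,		/* Some device status has changed */
		MD_CHANGE_CLEAN,	/* transition to or from 'clean' */
		MD_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
	};

	/* callers are unchanged either way */
	set_bit(MD_CHANGE_DEVS, &mddev->flags);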
Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/md.h | 76 ++++++++++++++++++++++++++---------------------------- drivers/md/raid1.h | 18 +++++++------ 2 files changed, 46 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.h b/drivers/md/md.h index 21bd94f..af6b33c 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -192,6 +192,25 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, int is_new); struct md_cluster_info; +enum mddev_flags { + MD_CHANGE_DEVS, /* Some device status has changed */ + MD_CHANGE_CLEAN, /* transition to or from 'clean' */ + MD_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */ + MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */ + MD_CLOSING, /* If set, we are closing the array, do not open + * it then */ + MD_JOURNAL_CLEAN, /* A raid with journal is already clean */ + MD_HAS_JOURNAL, /* The raid array has journal feature set */ + MD_RELOAD_SB, /* Reload the superblock because another node + * updated it. + */ + MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node + * already took resync lock, need to + * release the lock */ +}; +#define MD_UPDATE_SB_FLAGS (BIT(MD_CHANGE_DEVS) | \ + BIT(MD_CHANGE_CLEAN) | \ + BIT(MD_CHANGE_PENDING)) /* If these are set, md_update_sb needed */ struct mddev { void *private; struct md_personality *pers; @@ -199,21 +218,6 @@ struct mddev { int md_minor; struct list_head disks; unsigned long flags; -#define MD_CHANGE_DEVS 0 /* Some device status has changed */ -#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ -#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */ -#define MD_UPDATE_SB_FLAGS (1 | 2 | 4) /* If these are set, md_update_sb needed */ -#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */ -#define MD_CLOSING 4 /* If set, we are closing the array, do not open - * it then */ -#define MD_JOURNAL_CLEAN 5 /* A raid with journal is already clean */ -#define MD_HAS_JOURNAL 6 /* The raid array has journal feature set */ -#define MD_RELOAD_SB 7 /* Reload the superblock because another node - * updated it. - */ -#define MD_CLUSTER_RESYNC_LOCKED 8 /* cluster raid only, which means node - * already took resync lock, need to - * release the lock */ int suspended; atomic_t active_io; @@ -307,31 +311,6 @@ struct mddev { int parallel_resync; int ok_start_degraded; - /* recovery/resync flags - * NEEDED: we might need to start a resync/recover - * RUNNING: a thread is running, or about to be started - * SYNC: actually doing a resync, not a recovery - * RECOVER: doing recovery, or need to try it. - * INTR: resync needs to be aborted for some reason - * DONE: thread is done and is waiting to be reaped - * REQUEST: user-space has requested a sync (used with SYNC) - * CHECK: user-space request for check-only, no repair - * RESHAPE: A reshape is happening - * ERROR: sync-action interrupted because io-error - * - * If neither SYNC or RESHAPE are set, then it is a recovery. 
- */ -#define MD_RECOVERY_RUNNING 0 -#define MD_RECOVERY_SYNC 1 -#define MD_RECOVERY_RECOVER 2 -#define MD_RECOVERY_INTR 3 -#define MD_RECOVERY_DONE 4 -#define MD_RECOVERY_NEEDED 5 -#define MD_RECOVERY_REQUESTED 6 -#define MD_RECOVERY_CHECK 7 -#define MD_RECOVERY_RESHAPE 8 -#define MD_RECOVERY_FROZEN 9 -#define MD_RECOVERY_ERROR 10 unsigned long recovery; /* If a RAID personality determines that recovery (of a particular @@ -445,6 +424,23 @@ struct mddev { unsigned int good_device_nr; /* good device num within cluster raid */ }; +enum recovery_flags { + /* + * If neither SYNC or RESHAPE are set, then it is a recovery. + */ + MD_RECOVERY_RUNNING, /* a thread is running, or about to be started */ + MD_RECOVERY_SYNC, /* actually doing a resync, not a recovery */ + MD_RECOVERY_RECOVER, /* doing recovery, or need to try it. */ + MD_RECOVERY_INTR, /* resync needs to be aborted for some reason */ + MD_RECOVERY_DONE, /* thread is done and is waiting to be reaped */ + MD_RECOVERY_NEEDED, /* we might need to start a resync/recover */ + MD_RECOVERY_REQUESTED, /* user-space has requested a sync (used with SYNC) */ + MD_RECOVERY_CHECK, /* user-space request for check-only, no repair */ + MD_RECOVERY_RESHAPE, /* A reshape is happening */ + MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */ + MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */ +}; + static inline int __must_check mddev_lock(struct mddev *mddev) { return mutex_lock_interruptible(&mddev->reconfig_mutex); diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 61c39b3..5ec1944 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -161,14 +161,15 @@ struct r1bio { }; /* bits for r1bio.state */ -#define R1BIO_Uptodate 0 -#define R1BIO_IsSync 1 -#define R1BIO_Degraded 2 -#define R1BIO_BehindIO 3 +enum r1bio_state { + R1BIO_Uptodate, + R1BIO_IsSync, + R1BIO_Degraded, + R1BIO_BehindIO, /* Set ReadError on bios that experience a readerror so that * raid1d knows what to do with them. */ -#define R1BIO_ReadError 4 + R1BIO_ReadError, /* For write-behind requests, we call bi_end_io when * the last non-write-behind device completes, providing * any write was successful. Otherwise we call when @@ -176,10 +177,11 @@ struct r1bio { * with failure when last write completes (and all failed). * Record that bi_end_io was called with this flag... */ -#define R1BIO_Returned 6 + R1BIO_Returned, /* If a write for this request means we can clear some * known-bad-block records, we set this flag */ -#define R1BIO_MadeGood 7 -#define R1BIO_WriteError 8 + R1BIO_MadeGood, + R1BIO_WriteError, +}; #endif -- cgit v1.1 From 6119e6792bcaf926cb284098042a576c1a55b513 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 9 Nov 2016 10:21:32 +1100 Subject: md: remove md_super_wait() call after bitmap_flush() bitmap_flush() finishes with bitmap_update_sb(), and that finishes with write_page(..., 1), so write_page() will wait for all writes to complete. So there is no point calling md_super_wait() immediately afterwards. 
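In other words, the relevant call chain already ends in a synchronous write, so the extra wait buys nothing (a sketch of the chain as described above):

	__md_stop_writes(mddev)
	  -> bitmap_flush(mddev)
	       -> bitmap_update_sb(bitmap)
	            -> write_page(bitmap, bitmap->storage.sb_page, 1);	/* '1' == wait for completion */
	/* md_super_wait(mddev) immediately afterwards is therefore redundant */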
Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/md.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index f389d8a..1f1c7f0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5472,7 +5472,6 @@ static void __md_stop_writes(struct mddev *mddev) del_timer_sync(&mddev->safemode_timer); bitmap_flush(mddev); - md_super_wait(mddev); if (mddev->ro == 0 && ((!mddev->in_sync && !mddev_is_clustered(mddev)) || -- cgit v1.1 From 354b445b5f84f72e64cef9ffe175548e84c1a874 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 16 Nov 2016 17:20:19 -0800 Subject: raid5-cache: fix lockdep warning lockdep reports a warning about the rcu_dereference usage. Use the normal rdev access pattern to avoid the warning. Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 2e270e6..f73672b 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -970,16 +970,28 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, continue; /* in case device is broken */ + rcu_read_lock(); rdev = rcu_dereference(conf->disks[disk_index].rdev); - if (rdev) + if (rdev) { + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); sync_page_io(rdev, stripe_sect, PAGE_SIZE, sh->dev[disk_index].page, REQ_OP_WRITE, 0, false); + rdev_dec_pending(rdev, rdev->mddev); + rcu_read_lock(); + } rrdev = rcu_dereference(conf->disks[disk_index].replacement); - if (rrdev) + if (rrdev) { + atomic_inc(&rrdev->nr_pending); + rcu_read_unlock(); sync_page_io(rrdev, stripe_sect, PAGE_SIZE, sh->dev[disk_index].page, REQ_OP_WRITE, 0, false); + rdev_dec_pending(rrdev, rrdev->mddev); + rcu_read_lock(); + } + rcu_read_unlock(); } raid5_release_stripe(sh); return 0; -- cgit v1.1 From 109e37653033a5fcd3bf8cab0ed6a7ff433f758a Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 18 Nov 2016 13:22:04 +1100 Subject: md: add block tracing for bio_remapping The block tracing infrastructure (accessed with blktrace/blkparse) supports the tracing of mapping bios from one device to another. This is currently used when a bio in a partition is mapped to the whole device, when bios are mapped by dm, and for mapping in md/raid5. Other md personalities do not include this tracing yet, so add it. When a read-error is detected we redirect the request to a different device. This could justifiably be seen as a new mapping for the original bio, or a secondary mapping for the bio that errors. This patch uses the second option. When md is used under dm-raid, the mappings are not traced as we do not have access to the block device number of the parent.
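The per-personality hook is small; in linear/raid0, for example, the split bio is traced just before it is submitted, roughly as below (guarded so that md under dm-raid, which has no gendisk, skips it):

	if (mddev->gendisk)
		trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
				      split, disk_devt(mddev->gendisk),
				      bio_sector);	/* sector the bio had before remapping */
	generic_make_request(split);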
Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/linear.c | 18 ++++++++++++------ drivers/md/raid0.c | 13 ++++++++++--- drivers/md/raid1.c | 26 ++++++++++++++++++++++++-- drivers/md/raid10.c | 31 +++++++++++++++++++++++++++++-- 4 files changed, 75 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 9c7d4f5..5975c99 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "md.h" #include "linear.h" @@ -227,22 +228,22 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) } do { - tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); + sector_t bio_sector = bio->bi_iter.bi_sector; + tmp_dev = which_dev(mddev, bio_sector); start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; end_sector = tmp_dev->end_sector; data_offset = tmp_dev->rdev->data_offset; bio->bi_bdev = tmp_dev->rdev->bdev; - if (unlikely(bio->bi_iter.bi_sector >= end_sector || - bio->bi_iter.bi_sector < start_sector)) + if (unlikely(bio_sector >= end_sector || + bio_sector < start_sector)) goto out_of_bounds; if (unlikely(bio_end_sector(bio) > end_sector)) { /* This bio crosses a device boundary, so we have to * split it. */ - split = bio_split(bio, end_sector - - bio->bi_iter.bi_sector, + split = bio_split(bio, end_sector - bio_sector, GFP_NOIO, fs_bio_set); bio_chain(split, bio); } else { @@ -256,8 +257,13 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { /* Just ignore it */ bio_endio(split); - } else + } else { + if (mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(split->bi_bdev), + split, disk_devt(mddev->gendisk), + bio_sector); generic_make_request(split); + } } while (split != bio); return; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index b3ba77a..e628f18 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "md.h" #include "raid0.h" #include "raid5.h" @@ -463,7 +464,8 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) } do { - sector_t sector = bio->bi_iter.bi_sector; + sector_t bio_sector = bio->bi_iter.bi_sector; + sector_t sector = bio_sector; unsigned chunk_sects = mddev->chunk_sectors; unsigned sectors = chunk_sects - @@ -472,7 +474,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) : sector_div(sector, chunk_sects)); /* Restore due to sector_div */ - sector = bio->bi_iter.bi_sector; + sector = bio_sector; if (sectors < bio_sectors(bio)) { split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set); @@ -491,8 +493,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { /* Just ignore it */ bio_endio(split); - } else + } else { + if (mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(split->bi_bdev), + split, disk_devt(mddev->gendisk), + bio_sector); generic_make_request(split); + } } while (split != bio); } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 9ac61cd..2dc193492 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "md.h" #include "raid1.h" #include "bitmap.h" @@ -1162,6 +1163,11 @@ read_again: bio_set_op_attrs(read_bio, op, do_sync); read_bio->bi_private = r1_bio; + if (mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), + read_bio, disk_devt(mddev->gendisk), + 
r1_bio->sector); + if (max_sectors < r1_bio->sectors) { /* could not read all from this device, so we will * need another r1_bio. @@ -1367,13 +1373,20 @@ read_again: mbio->bi_iter.bi_sector = (r1_bio->sector + conf->mirrors[i].rdev->data_offset); - mbio->bi_bdev = (void*)conf->mirrors[i].rdev; + mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; bio_set_op_attrs(mbio, op, do_flush_fua | do_sync); mbio->bi_private = r1_bio; atomic_inc(&r1_bio->remaining); + if (mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), + mbio, disk_devt(mddev->gendisk), + r1_bio->sector); + /* flush_pending_writes() needs access to the rdev so...*/ + mbio->bi_bdev = (void*)conf->mirrors[i].rdev; + cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); if (cb) plug = container_of(cb, struct raid1_plug_cb, cb); @@ -2290,6 +2303,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) struct bio *bio; char b[BDEVNAME_SIZE]; struct md_rdev *rdev; + dev_t bio_dev; + sector_t bio_sector; clear_bit(R1BIO_ReadError, &r1_bio->state); /* we got a read error. Maybe the drive is bad. Maybe just @@ -2303,6 +2318,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) bio = r1_bio->bios[r1_bio->read_disk]; bdevname(bio->bi_bdev, b); + bio_dev = bio->bi_bdev->bd_dev; + bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector; bio_put(bio); r1_bio->bios[r1_bio->read_disk] = NULL; @@ -2353,6 +2370,8 @@ read_more: else mbio->bi_phys_segments++; spin_unlock_irq(&conf->device_lock); + trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), + bio, bio_dev, bio_sector); generic_make_request(bio); bio = NULL; @@ -2367,8 +2386,11 @@ read_more: sectors_handled; goto read_more; - } else + } else { + trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), + bio, bio_dev, bio_sector); generic_make_request(bio); + } } } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 5290be3..67f0034 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "md.h" #include "raid10.h" #include "raid0.h" @@ -1165,6 +1166,10 @@ read_again: bio_set_op_attrs(read_bio, op, do_sync); read_bio->bi_private = r10_bio; + if (mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), + read_bio, disk_devt(mddev->gendisk), + r10_bio->sector); if (max_sectors < r10_bio->sectors) { /* Could not read all from this device, so we will * need another r10_bio. 
@@ -1367,11 +1372,18 @@ retry_write: mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ choose_data_offset(r10_bio, rdev)); - mbio->bi_bdev = (void*)rdev; + mbio->bi_bdev = rdev->bdev; mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); mbio->bi_private = r10_bio; + if (conf->mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), + mbio, disk_devt(conf->mddev->gendisk), + r10_bio->sector); + /* flush_pending_writes() needs access to the rdev so...*/ + mbio->bi_bdev = (void*)rdev; + atomic_inc(&r10_bio->remaining); cb = blk_check_plugged(raid10_unplug, mddev, @@ -1409,11 +1421,18 @@ retry_write: mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + choose_data_offset( r10_bio, rdev)); - mbio->bi_bdev = (void*)rdev; + mbio->bi_bdev = rdev->bdev; mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); mbio->bi_private = r10_bio; + if (conf->mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), + mbio, disk_devt(conf->mddev->gendisk), + r10_bio->sector); + /* flush_pending_writes() needs access to the rdev so...*/ + mbio->bi_bdev = (void*)rdev; + atomic_inc(&r10_bio->remaining); spin_lock_irqsave(&conf->device_lock, flags); bio_list_add(&conf->pending_bio_list, mbio); @@ -2496,6 +2515,8 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) char b[BDEVNAME_SIZE]; unsigned long do_sync; int max_sectors; + dev_t bio_dev; + sector_t bio_last_sector; /* we got a read error. Maybe the drive is bad. Maybe just * the block and we can fix it. @@ -2507,6 +2528,8 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) */ bio = r10_bio->devs[slot].bio; bdevname(bio->bi_bdev, b); + bio_dev = bio->bi_bdev->bd_dev; + bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors; bio_put(bio); r10_bio->devs[slot].bio = NULL; @@ -2546,6 +2569,10 @@ read_more: bio_set_op_attrs(bio, REQ_OP_READ, do_sync); bio->bi_private = r10_bio; bio->bi_end_io = raid10_end_read_request; + trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), + bio, bio_dev, + bio_last_sector - r10_bio->sectors); + if (max_sectors < r10_bio->sectors) { /* Drat - have to split this up more */ struct bio *mbio = r10_bio->master_bio; -- cgit v1.1 From 581dbd94da80c845b28666716e30c22c0834756a Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 14 Nov 2016 16:30:21 +1100 Subject: md/bitmap: add blktrace event for writes to the bitmap We trace whenever bitmap_unplug() finds that it needs to write to the bitmap, or when bitmap_daemon_work() finds there is work to do. This makes it easier to correlate bitmap updates with data writes.
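The events are plain trace messages attached to the array's request queue; the pattern (taken from the hunks below, with the check on mddev->queue skipping arrays that have no queue) is simply:

	if (bitmap->mddev->queue)
		blk_add_trace_msg(bitmap->mddev->queue,
				  "md bitmap_unplug");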
Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/bitmap.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 1a7f402..cf77cbf 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "md.h" #include "bitmap.h" @@ -1008,8 +1009,12 @@ void bitmap_unplug(struct bitmap *bitmap) need_write = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); if (dirty || need_write) { - if (!writing) + if (!writing) { bitmap_wait_writes(bitmap); + if (bitmap->mddev->queue) + blk_add_trace_msg(bitmap->mddev->queue, + "md bitmap_unplug"); + } clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING); write_page(bitmap, bitmap->storage.filemap[i], 0); writing = 1; @@ -1234,6 +1239,10 @@ void bitmap_daemon_work(struct mddev *mddev) } bitmap->allclean = 1; + if (bitmap->mddev->queue) + blk_add_trace_msg(bitmap->mddev->queue, + "md bitmap_daemon_work"); + /* Any file-page which is PENDING now needs to be written. * So set NEEDWRITE now, then after we make any last-minute changes * we will write it. -- cgit v1.1 From 578b54ade8a5e04df6edc14cb68ad0f6f491a1a1 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 14 Nov 2016 16:30:21 +1100 Subject: md/raid1, raid10: add blktrace records when IO is delayed Both raid1 and raid10 will sometimes delay handling an IO request, such as when resync is happening or there are too many requests queued. Add some blktrace messages so we can see when that is happening while looking for performance artefacts. Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid1.c | 8 ++++++++ drivers/md/raid10.c | 8 ++++++++ 2 files changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 2dc193492..d24adc5 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -71,6 +71,9 @@ static void allow_barrier(struct r1conf *conf, sector_t start_next_window, sector_t bi_sector); static void lower_barrier(struct r1conf *conf); +#define raid1_log(md, fmt, args...) \ + do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0) + static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) { struct pool_info *pi = data; @@ -861,6 +864,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) * that queue to allow conf->start_next_window * to increase.
*/ + raid1_log(conf->mddev, "wait barrier"); wait_event_lock_irq(conf->wait_barrier, !conf->array_frozen && (!conf->barrier || @@ -940,6 +944,7 @@ static void freeze_array(struct r1conf *conf, int extra) */ spin_lock_irq(&conf->resync_lock); conf->array_frozen = 1; + raid1_log(conf->mddev, "wait freeze"); wait_event_lock_irq_cmd(conf->wait_barrier, conf->nr_pending == conf->nr_queued+extra, conf->resync_lock, @@ -1144,6 +1149,7 @@ read_again: * take care not to over-take any writes * that are 'behind' */ + raid1_log(mddev, "wait behind writes"); wait_event(bitmap->behind_wait, atomic_read(&bitmap->behind_writes) == 0); } @@ -1208,6 +1214,7 @@ read_again: */ if (conf->pending_count >= max_queued_requests) { md_wakeup_thread(mddev->thread); + raid1_log(mddev, "wait queued"); wait_event(conf->wait_barrier, conf->pending_count < max_queued_requests); } @@ -1299,6 +1306,7 @@ read_again: rdev_dec_pending(conf->mirrors[j].rdev, mddev); r1_bio->state = 0; allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector); + raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); md_wait_for_blocked_rdev(blocked_rdev, mddev); start_next_window = wait_barrier(conf, bio); /* diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 67f0034..bd8c884 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -106,6 +106,9 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio); static void end_reshape_write(struct bio *bio); static void end_reshape(struct r10conf *conf); +#define raid10_log(md, fmt, args...) \ + do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) + static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) { struct r10conf *conf = data; @@ -942,6 +945,7 @@ static void wait_barrier(struct r10conf *conf) * that queue to get the nr_pending * count down. */ + raid10_log(conf->mddev, "wait barrier"); wait_event_lock_irq(conf->wait_barrier, !conf->barrier || (atomic_read(&conf->nr_pending) && @@ -1093,6 +1097,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio) /* IO spans the reshape position. Need to wait for * reshape to pass */ + raid10_log(conf->mddev, "wait reshape"); allow_barrier(conf); wait_event(conf->wait_barrier, conf->reshape_progress <= bio->bi_iter.bi_sector || @@ -1112,6 +1117,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio) set_mask_bits(&mddev->flags, 0, BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); md_wakeup_thread(mddev->thread); + raid10_log(conf->mddev, "wait reshape metadata"); wait_event(mddev->sb_wait, !test_bit(MD_CHANGE_PENDING, &mddev->flags)); @@ -1209,6 +1215,7 @@ read_again: */ if (conf->pending_count >= max_queued_requests) { md_wakeup_thread(mddev->thread); + raid10_log(mddev, "wait queued"); wait_event(conf->wait_barrier, conf->pending_count < max_queued_requests); } @@ -1336,6 +1343,7 @@ retry_write: } } allow_barrier(conf); + raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); md_wait_for_blocked_rdev(blocked_rdev, mddev); wait_barrier(conf); goto retry_write; -- cgit v1.1 From 504634f60f463e73e7d58c6810a04437da942dba Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Fri, 18 Nov 2016 09:44:08 -0800 Subject: md: add blktrace event for writes to superblock superblock write is an expensive operation. With raid5-cache, it can be called regularly. Tracing to help performance debug. 
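The marker itself is one guarded call in md_update_sb(), roughly as in the hunk below:

	if (mddev->queue)
		blk_add_trace_msg(mddev->queue, "md md_update_sb");

Together with the raid1/raid10 "wait ..." messages above, these strings appear as message records in the blktrace stream, interleaved with the ordinary I/O events.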
Signed-off-by: Shaohua Li Cc: NeilBrown --- drivers/md/md.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 1f1c7f0..d3cef77 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -64,6 +64,7 @@ #include #include #include +#include #include "md.h" #include "bitmap.h" #include "md-cluster.h" @@ -2403,6 +2404,8 @@ repeat: pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", mdname(mddev), mddev->in_sync); + if (mddev->queue) + blk_add_trace_msg(mddev->queue, "md md_update_sb"); bitmap_update_sb(mddev->bitmap); rdev_for_each(rdev, mddev) { char b[BDEVNAME_SIZE]; -- cgit v1.1 From c757ec95c22036b1cb85c56ede368bf8f6c08658 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 17 Nov 2016 15:24:36 -0800 Subject: md/r5cache: Check array size in r5l_init_log Currently, r5l_write_stripe checks meta size for each stripe write, which is not necessary. With this patch, r5l_init_log checks maximal meta size of the array, which is (r5l_meta_block + raid_disks x r5l_payload_data_parity). If this is too big to fit in one page, r5l_init_log aborts. With current meta data, r5l_log support raid_disks up to 203. Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index f73672b..33fc850 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -441,7 +441,6 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) { int write_disks = 0; int data_pages, parity_pages; - int meta_size; int reserve; int i; int ret = 0; @@ -473,15 +472,6 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) parity_pages = 1 + !!(sh->qd_idx >= 0); data_pages = write_disks - parity_pages; - meta_size = - ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) - * data_pages) + - sizeof(struct r5l_payload_data_parity) + - sizeof(__le32) * parity_pages; - /* Doesn't work with very big raid array */ - if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE) - return -EINVAL; - set_bit(STRIPE_LOG_TRAPPED, &sh->state); /* * The stripe must enter state machine again to finish the write, so @@ -1197,6 +1187,22 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) if (PAGE_SIZE != 4096) return -EINVAL; + + /* + * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and + * raid_disks r5l_payload_data_parity. 
+ * + * Write journal and cache does not work for very big array + * (raid_disks > 203) + */ + if (sizeof(struct r5l_meta_block) + + ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) * + conf->raid_disks) > PAGE_SIZE) { + pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n", + mdname(conf->mddev), conf->raid_disks); + return -EINVAL; + } + log = kzalloc(sizeof(*log), GFP_KERNEL); if (!log) return -ENOMEM; -- cgit v1.1 From 937621c36e0ea1af2aceeaea412ba3bd80247199 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 17 Nov 2016 15:24:37 -0800 Subject: md/r5cache: move some code to raid5.h Move some define and inline functions to raid5.h, so they can be used in raid5-cache.c Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5.c | 71 ------------------------------------------------- drivers/md/raid5.h | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 71 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index df88656..34895f3 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -70,19 +70,6 @@ module_param(devices_handle_discard_safely, bool, 0644); MODULE_PARM_DESC(devices_handle_discard_safely, "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); static struct workqueue_struct *raid5_wq; -/* - * Stripe cache - */ - -#define NR_STRIPES 256 -#define STRIPE_SIZE PAGE_SIZE -#define STRIPE_SHIFT (PAGE_SHIFT - 9) -#define STRIPE_SECTORS (STRIPE_SIZE>>9) -#define IO_THRESHOLD 1 -#define BYPASS_THRESHOLD 1 -#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) -#define HASH_MASK (NR_HASH - 1) -#define MAX_STRIPE_BATCH 8 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) { @@ -126,64 +113,6 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) local_irq_enable(); } -/* bio's attached to a stripe+device for I/O are linked together in bi_sector - * order without overlap. There may be several bio's per stripe+device, and - * a bio could span several devices. - * When walking this list for a particular stripe+device, we must never proceed - * beyond a bio that extends past this device, as the next bio might no longer - * be valid. 
- * This function is used to determine the 'next' bio in the list, given the sector - * of the current stripe+device - */ -static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) -{ - int sectors = bio_sectors(bio); - if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) - return bio->bi_next; - else - return NULL; -} - -/* - * We maintain a biased count of active stripes in the bottom 16 bits of - * bi_phys_segments, and a count of processed stripes in the upper 16 bits - */ -static inline int raid5_bi_processed_stripes(struct bio *bio) -{ - atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; - return (atomic_read(segments) >> 16) & 0xffff; -} - -static inline int raid5_dec_bi_active_stripes(struct bio *bio) -{ - atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; - return atomic_sub_return(1, segments) & 0xffff; -} - -static inline void raid5_inc_bi_active_stripes(struct bio *bio) -{ - atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; - atomic_inc(segments); -} - -static inline void raid5_set_bi_processed_stripes(struct bio *bio, - unsigned int cnt) -{ - atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; - int old, new; - - do { - old = atomic_read(segments); - new = (old & 0xffff) | (cnt << 16); - } while (atomic_cmpxchg(segments, old, new) != old); -} - -static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt) -{ - atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; - atomic_set(segments, cnt); -} - /* Find first data disk in a raid6 stripe */ static inline int raid6_d0(struct stripe_head *sh) { diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 57ec49f..ffc13c4 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -410,6 +410,83 @@ struct disk_info { struct md_rdev *rdev, *replacement; }; +/* + * Stripe cache + */ + +#define NR_STRIPES 256 +#define STRIPE_SIZE PAGE_SIZE +#define STRIPE_SHIFT (PAGE_SHIFT - 9) +#define STRIPE_SECTORS (STRIPE_SIZE>>9) +#define IO_THRESHOLD 1 +#define BYPASS_THRESHOLD 1 +#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) +#define HASH_MASK (NR_HASH - 1) +#define MAX_STRIPE_BATCH 8 + +/* bio's attached to a stripe+device for I/O are linked together in bi_sector + * order without overlap. There may be several bio's per stripe+device, and + * a bio could span several devices. + * When walking this list for a particular stripe+device, we must never proceed + * beyond a bio that extends past this device, as the next bio might no longer + * be valid. 
+ * This function is used to determine the 'next' bio in the list, given the + * sector of the current stripe+device + */ +static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) +{ + int sectors = bio_sectors(bio); + + if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) + return bio->bi_next; + else + return NULL; +} + +/* + * We maintain a biased count of active stripes in the bottom 16 bits of + * bi_phys_segments, and a count of processed stripes in the upper 16 bits + */ +static inline int raid5_bi_processed_stripes(struct bio *bio) +{ + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + + return (atomic_read(segments) >> 16) & 0xffff; +} + +static inline int raid5_dec_bi_active_stripes(struct bio *bio) +{ + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + + return atomic_sub_return(1, segments) & 0xffff; +} + +static inline void raid5_inc_bi_active_stripes(struct bio *bio) +{ + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + + atomic_inc(segments); +} + +static inline void raid5_set_bi_processed_stripes(struct bio *bio, + unsigned int cnt) +{ + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + int old, new; + + do { + old = atomic_read(segments); + new = (old & 0xffff) | (cnt << 16); + } while (atomic_cmpxchg(segments, old, new) != old); +} + +static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt) +{ + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + + atomic_set(segments, cnt); +} + /* NOTE NR_STRIPE_HASH_LOCKS must remain below 64. * This is because we sometimes take all the spinlocks * and creating that much locking depth can cause -- cgit v1.1 From 2ded370373a400c20cf0c6e941e724e61582a867 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 17 Nov 2016 15:24:38 -0800 Subject: md/r5cache: State machine for raid5-cache write back mode This patch adds state machine for raid5-cache. With log device, the raid456 array could operate in two different modes (r5c_journal_mode): - write-back (R5C_MODE_WRITE_BACK) - write-through (R5C_MODE_WRITE_THROUGH) Existing code of raid5-cache only has write-through mode. For write-back cache, it is necessary to extend the state machine. With write-back cache, every stripe could operate in two different phases: - caching - writing-out In caching phase, the stripe handles writes as: - write to journal - return IO In writing-out phase, the stripe behaviors as a stripe in write through mode R5C_MODE_WRITE_THROUGH. STRIPE_R5C_CACHING is added to sh->state to differentiate caching and writing-out phase. Please note: this is a "no-op" patch for raid5-cache write-through mode. The following detailed explanation is copied from the raid5-cache.c: /* * raid5 cache state machine * * With rhe RAID cache, each stripe works in two phases: * - caching phase * - writing-out phase * * These two phases are controlled by bit STRIPE_R5C_CACHING: * if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase * if STRIPE_R5C_CACHING == 1, the stripe is in caching phase * * When there is no journal, or the journal is in write-through mode, * the stripe is always in writing-out phase. * * For write-back journal, the stripe is sent to caching phase on write * (r5c_handle_stripe_dirtying). r5c_make_stripe_write_out() kicks off * the write-out phase by clearing STRIPE_R5C_CACHING. * * Stripes in caching phase do not write the raid disks. Instead, all * writes are committed from the log device. 
Therefore, a stripe in * caching phase handles writes as: * - write to log device * - return IO * * Stripes in writing-out phase handle writes as: * - calculate parity * - write pending data and parity to journal * - write data and parity to raid disks * - return IO for pending writes */ Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 143 ++++++++++++++++++++++++++++++++++++++++++++++- drivers/md/raid5.c | 45 +++++++++++++-- drivers/md/raid5.h | 31 +++++++++- 3 files changed, 211 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 33fc850..02a5544 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -40,6 +40,47 @@ */ #define R5L_POOL_SIZE 4 +/* + * r5c journal modes of the array: write-back or write-through. + * write-through mode has identical behavior as existing log only + * implementation. + */ +enum r5c_journal_mode { + R5C_JOURNAL_MODE_WRITE_THROUGH = 0, + R5C_JOURNAL_MODE_WRITE_BACK = 1, +}; + +/* + * raid5 cache state machine + * + * With rhe RAID cache, each stripe works in two phases: + * - caching phase + * - writing-out phase + * + * These two phases are controlled by bit STRIPE_R5C_CACHING: + * if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase + * if STRIPE_R5C_CACHING == 1, the stripe is in caching phase + * + * When there is no journal, or the journal is in write-through mode, + * the stripe is always in writing-out phase. + * + * For write-back journal, the stripe is sent to caching phase on write + * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off + * the write-out phase by clearing STRIPE_R5C_CACHING. + * + * Stripes in caching phase do not write the raid disks. Instead, all + * writes are committed from the log device. Therefore, a stripe in + * caching phase handles writes as: + * - write to log device + * - return IO + * + * Stripes in writing-out phase handle writes as: + * - calculate parity + * - write pending data and parity to journal + * - write data and parity to raid disks + * - return IO for pending writes + */ + struct r5l_log { struct md_rdev *rdev; @@ -96,6 +137,9 @@ struct r5l_log { spinlock_t no_space_stripes_lock; bool need_cache_flush; + + /* for r5c_cache */ + enum r5c_journal_mode r5c_journal_mode; }; /* @@ -133,6 +177,12 @@ enum r5l_io_unit_state { IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */ }; +bool r5c_is_writeback(struct r5l_log *log) +{ + return (log != NULL && + log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK); +} + static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc) { start += inc; @@ -168,12 +218,51 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io, io->state = state; } +/* + * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING. + * This function should only be called in write-back mode. + */ +static void r5c_make_stripe_write_out(struct stripe_head *sh) +{ + struct r5conf *conf = sh->raid_conf; + struct r5l_log *log = conf->log; + + BUG_ON(!r5c_is_writeback(log)); + + WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); + clear_bit(STRIPE_R5C_CACHING, &sh->state); +} + +/* + * Setting proper flags after writing (or flushing) data and/or parity to the + * log device. This is called from r5l_log_endio() or r5l_log_flush_endio(). 
+ */ +static void r5c_finish_cache_stripe(struct stripe_head *sh) +{ + struct r5l_log *log = sh->raid_conf->log; + + if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { + BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); + /* + * Set R5_InJournal for parity dev[pd_idx]. This means + * all data AND parity in the journal. For RAID 6, it is + * NOT necessary to set the flag for dev[qd_idx], as the + * two parities are written out together. + */ + set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags); + } else + BUG(); /* write-back logic in next patch */ +} + static void r5l_io_run_stripes(struct r5l_io_unit *io) { struct stripe_head *sh, *next; list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { list_del_init(&sh->log_list); + + r5c_finish_cache_stripe(sh); + set_bit(STRIPE_HANDLE, &sh->state); raid5_release_stripe(sh); } @@ -412,18 +501,19 @@ static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, r5l_append_payload_page(log, sh->dev[i].page); } - if (sh->qd_idx >= 0) { + if (parity_pages == 2) { r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, sh->sector, sh->dev[sh->pd_idx].log_checksum, sh->dev[sh->qd_idx].log_checksum, true); r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); r5l_append_payload_page(log, sh->dev[sh->qd_idx].page); - } else { + } else if (parity_pages == 1) { r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, sh->sector, sh->dev[sh->pd_idx].log_checksum, 0, false); r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); - } + } else /* Just writing data, not parity, in caching phase */ + BUG_ON(parity_pages != 0); list_add_tail(&sh->log_list, &io->stripe_list); atomic_inc(&io->pending_stripe); @@ -455,6 +545,8 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) return -EAGAIN; } + WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); + for (i = 0; i < sh->disks; i++) { void *addr; @@ -1112,6 +1204,49 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp) set_bit(MD_CHANGE_DEVS, &mddev->flags); } +/* + * Try handle write operation in caching phase. This function should only + * be called in write-back mode. + * + * If all outstanding writes can be handled in caching phase, returns 0 + * If writes requires write-out phase, call r5c_make_stripe_write_out() + * and returns -EAGAIN + */ +int r5c_try_caching_write(struct r5conf *conf, + struct stripe_head *sh, + struct stripe_head_state *s, + int disks) +{ + struct r5l_log *log = conf->log; + + BUG_ON(!r5c_is_writeback(log)); + + /* more write-back logic in next patches */ + r5c_make_stripe_write_out(sh); + return -EAGAIN; +} + +/* + * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the + * stripe is committed to RAID disks. 
+ */ +void r5c_finish_stripe_write_out(struct r5conf *conf, + struct stripe_head *sh, + struct stripe_head_state *s) +{ + if (!conf->log || + !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)) + return; + + WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); + clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags); + + if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) + return; + BUG(); /* write-back logic in following patches */ +} + + static int r5l_load_log(struct r5l_log *log) { struct md_rdev *rdev = log->rdev; @@ -1249,6 +1384,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) INIT_LIST_HEAD(&log->no_space_stripes); spin_lock_init(&log->no_space_stripes_lock); + log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; + if (r5l_load_log(log)) goto error; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 34895f3..7c98eb0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4107,6 +4107,9 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) if (rdev && !test_bit(Faulty, &rdev->flags)) do_recovery = 1; } + + if (test_bit(R5_InJournal, &dev->flags)) + s->injournal++; } if (test_bit(STRIPE_SYNCING, &sh->state)) { /* If there is a failed device being replaced, @@ -4386,14 +4389,47 @@ static void handle_stripe(struct stripe_head *sh) || s.expanding) handle_stripe_fill(sh, &s, disks); - /* Now to consider new write requests and what else, if anything - * should be read. We do not handle new writes when: + /* + * When the stripe finishes full journal write cycle (write to journal + * and raid disk), this is the clean up procedure so it is ready for + * next operation. + */ + r5c_finish_stripe_write_out(conf, sh, &s); + + /* + * Now to consider new write requests, cache write back and what else, + * if anything should be read. We do not handle new writes when: * 1/ A 'write' operation (copy+xor) is already in flight. * 2/ A 'check' operation is in flight, as it may clobber the parity * block. + * 3/ A r5c cache log write is in flight. */ - if (s.to_write && !sh->reconstruct_state && !sh->check_state) - handle_stripe_dirtying(conf, sh, &s, disks); + + if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { + if (!r5c_is_writeback(conf->log)) { + if (s.to_write) + handle_stripe_dirtying(conf, sh, &s, disks); + } else { /* write back cache */ + int ret = 0; + + /* First, try handle writes in caching phase */ + if (s.to_write) + ret = r5c_try_caching_write(conf, sh, &s, + disks); + /* + * If caching phase failed: ret == -EAGAIN + * OR + * stripe under reclaim: !caching && injournal + * + * fall back to handle_stripe_dirtying() + */ + if (ret == -EAGAIN || + /* stripe under reclaim: !caching && injournal */ + (!test_bit(STRIPE_R5C_CACHING, &sh->state) && + s.injournal > 0)) + handle_stripe_dirtying(conf, sh, &s, disks); + } + } /* maybe we need to check and possibly fix the parity for this stripe * Any reads will already have been scheduled, so we just see if enough @@ -5110,6 +5146,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) * data on failed drives. 
*/ if (rw == READ && mddev->degraded == 0 && + !r5c_is_writeback(conf->log) && mddev->reshape_position == MaxSector) { bi = chunk_aligned_read(mddev, bi); if (!bi) diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index ffc13c4..c9590a8 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -264,6 +264,7 @@ struct stripe_head_state { int syncing, expanding, expanded, replacing; int locked, uptodate, to_read, to_write, failed, written; int to_fill, compute, req_compute, non_overwrite; + int injournal; int failed_num[2]; int p_failed, q_failed; int dec_preread_active; @@ -313,6 +314,11 @@ enum r5dev_flags { */ R5_Discard, /* Discard the stripe */ R5_SkipCopy, /* Don't copy data from bio to stripe cache */ + R5_InJournal, /* data being written is in the journal device. + * if R5_InJournal is set for parity pd_idx, all the + * data and parity being written are in the journal + * device + */ }; /* @@ -345,7 +351,23 @@ enum { STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add * to batch yet. */ - STRIPE_LOG_TRAPPED, /* trapped into log */ + STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c) + * this bit is used in two scenarios: + * + * 1. write-out phase + * set in first entry of r5l_write_stripe + * clear in second entry of r5l_write_stripe + * used to bypass logic in handle_stripe + * + * 2. caching phase + * set in r5c_try_caching_write() + * clear when journal write is done + * used to initiate r5c_cache_data() + * also used to bypass logic in handle_stripe + */ + STRIPE_R5C_CACHING, /* the stripe is in caching phase + * see more detail in the raid5-cache.c + */ }; #define STRIPE_EXPAND_SYNC_FLAGS \ @@ -710,4 +732,11 @@ extern void r5l_stripe_write_finished(struct stripe_head *sh); extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio); extern void r5l_quiesce(struct r5l_log *log, int state); extern bool r5l_log_disk_error(struct r5conf *conf); +extern bool r5c_is_writeback(struct r5l_log *log); +extern int +r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh, + struct stripe_head_state *s, int disks); +extern void +r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh, + struct stripe_head_state *s); #endif -- cgit v1.1 From 1e6d690b9334b7e1b31d25fd8d93e980e449a5f9 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 17 Nov 2016 15:24:39 -0800 Subject: md/r5cache: caching phase of r5cache As described in the previous patch, the write-back cache operates in two phases: caching and writing-out. The caching phase works as: 1. write data to journal (r5c_handle_stripe_dirtying, r5c_cache_data) 2. call bio_endio (r5c_handle_data_cached, r5c_return_dev_pending_writes). The writing-out phase then works as: 1. Mark the stripe as write-out (r5c_make_stripe_write_out) 2. Calculate parity (reconstruct or RMW) 3. Write parity (and maybe some other data) to journal device 4. Write data and parity to RAID disks This patch implements the caching phase. The cache is integrated with the stripe cache of raid456. It leverages the code of r5l_log to write data to the journal device. The writing-out phase of the cache is implemented in the next patch. With r5cache, a write operation does not wait for parity calculation and write-out, so the write latency is lower (1 write to the journal device vs. read and then write to the raid disks). Also, r5cache will reduce RAID overhead (multiple IO due to read-modify-write of parity) and provide more opportunities for full stripe writes.
This patch adds 2 flags to stripe_head.state: - STRIPE_R5C_PARTIAL_STRIPE, - STRIPE_R5C_FULL_STRIPE, Instead of inactive_list, stripes with cached data are tracked in r5conf->r5c_full_stripe_list and r5conf->r5c_partial_stripe_list. STRIPE_R5C_FULL_STRIPE and STRIPE_R5C_PARTIAL_STRIPE are flags for stripes in these lists. Note: stripes in r5c_full/partial_stripe_list are not considered as "active". For RMW, the code allocates an extra page for each data block being updated. This is stored in r5dev->orig_page and the old data is read into it. Then the prexor calculation subtracts ->orig_page from the parity block, and the reconstruct calculation adds the ->page data back into the parity block. r5cache naturally excludes SkipCopy. When the array has write back cache, async_copy_data() will not skip copy. There are some known limitations of the cache implementation: 1. Write cache only covers full page writes (R5_OVERWRITE). Writes of smaller granularity are write through. 2. Only one log io (sh->log_io) for each stripe at anytime. Later writes for the same stripe have to wait. This can be improved by moving log_io to r5dev. 3. With writeback cache, read path must enter state machine, which is a significant bottleneck for some workloads. 4. There is no per stripe checkpoint (with r5l_payload_flush) in the log, so recovery code has to replay more than necessary data (sometimes all the log from last_checkpoint). This reduces availability of the array. This patch includes a fix proposed by ZhengYuan Liu Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 242 +++++++++++++++++++++++++++++++++++++++++++++-- drivers/md/raid5.c | 152 ++++++++++++++++++++++++----- drivers/md/raid5.h | 19 +++- 3 files changed, 381 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 02a5544..19c5af9 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -20,6 +20,7 @@ #include #include "md.h" #include "raid5.h" +#include "bitmap.h" /* * metadata/data stored in disk with 4k size unit (a block) regardless @@ -218,6 +219,43 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io, io->state = state; } +static void +r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev, + struct bio_list *return_bi) +{ + struct bio *wbi, *wbi2; + + wbi = dev->written; + dev->written = NULL; + while (wbi && wbi->bi_iter.bi_sector < + dev->sector + STRIPE_SECTORS) { + wbi2 = r5_next_bio(wbi, dev->sector); + if (!raid5_dec_bi_active_stripes(wbi)) { + md_write_end(conf->mddev); + bio_list_add(return_bi, wbi); + } + wbi = wbi2; + } +} + +void r5c_handle_cached_data_endio(struct r5conf *conf, + struct stripe_head *sh, int disks, struct bio_list *return_bi) +{ + int i; + + for (i = sh->disks; i--; ) { + if (sh->dev[i].written) { + set_bit(R5_UPTODATE, &sh->dev[i].flags); + r5c_return_dev_pending_writes(conf, &sh->dev[i], + return_bi); + bitmap_endwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, + !test_bit(STRIPE_DEGRADED, &sh->state), + 0); + } + } +} + /* * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING. * This function should only be called in write-back mode. 
@@ -231,6 +269,44 @@ static void r5c_make_stripe_write_out(struct stripe_head *sh) WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); clear_bit(STRIPE_R5C_CACHING, &sh->state); + + if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + atomic_inc(&conf->preread_active_stripes); + + if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) { + BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0); + atomic_dec(&conf->r5c_cached_partial_stripes); + } + + if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { + BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0); + atomic_dec(&conf->r5c_cached_full_stripes); + } +} + +static void r5c_handle_data_cached(struct stripe_head *sh) +{ + int i; + + for (i = sh->disks; i--; ) + if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { + set_bit(R5_InJournal, &sh->dev[i].flags); + clear_bit(R5_LOCKED, &sh->dev[i].flags); + } + clear_bit(STRIPE_LOG_TRAPPED, &sh->state); +} + +/* + * this journal write must contain full parity, + * it may also contain some data pages + */ +static void r5c_handle_parity_cached(struct stripe_head *sh) +{ + int i; + + for (i = sh->disks; i--; ) + if (test_bit(R5_InJournal, &sh->dev[i].flags)) + set_bit(R5_Wantwrite, &sh->dev[i].flags); } /* @@ -250,8 +326,12 @@ static void r5c_finish_cache_stripe(struct stripe_head *sh) * two parities are written out together. */ set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags); - } else - BUG(); /* write-back logic in next patch */ + } else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) { + r5c_handle_data_cached(sh); + } else { + r5c_handle_parity_cached(sh); + set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags); + } } static void r5l_io_run_stripes(struct r5l_io_unit *io) @@ -491,7 +571,8 @@ static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, io = log->current_io; for (i = 0; i < sh->disks; i++) { - if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) + if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) || + test_bit(R5_InJournal, &sh->dev[i].flags)) continue; if (i == sh->pd_idx || i == sh->qd_idx) continue; @@ -550,8 +631,10 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) for (i = 0; i < sh->disks; i++) { void *addr; - if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) + if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) || + test_bit(R5_InJournal, &sh->dev[i].flags)) continue; + write_disks++; /* checksum is already calculated in last run */ if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) @@ -817,7 +900,6 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log, } } - static void r5l_do_reclaim(struct r5l_log *log) { sector_t reclaim_target = xchg(&log->reclaim_target, 0); @@ -1218,12 +1300,80 @@ int r5c_try_caching_write(struct r5conf *conf, int disks) { struct r5l_log *log = conf->log; + int i; + struct r5dev *dev; + int to_cache = 0; BUG_ON(!r5c_is_writeback(log)); - /* more write-back logic in next patches */ - r5c_make_stripe_write_out(sh); - return -EAGAIN; + if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { + /* + * There are two different scenarios here: + * 1. The stripe has some data cached, and it is sent to + * write-out phase for reclaim + * 2. The stripe is clean, and this is the first write + * + * For 1, return -EAGAIN, so we continue with + * handle_stripe_dirtying(). + * + * For 2, set STRIPE_R5C_CACHING and continue with caching + * write. 
+ */ + + /* case 1: anything injournal or anything in written */ + if (s->injournal > 0 || s->written > 0) + return -EAGAIN; + /* case 2 */ + set_bit(STRIPE_R5C_CACHING, &sh->state); + } + + for (i = disks; i--; ) { + dev = &sh->dev[i]; + /* if non-overwrite, use writing-out phase */ + if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) && + !test_bit(R5_InJournal, &dev->flags)) { + r5c_make_stripe_write_out(sh); + return -EAGAIN; + } + } + + for (i = disks; i--; ) { + dev = &sh->dev[i]; + if (dev->towrite) { + set_bit(R5_Wantwrite, &dev->flags); + set_bit(R5_Wantdrain, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + to_cache++; + } + } + + if (to_cache) { + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); + /* + * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data() + * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in + * r5c_handle_data_cached() + */ + set_bit(STRIPE_LOG_TRAPPED, &sh->state); + } + + return 0; +} + +/* + * free extra pages (orig_page) we allocated for prexor + */ +void r5c_release_extra_page(struct stripe_head *sh) +{ + int i; + + for (i = sh->disks; i--; ) + if (sh->dev[i].page != sh->dev[i].orig_page) { + struct page *p = sh->dev[i].orig_page; + + sh->dev[i].orig_page = sh->dev[i].page; + put_page(p); + } } /* @@ -1234,6 +1384,9 @@ void r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s) { + int i; + int do_wakeup = 0; + if (!conf->log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)) return; @@ -1243,7 +1396,78 @@ void r5c_finish_stripe_write_out(struct r5conf *conf, if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) return; - BUG(); /* write-back logic in following patches */ + + for (i = sh->disks; i--; ) { + clear_bit(R5_InJournal, &sh->dev[i].flags); + if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) + do_wakeup = 1; + } + + /* + * analyse_stripe() runs before r5c_finish_stripe_write_out(), + * We updated R5_InJournal, so we also update s->injournal. + */ + s->injournal = 0; + + if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) + if (atomic_dec_and_test(&conf->pending_full_writes)) + md_wakeup_thread(conf->mddev->thread); + + if (do_wakeup) + wake_up(&conf->wait_for_overlap); +} + +int +r5c_cache_data(struct r5l_log *log, struct stripe_head *sh, + struct stripe_head_state *s) +{ + int pages = 0; + int reserve; + int i; + int ret = 0; + + BUG_ON(!log); + + for (i = 0; i < sh->disks; i++) { + void *addr; + + if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) + continue; + addr = kmap_atomic(sh->dev[i].page); + sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, + addr, PAGE_SIZE); + kunmap_atomic(addr); + pages++; + } + WARN_ON(pages == 0); + + /* + * The stripe must enter state machine again to call endio, so + * don't delay. 
+ */ + clear_bit(STRIPE_DELAYED, &sh->state); + atomic_inc(&sh->count); + + mutex_lock(&log->io_mutex); + /* meta + data */ + reserve = (1 + pages) << (PAGE_SHIFT - 9); + if (!r5l_has_free_space(log, reserve)) { + spin_lock(&log->no_space_stripes_lock); + list_add_tail(&sh->log_list, &log->no_space_stripes); + spin_unlock(&log->no_space_stripes_lock); + + r5l_wake_reclaim(log, reserve); + } else { + ret = r5l_log_stripe(log, sh, pages, 0); + if (ret) { + spin_lock_irq(&log->io_list_lock); + list_add_tail(&sh->log_list, &log->no_mem_stripes); + spin_unlock_irq(&log->io_list_lock); + } + } + + mutex_unlock(&log->io_mutex); + return 0; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7c98eb0..f535ce2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -218,8 +218,17 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh) static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, struct list_head *temp_inactive_list) { + int i; + int injournal = 0; /* number of date pages with R5_InJournal */ + BUG_ON(!list_empty(&sh->lru)); BUG_ON(atomic_read(&conf->active_stripes)==0); + + if (r5c_is_writeback(conf->log)) + for (i = sh->disks; i--; ) + if (test_bit(R5_InJournal, &sh->dev[i].flags)) + injournal++; + if (test_bit(STRIPE_HANDLE, &sh->state)) { if (test_bit(STRIPE_DELAYED, &sh->state) && !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) @@ -245,8 +254,29 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, < IO_THRESHOLD) md_wakeup_thread(conf->mddev->thread); atomic_dec(&conf->active_stripes); - if (!test_bit(STRIPE_EXPANDING, &sh->state)) - list_add_tail(&sh->lru, temp_inactive_list); + if (!test_bit(STRIPE_EXPANDING, &sh->state)) { + if (!r5c_is_writeback(conf->log)) + list_add_tail(&sh->lru, temp_inactive_list); + else { + WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); + if (injournal == 0) + list_add_tail(&sh->lru, temp_inactive_list); + else if (injournal == conf->raid_disks - conf->max_degraded) { + /* full stripe */ + if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) + atomic_inc(&conf->r5c_cached_full_stripes); + if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) + atomic_dec(&conf->r5c_cached_partial_stripes); + list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); + } else { + /* partial stripe */ + if (!test_and_set_bit(STRIPE_R5C_PARTIAL_STRIPE, + &sh->state)) + atomic_inc(&conf->r5c_cached_partial_stripes); + list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); + } + } + } } } @@ -830,8 +860,17 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) might_sleep(); - if (r5l_write_stripe(conf->log, sh) == 0) - return; + if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { + /* writing out phase */ + if (r5l_write_stripe(conf->log, sh) == 0) + return; + } else { /* caching phase */ + if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) { + r5c_cache_data(conf->log, sh, s); + return; + } + } + for (i = disks; i--; ) { int op, op_flags = 0; int replace_only = 0; @@ -1044,7 +1083,7 @@ again: static struct dma_async_tx_descriptor * async_copy_data(int frombio, struct bio *bio, struct page **page, sector_t sector, struct dma_async_tx_descriptor *tx, - struct stripe_head *sh) + struct stripe_head *sh, int no_skipcopy) { struct bio_vec bvl; struct bvec_iter iter; @@ -1084,7 +1123,8 @@ async_copy_data(int frombio, struct bio *bio, struct page **page, if (frombio) { if (sh->raid_conf->skip_copy && b_offset == 0 && page_offset == 0 && - clen == STRIPE_SIZE) + clen == 
STRIPE_SIZE && + !no_skipcopy) *page = bio_page; else tx = async_memcpy(*page, bio_page, page_offset, @@ -1166,7 +1206,7 @@ static void ops_run_biofill(struct stripe_head *sh) while (rbi && rbi->bi_iter.bi_sector < dev->sector + STRIPE_SECTORS) { tx = async_copy_data(0, rbi, &dev->page, - dev->sector, tx, sh); + dev->sector, tx, sh, 0); rbi = r5_next_bio(rbi, dev->sector); } } @@ -1293,10 +1333,15 @@ static int set_syndrome_sources(struct page **srcs, if (i == sh->qd_idx || i == sh->pd_idx || (srctype == SYNDROME_SRC_ALL) || (srctype == SYNDROME_SRC_WANT_DRAIN && - test_bit(R5_Wantdrain, &dev->flags)) || + (test_bit(R5_Wantdrain, &dev->flags) || + test_bit(R5_InJournal, &dev->flags))) || (srctype == SYNDROME_SRC_WRITTEN && - dev->written)) - srcs[slot] = sh->dev[i].page; + dev->written)) { + if (test_bit(R5_InJournal, &dev->flags)) + srcs[slot] = sh->dev[i].orig_page; + else + srcs[slot] = sh->dev[i].page; + } i = raid6_next_disk(i, disks); } while (i != d0_idx); @@ -1475,6 +1520,13 @@ static void ops_complete_prexor(void *stripe_head_ref) pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); + + if (r5c_is_writeback(sh->raid_conf->log)) + /* + * raid5-cache write back uses orig_page during prexor. + * After prexor, it is time to free orig_page + */ + r5c_release_extra_page(sh); } static struct dma_async_tx_descriptor * @@ -1496,7 +1548,9 @@ ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; /* Only process blocks that are known to be uptodate */ - if (test_bit(R5_Wantdrain, &dev->flags)) + if (test_bit(R5_InJournal, &dev->flags)) + xor_srcs[count++] = dev->orig_page; + else if (test_bit(R5_Wantdrain, &dev->flags)) xor_srcs[count++] = dev->page; } @@ -1530,6 +1584,7 @@ ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, static struct dma_async_tx_descriptor * ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) { + struct r5conf *conf = sh->raid_conf; int disks = sh->disks; int i; struct stripe_head *head_sh = sh; @@ -1547,6 +1602,11 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) again: dev = &sh->dev[i]; + /* + * clear R5_InJournal, so when rewriting a page in + * journal, it is not skipped by r5l_log_stripe() + */ + clear_bit(R5_InJournal, &dev->flags); spin_lock_irq(&sh->stripe_lock); chosen = dev->towrite; dev->towrite = NULL; @@ -1566,8 +1626,10 @@ again: set_bit(R5_Discard, &dev->flags); else { tx = async_copy_data(1, wbi, &dev->page, - dev->sector, tx, sh); - if (dev->page != dev->orig_page) { + dev->sector, tx, sh, + r5c_is_writeback(conf->log)); + if (dev->page != dev->orig_page && + !r5c_is_writeback(conf->log)) { set_bit(R5_SkipCopy, &dev->flags); clear_bit(R5_UPTODATE, &dev->flags); clear_bit(R5_OVERWRITE, &dev->flags); @@ -1675,7 +1737,8 @@ again: xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; - if (head_sh->dev[i].written) + if (head_sh->dev[i].written || + test_bit(R5_InJournal, &head_sh->dev[i].flags)) xor_srcs[count++] = dev->page; } } else { @@ -2796,6 +2859,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, int level = conf->level; if (rcw) { + /* + * In some cases, handle_stripe_dirtying initially decided to + * run rmw and allocates extra page for prexor. However, rcw is + * cheaper later on. We need to free the extra page now, + * because we won't be able to do that in ops_complete_prexor(). 
+ */ + r5c_release_extra_page(sh); for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; @@ -2806,6 +2876,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, if (!expand) clear_bit(R5_UPTODATE, &dev->flags); s->locked++; + } else if (test_bit(R5_InJournal, &dev->flags)) { + set_bit(R5_LOCKED, &dev->flags); + s->locked++; } } /* if we are not expanding this is a proper write request, and @@ -2845,6 +2918,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, set_bit(R5_LOCKED, &dev->flags); clear_bit(R5_UPTODATE, &dev->flags); s->locked++; + } else if (test_bit(R5_InJournal, &dev->flags)) { + set_bit(R5_LOCKED, &dev->flags); + s->locked++; } } if (!s->locked) @@ -3516,9 +3592,12 @@ static void handle_stripe_dirtying(struct r5conf *conf, } else for (i = disks; i--; ) { /* would I have to read this buffer for read_modify_write */ struct r5dev *dev = &sh->dev[i]; - if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && + if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx || + test_bit(R5_InJournal, &dev->flags)) && !test_bit(R5_LOCKED, &dev->flags) && - !(test_bit(R5_UPTODATE, &dev->flags) || + !((test_bit(R5_UPTODATE, &dev->flags) && + (!test_bit(R5_InJournal, &dev->flags) || + dev->page != dev->orig_page)) || test_bit(R5_Wantcompute, &dev->flags))) { if (test_bit(R5_Insync, &dev->flags)) rmw++; @@ -3530,13 +3609,15 @@ static void handle_stripe_dirtying(struct r5conf *conf, i != sh->pd_idx && i != sh->qd_idx && !test_bit(R5_LOCKED, &dev->flags) && !(test_bit(R5_UPTODATE, &dev->flags) || - test_bit(R5_Wantcompute, &dev->flags))) { + test_bit(R5_InJournal, &dev->flags) || + test_bit(R5_Wantcompute, &dev->flags))) { if (test_bit(R5_Insync, &dev->flags)) rcw++; else rcw += 2*disks; } } + pr_debug("for sector %llu, rmw=%d rcw=%d\n", (unsigned long long)sh->sector, rmw, rcw); set_bit(STRIPE_HANDLE, &sh->state); @@ -3548,10 +3629,24 @@ static void handle_stripe_dirtying(struct r5conf *conf, (unsigned long long)sh->sector, rmw); for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; - if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && + if (test_bit(R5_InJournal, &dev->flags) && + dev->page == dev->orig_page && + !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { + /* alloc page for prexor */ + dev->orig_page = alloc_page(GFP_NOIO); + + /* will handle failure in a later patch*/ + BUG_ON(!dev->orig_page); + } + + if ((dev->towrite || + i == sh->pd_idx || i == sh->qd_idx || + test_bit(R5_InJournal, &dev->flags)) && !test_bit(R5_LOCKED, &dev->flags) && - !(test_bit(R5_UPTODATE, &dev->flags) || - test_bit(R5_Wantcompute, &dev->flags)) && + !((test_bit(R5_UPTODATE, &dev->flags) && + (!test_bit(R5_InJournal, &dev->flags) || + dev->page != dev->orig_page)) || + test_bit(R5_Wantcompute, &dev->flags)) && test_bit(R5_Insync, &dev->flags)) { if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { @@ -3577,6 +3672,7 @@ static void handle_stripe_dirtying(struct r5conf *conf, i != sh->pd_idx && i != sh->qd_idx && !test_bit(R5_LOCKED, &dev->flags) && !(test_bit(R5_UPTODATE, &dev->flags) || + test_bit(R5_InJournal, &dev->flags) || test_bit(R5_Wantcompute, &dev->flags))) { rcw++; if (test_bit(R5_Insync, &dev->flags) && @@ -3616,7 +3712,7 @@ static void handle_stripe_dirtying(struct r5conf *conf, */ if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && (s->locked == 0 && (rcw == 0 || rmw == 0) && - !test_bit(STRIPE_BIT_DELAY, &sh->state))) + !test_bit(STRIPE_BIT_DELAY, &sh->state))) schedule_reconstruction(sh, s, rcw == 0, 
0); } @@ -4110,6 +4206,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) if (test_bit(R5_InJournal, &dev->flags)) s->injournal++; + if (test_bit(R5_InJournal, &dev->flags) && dev->written) + s->just_cached++; } if (test_bit(STRIPE_SYNCING, &sh->state)) { /* If there is a failed device being replaced, @@ -4338,7 +4436,8 @@ static void handle_stripe(struct stripe_head *sh) struct r5dev *dev = &sh->dev[i]; if (test_bit(R5_LOCKED, &dev->flags) && (i == sh->pd_idx || i == sh->qd_idx || - dev->written)) { + dev->written || test_bit(R5_InJournal, + &dev->flags))) { pr_debug("Writing block %d\n", i); set_bit(R5_Wantwrite, &dev->flags); if (prexor) @@ -4378,6 +4477,10 @@ static void handle_stripe(struct stripe_head *sh) test_bit(R5_Discard, &qdev->flags)))))) handle_stripe_clean_event(conf, sh, disks, &s.return_bi); + if (s.just_cached) + r5c_handle_cached_data_endio(conf, sh, disks, &s.return_bi); + r5l_stripe_write_finished(sh); + /* Now we might consider reading some blocks, either to check/generate * parity, or to satisfy requests * or to load a block that is being partially written. @@ -6499,6 +6602,11 @@ static struct r5conf *setup_conf(struct mddev *mddev) for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) INIT_LIST_HEAD(conf->temp_inactive_list + i); + atomic_set(&conf->r5c_cached_full_stripes, 0); + INIT_LIST_HEAD(&conf->r5c_full_stripe_list); + atomic_set(&conf->r5c_cached_partial_stripes, 0); + INIT_LIST_HEAD(&conf->r5c_partial_stripe_list); + conf->level = mddev->new_level; conf->chunk_sectors = mddev->new_chunk_sectors; if (raid5_alloc_percpu(conf) != 0) diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index c9590a8..73c1833 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -264,7 +264,7 @@ struct stripe_head_state { int syncing, expanding, expanded, replacing; int locked, uptodate, to_read, to_write, failed, written; int to_fill, compute, req_compute, non_overwrite; - int injournal; + int injournal, just_cached; int failed_num[2]; int p_failed, q_failed; int dec_preread_active; @@ -368,6 +368,12 @@ enum { STRIPE_R5C_CACHING, /* the stripe is in caching phase * see more detail in the raid5-cache.c */ + STRIPE_R5C_PARTIAL_STRIPE, /* in r5c cache (to-be/being handled or + * in conf->r5c_partial_stripe_list) + */ + STRIPE_R5C_FULL_STRIPE, /* in r5c cache (to-be/being handled or + * in conf->r5c_full_stripe_list) + */ }; #define STRIPE_EXPAND_SYNC_FLAGS \ @@ -618,6 +624,12 @@ struct r5conf { */ atomic_t active_stripes; struct list_head inactive_list[NR_STRIPE_HASH_LOCKS]; + + atomic_t r5c_cached_full_stripes; + struct list_head r5c_full_stripe_list; + atomic_t r5c_cached_partial_stripes; + struct list_head r5c_partial_stripe_list; + atomic_t empty_inactive_list_nr; struct llist_head released_stripes; wait_queue_head_t wait_for_quiescent; @@ -739,4 +751,9 @@ r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh, extern void r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s); +extern void r5c_release_extra_page(struct stripe_head *sh); +extern void r5c_handle_cached_data_endio(struct r5conf *conf, + struct stripe_head *sh, int disks, struct bio_list *return_bi); +extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh, + struct stripe_head_state *s); #endif -- cgit v1.1 From a39f7afde358ca89e9fc09a5525d3f8631a98a3a Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 17 Nov 2016 15:24:40 -0800 Subject: md/r5cache: write-out phase and reclaim support There are two limited 
resources, stripe cache and journal disk space. For better performance, we prioritize reclaim of full stripe writes. To free up more journal space, we free the earliest data on the journal. In the current implementation, reclaim happens when: 1. Periodically (every R5C_RECLAIM_WAKEUP_INTERVAL, 30 seconds), if there has been no reclaim in the past 5 seconds. 2. When there are R5C_FULL_STRIPE_FLUSH_BATCH (256) cached full stripes, or enough cached stripes for a full stripe (chunk size / 4k) (r5c_check_cached_full_stripe). 3. When there is pressure on the stripe cache (r5c_check_stripe_cache_usage). 4. When there is pressure on journal space (r5l_write_stripe, r5c_cache_data). r5c_do_reclaim() contains the new reclaim logic. For the stripe cache: When stripe cache pressure is high (more than 3/4 of stripes are cached, or there are empty inactive lists), flush all full stripes. If fewer than R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2) full stripes are flushed, flush some partial stripes as well. When stripe cache pressure is moderate (1/2 to 3/4 of stripes are cached), flush all full stripes. For log space: To avoid deadlock due to log space, we need to reserve enough space to flush the cached data. The required log space depends on the total number of cached stripes (stripe_in_journal_count). In the current implementation, the writing-out phase automatically includes pending data writes with the parity writes (similar to the write-through case). Therefore, we need up to (conf->raid_disks + 1) pages for each cached stripe (1 page for metadata, raid_disks pages for all data and parity). r5c_log_required_to_flush_cache() calculates the log space required to flush the cache. In the following, we refer to the space calculated by r5c_log_required_to_flush_cache() as reclaim_required_space. Two flags are added to r5conf->cache_state: R5C_LOG_TIGHT and R5C_LOG_CRITICAL. R5C_LOG_TIGHT is set when free space on the log device is less than 3x reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log device is less than 2x reclaim_required_space. r5c_cache keeps all data in cache (not fully committed to RAID) in a list (stripe_in_journal_list). These stripes are kept in the order of their first appearance on the journal, so the log tail (last_checkpoint) should point to the journal_start of the first item in the list. When R5C_LOG_TIGHT is set, r5l_reclaim_thread starts flushing out stripes at the head of stripe_in_journal_list. When R5C_LOG_CRITICAL is set, the state machine only writes data that is already on the log device (in stripe_in_journal_list). This patch includes a fix to improve performance by Shaohua Li . Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 411 +++++++++++++++++++++++++++++++++++++++++++++---- drivers/md/raid5.c | 21 +++ drivers/md/raid5.h | 39 +++-- 3 files changed, 430 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 19c5af9..7dec2a0 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -29,12 +29,21 @@ #define BLOCK_SECTORS (8) /* - * reclaim runs every 1/4 disk size or 10G reclaimable space. This can prevent - * recovery scans a very long log + * log->max_free_space is min(1/4 disk size, 10G reclaimable space). + * + * In write through mode, the reclaim runs every log->max_free_space.
+ * This can prevent the recovery scans for too long */ #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */ #define RECLAIM_MAX_FREE_SPACE_SHIFT (2) +/* wake up reclaim thread periodically */ +#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ) +/* start flush with these full stripes */ +#define R5C_FULL_STRIPE_FLUSH_BATCH 256 +/* reclaim stripes in groups */ +#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2) + /* * We only need 2 bios per I/O unit to make progress, but ensure we * have a few more available to not get too tight. @@ -141,6 +150,12 @@ struct r5l_log { /* for r5c_cache */ enum r5c_journal_mode r5c_journal_mode; + + /* all stripes in r5cache, in the order of seq at sh->log_start */ + struct list_head stripe_in_journal_list; + + spinlock_t stripe_in_journal_lock; + atomic_t stripe_in_journal_count; }; /* @@ -256,11 +271,109 @@ void r5c_handle_cached_data_endio(struct r5conf *conf, } } +/* Check whether we should flush some stripes to free up stripe cache */ +void r5c_check_stripe_cache_usage(struct r5conf *conf) +{ + int total_cached; + + if (!r5c_is_writeback(conf->log)) + return; + + total_cached = atomic_read(&conf->r5c_cached_partial_stripes) + + atomic_read(&conf->r5c_cached_full_stripes); + + /* + * The following condition is true for either of the following: + * - stripe cache pressure high: + * total_cached > 3/4 min_nr_stripes || + * empty_inactive_list_nr > 0 + * - stripe cache pressure moderate: + * total_cached > 1/2 min_nr_stripes + */ + if (total_cached > conf->min_nr_stripes * 1 / 2 || + atomic_read(&conf->empty_inactive_list_nr) > 0) + r5l_wake_reclaim(conf->log, 0); +} + +/* + * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full + * stripes in the cache + */ +void r5c_check_cached_full_stripe(struct r5conf *conf) +{ + if (!r5c_is_writeback(conf->log)) + return; + + /* + * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes + * or a full stripe (chunk size / 4k stripes). + */ + if (atomic_read(&conf->r5c_cached_full_stripes) >= + min(R5C_FULL_STRIPE_FLUSH_BATCH, + conf->chunk_sectors >> STRIPE_SHIFT)) + r5l_wake_reclaim(conf->log, 0); +} + +/* + * Total log space (in sectors) needed to flush all data in cache + * + * Currently, writing-out phase automatically includes all pending writes + * to the same sector. So the reclaim of each stripe takes up to + * (conf->raid_disks + 1) pages of log space. + * + * To totally avoid deadlock due to log space, the code reserves + * (conf->raid_disks + 1) pages for each stripe in cache, which is not + * necessary in most cases. + * + * To improve this, we will need writing-out phase to be able to NOT include + * pending writes, which will reduce the requirement to + * (conf->max_degraded + 1) pages per stripe in cache. + */ +static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf) +{ + struct r5l_log *log = conf->log; + + if (!r5c_is_writeback(log)) + return 0; + + return BLOCK_SECTORS * (conf->raid_disks + 1) * + atomic_read(&log->stripe_in_journal_count); +} + +/* + * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL + * + * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of + * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log + * device is less than 2x of reclaim_required_space. 
+ */ +static inline void r5c_update_log_state(struct r5l_log *log) +{ + struct r5conf *conf = log->rdev->mddev->private; + sector_t free_space; + sector_t reclaim_space; + + if (!r5c_is_writeback(log)) + return; + + free_space = r5l_ring_distance(log, log->log_start, + log->last_checkpoint); + reclaim_space = r5c_log_required_to_flush_cache(conf); + if (free_space < 2 * reclaim_space) + set_bit(R5C_LOG_CRITICAL, &conf->cache_state); + else + clear_bit(R5C_LOG_CRITICAL, &conf->cache_state); + if (free_space < 3 * reclaim_space) + set_bit(R5C_LOG_TIGHT, &conf->cache_state); + else + clear_bit(R5C_LOG_TIGHT, &conf->cache_state); +} + /* * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING. * This function should only be called in write-back mode. */ -static void r5c_make_stripe_write_out(struct stripe_head *sh) +void r5c_make_stripe_write_out(struct stripe_head *sh) { struct r5conf *conf = sh->raid_conf; struct r5l_log *log = conf->log; @@ -440,6 +553,7 @@ static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io) { log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS); + r5c_update_log_state(log); /* * If we filled up the log device start from the beginning again, * which will require a new bio. @@ -600,21 +714,43 @@ static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, atomic_inc(&io->pending_stripe); sh->log_io = io; + if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) + return 0; + + if (sh->log_start == MaxSector) { + BUG_ON(!list_empty(&sh->r5c)); + sh->log_start = io->log_start; + spin_lock_irq(&log->stripe_in_journal_lock); + list_add_tail(&sh->r5c, + &log->stripe_in_journal_list); + spin_unlock_irq(&log->stripe_in_journal_lock); + atomic_inc(&log->stripe_in_journal_count); + } return 0; } -static void r5l_wake_reclaim(struct r5l_log *log, sector_t space); +/* add stripe to no_space_stripes, and then wake up reclaim */ +static inline void r5l_add_no_space_stripe(struct r5l_log *log, + struct stripe_head *sh) +{ + spin_lock(&log->no_space_stripes_lock); + list_add_tail(&sh->log_list, &log->no_space_stripes); + spin_unlock(&log->no_space_stripes_lock); +} + /* * running in raid5d, where reclaim could wait for raid5d too (when it flushes * data from log to raid disks), so we shouldn't wait for reclaim here */ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) { + struct r5conf *conf = sh->raid_conf; int write_disks = 0; int data_pages, parity_pages; int reserve; int i; int ret = 0; + bool wake_reclaim = false; if (!log) return -EAGAIN; @@ -658,22 +794,49 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) mutex_lock(&log->io_mutex); /* meta + data */ reserve = (1 + write_disks) << (PAGE_SHIFT - 9); - if (!r5l_has_free_space(log, reserve)) { - spin_lock(&log->no_space_stripes_lock); - list_add_tail(&sh->log_list, &log->no_space_stripes); - spin_unlock(&log->no_space_stripes_lock); - r5l_wake_reclaim(log, reserve); - } else { - ret = r5l_log_stripe(log, sh, data_pages, parity_pages); - if (ret) { - spin_lock_irq(&log->io_list_lock); - list_add_tail(&sh->log_list, &log->no_mem_stripes); - spin_unlock_irq(&log->io_list_lock); + if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { + if (!r5l_has_free_space(log, reserve)) { + r5l_add_no_space_stripe(log, sh); + wake_reclaim = true; + } else { + ret = r5l_log_stripe(log, sh, data_pages, parity_pages); + if (ret) { + spin_lock_irq(&log->io_list_lock); + list_add_tail(&sh->log_list, + &log->no_mem_stripes); + 
spin_unlock_irq(&log->io_list_lock); + } + } + } else { /* R5C_JOURNAL_MODE_WRITE_BACK */ + /* + * log space critical, do not process stripes that are + * not in cache yet (sh->log_start == MaxSector). + */ + if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && + sh->log_start == MaxSector) { + r5l_add_no_space_stripe(log, sh); + wake_reclaim = true; + reserve = 0; + } else if (!r5l_has_free_space(log, reserve)) { + if (sh->log_start == log->last_checkpoint) + BUG(); + else + r5l_add_no_space_stripe(log, sh); + } else { + ret = r5l_log_stripe(log, sh, data_pages, parity_pages); + if (ret) { + spin_lock_irq(&log->io_list_lock); + list_add_tail(&sh->log_list, + &log->no_mem_stripes); + spin_unlock_irq(&log->io_list_lock); + } } } mutex_unlock(&log->io_mutex); + if (wake_reclaim) + r5l_wake_reclaim(log, reserve); return 0; } @@ -720,10 +883,40 @@ static void r5l_run_no_space_stripes(struct r5l_log *log) spin_unlock(&log->no_space_stripes_lock); } +/* + * calculate new last_checkpoint + * for write through mode, returns log->next_checkpoint + * for write back, returns log_start of first sh in stripe_in_journal_list + */ +static sector_t r5c_calculate_new_cp(struct r5conf *conf) +{ + struct stripe_head *sh; + struct r5l_log *log = conf->log; + sector_t new_cp; + unsigned long flags; + + if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) + return log->next_checkpoint; + + spin_lock_irqsave(&log->stripe_in_journal_lock, flags); + if (list_empty(&conf->log->stripe_in_journal_list)) { + /* all stripes flushed */ + spin_unlock(&log->stripe_in_journal_lock); + return log->next_checkpoint; + } + sh = list_first_entry(&conf->log->stripe_in_journal_list, + struct stripe_head, r5c); + new_cp = sh->log_start; + spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); + return new_cp; +} + static sector_t r5l_reclaimable_space(struct r5l_log *log) { + struct r5conf *conf = log->rdev->mddev->private; + return r5l_ring_distance(log, log->last_checkpoint, - log->next_checkpoint); + r5c_calculate_new_cp(conf)); } static void r5l_run_no_mem_stripe(struct r5l_log *log) @@ -769,6 +962,7 @@ static bool r5l_complete_finished_ios(struct r5l_log *log) static void __r5l_stripe_write_finished(struct r5l_io_unit *io) { struct r5l_log *log = io->log; + struct r5conf *conf = log->rdev->mddev->private; unsigned long flags; spin_lock_irqsave(&log->io_list_lock, flags); @@ -779,7 +973,8 @@ static void __r5l_stripe_write_finished(struct r5l_io_unit *io) return; } - if (r5l_reclaimable_space(log) > log->max_free_space) + if (r5l_reclaimable_space(log) > log->max_free_space || + test_bit(R5C_LOG_TIGHT, &conf->cache_state)) r5l_wake_reclaim(log, 0); spin_unlock_irqrestore(&log->io_list_lock, flags); @@ -900,14 +1095,146 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log, } } +/* + * r5c_flush_stripe moves stripe from cached list to handle_list. When called, + * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes. 
+ * + * must hold conf->device_lock + */ +static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh) +{ + BUG_ON(list_empty(&sh->lru)); + BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); + BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); + + /* + * The stripe is not ON_RELEASE_LIST, so it is safe to call + * raid5_release_stripe() while holding conf->device_lock + */ + BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); + assert_spin_locked(&conf->device_lock); + + list_del_init(&sh->lru); + atomic_inc(&sh->count); + + set_bit(STRIPE_HANDLE, &sh->state); + atomic_inc(&conf->active_stripes); + r5c_make_stripe_write_out(sh); + + if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + atomic_inc(&conf->preread_active_stripes); + raid5_release_stripe(sh); +} + +/* + * if num == 0, flush all full stripes + * if num > 0, flush all full stripes. If less than num full stripes are + * flushed, flush some partial stripes until totally num stripes are + * flushed or there is no more cached stripes. + */ +void r5c_flush_cache(struct r5conf *conf, int num) +{ + int count; + struct stripe_head *sh, *next; + + assert_spin_locked(&conf->device_lock); + if (!conf->log) + return; + + count = 0; + list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) { + r5c_flush_stripe(conf, sh); + count++; + } + + if (count >= num) + return; + list_for_each_entry_safe(sh, next, + &conf->r5c_partial_stripe_list, lru) { + r5c_flush_stripe(conf, sh); + if (++count >= num) + break; + } +} + +static void r5c_do_reclaim(struct r5conf *conf) +{ + struct r5l_log *log = conf->log; + struct stripe_head *sh; + int count = 0; + unsigned long flags; + int total_cached; + int stripes_to_flush; + + if (!r5c_is_writeback(log)) + return; + + total_cached = atomic_read(&conf->r5c_cached_partial_stripes) + + atomic_read(&conf->r5c_cached_full_stripes); + + if (total_cached > conf->min_nr_stripes * 3 / 4 || + atomic_read(&conf->empty_inactive_list_nr) > 0) + /* + * if stripe cache pressure high, flush all full stripes and + * some partial stripes + */ + stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP; + else if (total_cached > conf->min_nr_stripes * 1 / 2 || + atomic_read(&conf->r5c_cached_full_stripes) > + R5C_FULL_STRIPE_FLUSH_BATCH) + /* + * if stripe cache pressure moderate, or if there is many full + * stripes,flush all full stripes + */ + stripes_to_flush = 0; + else + /* no need to flush */ + stripes_to_flush = -1; + + if (stripes_to_flush >= 0) { + spin_lock_irqsave(&conf->device_lock, flags); + r5c_flush_cache(conf, stripes_to_flush); + spin_unlock_irqrestore(&conf->device_lock, flags); + } + + /* if log space is tight, flush stripes on stripe_in_journal_list */ + if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) { + spin_lock_irqsave(&log->stripe_in_journal_lock, flags); + spin_lock(&conf->device_lock); + list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) { + /* + * stripes on stripe_in_journal_list could be in any + * state of the stripe_cache state machine. In this + * case, we only want to flush stripe on + * r5c_cached_full/partial_stripes. The following + * condition makes sure the stripe is on one of the + * two lists. 
+ */ + if (!list_empty(&sh->lru) && + !test_bit(STRIPE_HANDLE, &sh->state) && + atomic_read(&sh->count) == 0) { + r5c_flush_stripe(conf, sh); + } + if (count++ >= R5C_RECLAIM_STRIPE_GROUP) + break; + } + spin_unlock(&conf->device_lock); + spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); + } + md_wakeup_thread(conf->mddev->thread); +} + static void r5l_do_reclaim(struct r5l_log *log) { + struct r5conf *conf = log->rdev->mddev->private; sector_t reclaim_target = xchg(&log->reclaim_target, 0); sector_t reclaimable; sector_t next_checkpoint; - u64 next_cp_seq; + bool write_super; spin_lock_irq(&log->io_list_lock); + write_super = r5l_reclaimable_space(log) > log->max_free_space || + reclaim_target != 0 || !list_empty(&log->no_space_stripes); /* * move proper io_unit to reclaim list. We should not change the order. * reclaimable/unreclaimable io_unit can be mixed in the list, we @@ -928,12 +1255,12 @@ static void r5l_do_reclaim(struct r5l_log *log) log->io_list_lock); } - next_checkpoint = log->next_checkpoint; - next_cp_seq = log->next_cp_seq; + next_checkpoint = r5c_calculate_new_cp(conf); spin_unlock_irq(&log->io_list_lock); BUG_ON(reclaimable < 0); - if (reclaimable == 0) + + if (reclaimable == 0 || !write_super) return; /* @@ -945,7 +1272,7 @@ static void r5l_do_reclaim(struct r5l_log *log) mutex_lock(&log->io_mutex); log->last_checkpoint = next_checkpoint; - log->last_cp_seq = next_cp_seq; + r5c_update_log_state(log); mutex_unlock(&log->io_mutex); r5l_run_no_space_stripes(log); @@ -959,14 +1286,17 @@ static void r5l_reclaim_thread(struct md_thread *thread) if (!log) return; + r5c_do_reclaim(conf); r5l_do_reclaim(log); } -static void r5l_wake_reclaim(struct r5l_log *log, sector_t space) +void r5l_wake_reclaim(struct r5l_log *log, sector_t space) { unsigned long target; unsigned long new = (unsigned long)space; /* overflow in theory */ + if (!log) + return; do { target = log->reclaim_target; if (new < target) @@ -990,11 +1320,12 @@ void r5l_quiesce(struct r5l_log *log, int state) return; log->reclaim_thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev, "reclaim"); + log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL; } else if (state == 1) { /* make sure r5l_write_super_and_discard_space exits */ mddev = log->rdev->mddev; wake_up(&mddev->sb_wait); - r5l_wake_reclaim(log, -1L); + r5l_wake_reclaim(log, MaxSector); md_unregister_thread(&log->reclaim_thread); r5l_do_reclaim(log); } @@ -1415,12 +1746,22 @@ void r5c_finish_stripe_write_out(struct r5conf *conf, if (do_wakeup) wake_up(&conf->wait_for_overlap); + + if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) + return; + + spin_lock_irq(&conf->log->stripe_in_journal_lock); + list_del_init(&sh->r5c); + spin_unlock_irq(&conf->log->stripe_in_journal_lock); + sh->log_start = MaxSector; + atomic_dec(&conf->log->stripe_in_journal_count); } int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh, struct stripe_head_state *s) { + struct r5conf *conf = sh->raid_conf; int pages = 0; int reserve; int i; @@ -1451,12 +1792,15 @@ r5c_cache_data(struct r5l_log *log, struct stripe_head *sh, mutex_lock(&log->io_mutex); /* meta + data */ reserve = (1 + pages) << (PAGE_SHIFT - 9); - if (!r5l_has_free_space(log, reserve)) { - spin_lock(&log->no_space_stripes_lock); - list_add_tail(&sh->log_list, &log->no_space_stripes); - spin_unlock(&log->no_space_stripes_lock); - r5l_wake_reclaim(log, reserve); + if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && + sh->log_start == MaxSector) + 
r5l_add_no_space_stripe(log, sh); + else if (!r5l_has_free_space(log, reserve)) { + if (sh->log_start == log->last_checkpoint) + BUG(); + else + r5l_add_no_space_stripe(log, sh); } else { ret = r5l_log_stripe(log, sh, pages, 0); if (ret) { @@ -1470,7 +1814,6 @@ r5c_cache_data(struct r5l_log *log, struct stripe_head *sh, return 0; } - static int r5l_load_log(struct r5l_log *log) { struct md_rdev *rdev = log->rdev; @@ -1530,6 +1873,9 @@ create: log->max_free_space = RECLAIM_MAX_FREE_SPACE; log->last_checkpoint = cp; log->next_checkpoint = cp; + mutex_lock(&log->io_mutex); + r5c_update_log_state(log); + mutex_unlock(&log->io_mutex); __free_page(page); @@ -1601,6 +1947,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) log->rdev->mddev, "reclaim"); if (!log->reclaim_thread) goto reclaim_thread; + log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL; + init_waitqueue_head(&log->iounit_wait); INIT_LIST_HEAD(&log->no_mem_stripes); @@ -1609,6 +1957,9 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) spin_lock_init(&log->no_space_stripes_lock); log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; + INIT_LIST_HEAD(&log->stripe_in_journal_list); + spin_lock_init(&log->stripe_in_journal_lock); + atomic_set(&log->stripe_in_journal_count, 0); if (r5l_load_log(log)) goto error; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f535ce2..90638ba 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -228,6 +228,16 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, for (i = sh->disks; i--; ) if (test_bit(R5_InJournal, &sh->dev[i].flags)) injournal++; + /* + * When quiesce in r5c write back, set STRIPE_HANDLE for stripes with + * data in journal, so they are not released to cached lists + */ + if (conf->quiesce && r5c_is_writeback(conf->log) && + !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) { + if (test_bit(STRIPE_R5C_CACHING, &sh->state)) + r5c_make_stripe_write_out(sh); + set_bit(STRIPE_HANDLE, &sh->state); + } if (test_bit(STRIPE_HANDLE, &sh->state)) { if (test_bit(STRIPE_DELAYED, &sh->state) && @@ -268,6 +278,7 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) atomic_dec(&conf->r5c_cached_partial_stripes); list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); + r5c_check_cached_full_stripe(conf); } else { /* partial stripe */ if (!test_and_set_bit(STRIPE_R5C_PARTIAL_STRIPE, @@ -639,9 +650,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector, } if (noblock && sh == NULL) break; + + r5c_check_stripe_cache_usage(conf); if (!sh) { set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state); + r5l_wake_reclaim(conf->log, 0); wait_event_lock_irq( conf->wait_for_stripe, !list_empty(conf->inactive_list + hash) && @@ -1992,7 +2006,9 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, spin_lock_init(&sh->batch_lock); INIT_LIST_HEAD(&sh->batch_list); INIT_LIST_HEAD(&sh->lru); + INIT_LIST_HEAD(&sh->r5c); atomic_set(&sh->count, 1); + sh->log_start = MaxSector; for (i = 0; i < disks; i++) { struct r5dev *dev = &sh->dev[i]; @@ -4759,6 +4775,10 @@ static int raid5_congested(struct mddev *mddev, int bits) if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) return 1; + + /* Also checks whether there is pressure on r5cache log space */ + if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) + return 1; if (conf->quiesce) return 1; if (atomic_read(&conf->empty_inactive_list_nr)) @@ -7661,6 +7681,7 @@ static void 
raid5_quiesce(struct mddev *mddev, int state) /* '2' tells resync/reshape to pause so that all * active stripes can drain */ + r5c_flush_cache(conf, INT_MAX); conf->quiesce = 2; wait_event_cmd(conf->wait_for_quiescent, atomic_read(&conf->active_stripes) == 0 && diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 73c1833..35b4c0f 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -226,6 +226,8 @@ struct stripe_head { struct r5l_io_unit *log_io; struct list_head log_list; + sector_t log_start; /* first meta block on the journal */ + struct list_head r5c; /* for r5c_cache->stripe_in_journal */ /** * struct stripe_operations * @target - STRIPE_OP_COMPUTE_BLK target @@ -537,6 +539,27 @@ struct r5worker_group { int stripes_cnt; }; +enum r5_cache_state { + R5_INACTIVE_BLOCKED, /* release of inactive stripes blocked, + * waiting for 25% to be free + */ + R5_ALLOC_MORE, /* It might help to allocate another + * stripe. + */ + R5_DID_ALLOC, /* A stripe was allocated, don't allocate + * more until at least one has been + * released. This avoids flooding + * the cache. + */ + R5C_LOG_TIGHT, /* log device space tight, need to + * prioritize stripes at last_checkpoint + */ + R5C_LOG_CRITICAL, /* log device is running out of space, + * only process stripes that are already + * occupying the log + */ +}; + struct r5conf { struct hlist_head *stripe_hashtbl; /* only protect corresponding hash list and inactive_list */ @@ -636,17 +659,6 @@ struct r5conf { wait_queue_head_t wait_for_stripe; wait_queue_head_t wait_for_overlap; unsigned long cache_state; -#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked, - * waiting for 25% to be free - */ -#define R5_ALLOC_MORE 2 /* It might help to allocate another - * stripe. - */ -#define R5_DID_ALLOC 4 /* A stripe was allocated, don't allocate - * more until at least one has been - * released. This avoids flooding - * the cache. - */ struct shrinker shrinker; int pool_size; /* number of disks in stripeheads in pool */ spinlock_t device_lock; @@ -752,8 +764,13 @@ extern void r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s); extern void r5c_release_extra_page(struct stripe_head *sh); +extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space); extern void r5c_handle_cached_data_endio(struct r5conf *conf, struct stripe_head *sh, int disks, struct bio_list *return_bi); extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh, struct stripe_head_state *s); +extern void r5c_make_stripe_write_out(struct stripe_head *sh); +extern void r5c_flush_cache(struct r5conf *conf, int num); +extern void r5c_check_stripe_cache_usage(struct r5conf *conf); +extern void r5c_check_cached_full_stripe(struct r5conf *conf); #endif -- cgit v1.1 From 2c7da14b90a01e48b17a028de6050a796cfd6d8d Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 17 Nov 2016 15:24:41 -0800 Subject: md/r5cache: sysfs entry journal_mode With write cache, journal_mode is the knob to switch between write-back and write-through. 
Below is an example: root@virt-test:~/# cat /sys/block/md0/md/journal_mode [write-through] write-back root@virt-test:~/# echo write-back > /sys/block/md0/md/journal_mode root@virt-test:~/# cat /sys/block/md0/md/journal_mode write-through [write-back] Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++ drivers/md/raid5.c | 1 + drivers/md/raid5.h | 1 + 3 files changed, 67 insertions(+) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 7dec2a0..b9ad0e8 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -60,6 +60,8 @@ enum r5c_journal_mode { R5C_JOURNAL_MODE_WRITE_BACK = 1, }; +static char *r5c_journal_mode_str[] = {"write-through", + "write-back"}; /* * raid5 cache state machine * @@ -1617,6 +1619,69 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp) set_bit(MD_CHANGE_DEVS, &mddev->flags); } +static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) +{ + struct r5conf *conf = mddev->private; + int ret; + + if (!conf->log) + return 0; + + switch (conf->log->r5c_journal_mode) { + case R5C_JOURNAL_MODE_WRITE_THROUGH: + ret = snprintf( + page, PAGE_SIZE, "[%s] %s\n", + r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH], + r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]); + break; + case R5C_JOURNAL_MODE_WRITE_BACK: + ret = snprintf( + page, PAGE_SIZE, "%s [%s]\n", + r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH], + r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]); + break; + default: + ret = 0; + } + return ret; +} + +static ssize_t r5c_journal_mode_store(struct mddev *mddev, + const char *page, size_t length) +{ + struct r5conf *conf = mddev->private; + struct r5l_log *log = conf->log; + int val = -1, i; + int len = length; + + if (!log) + return -ENODEV; + + if (len && page[len - 1] == '\n') + len -= 1; + for (i = 0; i < ARRAY_SIZE(r5c_journal_mode_str); i++) + if (strlen(r5c_journal_mode_str[i]) == len && + strncmp(page, r5c_journal_mode_str[i], len) == 0) { + val = i; + break; + } + if (val < R5C_JOURNAL_MODE_WRITE_THROUGH || + val > R5C_JOURNAL_MODE_WRITE_BACK) + return -EINVAL; + + mddev_suspend(mddev); + conf->log->r5c_journal_mode = val; + mddev_resume(mddev); + + pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", + mdname(mddev), val, r5c_journal_mode_str[val]); + return length; +} + +struct md_sysfs_entry +r5c_journal_mode = __ATTR(journal_mode, 0644, + r5c_journal_mode_show, r5c_journal_mode_store); + /* * Try handle write operation in caching phase. This function should only * be called in write-back mode. 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 90638ba..17cf98e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6319,6 +6319,7 @@ static struct attribute *raid5_attrs[] = { &raid5_group_thread_cnt.attr, &raid5_skip_copy.attr, &raid5_rmw_level.attr, + &r5c_journal_mode.attr, NULL, }; static struct attribute_group raid5_attrs_group = { diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 35b4c0f..a698113 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -773,4 +773,5 @@ extern void r5c_make_stripe_write_out(struct stripe_head *sh); extern void r5c_flush_cache(struct r5conf *conf, int num); extern void r5c_check_stripe_cache_usage(struct r5conf *conf); extern void r5c_check_cached_full_stripe(struct r5conf *conf); +extern struct md_sysfs_entry r5c_journal_mode; #endif -- cgit v1.1 From 9ed988f5dc673f009d78f7ac55c5da88e1cf58a0 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 17 Nov 2016 15:24:42 -0800 Subject: md/r5cache: refactoring journal recovery code 1. rename r5l_read_meta_block() as r5l_recovery_read_meta_block(); 2. pull the code that initialize r5l_meta_block from r5l_log_write_empty_meta_block() to a separate function r5l_recovery_create_empty_meta_block(), so that we can reuse this piece of code. Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index b9ad0e8..390be84 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1356,8 +1356,8 @@ struct r5l_recovery_ctx { u64 seq; /* recovery position seq */ }; -static int r5l_read_meta_block(struct r5l_log *log, - struct r5l_recovery_ctx *ctx) +static int r5l_recovery_read_meta_block(struct r5l_log *log, + struct r5l_recovery_ctx *ctx) { struct page *page = ctx->meta_page; struct r5l_meta_block *mb; @@ -1530,7 +1530,7 @@ static void r5l_recovery_flush_log(struct r5l_log *log, struct r5l_recovery_ctx *ctx) { while (1) { - if (r5l_read_meta_block(log, ctx)) + if (r5l_recovery_read_meta_block(log, ctx)) return; if (r5l_recovery_flush_one_meta(log, ctx)) return; @@ -1539,17 +1539,16 @@ static void r5l_recovery_flush_log(struct r5l_log *log, } } -static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, - u64 seq) +static void +r5l_recovery_create_empty_meta_block(struct r5l_log *log, + struct page *page, + sector_t pos, u64 seq) { - struct page *page; struct r5l_meta_block *mb; u32 crc; - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) - return -ENOMEM; mb = page_address(page); + clear_page(mb); mb->magic = cpu_to_le32(R5LOG_MAGIC); mb->version = R5LOG_VERSION; mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block)); @@ -1557,7 +1556,17 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, mb->position = cpu_to_le64(pos); crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); mb->checksum = cpu_to_le32(crc); +} +static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, + u64 seq) +{ + struct page *page; + + page = alloc_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + r5l_recovery_create_empty_meta_block(log, page, pos, seq); if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, WRITE_FUA, false)) { __free_page(page); -- cgit v1.1 From b4c625c67362b3940f619c1a836b4e8329106658 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 17 Nov 2016 15:24:43 -0800 Subject: md/r5cache: r5cache recovery: part 1 
Recovery of the write-back cache follows different logic than the write-through-only cache. Specifically, for the write-back cache, recovery needs to scan through all active journal entries before flushing data out. Therefore, a large portion of the recovery logic is rewritten here. To make the diffs cleaner, we split the rewrite as follows: 1. In this patch, we: - add new data to r5l_recovery_ctx - add new functions to recover the write-back cache The new functions are not used in this patch, so this patch does not change the behavior of recovery. 2. In the next patch, we: - modify the main recovery procedure r5l_recovery_log() to call the new functions - remove the old functions With the cache feature, there are 2 different recovery scenarios: 1. Data-Parity stripe: a stripe with complete parity in the journal. 2. Data-Only stripe: a stripe with only data in the journal (or partial parity). The code differentiates Data-Parity stripes from Data-Only stripes with the flag STRIPE_R5C_CACHING. For Data-Parity stripes, we use the same procedure as the raid5 journal, where all the data and parity are replayed to the RAID devices. For Data-Only stripes, we need to compute the parity and finish the full reconstruct write or RMW write. For simplicity, during recovery we load such a stripe into the stripe cache. Once the array is started, the stripe cache state machine will handle these stripes through the normal write path. r5c_recovery_flush_log contains the main recovery procedure. The recovery code first scans through the journal and loads data into the stripe cache. The code keeps track of all these stripes in a list (using sh->lru and ctx->cached_list); stripes in the list are ordered by their first appearance on the journal. During the scan, the recovery code classifies each stripe as Data-Parity or Data-Only. During the scan, the array may run out of stripe cache. In that case, the recovery code calls raid5_set_cache_size to increase the stripe cache size. If the array still runs out of stripe cache because there isn't enough memory, the array will not assemble. At the end of the scan, the recovery code replays all Data-Parity stripes and sets proper states for Data-Only stripes. The recovery code also increases the seq number by 10 and rewrites all Data-Only stripes to the journal, to avoid confusion after repeated crashes. More details are explained in raid5-cache.c before r5c_recovery_rewrite_data_only_stripes(). A simplified sketch of the Data-Parity/Data-Only classification is given below.
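To make the classification concrete, here is a small, self-contained user-space sketch of the scan decision: a stripe whose most recent journaled payloads end with parity is replayed to the RAID devices (Data-Parity), while a stripe with only data, or with data logged after its parity, is loaded into the stripe cache (Data-Only). This is only an illustration of the rule described above; the struct entry / journal[] model and every name in it are invented for the example, and this is not the actual r5c_recovery_flush_log() code.

#include <stdio.h>

enum payload { DATA, PARITY };

struct entry {                  /* one journal payload */
	unsigned long stripe;   /* stripe sector the payload belongs to */
	enum payload type;
};

enum stripe_state { UNSEEN, CACHING, WRITE_OUT };

int main(void)
{
	/* journal contents, oldest entry first */
	struct entry journal[] = {
		{ 8, DATA }, { 8, DATA }, { 8, PARITY },    /* data + parity   */
		{ 16, DATA },                               /* data only       */
		{ 24, DATA }, { 24, PARITY }, { 24, DATA }  /* data after parity */
	};
	enum stripe_state state[32] = { UNSEEN };
	unsigned i;

	for (i = 0; i < sizeof(journal) / sizeof(journal[0]); i++) {
		struct entry *e = &journal[i];

		if (e->type == DATA)
			/*
			 * data after parity: the older writes were already
			 * flushed to the array, so the stripe is tracked
			 * again as data-only from this point on
			 */
			state[e->stripe] = CACHING;
		else
			state[e->stripe] = WRITE_OUT;   /* data-parity stripe */
	}

	for (i = 0; i < 32; i++)
		if (state[i] != UNSEEN)
			printf("stripe %2u: %s\n", i,
			       state[i] == WRITE_OUT ?
			       "replay to RAID (Data-Parity)" :
			       "load into stripe cache (Data-Only)");
	return 0;
}

Running the sketch classifies stripe 8 as Data-Parity, and stripes 16 and 24 as Data-Only, matching the rule that data seen after parity restarts the stripe as data-only.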
Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 602 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 602 insertions(+) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 390be84..3aa7ecc 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1,5 +1,6 @@ /* * Copyright (C) 2015 Shaohua Li + * Copyright (C) 2016 Song Liu * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -1354,6 +1355,9 @@ struct r5l_recovery_ctx { sector_t meta_total_blocks; /* total size of current meta and data */ sector_t pos; /* recovery position */ u64 seq; /* recovery position seq */ + int data_parity_stripes; /* number of data_parity stripes */ + int data_only_stripes; /* number of data_only stripes */ + struct list_head cached_list; }; static int r5l_recovery_read_meta_block(struct r5l_log *log, @@ -1576,6 +1580,590 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, return 0; } +/* + * r5l_recovery_load_data and r5l_recovery_load_parity uses flag R5_Wantwrite + * to mark valid (potentially not flushed) data in the journal. + * + * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb, + * so there should not be any mismatch here. + */ +static void r5l_recovery_load_data(struct r5l_log *log, + struct stripe_head *sh, + struct r5l_recovery_ctx *ctx, + struct r5l_payload_data_parity *payload, + sector_t log_offset) +{ + struct mddev *mddev = log->rdev->mddev; + struct r5conf *conf = mddev->private; + int dd_idx; + + raid5_compute_sector(conf, + le64_to_cpu(payload->location), 0, + &dd_idx, sh); + sync_page_io(log->rdev, log_offset, PAGE_SIZE, + sh->dev[dd_idx].page, REQ_OP_READ, 0, false); + sh->dev[dd_idx].log_checksum = + le32_to_cpu(payload->checksum[0]); + ctx->meta_total_blocks += BLOCK_SECTORS; + + set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags); + set_bit(STRIPE_R5C_CACHING, &sh->state); +} + +static void r5l_recovery_load_parity(struct r5l_log *log, + struct stripe_head *sh, + struct r5l_recovery_ctx *ctx, + struct r5l_payload_data_parity *payload, + sector_t log_offset) +{ + struct mddev *mddev = log->rdev->mddev; + struct r5conf *conf = mddev->private; + + ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded; + sync_page_io(log->rdev, log_offset, PAGE_SIZE, + sh->dev[sh->pd_idx].page, REQ_OP_READ, 0, false); + sh->dev[sh->pd_idx].log_checksum = + le32_to_cpu(payload->checksum[0]); + set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags); + + if (sh->qd_idx >= 0) { + sync_page_io(log->rdev, + r5l_ring_add(log, log_offset, BLOCK_SECTORS), + PAGE_SIZE, sh->dev[sh->qd_idx].page, + REQ_OP_READ, 0, false); + sh->dev[sh->qd_idx].log_checksum = + le32_to_cpu(payload->checksum[1]); + set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags); + } + clear_bit(STRIPE_R5C_CACHING, &sh->state); +} + +static void r5l_recovery_reset_stripe(struct stripe_head *sh) +{ + int i; + + sh->state = 0; + sh->log_start = MaxSector; + for (i = sh->disks; i--; ) + sh->dev[i].flags = 0; +} + +static void +r5l_recovery_replay_one_stripe(struct r5conf *conf, + struct stripe_head *sh, + struct r5l_recovery_ctx *ctx) +{ + struct md_rdev *rdev, *rrdev; + int disk_index; + int data_count = 0; + + for (disk_index = 0; disk_index < sh->disks; disk_index++) { + if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) + continue; + if (disk_index == sh->qd_idx || disk_index == sh->pd_idx) + continue; + 
data_count++; + } + + /* + * stripes that only have parity must have been flushed + * before the crash that we are now recovering from, so + * there is nothing more to recovery. + */ + if (data_count == 0) + goto out; + + for (disk_index = 0; disk_index < sh->disks; disk_index++) { + if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) + continue; + + /* in case device is broken */ + rcu_read_lock(); + rdev = rcu_dereference(conf->disks[disk_index].rdev); + if (rdev) { + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + sync_page_io(rdev, sh->sector, PAGE_SIZE, + sh->dev[disk_index].page, REQ_OP_WRITE, 0, + false); + rdev_dec_pending(rdev, rdev->mddev); + rcu_read_lock(); + } + rrdev = rcu_dereference(conf->disks[disk_index].replacement); + if (rrdev) { + atomic_inc(&rrdev->nr_pending); + rcu_read_unlock(); + sync_page_io(rrdev, sh->sector, PAGE_SIZE, + sh->dev[disk_index].page, REQ_OP_WRITE, 0, + false); + rdev_dec_pending(rrdev, rrdev->mddev); + rcu_read_lock(); + } + rcu_read_unlock(); + } + ctx->data_parity_stripes++; +out: + r5l_recovery_reset_stripe(sh); +} + +static struct stripe_head * +r5c_recovery_alloc_stripe(struct r5conf *conf, + struct list_head *recovery_list, + sector_t stripe_sect, + sector_t log_start) +{ + struct stripe_head *sh; + + sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0); + if (!sh) + return NULL; /* no more stripe available */ + + r5l_recovery_reset_stripe(sh); + sh->log_start = log_start; + + return sh; +} + +static struct stripe_head * +r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect) +{ + struct stripe_head *sh; + + list_for_each_entry(sh, list, lru) + if (sh->sector == sect) + return sh; + return NULL; +} + +static void +r5c_recovery_drop_stripes(struct list_head *cached_stripe_list, + struct r5l_recovery_ctx *ctx) +{ + struct stripe_head *sh, *next; + + list_for_each_entry_safe(sh, next, cached_stripe_list, lru) { + r5l_recovery_reset_stripe(sh); + list_del_init(&sh->lru); + raid5_release_stripe(sh); + } +} + +static void +r5c_recovery_replay_stripes(struct list_head *cached_stripe_list, + struct r5l_recovery_ctx *ctx) +{ + struct stripe_head *sh, *next; + + list_for_each_entry_safe(sh, next, cached_stripe_list, lru) + if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { + r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx); + list_del_init(&sh->lru); + raid5_release_stripe(sh); + } +} + +/* if matches return 0; otherwise return -EINVAL */ +static int +r5l_recovery_verify_data_checksum(struct r5l_log *log, struct page *page, + sector_t log_offset, __le32 log_checksum) +{ + void *addr; + u32 checksum; + + sync_page_io(log->rdev, log_offset, PAGE_SIZE, + page, REQ_OP_READ, 0, false); + addr = kmap_atomic(page); + checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE); + kunmap_atomic(addr); + return (le32_to_cpu(log_checksum) == checksum) ? 
0 : -EINVAL; +} + +/* + * before loading data to stripe cache, we need verify checksum for all data, + * if there is mismatch for any data page, we drop all data in the mata block + */ +static int +r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log, + struct r5l_recovery_ctx *ctx) +{ + struct mddev *mddev = log->rdev->mddev; + struct r5conf *conf = mddev->private; + struct r5l_meta_block *mb = page_address(ctx->meta_page); + sector_t mb_offset = sizeof(struct r5l_meta_block); + sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); + struct page *page; + struct r5l_payload_data_parity *payload; + + page = alloc_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + while (mb_offset < le32_to_cpu(mb->meta_size)) { + payload = (void *)mb + mb_offset; + + if (payload->header.type == R5LOG_PAYLOAD_DATA) { + if (r5l_recovery_verify_data_checksum( + log, page, log_offset, + payload->checksum[0]) < 0) + goto mismatch; + } else if (payload->header.type == R5LOG_PAYLOAD_PARITY) { + if (r5l_recovery_verify_data_checksum( + log, page, log_offset, + payload->checksum[0]) < 0) + goto mismatch; + if (conf->max_degraded == 2 && /* q for RAID 6 */ + r5l_recovery_verify_data_checksum( + log, page, + r5l_ring_add(log, log_offset, + BLOCK_SECTORS), + payload->checksum[1]) < 0) + goto mismatch; + } else /* not R5LOG_PAYLOAD_DATA or R5LOG_PAYLOAD_PARITY */ + goto mismatch; + + log_offset = r5l_ring_add(log, log_offset, + le32_to_cpu(payload->size)); + + mb_offset += sizeof(struct r5l_payload_data_parity) + + sizeof(__le32) * + (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); + } + + put_page(page); + return 0; + +mismatch: + put_page(page); + return -EINVAL; +} + +/* + * Analyze all data/parity pages in one meta block + * Returns: + * 0 for success + * -EINVAL for unknown playload type + * -EAGAIN for checksum mismatch of data page + * -ENOMEM for run out of memory (alloc_page failed or run out of stripes) + */ +static int +r5c_recovery_analyze_meta_block(struct r5l_log *log, + struct r5l_recovery_ctx *ctx, + struct list_head *cached_stripe_list) +{ + struct mddev *mddev = log->rdev->mddev; + struct r5conf *conf = mddev->private; + struct r5l_meta_block *mb; + struct r5l_payload_data_parity *payload; + int mb_offset; + sector_t log_offset; + sector_t stripe_sect; + struct stripe_head *sh; + int ret; + + /* + * for mismatch in data blocks, we will drop all data in this mb, but + * we will still read next mb for other data with FLUSH flag, as + * io_unit could finish out of order. + */ + ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx); + if (ret == -EINVAL) + return -EAGAIN; + else if (ret) + return ret; /* -ENOMEM duo to alloc_page() failed */ + + mb = page_address(ctx->meta_page); + mb_offset = sizeof(struct r5l_meta_block); + log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); + + while (mb_offset < le32_to_cpu(mb->meta_size)) { + int dd; + + payload = (void *)mb + mb_offset; + stripe_sect = (payload->header.type == R5LOG_PAYLOAD_DATA) ? 
+ raid5_compute_sector( + conf, le64_to_cpu(payload->location), 0, &dd, + NULL) + : le64_to_cpu(payload->location); + + sh = r5c_recovery_lookup_stripe(cached_stripe_list, + stripe_sect); + + if (!sh) { + sh = r5c_recovery_alloc_stripe(conf, cached_stripe_list, + stripe_sect, ctx->pos); + /* + * cannot get stripe from raid5_get_active_stripe + * try replay some stripes + */ + if (!sh) { + r5c_recovery_replay_stripes( + cached_stripe_list, ctx); + sh = r5c_recovery_alloc_stripe( + conf, cached_stripe_list, + stripe_sect, ctx->pos); + } + if (!sh) { + pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n", + mdname(mddev), + conf->min_nr_stripes * 2); + raid5_set_cache_size(mddev, + conf->min_nr_stripes * 2); + sh = r5c_recovery_alloc_stripe( + conf, cached_stripe_list, stripe_sect, + ctx->pos); + } + if (!sh) { + pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n", + mdname(mddev)); + return -ENOMEM; + } + list_add_tail(&sh->lru, cached_stripe_list); + } + + if (payload->header.type == R5LOG_PAYLOAD_DATA) { + if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { + r5l_recovery_replay_one_stripe(conf, sh, ctx); + r5l_recovery_reset_stripe(sh); + sh->log_start = ctx->pos; + list_move_tail(&sh->lru, cached_stripe_list); + } + r5l_recovery_load_data(log, sh, ctx, payload, + log_offset); + } else if (payload->header.type == R5LOG_PAYLOAD_PARITY) + r5l_recovery_load_parity(log, sh, ctx, payload, + log_offset); + else + return -EINVAL; + + log_offset = r5l_ring_add(log, log_offset, + le32_to_cpu(payload->size)); + + mb_offset += sizeof(struct r5l_payload_data_parity) + + sizeof(__le32) * + (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); + } + + return 0; +} + +/* + * Load the stripe into cache. The stripe will be written out later by + * the stripe cache state machine. + */ +static void r5c_recovery_load_one_stripe(struct r5l_log *log, + struct stripe_head *sh) +{ + struct r5conf *conf = sh->raid_conf; + struct r5dev *dev; + int i; + + for (i = sh->disks; i--; ) { + dev = sh->dev + i; + if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) { + set_bit(R5_InJournal, &dev->flags); + set_bit(R5_UPTODATE, &dev->flags); + } + } + set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state); + atomic_inc(&conf->r5c_cached_partial_stripes); + list_add_tail(&sh->r5c, &log->stripe_in_journal_list); +} + +/* + * Scan through the log for all to-be-flushed data + * + * For stripes with data and parity, namely Data-Parity stripe + * (STRIPE_R5C_CACHING == 0), we simply replay all the writes. + * + * For stripes with only data, namely Data-Only stripe + * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine. + * + * For a stripe, if we see data after parity, we should discard all previous + * data and parity for this stripe, as these data are already flushed to + * the array. + * + * At the end of the scan, we return the new journal_tail, which points to + * first data-only stripe on the journal device, or next invalid meta block. 
+ */ +static int r5c_recovery_flush_log(struct r5l_log *log, + struct r5l_recovery_ctx *ctx) +{ + struct stripe_head *sh, *next; + int ret = 0; + + /* scan through the log */ + while (1) { + if (r5l_recovery_read_meta_block(log, ctx)) + break; + + ret = r5c_recovery_analyze_meta_block(log, ctx, + &ctx->cached_list); + /* + * -EAGAIN means mismatch in data block, in this case, we still + * try scan the next metablock + */ + if (ret && ret != -EAGAIN) + break; /* ret == -EINVAL or -ENOMEM */ + ctx->seq++; + ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks); + } + + if (ret == -ENOMEM) { + r5c_recovery_drop_stripes(&ctx->cached_list, ctx); + return ret; + } + + /* replay data-parity stripes */ + r5c_recovery_replay_stripes(&ctx->cached_list, ctx); + + /* load data-only stripes to stripe cache */ + list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { + WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); + r5c_recovery_load_one_stripe(log, sh); + list_del_init(&sh->lru); + raid5_release_stripe(sh); + ctx->data_only_stripes++; + } + + return 0; +} + +/* + * we did a recovery. Now ctx.pos points to an invalid meta block. New + * log will start here. but we can't let superblock point to last valid + * meta block. The log might looks like: + * | meta 1| meta 2| meta 3| + * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If + * superblock points to meta 1, we write a new valid meta 2n. if crash + * happens again, new recovery will start from meta 1. Since meta 2n is + * valid now, recovery will think meta 3 is valid, which is wrong. + * The solution is we create a new meta in meta2 with its seq == meta + * 1's seq + 10 and let superblock points to meta2. The same recovery will + * not think meta 3 is a valid meta, because its seq doesn't match + */ + +/* + * Before recovery, the log looks like the following + * + * --------------------------------------------- + * | valid log | invalid log | + * --------------------------------------------- + * ^ + * |- log->last_checkpoint + * |- log->last_cp_seq + * + * Now we scan through the log until we see invalid entry + * + * --------------------------------------------- + * | valid log | invalid log | + * --------------------------------------------- + * ^ ^ + * |- log->last_checkpoint |- ctx->pos + * |- log->last_cp_seq |- ctx->seq + * + * From this point, we need to increase seq number by 10 to avoid + * confusing next recovery. + * + * --------------------------------------------- + * | valid log | invalid log | + * --------------------------------------------- + * ^ ^ + * |- log->last_checkpoint |- ctx->pos+1 + * |- log->last_cp_seq |- ctx->seq+11 + * + * However, it is not safe to start the state machine yet, because data only + * parities are not yet secured in RAID. To save these data only parities, we + * rewrite them from seq+11. + * + * ----------------------------------------------------------------- + * | valid log | data only stripes | invalid log | + * ----------------------------------------------------------------- + * ^ ^ + * |- log->last_checkpoint |- ctx->pos+n + * |- log->last_cp_seq |- ctx->seq+10+n + * + * If failure happens again during this process, the recovery can safe start + * again from log->last_checkpoint. 
+ * + * Once data only stripes are rewritten to journal, we move log_tail + * + * ----------------------------------------------------------------- + * | old log | data only stripes | invalid log | + * ----------------------------------------------------------------- + * ^ ^ + * |- log->last_checkpoint |- ctx->pos+n + * |- log->last_cp_seq |- ctx->seq+10+n + * + * Then we can safely start the state machine. If failure happens from this + * point on, the recovery will start from new log->last_checkpoint. + */ +static int +r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, + struct r5l_recovery_ctx *ctx) +{ + struct stripe_head *sh; + struct mddev *mddev = log->rdev->mddev; + struct page *page; + + page = alloc_page(GFP_KERNEL); + if (!page) { + pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n", + mdname(mddev)); + return -ENOMEM; + } + + ctx->seq += 10; + list_for_each_entry(sh, &ctx->cached_list, lru) { + struct r5l_meta_block *mb; + int i; + int offset; + sector_t write_pos; + + WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); + r5l_recovery_create_empty_meta_block(log, page, + ctx->pos, ctx->seq); + mb = page_address(page); + offset = le32_to_cpu(mb->meta_size); + write_pos = ctx->pos + BLOCK_SECTORS; + + for (i = sh->disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + struct r5l_payload_data_parity *payload; + void *addr; + + if (test_bit(R5_InJournal, &dev->flags)) { + payload = (void *)mb + offset; + payload->header.type = cpu_to_le16( + R5LOG_PAYLOAD_DATA); + payload->size = BLOCK_SECTORS; + payload->location = cpu_to_le64( + raid5_compute_blocknr(sh, i, 0)); + addr = kmap_atomic(dev->page); + payload->checksum[0] = cpu_to_le32( + crc32c_le(log->uuid_checksum, addr, + PAGE_SIZE)); + kunmap_atomic(addr); + sync_page_io(log->rdev, write_pos, PAGE_SIZE, + dev->page, REQ_OP_WRITE, 0, false); + write_pos = r5l_ring_add(log, write_pos, + BLOCK_SECTORS); + offset += sizeof(__le32) + + sizeof(struct r5l_payload_data_parity); + + } + } + mb->meta_size = cpu_to_le32(offset); + mb->checksum = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); + sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, + REQ_OP_WRITE, WRITE_FUA, false); + sh->log_start = ctx->pos; + ctx->pos = write_pos; + ctx->seq += 1; + } + __free_page(page); + return 0; +} + static int r5l_recovery_log(struct r5l_log *log) { struct r5l_recovery_ctx ctx; @@ -1583,6 +2171,10 @@ static int r5l_recovery_log(struct r5l_log *log) ctx.pos = log->last_checkpoint; ctx.seq = log->last_cp_seq; ctx.meta_page = alloc_page(GFP_KERNEL); + ctx.data_only_stripes = 0; + ctx.data_parity_stripes = 0; + INIT_LIST_HEAD(&ctx.cached_list); + if (!ctx.meta_page) return -ENOMEM; @@ -1617,6 +2209,16 @@ static int r5l_recovery_log(struct r5l_log *log) log->log_start = ctx.pos; log->seq = ctx.seq; } + + /* + * This is to suppress "function defined but not used" warning. + * It will be removed when the two functions are used (next patch). + */ + if (!log) { + r5c_recovery_flush_log(log, &ctx); + r5c_recovery_rewrite_data_only_stripes(log, &ctx); + } + return 0; } -- cgit v1.1 From 5aabf7c49d9ebe54a318976276b187637177a03e Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 17 Nov 2016 15:24:44 -0800 Subject: md/r5cache: r5cache recovery: part 2 1. In previous patch, we: - add new data to r5l_recovery_ctx - add new functions to recovery write-back cache The new functions are not used in this patch, so this patch does not change the behavior of recovery. 2. 
In this patchpatch, we: - modify main recovery procedure r5l_recovery_log() to call new functions - remove old functions Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 210 ++++++----------------------------------------- drivers/md/raid5.c | 3 +- 2 files changed, 26 insertions(+), 187 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 3aa7ecc..6b99570 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1393,156 +1393,6 @@ static int r5l_recovery_read_meta_block(struct r5l_log *log, return 0; } -static int r5l_recovery_flush_one_stripe(struct r5l_log *log, - struct r5l_recovery_ctx *ctx, - sector_t stripe_sect, - int *offset) -{ - struct r5conf *conf = log->rdev->mddev->private; - struct stripe_head *sh; - struct r5l_payload_data_parity *payload; - int disk_index; - - sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0); - while (1) { - sector_t log_offset = r5l_ring_add(log, ctx->pos, - ctx->meta_total_blocks); - payload = page_address(ctx->meta_page) + *offset; - - if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { - raid5_compute_sector(conf, - le64_to_cpu(payload->location), 0, - &disk_index, sh); - - sync_page_io(log->rdev, log_offset, PAGE_SIZE, - sh->dev[disk_index].page, REQ_OP_READ, 0, - false); - sh->dev[disk_index].log_checksum = - le32_to_cpu(payload->checksum[0]); - set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); - } else { - disk_index = sh->pd_idx; - sync_page_io(log->rdev, log_offset, PAGE_SIZE, - sh->dev[disk_index].page, REQ_OP_READ, 0, - false); - sh->dev[disk_index].log_checksum = - le32_to_cpu(payload->checksum[0]); - set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); - - if (sh->qd_idx >= 0) { - disk_index = sh->qd_idx; - sync_page_io(log->rdev, - r5l_ring_add(log, log_offset, BLOCK_SECTORS), - PAGE_SIZE, sh->dev[disk_index].page, - REQ_OP_READ, 0, false); - sh->dev[disk_index].log_checksum = - le32_to_cpu(payload->checksum[1]); - set_bit(R5_Wantwrite, - &sh->dev[disk_index].flags); - } - } - - ctx->meta_total_blocks += le32_to_cpu(payload->size); - *offset += sizeof(struct r5l_payload_data_parity) + - sizeof(__le32) * - (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); - if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) - break; - } - - for (disk_index = 0; disk_index < sh->disks; disk_index++) { - void *addr; - u32 checksum; - - if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) - continue; - addr = kmap_atomic(sh->dev[disk_index].page); - checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE); - kunmap_atomic(addr); - if (checksum != sh->dev[disk_index].log_checksum) - goto error; - } - - for (disk_index = 0; disk_index < sh->disks; disk_index++) { - struct md_rdev *rdev, *rrdev; - - if (!test_and_clear_bit(R5_Wantwrite, - &sh->dev[disk_index].flags)) - continue; - - /* in case device is broken */ - rcu_read_lock(); - rdev = rcu_dereference(conf->disks[disk_index].rdev); - if (rdev) { - atomic_inc(&rdev->nr_pending); - rcu_read_unlock(); - sync_page_io(rdev, stripe_sect, PAGE_SIZE, - sh->dev[disk_index].page, REQ_OP_WRITE, 0, - false); - rdev_dec_pending(rdev, rdev->mddev); - rcu_read_lock(); - } - rrdev = rcu_dereference(conf->disks[disk_index].replacement); - if (rrdev) { - atomic_inc(&rrdev->nr_pending); - rcu_read_unlock(); - sync_page_io(rrdev, stripe_sect, PAGE_SIZE, - sh->dev[disk_index].page, REQ_OP_WRITE, 0, - false); - rdev_dec_pending(rrdev, rrdev->mddev); - rcu_read_lock(); - } - rcu_read_unlock(); - } - 
raid5_release_stripe(sh); - return 0; - -error: - for (disk_index = 0; disk_index < sh->disks; disk_index++) - sh->dev[disk_index].flags = 0; - raid5_release_stripe(sh); - return -EINVAL; -} - -static int r5l_recovery_flush_one_meta(struct r5l_log *log, - struct r5l_recovery_ctx *ctx) -{ - struct r5conf *conf = log->rdev->mddev->private; - struct r5l_payload_data_parity *payload; - struct r5l_meta_block *mb; - int offset; - sector_t stripe_sector; - - mb = page_address(ctx->meta_page); - offset = sizeof(struct r5l_meta_block); - - while (offset < le32_to_cpu(mb->meta_size)) { - int dd; - - payload = (void *)mb + offset; - stripe_sector = raid5_compute_sector(conf, - le64_to_cpu(payload->location), 0, &dd, NULL); - if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector, - &offset)) - return -EINVAL; - } - return 0; -} - -/* copy data/parity from log to raid disks */ -static void r5l_recovery_flush_log(struct r5l_log *log, - struct r5l_recovery_ctx *ctx) -{ - while (1) { - if (r5l_recovery_read_meta_block(log, ctx)) - return; - if (r5l_recovery_flush_one_meta(log, ctx)) - return; - ctx->seq++; - ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks); - } -} - static void r5l_recovery_create_empty_meta_block(struct r5l_log *log, struct page *page, @@ -2166,7 +2016,9 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, static int r5l_recovery_log(struct r5l_log *log) { + struct mddev *mddev = log->rdev->mddev; struct r5l_recovery_ctx ctx; + int ret; ctx.pos = log->last_checkpoint; ctx.seq = log->last_cp_seq; @@ -2178,47 +2030,33 @@ static int r5l_recovery_log(struct r5l_log *log) if (!ctx.meta_page) return -ENOMEM; - r5l_recovery_flush_log(log, &ctx); + ret = r5c_recovery_flush_log(log, &ctx); __free_page(ctx.meta_page); - /* - * we did a recovery. Now ctx.pos points to an invalid meta block. New - * log will start here. but we can't let superblock point to last valid - * meta block. The log might looks like: - * | meta 1| meta 2| meta 3| - * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If - * superblock points to meta 1, we write a new valid meta 2n. if crash - * happens again, new recovery will start from meta 1. Since meta 2n is - * valid now, recovery will think meta 3 is valid, which is wrong. - * The solution is we create a new meta in meta2 with its seq == meta - * 1's seq + 10 and let superblock points to meta2. The same recovery will - * not think meta 3 is a valid meta, because its seq doesn't match - */ - if (ctx.seq > log->last_cp_seq) { - int ret; - - ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10); - if (ret) - return ret; - log->seq = ctx.seq + 11; - log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); - r5l_write_super(log, ctx.pos); - log->last_checkpoint = ctx.pos; - log->next_checkpoint = ctx.pos; - } else { - log->log_start = ctx.pos; - log->seq = ctx.seq; - } + if (ret) + return ret; - /* - * This is to suppress "function defined but not used" warning. - * It will be removed when the two functions are used (next patch). 
- */ - if (!log) { - r5c_recovery_flush_log(log, &ctx); - r5c_recovery_rewrite_data_only_stripes(log, &ctx); + if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0)) + pr_debug("md/raid:%s: starting from clean shutdown\n", + mdname(mddev)); + else { + pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n", + mdname(mddev), ctx.data_only_stripes, + ctx.data_parity_stripes); + + if (ctx.data_only_stripes > 0) + if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) { + pr_err("md/raid:%s: failed to rewrite stripes to journal\n", + mdname(mddev)); + return -EIO; + } } + log->log_start = ctx.pos; + log->next_checkpoint = ctx.pos; + log->seq = ctx.seq; + r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq); + r5l_write_super(log, ctx.pos); return 0; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 17cf98e..aa4968c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7100,7 +7100,8 @@ static int raid5_run(struct mddev *mddev) pr_debug("md/raid:%s: using device %s as journal\n", mdname(mddev), bdevname(journal_dev->bdev, b)); - r5l_init_log(conf, journal_dev); + if (r5l_init_log(conf, journal_dev)) + goto abort; } return 0; -- cgit v1.1 From 3bddb7f8f264ec58dc86e11ca97341c24f9d38f6 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 18 Nov 2016 16:46:50 -0800 Subject: md/r5cache: handle FLUSH and FUA With raid5 cache, we committing data from journal device. When there is flush request, we need to flush journal device's cache. This was not needed in raid5 journal, because we will flush the journal before committing data to raid disks. This is similar to FUA, except that we also need flush journal for FUA. Otherwise, corruptions in earlier meta data will stop recovery from reaching FUA data. slightly changed the code by Shaohua Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 162 +++++++++++++++++++++++++++++++++++++++++------ drivers/md/raid5.c | 12 ++++ drivers/md/raid5.h | 1 + 3 files changed, 157 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 6b99570..8cb79fc 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -159,6 +159,9 @@ struct r5l_log { spinlock_t stripe_in_journal_lock; atomic_t stripe_in_journal_count; + + /* to submit async io_units, to fulfill ordering of flush */ + struct work_struct deferred_io_work; }; /* @@ -185,6 +188,18 @@ struct r5l_io_unit { int state; bool need_split_bio; + struct bio *split_bio; + + unsigned int has_flush:1; /* include flush request */ + unsigned int has_fua:1; /* include fua request */ + unsigned int has_null_flush:1; /* include empty flush request */ + /* + * io isn't sent yet, flush/fua request can only be submitted till it's + * the first IO in running_ios list + */ + unsigned int io_deferred:1; + + struct bio_list flush_barriers; /* size == 0 flush bios */ }; /* r5l_io_unit state */ @@ -494,9 +509,11 @@ static void r5l_move_to_end_ios(struct r5l_log *log) } } +static void __r5l_stripe_write_finished(struct r5l_io_unit *io); static void r5l_log_endio(struct bio *bio) { struct r5l_io_unit *io = bio->bi_private; + struct r5l_io_unit *io_deferred; struct r5l_log *log = io->log; unsigned long flags; @@ -512,18 +529,89 @@ static void r5l_log_endio(struct bio *bio) r5l_move_to_end_ios(log); else r5l_log_run_stripes(log); + if (!list_empty(&log->running_ios)) { + /* + * FLUSH/FUA io_unit is deferred because of ordering, now we + * can dispatch it + */ + io_deferred = 
list_first_entry(&log->running_ios, + struct r5l_io_unit, log_sibling); + if (io_deferred->io_deferred) + schedule_work(&log->deferred_io_work); + } + spin_unlock_irqrestore(&log->io_list_lock, flags); if (log->need_cache_flush) md_wakeup_thread(log->rdev->mddev->thread); + + if (io->has_null_flush) { + struct bio *bi; + + WARN_ON(bio_list_empty(&io->flush_barriers)); + while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) { + bio_endio(bi); + atomic_dec(&io->pending_stripe); + } + if (atomic_read(&io->pending_stripe) == 0) + __r5l_stripe_write_finished(io); + } +} + +static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io) +{ + unsigned long flags; + + spin_lock_irqsave(&log->io_list_lock, flags); + __r5l_set_io_unit_state(io, IO_UNIT_IO_START); + spin_unlock_irqrestore(&log->io_list_lock, flags); + + if (io->has_flush) + bio_set_op_attrs(io->current_bio, REQ_OP_WRITE, WRITE_FLUSH); + if (io->has_fua) + bio_set_op_attrs(io->current_bio, REQ_OP_WRITE, WRITE_FUA); + submit_bio(io->current_bio); + + if (!io->split_bio) + return; + + if (io->has_flush) + bio_set_op_attrs(io->split_bio, REQ_OP_WRITE, WRITE_FLUSH); + if (io->has_fua) + bio_set_op_attrs(io->split_bio, REQ_OP_WRITE, WRITE_FUA); + submit_bio(io->split_bio); +} + +/* deferred io_unit will be dispatched here */ +static void r5l_submit_io_async(struct work_struct *work) +{ + struct r5l_log *log = container_of(work, struct r5l_log, + deferred_io_work); + struct r5l_io_unit *io = NULL; + unsigned long flags; + + spin_lock_irqsave(&log->io_list_lock, flags); + if (!list_empty(&log->running_ios)) { + io = list_first_entry(&log->running_ios, struct r5l_io_unit, + log_sibling); + if (!io->io_deferred) + io = NULL; + else + io->io_deferred = 0; + } + spin_unlock_irqrestore(&log->io_list_lock, flags); + if (io) + r5l_do_submit_io(log, io); } static void r5l_submit_current_io(struct r5l_log *log) { struct r5l_io_unit *io = log->current_io; + struct bio *bio; struct r5l_meta_block *block; unsigned long flags; u32 crc; + bool do_submit = true; if (!io) return; @@ -532,13 +620,20 @@ static void r5l_submit_current_io(struct r5l_log *log) block->meta_size = cpu_to_le32(io->meta_offset); crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE); block->checksum = cpu_to_le32(crc); + bio = io->current_bio; log->current_io = NULL; spin_lock_irqsave(&log->io_list_lock, flags); - __r5l_set_io_unit_state(io, IO_UNIT_IO_START); + if (io->has_flush || io->has_fua) { + if (io != list_first_entry(&log->running_ios, + struct r5l_io_unit, log_sibling)) { + io->io_deferred = 1; + do_submit = false; + } + } spin_unlock_irqrestore(&log->io_list_lock, flags); - - submit_bio(io->current_bio); + if (do_submit) + r5l_do_submit_io(log, io); } static struct bio *r5l_bio_alloc(struct r5l_log *log) @@ -583,6 +678,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) io->log = log; INIT_LIST_HEAD(&io->log_sibling); INIT_LIST_HEAD(&io->stripe_list); + bio_list_init(&io->flush_barriers); io->state = IO_UNIT_RUNNING; io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO); @@ -653,12 +749,11 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page) struct r5l_io_unit *io = log->current_io; if (io->need_split_bio) { - struct bio *prev = io->current_bio; - + BUG_ON(io->split_bio); + io->split_bio = io->current_bio; io->current_bio = r5l_bio_alloc(log); - bio_chain(io->current_bio, prev); - - submit_bio(prev); + bio_chain(io->current_bio, io->split_bio); + io->need_split_bio = false; } if (!bio_add_page(io->current_bio, page, 
PAGE_SIZE, 0)) @@ -687,12 +782,24 @@ static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, io = log->current_io; + if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state)) + io->has_flush = 1; + for (i = 0; i < sh->disks; i++) { if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) || test_bit(R5_InJournal, &sh->dev[i].flags)) continue; if (i == sh->pd_idx || i == sh->qd_idx) continue; + if (test_bit(R5_WantFUA, &sh->dev[i].flags) && + log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) { + io->has_fua = 1; + /* + * we need to flush journal to make sure recovery can + * reach the data with fua flag + */ + io->has_flush = 1; + } r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA, raid5_compute_blocknr(sh, i, 0), sh->dev[i].log_checksum, 0, false); @@ -856,17 +963,34 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) { if (!log) return -ENODEV; - /* - * we flush log disk cache first, then write stripe data to raid disks. - * So if bio is finished, the log disk cache is flushed already. The - * recovery guarantees we can recovery the bio from log disk, so we - * don't need to flush again - */ - if (bio->bi_iter.bi_size == 0) { - bio_endio(bio); - return 0; + + if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { + /* + * in write through (journal only) + * we flush log disk cache first, then write stripe data to + * raid disks. So if bio is finished, the log disk cache is + * flushed already. The recovery guarantees we can recovery + * the bio from log disk, so we don't need to flush again + */ + if (bio->bi_iter.bi_size == 0) { + bio_endio(bio); + return 0; + } + bio->bi_opf &= ~REQ_PREFLUSH; + } else { + /* write back (with cache) */ + if (bio->bi_iter.bi_size == 0) { + mutex_lock(&log->io_mutex); + r5l_get_meta(log, 0); + bio_list_add(&log->current_io->flush_barriers, bio); + log->current_io->has_flush = 1; + log->current_io->has_null_flush = 1; + atomic_inc(&log->current_io->pending_stripe); + r5l_submit_current_io(log); + mutex_unlock(&log->io_mutex); + return 0; + } } - bio->bi_opf &= ~REQ_PREFLUSH; return -EAGAIN; } @@ -2470,6 +2594,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) INIT_LIST_HEAD(&log->no_space_stripes); spin_lock_init(&log->no_space_stripes_lock); + INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); + log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; INIT_LIST_HEAD(&log->stripe_in_journal_list); spin_lock_init(&log->stripe_in_journal_lock); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index aa4968c..dbab8c7 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5248,6 +5248,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) int remaining; DEFINE_WAIT(w); bool do_prepare; + bool do_flush = false; if (unlikely(bi->bi_opf & REQ_PREFLUSH)) { int ret = r5l_handle_flush_request(conf->log, bi); @@ -5259,6 +5260,11 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) return; } /* ret == -EAGAIN, fallback */ + /* + * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH, + * we need to flush journal device + */ + do_flush = bi->bi_opf & REQ_PREFLUSH; } md_write_start(mddev, bi); @@ -5398,6 +5404,12 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) do_prepare = true; goto retry; } + if (do_flush) { + set_bit(STRIPE_R5C_PREFLUSH, &sh->state); + /* we only need flush for one stripe */ + do_flush = false; + } + set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); if ((!sh->batch_head || sh == sh->batch_head) && diff 
--git a/drivers/md/raid5.h b/drivers/md/raid5.h index a698113..d13fe45 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -376,6 +376,7 @@ enum { STRIPE_R5C_FULL_STRIPE, /* in r5c cache (to-be/being handled or * in conf->r5c_full_stripe_list) */ + STRIPE_R5C_PREFLUSH, /* need to flush journal device */ }; #define STRIPE_EXPAND_SYNC_FLAGS \ -- cgit v1.1 From 688834e6ae6b21e3d98b5cf2586aa4a9b515c3a0 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 18 Nov 2016 16:16:11 +1100 Subject: md/failfast: add failfast flag for md to be used by some personalities. This patch just adds a 'failfast' per-device flag which can be stored in v0.90 or v1.x metadata. The flag is not used yet but the intent is that it can be used for mirrored (raid1/raid10) arrays where low latency is more important than keeping all devices on-line. Setting the flag for a device effectively gives permission for that device to be marked as Faulty and excluded from the array on the first error. The underlying driver will be directed not to retry requests that result in failures. There is a proviso that the device must not be marked faulty if that would cause the array as a whole to fail, it may only be marked Faulty if the array remains functional, but is degraded. Failures on read requests will cause the device to be marked as Faulty immediately so that further reads will avoid that device. No attempt will be made to correct read errors by over-writing with the correct data. It is expected that if transient errors, such as cable unplug, are possible, then something in user-space will revalidate failed devices and re-add them when they appear to be working again. Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/md.c | 27 +++++++++++++++++++++++++++ drivers/md/md.h | 6 ++++++ 2 files changed, 33 insertions(+) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index d3cef77..2cf0e89 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1164,6 +1164,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) } if (desc->state & (1<flags); + if (desc->state & (1<flags); } else /* MULTIPATH are always insync */ set_bit(In_sync, &rdev->flags); return 0; @@ -1289,6 +1291,8 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) } if (test_bit(WriteMostly, &rdev2->flags)) d->state |= (1<flags)) + d->state |= (1<raid_disks ; i++) { @@ -1673,6 +1677,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) } if (sb->devflags & WriteMostly1) set_bit(WriteMostly, &rdev->flags); + if (sb->devflags & FailFast1) + set_bit(FailFast, &rdev->flags); if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT) set_bit(Replacement, &rdev->flags); } else /* MULTIPATH are always insync */ @@ -1711,6 +1717,10 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) sb->chunksize = cpu_to_le32(mddev->chunk_sectors); sb->level = cpu_to_le32(mddev->level); sb->layout = cpu_to_le32(mddev->layout); + if (test_bit(FailFast, &rdev->flags)) + sb->devflags |= FailFast1; + else + sb->devflags &= ~FailFast1; if (test_bit(WriteMostly, &rdev->flags)) sb->devflags |= WriteMostly1; @@ -2557,6 +2567,8 @@ state_show(struct md_rdev *rdev, char *page) len += sprintf(page+len, "replacement%s", sep); if (test_bit(ExternalBbl, &flags)) len += sprintf(page+len, "external_bbl%s", sep); + if (test_bit(FailFast, &flags)) + len += sprintf(page+len, "failfast%s", sep); if (len) len -= strlen(sep); @@ -2579,6 +2591,7 @@ state_store(struct md_rdev *rdev, const char *buf, 
size_t len) * so that it gets rebuilt based on bitmap * write_error - sets WriteErrorSeen * -write_error - clears WriteErrorSeen + * {,-}failfast - set/clear FailFast */ int err = -EINVAL; if (cmd_match(buf, "faulty") && rdev->mddev->pers) { @@ -2637,6 +2650,12 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { set_bit(In_sync, &rdev->flags); err = 0; + } else if (cmd_match(buf, "failfast")) { + set_bit(FailFast, &rdev->flags); + err = 0; + } else if (cmd_match(buf, "-failfast")) { + clear_bit(FailFast, &rdev->flags); + err = 0; } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags)) { if (rdev->mddev->pers == NULL) { @@ -5942,6 +5961,8 @@ static int get_disk_info(struct mddev *mddev, void __user * arg) info.state |= (1<flags)) info.state |= (1<flags)) + info.state |= (1<flags); else clear_bit(WriteMostly, &rdev->flags); + if (info->state & (1<flags); + else + clear_bit(FailFast, &rdev->flags); if (info->state & (1<state & (1<flags); + if (info->state & (1<flags); if (!mddev->persistent) { pr_debug("md: nonpersistent superblock ...\n"); diff --git a/drivers/md/md.h b/drivers/md/md.h index af6b33c..bc6712e 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -171,6 +171,12 @@ enum flag_bits { ExternalBbl, /* External metadata provides bad * block management for a disk */ + FailFast, /* Minimal retries should be attempted on + * this device, so use REQ_FAILFAST_DEV. + * Also don't try to repair failed reads. + * It is expects that no bad block log + * is present. + */ }; static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, -- cgit v1.1 From 46533ff7fefb7e9e3539494f5873b00091caa8eb Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 18 Nov 2016 16:16:11 +1100 Subject: md: Use REQ_FAILFAST_* on metadata writes where appropriate This can only be supported on personalities which ensure that md_error() never causes an array to enter the 'failed' state. i.e. if marking a device Faulty would cause some data to be inaccessible, the device is status is left as non-Faulty. This is true for RAID1 and RAID10. If we get a failure writing metadata but the device doesn't fail, it must be the last device so we re-write without FAILFAST to improve chance of success. We also flag the device as LastDev so that future metadata updates don't waste time on failfast writes. 
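As a minimal sketch of this retry policy (standalone userspace C with hypothetical helpers; it is not the kernel code in the diff below), the idea mirrors the do/while loop around md_super_write()/md_super_wait() in the patch: a failfast attempt that fails without failing the device is repeated once without FAILFAST.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for one superblock write; failfast writes are
 * allowed to fail quickly.  Here device 0 only succeeds without failfast. */
static bool write_super(int dev, bool failfast)
{
        return !(failfast && dev == 0);
}

/* Policy from the commit message: use failfast only while the array can
 * survive losing this device, and if a failfast write fails without the
 * device being failed, repeat it once without failfast (the LastDev idea). */
static bool write_super_with_fallback(int dev, int working_devs)
{
        bool failfast = working_devs > 1;

        if (write_super(dev, failfast))
                return true;
        return write_super(dev, false);
}

int main(void)
{
        printf("dev0: %s\n", write_super_with_fallback(0, 2) ? "ok" : "failed");
        return 0;
}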
Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/bitmap.c | 15 ++++++++++++--- drivers/md/md.c | 44 ++++++++++++++++++++++++++++++++++---------- drivers/md/md.h | 21 ++++++++++++++++++++- drivers/md/raid1.c | 1 + drivers/md/raid10.c | 1 + 5 files changed, 68 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index cf77cbf..c462157 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -209,11 +209,13 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) { - struct md_rdev *rdev = NULL; + struct md_rdev *rdev; struct block_device *bdev; struct mddev *mddev = bitmap->mddev; struct bitmap_storage *store = &bitmap->storage; +restart: + rdev = NULL; while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { int size = PAGE_SIZE; loff_t offset = mddev->bitmap_info.offset; @@ -269,8 +271,8 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) page); } - if (wait) - md_super_wait(mddev); + if (wait && md_super_wait(mddev) < 0) + goto restart; return 0; bad_alignment: @@ -428,6 +430,13 @@ static void bitmap_wait_writes(struct bitmap *bitmap) wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); else + /* Note that we ignore the return value. The writes + * might have failed, but that would just mean that + * some bits which should be cleared haven't been, + * which is safe. The relevant bitmap blocks will + * probably get written again, but there is no great + * loss if they aren't. + */ md_super_wait(bitmap->mddev); } diff --git a/drivers/md/md.c b/drivers/md/md.c index 2cf0e89..62f3fb9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -727,7 +727,13 @@ static void super_written(struct bio *bio) if (bio->bi_error) { pr_err("md: super_written gets error=%d\n", bio->bi_error); md_error(mddev, rdev); - } + if (!test_bit(Faulty, &rdev->flags) + && (bio->bi_opf & MD_FAILFAST)) { + set_bit(MD_NEED_REWRITE, &mddev->flags); + set_bit(LastDev, &rdev->flags); + } + } else + clear_bit(LastDev, &rdev->flags); if (atomic_dec_and_test(&mddev->pending_writes)) wake_up(&mddev->sb_wait); @@ -744,7 +750,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, * if zero is reached. 
* If an error occurred, call md_error */ - struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); + struct bio *bio; + int ff = 0; + + if (test_bit(Faulty, &rdev->flags)) + return; + + bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); atomic_inc(&rdev->nr_pending); @@ -753,16 +765,24 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, bio_add_page(bio, page, size, 0); bio->bi_private = rdev; bio->bi_end_io = super_written; - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA); + + if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && + test_bit(FailFast, &rdev->flags) && + !test_bit(LastDev, &rdev->flags)) + ff = MD_FAILFAST; + bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA | ff); atomic_inc(&mddev->pending_writes); submit_bio(bio); } -void md_super_wait(struct mddev *mddev) +int md_super_wait(struct mddev *mddev) { /* wait for all superblock writes that were scheduled to complete */ wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); + if (test_and_clear_bit(MD_NEED_REWRITE, &mddev->flags)) + return -EAGAIN; + return 0; } int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, @@ -1334,9 +1354,10 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) num_sectors = (sector_t)(2ULL << 32) - 2; - md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, + do { + md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); - md_super_wait(rdev->mddev); + } while (md_super_wait(rdev->mddev) < 0); return num_sectors; } @@ -1877,9 +1898,10 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) sb->data_size = cpu_to_le64(num_sectors); sb->super_offset = rdev->sb_start; sb->sb_csum = calc_sb_1_csum(sb); - md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, - rdev->sb_page); - md_super_wait(rdev->mddev); + do { + md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, + rdev->sb_page); + } while (md_super_wait(rdev->mddev) < 0); return num_sectors; } @@ -2416,6 +2438,7 @@ repeat: if (mddev->queue) blk_add_trace_msg(mddev->queue, "md md_update_sb"); +rewrite: bitmap_update_sb(mddev->bitmap); rdev_for_each(rdev, mddev) { char b[BDEVNAME_SIZE]; @@ -2447,7 +2470,8 @@ repeat: /* only need to write one superblock... */ break; } - md_super_wait(mddev); + if (md_super_wait(mddev) < 0) + goto rewrite; /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ if (mddev_is_clustered(mddev) && ret == 0) diff --git a/drivers/md/md.h b/drivers/md/md.h index bc6712e..5c08f84 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -30,6 +30,16 @@ #define MaxSector (~(sector_t)0) /* + * These flags should really be called "NO_RETRY" rather than + * "FAILFAST" because they don't make any promise about time lapse, + * only about the number of retries, which will be zero. + * REQ_FAILFAST_DRIVER is not included because + * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.") + * seems to suggest that the errors it avoids retrying should usually + * be retried. + */ +#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT) +/* * MD's 'extended' device */ struct md_rdev { @@ -177,6 +187,10 @@ enum flag_bits { * It is expects that no bad block log * is present. 
*/ + LastDev, /* Seems to be the last working dev as + * it didn't fail, so don't use FailFast + * any more for metadata + */ }; static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, @@ -213,6 +227,11 @@ enum mddev_flags { MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node * already took resync lock, need to * release the lock */ + MD_FAILFAST_SUPPORTED, /* Using MD_FAILFAST on metadata writes is + * supported as calls to md_error() will + * never cause the array to become failed. + */ + MD_NEED_REWRITE, /* metadata write needs to be repeated */ }; #define MD_UPDATE_SB_FLAGS (BIT(MD_CHANGE_DEVS) | \ BIT(MD_CHANGE_CLEAN) | \ @@ -628,7 +647,7 @@ extern int mddev_congested(struct mddev *mddev, int bits); extern void md_flush_request(struct mddev *mddev, struct bio *bio); extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, sector_t sector, int size, struct page *page); -extern void md_super_wait(struct mddev *mddev); +extern int md_super_wait(struct mddev *mddev); extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, struct page *page, int op, int op_flags, bool metadata_op); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index d24adc5..4006a9b 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2988,6 +2988,7 @@ static int raid1_run(struct mddev *mddev) mddev->thread = conf->thread; conf->thread = NULL; mddev->private = conf; + set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index bd8c884..af50866 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3729,6 +3729,7 @@ static int raid10_run(struct mddev *mddev) size = raid10_size(mddev, 0, 0); md_set_array_sectors(mddev, size); mddev->resync_max_sectors = size; + set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); if (mddev->queue) { int stripe = conf->geo.raid_disks * -- cgit v1.1 From 2e52d449bcec31cb66d80aa8c798b15f76f1f5e0 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 18 Nov 2016 16:16:12 +1100 Subject: md/raid1: add failfast handling for reads. If a device is marked FailFast and it is not the only device we can read from, we mark the bio with REQ_FAILFAST_* flags. If this does fail, we don't try read repair but just allow failure. If it was the last device it doesn't fail of course, so the retry happens on the same device - this time without FAILFAST. A subsequent failure will not retry but will just pass up the error. During resync we may use FAILFAST requests and on a failure we will simply use the other device(s). During recovery we will only use FAILFAST in the unusual case were there are multiple places to read from - i.e. if there are > 2 devices. If we get a failure we will fail the device and complete the resync/recovery with remaining devices. The new R1BIO_FailFast flag is set on read reqest to suggest the a FAILFAST request might be acceptable. The rdev needs to have FailFast set as well for the read to actually use REQ_FAILFAST_*. We need to know there are at least two working devices before we can set R1BIO_FailFast, so we mustn't stop looking at the first device we find. So the "min_pending == 0" handling to not exit early, but too always choose the best_pending_disk if min_pending == 0. The spinlocked region in raid1_error() in enlarged to ensure that if two bios, reading from two different devices, fail at the same time, then there is no risk that both devices will be marked faulty, leaving zero "In_sync" devices. 
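A minimal sketch of the selection rule (standalone C with a made-up mirror array, not the read_balance() code itself): a read is only tagged failfast when the chosen device advertises FailFast and at least one other in-sync mirror remains to absorb a retry.

#include <stdbool.h>
#include <stdio.h>

struct mirror { bool in_sync; bool failfast; };

/* Decide whether a read from 'chosen' may carry a failfast hint. */
static bool read_may_failfast(const struct mirror *m, int nmirrors, int chosen)
{
        int readable = 0;

        for (int i = 0; i < nmirrors; i++)
                if (m[i].in_sync)
                        readable++;

        return m[chosen].failfast && readable >= 2;
}

int main(void)
{
        struct mirror m[2] = { { true, true }, { true, false } };

        printf("failfast read ok: %d\n", read_may_failfast(m, 2, 0)); /* 1 */
        m[1].in_sync = false;
        printf("failfast read ok: %d\n", read_may_failfast(m, 2, 0)); /* 0: last device */
        return 0;
}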
Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid1.c | 52 ++++++++++++++++++++++++++++++++++++++++++---------- drivers/md/raid1.h | 1 + 2 files changed, 43 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4006a9b..1f22df0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -329,6 +329,11 @@ static void raid1_end_read_request(struct bio *bio) if (uptodate) set_bit(R1BIO_Uptodate, &r1_bio->state); + else if (test_bit(FailFast, &rdev->flags) && + test_bit(R1BIO_FailFast, &r1_bio->state)) + /* This was a fail-fast read so we definitely + * want to retry */ + ; else { /* If all other devices have failed, we want to return * the error upwards rather than fail the last device. @@ -535,6 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect best_good_sectors = 0; has_nonrot_disk = 0; choose_next_idle = 0; + clear_bit(R1BIO_FailFast, &r1_bio->state); if ((conf->mddev->recovery_cp < this_sector + sectors) || (mddev_is_clustered(conf->mddev) && @@ -608,6 +614,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect } else best_good_sectors = sectors; + if (best_disk >= 0) + /* At least two disks to choose from so failfast is OK */ + set_bit(R1BIO_FailFast, &r1_bio->state); + nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); has_nonrot_disk |= nonrot; pending = atomic_read(&rdev->nr_pending); @@ -646,11 +656,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect } break; } - /* If device is idle, use it */ - if (pending == 0) { - best_disk = disk; - break; - } if (choose_next_idle) continue; @@ -673,7 +678,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect * mixed ratation/non-rotational disks depending on workload. */ if (best_disk == -1) { - if (has_nonrot_disk) + if (has_nonrot_disk || min_pending == 0) best_disk = best_pending_disk; else best_disk = best_dist_disk; @@ -1167,6 +1172,9 @@ read_again: read_bio->bi_bdev = mirror->rdev->bdev; read_bio->bi_end_io = raid1_end_read_request; bio_set_op_attrs(read_bio, op, do_sync); + if (test_bit(FailFast, &mirror->rdev->flags) && + test_bit(R1BIO_FailFast, &r1_bio->state)) + read_bio->bi_opf |= MD_FAILFAST; read_bio->bi_private = r1_bio; if (mddev->gendisk) @@ -1464,6 +1472,7 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) * next level up know. * else mark the drive as failed */ + spin_lock_irqsave(&conf->device_lock, flags); if (test_bit(In_sync, &rdev->flags) && (conf->raid_disks - mddev->degraded) == 1) { /* @@ -1473,10 +1482,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) * it is very likely to fail. */ conf->recovery_disabled = mddev->recovery_disabled; + spin_unlock_irqrestore(&conf->device_lock, flags); return; } set_bit(Blocked, &rdev->flags); - spin_lock_irqsave(&conf->device_lock, flags); if (test_and_clear_bit(In_sync, &rdev->flags)) { mddev->degraded++; set_bit(Faulty, &rdev->flags); @@ -1815,12 +1824,24 @@ static int fix_sync_read_error(struct r1bio *r1_bio) sector_t sect = r1_bio->sector; int sectors = r1_bio->sectors; int idx = 0; + struct md_rdev *rdev; + + rdev = conf->mirrors[r1_bio->read_disk].rdev; + if (test_bit(FailFast, &rdev->flags)) { + /* Don't try recovering from here - just fail it + * ... 
unless it is the last working device of course */ + md_error(mddev, rdev); + if (test_bit(Faulty, &rdev->flags)) + /* Don't try to read from here, but make sure + * put_buf does it's thing + */ + bio->bi_end_io = end_sync_write; + } while(sectors) { int s = sectors; int d = r1_bio->read_disk; int success = 0; - struct md_rdev *rdev; int start; if (s > (PAGE_SIZE>>9)) @@ -2331,7 +2352,9 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) bio_put(bio); r1_bio->bios[r1_bio->read_disk] = NULL; - if (mddev->ro == 0) { + rdev = conf->mirrors[r1_bio->read_disk].rdev; + if (mddev->ro == 0 + && !test_bit(FailFast, &rdev->flags)) { freeze_array(conf, 1); fix_read_error(conf, r1_bio->read_disk, r1_bio->sector, r1_bio->sectors); @@ -2340,7 +2363,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; } - rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); + rdev_dec_pending(rdev, conf->mddev); read_more: disk = read_balance(conf, r1_bio, &max_sectors); @@ -2365,6 +2388,9 @@ read_more: bio->bi_bdev = rdev->bdev; bio->bi_end_io = raid1_end_read_request; bio_set_op_attrs(bio, REQ_OP_READ, do_sync); + if (test_bit(FailFast, &rdev->flags) && + test_bit(R1BIO_FailFast, &r1_bio->state)) + bio->bi_opf |= MD_FAILFAST; bio->bi_private = r1_bio; if (max_sectors < r1_bio->sectors) { /* Drat - have to split this up more */ @@ -2653,6 +2679,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; bio->bi_bdev = rdev->bdev; bio->bi_private = r1_bio; + if (test_bit(FailFast, &rdev->flags)) + bio->bi_opf |= MD_FAILFAST; } } rcu_read_unlock(); @@ -2783,6 +2811,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, if (bio->bi_end_io == end_sync_read) { read_targets--; md_sync_acct(bio->bi_bdev, nr_sectors); + if (read_targets == 1) + bio->bi_opf &= ~MD_FAILFAST; generic_make_request(bio); } } @@ -2790,6 +2820,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, atomic_set(&r1_bio->remaining, 1); bio = r1_bio->bios[r1_bio->read_disk]; md_sync_acct(bio->bi_bdev, nr_sectors); + if (read_targets == 1) + bio->bi_opf &= ~MD_FAILFAST; generic_make_request(bio); } diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 5ec1944..c52ef42 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -183,5 +183,6 @@ enum r1bio_state { */ R1BIO_MadeGood, R1BIO_WriteError, + R1BIO_FailFast, }; #endif -- cgit v1.1 From 212e7eb7a3403464a796c05c2fc46cae3b62d803 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 18 Nov 2016 16:16:12 +1100 Subject: md/raid1: add failfast handling for writes. When writing to a fastfail device we use MD_FASTFAIL unless it is the only device being written to. For resync/recovery, assume there was a working device to read from so always use REQ_FASTFAIL_DEV. If a write for resync/recovery fails, we just fail the device - there is not much else to do. If a normal failfast write fails, but the device cannot be failed (must be only one left), we queue for write error handling. This will call narrow_write_error() to retry the write synchronously and without any FAILFAST flags. 
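A minimal sketch of the completion-side policy (standalone C; the helper name and return codes are illustrative, not the kernel's): after a failfast write error, a device that could be failed ends that branch, while a device that could not be failed (the last one) falls back to a synchronous retry without failfast.

#include <stdbool.h>
#include <stdio.h>

enum wr_action { WR_DONE, WR_RETRY_SYNC };

/* was_failfast: the bio carried a failfast flag; dev_now_faulty: failing
 * the device actually succeeded (it is refused for the last device). */
static enum wr_action on_write_error(bool was_failfast, bool dev_now_faulty)
{
        if (was_failfast && dev_now_faulty)
                return WR_DONE;         /* other mirrors hold the data; drop this branch */
        return WR_RETRY_SYNC;           /* retry synchronously, without failfast */
}

int main(void)
{
        printf("%d %d\n",
               on_write_error(true, true),      /* 0: branch finished */
               on_write_error(true, false));    /* 1: last device, retry path */
        return 0;
}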
Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid1.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1f22df0..94e0afc 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -422,7 +422,24 @@ static void raid1_end_write_request(struct bio *bio) set_bit(MD_RECOVERY_NEEDED, & conf->mddev->recovery); - set_bit(R1BIO_WriteError, &r1_bio->state); + if (test_bit(FailFast, &rdev->flags) && + (bio->bi_opf & MD_FAILFAST) && + /* We never try FailFast to WriteMostly devices */ + !test_bit(WriteMostly, &rdev->flags)) { + md_error(r1_bio->mddev, rdev); + if (!test_bit(Faulty, &rdev->flags)) + /* This is the only remaining device, + * We need to retry the write without + * FailFast + */ + set_bit(R1BIO_WriteError, &r1_bio->state); + else { + /* Finished with this branch */ + r1_bio->bios[mirror] = NULL; + to_put = bio; + } + } else + set_bit(R1BIO_WriteError, &r1_bio->state); } else { /* * Set R1BIO_Uptodate in our master bio, so that we @@ -1392,6 +1409,10 @@ read_again: mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; bio_set_op_attrs(mbio, op, do_flush_fua | do_sync); + if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && + !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) && + conf->raid_disks - mddev->degraded > 1) + mbio->bi_opf |= MD_FAILFAST; mbio->bi_private = r1_bio; atomic_inc(&r1_bio->remaining); @@ -2060,6 +2081,9 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) continue; bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); + if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) + wbio->bi_opf |= MD_FAILFAST; + wbio->bi_end_io = end_sync_write; atomic_inc(&r1_bio->remaining); md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); -- cgit v1.1 From 8d3ca83dcf9ca3d58822eddd279918d46f41e9ff Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 18 Nov 2016 16:16:12 +1100 Subject: md/raid10: add failfast handling for reads. If a device is marked FailFast, and it is not the only device we can read from, we mark the bio as MD_FAILFAST. If this does fail-fast, we don't try read repair but just allow failure. If it was the last device, it doesn't get marked Faulty so the retry happens on the same device - this time without FAILFAST. A subsequent failure will not retry but will just pass up the error. During resync we may use FAILFAST requests, and on a failure we will simply use the other device(s). During recovery we will only use FAILFAST in the unusual case were there are multiple places to read from - i.e. if there are > 2 devices. If we get a failure we will fail the device and complete the resync/recovery with remaining devices. Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid10.c | 49 ++++++++++++++++++++++++++++++++++++++++++++----- drivers/md/raid10.h | 2 ++ 2 files changed, 46 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index af50866..7cdc9bc 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -719,6 +719,7 @@ static struct md_rdev *read_balance(struct r10conf *conf, best_dist = MaxSector; best_good_sectors = 0; do_balance = 1; + clear_bit(R10BIO_FailFast, &r10_bio->state); /* * Check if we can balance. 
We can balance on the whole * device if no resync is going on (recovery is ok), or below @@ -783,15 +784,18 @@ static struct md_rdev *read_balance(struct r10conf *conf, if (!do_balance) break; + if (best_slot >= 0) + /* At least 2 disks to choose from so failfast is OK */ + set_bit(R10BIO_FailFast, &r10_bio->state); /* This optimisation is debatable, and completely destroys * sequential read speed for 'far copies' arrays. So only * keep it for 'near' arrays, and review those later. */ if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending)) - break; + new_distance = 0; /* for far > 1 always use the lowest address */ - if (geo->far_copies > 1) + else if (geo->far_copies > 1) new_distance = r10_bio->devs[slot].addr; else new_distance = abs(r10_bio->devs[slot].addr - @@ -1170,6 +1174,9 @@ read_again: read_bio->bi_bdev = rdev->bdev; read_bio->bi_end_io = raid10_end_read_request; bio_set_op_attrs(read_bio, op, do_sync); + if (test_bit(FailFast, &rdev->flags) && + test_bit(R10BIO_FailFast, &r10_bio->state)) + read_bio->bi_opf |= MD_FAILFAST; read_bio->bi_private = r10_bio; if (mddev->gendisk) @@ -1988,6 +1995,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) /* now find blocks with errors */ for (i=0 ; i < conf->copies ; i++) { int j, d; + struct md_rdev *rdev; tbio = r10_bio->devs[i].bio; @@ -1995,6 +2003,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) continue; if (i == first) continue; + d = r10_bio->devs[i].devnum; + rdev = conf->mirrors[d].rdev; if (!r10_bio->devs[i].bio->bi_error) { /* We know that the bi_io_vec layout is the same for * both 'first' and 'i', so we just compare them. @@ -2017,6 +2027,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) /* Don't fix anything. */ continue; + } else if (test_bit(FailFast, &rdev->flags)) { + /* Just give up on this device */ + md_error(rdev->mddev, rdev); + continue; } /* Ok, we need to write this bio, either to correct an * inconsistency or to correct an unreadable block. 
@@ -2034,7 +2048,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) bio_copy_data(tbio, fbio); - d = r10_bio->devs[i].devnum; atomic_inc(&conf->mirrors[d].rdev->nr_pending); atomic_inc(&r10_bio->remaining); md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); @@ -2541,12 +2554,14 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) bio_put(bio); r10_bio->devs[slot].bio = NULL; - if (mddev->ro == 0) { + if (mddev->ro) + r10_bio->devs[slot].bio = IO_BLOCKED; + else if (!test_bit(FailFast, &rdev->flags)) { freeze_array(conf, 1); fix_read_error(conf, mddev, r10_bio); unfreeze_array(conf); } else - r10_bio->devs[slot].bio = IO_BLOCKED; + md_error(mddev, rdev); rdev_dec_pending(rdev, mddev); @@ -2575,6 +2590,9 @@ read_more: + choose_data_offset(r10_bio, rdev); bio->bi_bdev = rdev->bdev; bio_set_op_attrs(bio, REQ_OP_READ, do_sync); + if (test_bit(FailFast, &rdev->flags) && + test_bit(R10BIO_FailFast, &r10_bio->state)) + bio->bi_opf |= MD_FAILFAST; bio->bi_private = r10_bio; bio->bi_end_io = raid10_end_read_request; trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), @@ -3096,6 +3114,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_read; bio_set_op_attrs(bio, REQ_OP_READ, 0); + if (test_bit(FailFast, &rdev->flags)) + bio->bi_opf |= MD_FAILFAST; from_addr = r10_bio->devs[j].addr; bio->bi_iter.bi_sector = from_addr + rdev->data_offset; @@ -3201,6 +3221,23 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, rdev_dec_pending(mrdev, mddev); if (mreplace) rdev_dec_pending(mreplace, mddev); + if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { + /* Only want this if there is elsewhere to + * read from. 'j' is currently the first + * readable copy. + */ + int targets = 1; + for (; j < conf->copies; j++) { + int d = r10_bio->devs[j].devnum; + if (conf->mirrors[d].rdev && + test_bit(In_sync, + &conf->mirrors[d].rdev->flags)) + targets++; + } + if (targets == 1) + r10_bio->devs[0].bio->bi_opf + &= ~MD_FAILFAST; + } } if (biolist == NULL) { while (r10_bio) { @@ -3279,6 +3316,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_read; bio_set_op_attrs(bio, REQ_OP_READ, 0); + if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) + bio->bi_opf |= MD_FAILFAST; bio->bi_iter.bi_sector = sector + rdev->data_offset; bio->bi_bdev = rdev->bdev; count++; diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 18ec1f7..3162615 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -156,5 +156,7 @@ enum r10bio_state { * flag is set */ R10BIO_Previous, +/* failfast devices did receive failfast requests. */ + R10BIO_FailFast, }; #endif -- cgit v1.1 From 1919cbb23bf1b3e0fdb7b6edfb7369f920744087 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 18 Nov 2016 16:16:12 +1100 Subject: md/raid10: add failfast handling for writes. When writing to a fastfail device, we use MD_FASTFAIL unless it is the only device being written to. For resync/recovery, assume there was a working device to read from so always use MD_FASTFAIL. If a write for resync/recovery fails, we just fail the device - there is not much else to do. If a normal write fails, but the device cannot be marked Faulty (must be only one left), we queue for write error handling which calls narrow_write_error() to write the block synchronously without any failfast flags. 
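For illustration, the write-completion decision described above can be condensed as follows (a simplified sketch of the patch below, with the replacement-device and bad-block handling left out):

	if (test_bit(FailFast, &rdev->flags) && (bio->bi_opf & MD_FAILFAST)) {
		md_error(rdev->mddev, rdev);
		if (test_bit(Faulty, &rdev->flags)) {
			/* device is now failed; this branch of the write is finished */
			r10_bio->devs[slot].bio = NULL;
		} else {
			/*
			 * Only remaining device: it cannot be marked Faulty, so
			 * queue the write for error handling, which rewrites the
			 * block synchronously without MD_FAILFAST.
			 */
			set_bit(R10BIO_WriteError, &r10_bio->state);
		}
	} else {
		set_bit(R10BIO_WriteError, &r10_bio->state);
	}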
Signed-off-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid10.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 7cdc9bc..525ca99 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -100,6 +100,7 @@ static int max_queued_requests = 1024; static void allow_barrier(struct r10conf *conf); static void lower_barrier(struct r10conf *conf); static int _enough(struct r10conf *conf, int previous, int ignore); +static int enough(struct r10conf *conf, int ignore); static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped); static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio); @@ -450,6 +451,7 @@ static void raid10_end_write_request(struct bio *bio) struct r10conf *conf = r10_bio->mddev->private; int slot, repl; struct md_rdev *rdev = NULL; + struct bio *to_put = NULL; bool discard_error; discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; @@ -477,8 +479,24 @@ static void raid10_end_write_request(struct bio *bio) if (!test_and_set_bit(WantReplacement, &rdev->flags)) set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); - set_bit(R10BIO_WriteError, &r10_bio->state); + dec_rdev = 0; + if (test_bit(FailFast, &rdev->flags) && + (bio->bi_opf & MD_FAILFAST)) { + md_error(rdev->mddev, rdev); + if (!test_bit(Faulty, &rdev->flags)) + /* This is the only remaining device, + * We need to retry the write without + * FailFast + */ + set_bit(R10BIO_WriteError, &r10_bio->state); + else { + r10_bio->devs[slot].bio = NULL; + to_put = bio; + dec_rdev = 1; + } + } else + set_bit(R10BIO_WriteError, &r10_bio->state); } } else { /* @@ -528,6 +546,8 @@ static void raid10_end_write_request(struct bio *bio) one_write_done(r10_bio); if (dec_rdev) rdev_dec_pending(rdev, conf->mddev); + if (to_put) + bio_put(to_put); } /* @@ -1390,6 +1410,9 @@ retry_write: mbio->bi_bdev = rdev->bdev; mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); + if (test_bit(FailFast, &conf->mirrors[d].rdev->flags) && + enough(conf, d)) + mbio->bi_opf |= MD_FAILFAST; mbio->bi_private = r10_bio; if (conf->mddev->gendisk) @@ -2052,6 +2075,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) atomic_inc(&r10_bio->remaining); md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); + if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) + tbio->bi_opf |= MD_FAILFAST; tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; tbio->bi_bdev = conf->mirrors[d].rdev->bdev; generic_make_request(tbio); @@ -3341,6 +3366,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, bio->bi_private = r10_bio; bio->bi_end_io = end_sync_write; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); + if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) + bio->bi_opf |= MD_FAILFAST; bio->bi_iter.bi_sector = sector + rdev->data_offset; bio->bi_bdev = rdev->bdev; count++; -- cgit v1.1 From ce1ccd079fac0336191c0fd516ebf0e4985d59d4 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 21 Nov 2016 10:29:18 -0800 Subject: raid5-cache: suspend reclaim thread instead of shutdown There is mechanism to suspend a kernel thread. Use it instead of playing create/destroy game. 
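For reference, the generic kthread parking pattern relied on here looks roughly like this (a minimal sketch, not md code; worker_task and do_work() are hypothetical names):

	/* worker side: park in place instead of exiting */
	static int worker_fn(void *arg)
	{
		while (!kthread_should_stop()) {
			if (kthread_should_park())
				kthread_parkme();	/* sleeps here until unparked */
			do_work(arg);			/* hypothetical work function */
		}
		return 0;
	}

	/* controller side: suspend and resume without destroying the thread */
	kthread_park(worker_task);	/* returns once worker_fn() has parked */
	kthread_unpark(worker_task);	/* lets worker_fn() run again */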
Signed-off-by: Shaohua Li Reviewed-by: NeilBrown Cc: Song Liu --- drivers/md/md.c | 4 +++- drivers/md/raid5-cache.c | 18 +++++------------- 2 files changed, 8 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 62f3fb9..297757a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7187,10 +7187,12 @@ static int md_thread(void *arg) wait_event_interruptible_timeout (thread->wqueue, test_bit(THREAD_WAKEUP, &thread->flags) - || kthread_should_stop(), + || kthread_should_stop() || kthread_should_park(), thread->timeout); clear_bit(THREAD_WAKEUP, &thread->flags); + if (kthread_should_park()) + kthread_parkme(); if (!kthread_should_stop()) thread->run(thread); } diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 8cb79fc..5f817bd 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "md.h" #include "raid5.h" #include "bitmap.h" @@ -1437,23 +1438,14 @@ void r5l_quiesce(struct r5l_log *log, int state) struct mddev *mddev; if (!log || state == 2) return; - if (state == 0) { - /* - * This is a special case for hotadd. In suspend, the array has - * no journal. In resume, journal is initialized as well as the - * reclaim thread. - */ - if (log->reclaim_thread) - return; - log->reclaim_thread = md_register_thread(r5l_reclaim_thread, - log->rdev->mddev, "reclaim"); - log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL; - } else if (state == 1) { + if (state == 0) + kthread_unpark(log->reclaim_thread->tsk); + else if (state == 1) { /* make sure r5l_write_super_and_discard_space exits */ mddev = log->rdev->mddev; wake_up(&mddev->sb_wait); + kthread_park(log->reclaim_thread->tsk); r5l_wake_reclaim(log, MaxSector); - md_unregister_thread(&log->reclaim_thread); r5l_do_reclaim(log); } } -- cgit v1.1 From 034e33f5eda3c61edb838471f69ec42d64e1e94e Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 21 Nov 2016 10:29:19 -0800 Subject: md: stop write should stop journal reclaim __md_stop_writes currently doesn't stop the raid5-cache reclaim thread. It's possible the reclaim thread is still running and doing writes, which doesn't match what __md_stop_writes should do. The extra ->quiesce() call should not harm any raid type. For raid5-cache, this will guarantee we reclaim all caches before we update the superblock. Signed-off-by: Shaohua Li Reviewed-by: NeilBrown Cc: Song Liu --- drivers/md/md.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 297757a..c7894fb 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5517,6 +5517,10 @@ static void __md_stop_writes(struct mddev *mddev) del_timer_sync(&mddev->safemode_timer); + if (mddev->pers && mddev->pers->quiesce) { + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); + } bitmap_flush(mddev); if (mddev->ro == 0 && -- cgit v1.1 From d7bd398e97f236a2353689eca5e8950f67cd34d5 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Wed, 23 Nov 2016 22:50:39 -0800 Subject: md/r5cache: handle alloc_page failure RMW of the r5c write-back cache uses an extra page to store old data for prexor. handle_stripe_dirtying() allocates this page by calling alloc_page(). However, alloc_page() may fail. To handle alloc_page() failures, this patch adds an extra page to disk_info. When alloc_page fails, handle_stripe() tries to use these pages. When these pages are in use by another stripe (R5C_EXTRA_PAGE_IN_USE), the stripe is added to delayed_list.
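The fallback order described above can be condensed as follows (a sketch extracted from the patch below, with the surrounding loop and error paths trimmed):

	struct page *p = alloc_page(GFP_NOIO);

	if (p) {
		dev->orig_page = p;			/* normal case: private prexor page */
	} else if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state)) {
		r5c_use_extra_page(sh);			/* borrow the per-disk extra_page */
	} else {
		set_bit(STRIPE_DELAYED, &sh->state);	/* extra_page busy: retry later */
		s->waiting_extra_page = 1;
	}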
Signed-off-by: Song Liu Reviewed-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 27 ++++++++++++++++- drivers/md/raid5.c | 78 ++++++++++++++++++++++++++++++++++++++++-------- drivers/md/raid5.h | 6 ++++ 3 files changed, 98 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 5f817bd..5d3d238 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -2326,15 +2326,40 @@ int r5c_try_caching_write(struct r5conf *conf, */ void r5c_release_extra_page(struct stripe_head *sh) { + struct r5conf *conf = sh->raid_conf; int i; + bool using_disk_info_extra_page; + + using_disk_info_extra_page = + sh->dev[0].orig_page == conf->disks[0].extra_page; for (i = sh->disks; i--; ) if (sh->dev[i].page != sh->dev[i].orig_page) { struct page *p = sh->dev[i].orig_page; sh->dev[i].orig_page = sh->dev[i].page; - put_page(p); + if (!using_disk_info_extra_page) + put_page(p); } + + if (using_disk_info_extra_page) { + clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state); + md_wakeup_thread(conf->mddev->thread); + } +} + +void r5c_use_extra_page(struct stripe_head *sh) +{ + struct r5conf *conf = sh->raid_conf; + int i; + struct r5dev *dev; + + for (i = sh->disks; i--; ) { + dev = &sh->dev[i]; + if (dev->orig_page != dev->page) + put_page(dev->orig_page); + dev->orig_page = conf->disks[i].extra_page; + } } /* diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index dbab8c7..db909b9 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -876,6 +876,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { /* writing out phase */ + if (s->waiting_extra_page) + return; if (r5l_write_stripe(conf->log, sh) == 0) return; } else { /* caching phase */ @@ -2007,6 +2009,7 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, INIT_LIST_HEAD(&sh->batch_list); INIT_LIST_HEAD(&sh->lru); INIT_LIST_HEAD(&sh->r5c); + INIT_LIST_HEAD(&sh->log_list); atomic_set(&sh->count, 1); sh->log_start = MaxSector; for (i = 0; i < disks; i++) { @@ -2253,10 +2256,24 @@ static int resize_stripes(struct r5conf *conf, int newsize) */ ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); if (ndisks) { - for (i=0; iraid_disks; i++) + for (i = 0; i < conf->pool_size; i++) ndisks[i] = conf->disks[i]; - kfree(conf->disks); - conf->disks = ndisks; + + for (i = conf->pool_size; i < newsize; i++) { + ndisks[i].extra_page = alloc_page(GFP_NOIO); + if (!ndisks[i].extra_page) + err = -ENOMEM; + } + + if (err) { + for (i = conf->pool_size; i < newsize; i++) + if (ndisks[i].extra_page) + put_page(ndisks[i].extra_page); + kfree(ndisks); + } else { + kfree(conf->disks); + conf->disks = ndisks; + } } else err = -ENOMEM; @@ -3580,10 +3597,10 @@ unhash: break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); } -static void handle_stripe_dirtying(struct r5conf *conf, - struct stripe_head *sh, - struct stripe_head_state *s, - int disks) +static int handle_stripe_dirtying(struct r5conf *conf, + struct stripe_head *sh, + struct stripe_head_state *s, + int disks) { int rmw = 0, rcw = 0, i; sector_t recovery_cp = conf->mddev->recovery_cp; @@ -3649,12 +3666,32 @@ static void handle_stripe_dirtying(struct r5conf *conf, dev->page == dev->orig_page && !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { /* alloc page for prexor */ - dev->orig_page = alloc_page(GFP_NOIO); + struct page *p = alloc_page(GFP_NOIO); + + if (p) { + dev->orig_page = p; + continue; + } - 
/* will handle failure in a later patch*/ - BUG_ON(!dev->orig_page); + /* + * alloc_page() failed, try use + * disk_info->extra_page + */ + if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE, + &conf->cache_state)) { + r5c_use_extra_page(sh); + break; + } + + /* extra_page in use, add to delayed_list */ + set_bit(STRIPE_DELAYED, &sh->state); + s->waiting_extra_page = 1; + return -EAGAIN; } + } + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx || test_bit(R5_InJournal, &dev->flags)) && @@ -3730,6 +3767,7 @@ static void handle_stripe_dirtying(struct r5conf *conf, (s->locked == 0 && (rcw == 0 || rmw == 0) && !test_bit(STRIPE_BIT_DELAY, &sh->state))) schedule_reconstruction(sh, s, rcw == 0, 0); + return 0; } static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, @@ -4545,8 +4583,12 @@ static void handle_stripe(struct stripe_head *sh) if (ret == -EAGAIN || /* stripe under reclaim: !caching && injournal */ (!test_bit(STRIPE_R5C_CACHING, &sh->state) && - s.injournal > 0)) - handle_stripe_dirtying(conf, sh, &s, disks); + s.injournal > 0)) { + ret = handle_stripe_dirtying(conf, sh, &s, + disks); + if (ret == -EAGAIN) + goto finish; + } } } @@ -6458,6 +6500,8 @@ static void raid5_free_percpu(struct r5conf *conf) static void free_conf(struct r5conf *conf) { + int i; + if (conf->log) r5l_exit_log(conf->log); if (conf->shrinker.nr_deferred) @@ -6466,6 +6510,9 @@ static void free_conf(struct r5conf *conf) free_thread_groups(conf); shrink_stripes(conf); raid5_free_percpu(conf); + for (i = 0; i < conf->pool_size; i++) + if (conf->disks[i].extra_page) + put_page(conf->disks[i].extra_page); kfree(conf->disks); kfree(conf->stripe_hashtbl); kfree(conf); @@ -6612,9 +6659,16 @@ static struct r5conf *setup_conf(struct mddev *mddev) conf->disks = kzalloc(max_disks * sizeof(struct disk_info), GFP_KERNEL); + if (!conf->disks) goto abort; + for (i = 0; i < max_disks; i++) { + conf->disks[i].extra_page = alloc_page(GFP_KERNEL); + if (!conf->disks[i].extra_page) + goto abort; + } + conf->mddev = mddev; if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index d13fe45..ed8e136 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -276,6 +276,7 @@ struct stripe_head_state { struct md_rdev *blocked_rdev; int handle_bad_blocks; int log_failed; + int waiting_extra_page; }; /* Flags for struct r5dev.flags */ @@ -439,6 +440,7 @@ enum { struct disk_info { struct md_rdev *rdev, *replacement; + struct page *extra_page; /* extra page to use in prexor */ }; /* @@ -559,6 +561,9 @@ enum r5_cache_state { * only process stripes that are already * occupying the log */ + R5C_EXTRA_PAGE_IN_USE, /* a stripe is using disk_info.extra_page + * for prexor + */ }; struct r5conf { @@ -765,6 +770,7 @@ extern void r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s); extern void r5c_release_extra_page(struct stripe_head *sh); +extern void r5c_use_extra_page(struct stripe_head *sh); extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space); extern void r5c_handle_cached_data_endio(struct r5conf *conf, struct stripe_head *sh, int disks, struct bio_list *return_bi); -- cgit v1.1 From d3014e21e18bfaf5b22144a45c399c8eb21aaba9 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 24 Nov 2016 14:13:04 +0300 Subject: md/r5cache: enable IRQs on error path We need to re-enable the IRQs here before returning. 
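The rule behind this fix: a lock taken with spin_lock_irqsave() must be released with spin_unlock_irqrestore() on every exit path, including early returns; a plain spin_unlock() drops the lock but leaves the saved interrupt state unrestored. A generic example of the correct pattern (the lock, list and default_value names are placeholders):

	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (list_empty(&list)) {
		/* the early return must also restore the IRQ state */
		spin_unlock_irqrestore(&lock, flags);
		return default_value;
	}
	/* ... work under the lock ... */
	spin_unlock_irqrestore(&lock, flags);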
Fixes: a39f7afde358 ("md/r5cache: write-out phase and reclaim support") Signed-off-by: Dan Carpenter Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 5d3d238..874749d 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1029,7 +1029,7 @@ static sector_t r5c_calculate_new_cp(struct r5conf *conf) spin_lock_irqsave(&log->stripe_in_journal_lock, flags); if (list_empty(&conf->log->stripe_in_journal_list)) { /* all stripes flushed */ - spin_unlock(&log->stripe_in_journal_lock); + spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); return log->next_checkpoint; } sh = list_first_entry(&conf->log->stripe_in_journal_list, -- cgit v1.1 From f7b7bee75e06cbdce864f7b313ac05555e7eff6b Mon Sep 17 00:00:00 2001 From: Zhengyuan Liu Date: Sat, 26 Nov 2016 10:57:13 +0800 Subject: raid5-cache: add another check condition before replaying one stripe A stripe that has just been allocated does not have the STRIPE_R5C_CACHING state either, so adding this check condition avoids unnecessary replay of an empty stripe. r5l_recovery_replay_one_stripe() already resets the stripe in every case, so the explicit reset is deleted to make the code cleaner. Signed-off-by: Zhengyuan Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 874749d..1ff3859b 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1887,9 +1887,9 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, } if (payload->header.type == R5LOG_PAYLOAD_DATA) { - if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { + if (!test_bit(STRIPE_R5C_CACHING, &sh->state) && + test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) { r5l_recovery_replay_one_stripe(conf, sh, ctx); - r5l_recovery_reset_stripe(sh); sh->log_start = ctx->pos; list_move_tail(&sh->lru, cached_stripe_list); } -- cgit v1.1 From 462eb7d87297dae5837f3445b68b79e835ab0d6c Mon Sep 17 00:00:00 2001 From: Zhengyuan Liu Date: Sat, 26 Nov 2016 10:57:14 +0800 Subject: raid5-cache: don't set STRIPE_R5C_PARTIAL_STRIPE flag while loading stripe into cache r5c_recovery_load_one_stripe should not set the STRIPE_R5C_PARTIAL_STRIPE flag, as the data-only stripe may be a STRIPE_R5C_FULL_STRIPE stripe. The state machine would release the stripe later, add it to either the r5c_cached_full_stripes list or the r5c_cached_partial_stripes list, and set the correct flag.
Reviewed-by: JackieLiu Signed-off-by: Zhengyuan Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 1ff3859b..b34b26d 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1919,7 +1919,6 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, static void r5c_recovery_load_one_stripe(struct r5l_log *log, struct stripe_head *sh) { - struct r5conf *conf = sh->raid_conf; struct r5dev *dev; int i; @@ -1930,9 +1929,8 @@ static void r5c_recovery_load_one_stripe(struct r5l_log *log, set_bit(R5_UPTODATE, &dev->flags); } } - set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state); - atomic_inc(&conf->r5c_cached_partial_stripes); list_add_tail(&sh->r5c, &log->stripe_in_journal_list); + atomic_inc(&log->stripe_in_journal_count); } /* -- cgit v1.1 From 9b69173e5c6000b2c6fafc5085dcd7b173f073c8 Mon Sep 17 00:00:00 2001 From: JackieLiu Date: Mon, 28 Nov 2016 16:19:18 +0800 Subject: md/raid5-cache: remove unnecessary function parameters The function parameter 'recovery_list' is not used in body, we can delete it Signed-off-by: JackieLiu Reviewed-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index b34b26d..1842ecb 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -67,7 +67,7 @@ static char *r5c_journal_mode_str[] = {"write-through", /* * raid5 cache state machine * - * With rhe RAID cache, each stripe works in two phases: + * With the RAID cache, each stripe works in two phases: * - caching phase * - writing-out phase * @@ -1674,7 +1674,6 @@ out: static struct stripe_head * r5c_recovery_alloc_stripe(struct r5conf *conf, - struct list_head *recovery_list, sector_t stripe_sect, sector_t log_start) { @@ -1855,8 +1854,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, stripe_sect); if (!sh) { - sh = r5c_recovery_alloc_stripe(conf, cached_stripe_list, - stripe_sect, ctx->pos); + sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos); /* * cannot get stripe from raid5_get_active_stripe * try replay some stripes @@ -1865,8 +1863,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, r5c_recovery_replay_stripes( cached_stripe_list, ctx); sh = r5c_recovery_alloc_stripe( - conf, cached_stripe_list, - stripe_sect, ctx->pos); + conf, stripe_sect, ctx->pos); } if (!sh) { pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n", @@ -1875,8 +1872,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, raid5_set_cache_size(mddev, conf->min_nr_stripes * 2); sh = r5c_recovery_alloc_stripe( - conf, cached_stripe_list, stripe_sect, - ctx->pos); + conf, stripe_sect, ctx->pos); } if (!sh) { pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. 
Recovery failed.\n", -- cgit v1.1 From fc833c2a2f4129c42efdaed64b9eb6e9ae5fdcee Mon Sep 17 00:00:00 2001 From: JackieLiu Date: Mon, 28 Nov 2016 16:19:19 +0800 Subject: md/raid5-cache: use ring add to prevent overflow 'write_pos' must be protected with 'r5l_ring_add', or it may overflow Signed-off-by: JackieLiu Reviewed-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 1842ecb..5ef20c5 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -2084,7 +2084,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, ctx->pos, ctx->seq); mb = page_address(page); offset = le32_to_cpu(mb->meta_size); - write_pos = ctx->pos + BLOCK_SECTORS; + write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); for (i = sh->disks; i--; ) { struct r5dev *dev = &sh->dev[i]; -- cgit v1.1 From bc8f167f9c4656b7a972936237e9c38e6ab80c67 Mon Sep 17 00:00:00 2001 From: JackieLiu Date: Mon, 28 Nov 2016 16:19:20 +0800 Subject: md/raid5-cache: release the stripe_head at the appropriate location If we released the 'stripe_head' in r5c_recovery_flush_log, ctx->cached_list will both release the data-parity stripes and data-only stripes, which will become empty. And we also need to use the data-only stripes in r5c_recovery_rewrite_data_only_stripes, so we should wait util rewrite data-only stripes is done before releasing them. Reviewed-by: Zhengyuan Liu Reviewed-by: Song Liu Signed-off-by: JackieLiu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 5ef20c5..7b9149e 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1948,7 +1948,7 @@ static void r5c_recovery_load_one_stripe(struct r5l_log *log, static int r5c_recovery_flush_log(struct r5l_log *log, struct r5l_recovery_ctx *ctx) { - struct stripe_head *sh, *next; + struct stripe_head *sh; int ret = 0; /* scan through the log */ @@ -1977,11 +1977,9 @@ static int r5c_recovery_flush_log(struct r5l_log *log, r5c_recovery_replay_stripes(&ctx->cached_list, ctx); /* load data-only stripes to stripe cache */ - list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { + list_for_each_entry(sh, &ctx->cached_list, lru) { WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); r5c_recovery_load_one_stripe(log, sh); - list_del_init(&sh->lru); - raid5_release_stripe(sh); ctx->data_only_stripes++; } @@ -2061,7 +2059,7 @@ static int r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, struct r5l_recovery_ctx *ctx) { - struct stripe_head *sh; + struct stripe_head *sh, *next; struct mddev *mddev = log->rdev->mddev; struct page *page; @@ -2073,7 +2071,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, } ctx->seq += 10; - list_for_each_entry(sh, &ctx->cached_list, lru) { + list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { struct r5l_meta_block *mb; int i; int offset; @@ -2119,6 +2117,9 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, sh->log_start = ctx->pos; ctx->pos = write_pos; ctx->seq += 1; + + list_del_init(&sh->lru); + raid5_release_stripe(sh); } __free_page(page); return 0; -- cgit v1.1 From dbd22c8d7fc6276a48627e57a5605cf9565de78a Mon Sep 17 00:00:00 2001 From: JackieLiu Date: Tue, 29 Nov 2016 11:13:20 +0800 Subject: md/raid5-cache: remove the unnecessary next_cp_seq field from 
the r5l_log The next_cp_seq field is useless, remove it. Signed-off-by: JackieLiu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 7b9149e..a46f547 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -113,7 +113,6 @@ struct r5l_log { u64 seq; /* log head sequence */ sector_t next_checkpoint; - u64 next_cp_seq; struct mutex io_mutex; struct r5l_io_unit *current_io; /* current io_unit accepting new data */ @@ -1075,7 +1074,6 @@ static bool r5l_complete_finished_ios(struct r5l_log *log) break; log->next_checkpoint = io->log_start; - log->next_cp_seq = io->seq; list_del(&io->log_sibling); mempool_free(io, log->io_pool); -- cgit v1.1 From 1a0ec5c30c37d29e4435a45e75c896f91af970bd Mon Sep 17 00:00:00 2001 From: JackieLiu Date: Tue, 29 Nov 2016 11:57:30 +0800 Subject: md/raid5-cache: do not need to set STRIPE_PREREAD_ACTIVE repeatedly r5c_make_stripe_write_out() has already set this flag, so there is no need to set it again. Signed-off-by: JackieLiu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index a46f547..e786d4e 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1247,8 +1247,6 @@ static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh) atomic_inc(&conf->active_stripes); r5c_make_stripe_write_out(sh); - if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - atomic_inc(&conf->preread_active_stripes); raid5_release_stripe(sh); } -- cgit v1.1 From e8d7c33232e5fdfa761c3416539bc5b4acd12db5 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Sun, 27 Nov 2016 19:32:32 +0300 Subject: md/raid5: limit request size according to implementation limits The current implementation employs a 16-bit counter of active stripes in the lower bits of bio->bi_phys_segments. If a request is big enough to overflow this counter, the bio will be completed and freed too early. Fortunately this does not happen in the default configuration, because several other limits prevent it: stripe_cache_size * nr_disks effectively limits the count of active stripes, and a small max_sectors_kb on the lower disks prevents it during normal read/write operations. Overflow easily happens in discard if it is enabled by the module parameter "devices_handle_discard_safely" and stripe_cache_size is set big enough. This patch limits request size to 256MiB - 8KiB to prevent overflows.
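As a quick check of that limit (assuming 4KiB pages, so STRIPE_SECTORS is 8, and 512-byte sectors): 0xfffe * STRIPE_SECTORS = 65534 * 8 = 524272 sectors, and 524272 * 512 bytes = 268,427,264 bytes, which is exactly 256MiB - 8KiB; this is the limit passed to blk_queue_max_hw_sectors() and blk_queue_max_discard_sectors() in the hunk below.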
Signed-off-by: Konstantin Khlebnikov Cc: Shaohua Li Cc: Neil Brown Cc: stable@vger.kernel.org Signed-off-by: Shaohua Li --- drivers/md/raid5.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index db909b9..6bf3c26 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7108,6 +7108,15 @@ static int raid5_run(struct mddev *mddev) stripe = (stripe | (stripe-1)) + 1; mddev->queue->limits.discard_alignment = stripe; mddev->queue->limits.discard_granularity = stripe; + + /* + * We use 16-bit counter of active stripes in bi_phys_segments + * (minus one for over-loaded initialization) + */ + blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS); + blk_queue_max_discard_sectors(mddev->queue, + 0xfffe * STRIPE_SECTORS); + /* * unaligned part of discard request will be ignored, so can't * guarantee discard_zeroes_data -- cgit v1.1 From f687a33ef02d3e0f13c4fa9e3b2e90f656bbfb26 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Wed, 30 Nov 2016 16:57:54 -0800 Subject: md/r5cache: run_no_space_stripes() when R5C_LOG_CRITICAL == 0 With writeback cache, we define log space critical as free_space < 2 * reclaim_required_space So the deassert of R5C_LOG_CRITICAL could happen when 1. free_space increases 2. reclaim_required_space decreases Currently, run_no_space_stripes() is called when 1 happens, but not (always) when 2 happens. With this patch, run_no_space_stripes() is call when R5C_LOG_CRITICAL is cleared. Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index e786d4e..c36f86b 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -370,6 +370,7 @@ static inline void r5c_update_log_state(struct r5l_log *log) struct r5conf *conf = log->rdev->mddev->private; sector_t free_space; sector_t reclaim_space; + bool wake_reclaim = false; if (!r5c_is_writeback(log)) return; @@ -379,12 +380,18 @@ static inline void r5c_update_log_state(struct r5l_log *log) reclaim_space = r5c_log_required_to_flush_cache(conf); if (free_space < 2 * reclaim_space) set_bit(R5C_LOG_CRITICAL, &conf->cache_state); - else + else { + if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state)) + wake_reclaim = true; clear_bit(R5C_LOG_CRITICAL, &conf->cache_state); + } if (free_space < 3 * reclaim_space) set_bit(R5C_LOG_TIGHT, &conf->cache_state); else clear_bit(R5C_LOG_TIGHT, &conf->cache_state); + + if (wake_reclaim) + r5l_wake_reclaim(log, 0); } /* @@ -1345,6 +1352,10 @@ static void r5c_do_reclaim(struct r5conf *conf) spin_unlock(&conf->device_lock); spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); } + + if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state)) + r5l_run_no_space_stripes(log); + md_wakeup_thread(conf->mddev->thread); } @@ -2401,6 +2412,7 @@ void r5c_finish_stripe_write_out(struct r5conf *conf, spin_unlock_irq(&conf->log->stripe_in_journal_lock); sh->log_start = MaxSector; atomic_dec(&conf->log->stripe_in_journal_count); + r5c_update_log_state(conf->log); } int -- cgit v1.1 From 43b9674832cc41ad0ad7b7e2ec397e47dcd5f6c3 Mon Sep 17 00:00:00 2001 From: JackieLiu Date: Mon, 5 Dec 2016 11:58:53 +0800 Subject: md/raid5-cache: adjust the write position of the empty block if no data blocks When recovery is complete, we write an empty block and record his position first, then make the data-only stripes rewritten done, the location of the empty block 
as the last checkpoint position to write into the superblock. We should also update last_checkpoint to this empty block's position. ------------------------------------------------------------------ | old log | empty block | data only stripes | invalid log | ------------------------------------------------------------------ ^ ^ ^ | |- log->last_checkpoint |- log->log_start | |- log->last_cp_seq |- log->next_checkpoint |- log->seq=n |- log->seq=10+n At the same time, if there are no data-only stripes, this situation may appear: | meta1 | meta2 | meta3 | Here meta 1 is valid and meta 2 is invalid, but meta 3 could still look valid, so a later recovery must not mistake it for a valid block. The solution is to create a new meta block at meta2's position with its seq == meta1's seq + 10 and let the superblock point to meta2. Signed-off-by: JackieLiu Reviewed-by: Zhengyuan Liu Reviewed-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index c36f86b..5efb876 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -2077,7 +2077,6 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, return -ENOMEM; } - ctx->seq += 10; list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { struct r5l_meta_block *mb; int i; @@ -2137,6 +2136,8 @@ static int r5l_recovery_log(struct r5l_log *log) struct mddev *mddev = log->rdev->mddev; struct r5l_recovery_ctx ctx; int ret; + sector_t pos; + struct stripe_head *sh; ctx.pos = log->last_checkpoint; ctx.seq = log->last_cp_seq; @@ -2154,6 +2155,18 @@ static int r5l_recovery_log(struct r5l_log *log) if (ret) return ret; + pos = ctx.pos; + ctx.seq += 10; + + if (ctx.data_only_stripes == 0) { + log->next_checkpoint = ctx.pos; + r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++); + ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); + } else { + sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru); + log->next_checkpoint = sh->log_start; + } + if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0)) pr_debug("md/raid:%s: starting from clean shutdown\n", mdname(mddev)); @@ -2171,10 +2184,9 @@ static int r5l_recovery_log(struct r5l_log *log) } log->log_start = ctx.pos; - log->next_checkpoint = ctx.pos; log->seq = ctx.seq; - r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq); - r5l_write_super(log, ctx.pos); + log->last_checkpoint = pos; + r5l_write_super(log, pos); return 0; } -- cgit v1.1 From 3d7e7e1d9db048bc6c1503bca9c82193450fc476 Mon Sep 17 00:00:00 2001 From: Zhengyuan Liu Date: Sun, 4 Dec 2016 16:49:44 +0800 Subject: md/r5cache: do r5c_update_log_state after log recovery We should update the log state after we have done a log recovery; the current code may get a wrong log state since log->log_start isn't initialized until r5l_recovery_log is called. At the log recovery stage, no lock is needed as there is no race condition. The next_checkpoint field will be initialized in r5l_recovery_log too.
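In other words, the intended ordering at the end of log load becomes (condensed from the patch below):

	ret = r5l_recovery_log(log);	/* sets log->log_start and log->next_checkpoint */
	r5c_update_log_state(log);	/* now reads a valid log->log_start */
	return ret;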
Signed-off-by: Zhengyuan Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 5efb876..c3b3124 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -2542,14 +2542,12 @@ create: if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) log->max_free_space = RECLAIM_MAX_FREE_SPACE; log->last_checkpoint = cp; - log->next_checkpoint = cp; - mutex_lock(&log->io_mutex); - r5c_update_log_state(log); - mutex_unlock(&log->io_mutex); __free_page(page); - return r5l_recovery_log(log); + ret = r5l_recovery_log(log); + r5c_update_log_state(log); + return ret; ioerr: __free_page(page); return ret; -- cgit v1.1 From e2342ca832726a840ca6bd196dd2cc073815b08a Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 5 Dec 2016 16:40:50 +1100 Subject: md: fix refcount problem on mddev when stopping array. md_open() gets a counted reference on an mddev using mddev_find(). If it ends up returning an error, it must drop this reference. There are two error paths where the reference is not dropped. One only happens if the process is signalled at an awkward time, which is quite unlikely. The other was introduced recently in commit af8d8e6f0. Change the code to ensure we drop the reference when returning an error, and make it harder to re-introduce this sort of bug in the future. Reported-by: Marc Smith Fixes: af8d8e6f0315 ("md: changes for MD_STILL_CLOSED flag") Signed-off-by: NeilBrown Acked-by: Guoqing Jiang Signed-off-by: Shaohua Li --- drivers/md/md.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index c7894fb..84dc8913 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7112,7 +7112,8 @@ static int md_open(struct block_device *bdev, fmode_t mode) if (test_bit(MD_CLOSING, &mddev->flags)) { mutex_unlock(&mddev->open_mutex); - return -ENODEV; + err = -ENODEV; + goto out; } err = 0; @@ -7121,6 +7122,8 @@ static int md_open(struct block_device *bdev, fmode_t mode) check_disk_change(bdev); out: + if (err) + mddev_put(mddev); return err; } -- cgit v1.1 From d30dfeb9be25c67c9cfdfd932db57a571fd347b4 Mon Sep 17 00:00:00 2001 From: JackieLiu Date: Thu, 8 Dec 2016 08:47:39 +0800 Subject: md/raid5-cache: no recovery is required when creating the super-block When creating the super-block information, we do not need to run the recovery stage; we only need to initialize some variables.
Signed-off-by: JackieLiu Reviewed-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index c3b3124..7c732c5 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -2492,7 +2492,7 @@ static int r5l_load_log(struct r5l_log *log) sector_t cp = log->rdev->journal_tail; u32 stored_crc, expected_crc; bool create_super = false; - int ret; + int ret = 0; /* Make sure it's valid */ if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp) @@ -2545,7 +2545,13 @@ create: __free_page(page); - ret = r5l_recovery_log(log); + if (create_super) { + log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS); + log->seq = log->last_cp_seq + 1; + log->next_checkpoint = cp; + } else + ret = r5l_recovery_log(log); + r5c_update_log_state(log); return ret; ioerr: -- cgit v1.1 From 5c88f403a5d2bd75911c6faaacc9bea97ac7d121 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Wed, 7 Dec 2016 09:42:05 -0800 Subject: md/raid5-cache: fix crc in rewrite_data_only_stripes() r5l_recovery_create_empty_meta_block() creates crc for the empty metablock. After the metablock is updated, we need clear the checksum before recalculate it. Shaohua: moved checksum calculation out of r5l_recovery_create_empty_meta_block. We should calculate it after all fields are updated. Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 7c732c5..aa990bd 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1522,7 +1522,6 @@ r5l_recovery_create_empty_meta_block(struct r5l_log *log, sector_t pos, u64 seq) { struct r5l_meta_block *mb; - u32 crc; mb = page_address(page); clear_page(mb); @@ -1531,19 +1530,21 @@ r5l_recovery_create_empty_meta_block(struct r5l_log *log, mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block)); mb->seq = cpu_to_le64(seq); mb->position = cpu_to_le64(pos); - crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); - mb->checksum = cpu_to_le32(crc); } static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, u64 seq) { struct page *page; + struct r5l_meta_block *mb; page = alloc_page(GFP_KERNEL); if (!page) return -ENOMEM; r5l_recovery_create_empty_meta_block(log, page, pos, seq); + mb = page_address(page); + mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, + mb, PAGE_SIZE)); if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, WRITE_FUA, false)) { __free_page(page); @@ -2117,7 +2118,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, } } mb->meta_size = cpu_to_le32(offset); - mb->checksum = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); + mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, + mb, PAGE_SIZE)); sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_WRITE, WRITE_FUA, false); sh->log_start = ctx->pos; -- cgit v1.1 From 3c6edc66085e1d895a698c572bbfaf4d57fdb771 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Wed, 7 Dec 2016 09:42:06 -0800 Subject: md/r5cache: after recovery, increase journal seq by 10000 Currently, we increase journal entry seq by 10 after recovery. However, this is not sufficient in the following case. After crash the journal looks like | seq+0 | +1 | +2 | +3 | +4 | +5 | +6 | +7 | ... 
| +11 | +12 | If +1 is not valid, we dropped all entries from +1 to +12; and write seq+10: | seq+0 | +10 | +2 | +3 | +4 | +5 | +6 | +7 | ... | +11 | +12 | However, if we write a big journal entry with seq+11, it will connect with some stale journal entry: | seq+0 | +10 | +11 | +12 | To reduce the risk of this issue, we increase seq by 10000 instead. Shaohua: use 10000 instead of 1000. The risk should be very unlikely. The total stripe cache size is less than 2k typically, and several stripes can fit into one meta data block. So the total inflight meta data blocks would be quite small, which means the the total sequence number used should be quite small. The 10000 sequence number increase should be far more than safe. Signed-off-by: Song Liu Signed-off-by: Shaohua Li --- drivers/md/raid5-cache.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index aa990bd..de8a4ed 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -2004,8 +2004,8 @@ static int r5c_recovery_flush_log(struct r5l_log *log, * happens again, new recovery will start from meta 1. Since meta 2n is * valid now, recovery will think meta 3 is valid, which is wrong. * The solution is we create a new meta in meta2 with its seq == meta - * 1's seq + 10 and let superblock points to meta2. The same recovery will - * not think meta 3 is a valid meta, because its seq doesn't match + * 1's seq + 10000 and let superblock points to meta2. The same recovery + * will not think meta 3 is a valid meta, because its seq doesn't match */ /* @@ -2035,7 +2035,7 @@ static int r5c_recovery_flush_log(struct r5l_log *log, * --------------------------------------------- * ^ ^ * |- log->last_checkpoint |- ctx->pos+1 - * |- log->last_cp_seq |- ctx->seq+11 + * |- log->last_cp_seq |- ctx->seq+10001 * * However, it is not safe to start the state machine yet, because data only * parities are not yet secured in RAID. To save these data only parities, we @@ -2046,7 +2046,7 @@ static int r5c_recovery_flush_log(struct r5l_log *log, * ----------------------------------------------------------------- * ^ ^ * |- log->last_checkpoint |- ctx->pos+n - * |- log->last_cp_seq |- ctx->seq+10+n + * |- log->last_cp_seq |- ctx->seq+10000+n * * If failure happens again during this process, the recovery can safe start * again from log->last_checkpoint. @@ -2058,7 +2058,7 @@ static int r5c_recovery_flush_log(struct r5l_log *log, * ----------------------------------------------------------------- * ^ ^ * |- log->last_checkpoint |- ctx->pos+n - * |- log->last_cp_seq |- ctx->seq+10+n + * |- log->last_cp_seq |- ctx->seq+10000+n * * Then we can safely start the state machine. If failure happens from this * point on, the recovery will start from new log->last_checkpoint. @@ -2157,8 +2157,8 @@ static int r5l_recovery_log(struct r5l_log *log) if (ret) return ret; - pos = ctx.pos; - ctx.seq += 10; + pos = ctx.pos; + ctx.seq += 10000; if (ctx.data_only_stripes == 0) { log->next_checkpoint = ctx.pos; -- cgit v1.1 From 6995f0b247e15e34fbcd10852c08b30bdc1a78da Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 8 Dec 2016 15:48:17 -0800 Subject: md: takeover should clear unrelated bits When we change level from raid1 to raid5, the MD_FAILFAST_SUPPORTED bit will be accidentally set, but raid5 doesn't support it. The same is true for the MD_HAS_JOURNAL bit. 
Fix: 46533ff (md: Use REQ_FAILFAST_* on metadata writes where appropriate) Reviewed-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/raid0.c | 5 +++++ drivers/md/raid1.c | 5 ++++- drivers/md/raid5.c | 6 +++++- 3 files changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index e628f18..a162fed 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -539,8 +539,11 @@ static void *raid0_takeover_raid45(struct mddev *mddev) mddev->delta_disks = -1; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; + clear_bit(MD_HAS_JOURNAL, &mddev->flags); + clear_bit(MD_JOURNAL_CLEAN, &mddev->flags); create_strip_zones(mddev, &priv_conf); + return priv_conf; } @@ -580,6 +583,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev) mddev->degraded = 0; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; + clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); create_strip_zones(mddev, &priv_conf); return priv_conf; @@ -622,6 +626,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev) mddev->raid_disks = 1; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; + clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); create_strip_zones(mddev, &priv_conf); return priv_conf; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 94e0afc..efc2e74 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -3243,9 +3243,12 @@ static void *raid1_takeover(struct mddev *mddev) mddev->new_layout = 0; mddev->new_chunk_sectors = 0; conf = setup_conf(mddev); - if (!IS_ERR(conf)) + if (!IS_ERR(conf)) { /* Array must appear to be quiesced */ conf->array_frozen = 1; + clear_bit(MD_HAS_JOURNAL, &mddev->flags); + clear_bit(MD_JOURNAL_CLEAN, &mddev->flags); + } return conf; } return ERR_PTR(-EINVAL); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6bf3c26..3e6a2a0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7811,6 +7811,7 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level) static void *raid5_takeover_raid1(struct mddev *mddev) { int chunksect; + void *ret; if (mddev->raid_disks != 2 || mddev->degraded > 1) @@ -7832,7 +7833,10 @@ static void *raid5_takeover_raid1(struct mddev *mddev) mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; mddev->new_chunk_sectors = chunksect; - return setup_conf(mddev); + ret = setup_conf(mddev); + if (!IS_ERR_VALUE(ret)) + clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); + return ret; } static void *raid5_takeover_raid6(struct mddev *mddev) -- cgit v1.1 From 82a301cb0ea2df8a5c88213094a01660067c7fb4 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 8 Dec 2016 15:48:18 -0800 Subject: md: MD_RECOVERY_NEEDED is set for mddev->recovery Fixes: 90f5f7ad4f38("md: Wait for md_check_recovery before attempting device removal.") Reviewed-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/md.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 84dc8913..5e66648 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -6856,7 +6856,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, /* need to ensure recovery thread has run */ wait_event_interruptible_timeout(mddev->sb_wait, !test_bit(MD_RECOVERY_NEEDED, - &mddev->flags), + &mddev->recovery), msecs_to_jiffies(5000)); if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { /* Need to flush page cache, and ensure no-one else opens -- cgit v1.1 From 
2953079c692da067aeb6345659875b97378f9b0a Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 8 Dec 2016 15:48:19 -0800 Subject: md: separate flags for superblock changes The mddev->flags are used for different purposes. There are a lot of places we check/change the flags without masking unrelated flags, we could check/change unrelated flags. These usage are most for superblock write, so spearate superblock related flags. This should make the code clearer and also fix real bugs. Reviewed-by: NeilBrown Signed-off-by: Shaohua Li --- drivers/md/bitmap.c | 4 +- drivers/md/dm-raid.c | 4 +- drivers/md/md.c | 115 ++++++++++++++++++++++++----------------------- drivers/md/md.h | 16 ++++--- drivers/md/multipath.c | 2 +- drivers/md/raid1.c | 12 ++--- drivers/md/raid10.c | 22 ++++----- drivers/md/raid5-cache.c | 6 +-- drivers/md/raid5.c | 26 +++++------ 9 files changed, 106 insertions(+), 101 deletions(-) (limited to 'drivers') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index c462157..9fb2cca 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1623,7 +1623,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force) atomic_read(&bitmap->mddev->recovery_active) == 0); bitmap->mddev->curr_resync_completed = sector; - set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags); sector &= ~((1ULL << bitmap->counts.chunkshift) - 1); s = 0; while (s < sector && s < bitmap->mddev->resync_max_sectors) { @@ -2296,7 +2296,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len) /* Ensure new bitmap info is stored in * metadata promptly. */ - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_wakeup_thread(mddev->thread); } rv = 0; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 6d53810..953159d 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2011,7 +2011,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190); /* Force writing of superblocks to disk */ - set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags); /* Any superblock is better than none, choose that if given */ return refdev ? 
0 : 1; @@ -3497,7 +3497,7 @@ static void rs_update_sbs(struct raid_set *rs) struct mddev *mddev = &rs->md; int ro = mddev->ro; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev->ro = 0; md_update_sb(mddev, 1); mddev->ro = ro; diff --git a/drivers/md/md.c b/drivers/md/md.c index 5e66648..c15e234 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -729,7 +729,7 @@ static void super_written(struct bio *bio) md_error(mddev, rdev); if (!test_bit(Faulty, &rdev->flags) && (bio->bi_opf & MD_FAILFAST)) { - set_bit(MD_NEED_REWRITE, &mddev->flags); + set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); set_bit(LastDev, &rdev->flags); } } else @@ -780,7 +780,7 @@ int md_super_wait(struct mddev *mddev) { /* wait for all superblock writes that were scheduled to complete */ wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); - if (test_and_clear_bit(MD_NEED_REWRITE, &mddev->flags)) + if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) return -EAGAIN; return 0; } @@ -2322,24 +2322,24 @@ void md_update_sb(struct mddev *mddev, int force_change) if (mddev->ro) { if (force_change) - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); return; } repeat: if (mddev_is_clustered(mddev)) { - if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) + if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) force_change = 1; - if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) + if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) nospares = 1; ret = md_cluster_ops->metadata_update_start(mddev); /* Has someone else has updated the sb */ if (!does_sb_need_changing(mddev)) { if (ret == 0) md_cluster_ops->metadata_update_cancel(mddev); - bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING), - BIT(MD_CHANGE_DEVS) | - BIT(MD_CHANGE_CLEAN)); + bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), + BIT(MD_SB_CHANGE_DEVS) | + BIT(MD_SB_CHANGE_CLEAN)); return; } } @@ -2355,10 +2355,10 @@ repeat: } if (!mddev->persistent) { - clear_bit(MD_CHANGE_CLEAN, &mddev->flags); - clear_bit(MD_CHANGE_DEVS, &mddev->flags); + clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); + clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (!mddev->external) { - clear_bit(MD_CHANGE_PENDING, &mddev->flags); + clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); rdev_for_each(rdev, mddev) { if (rdev->badblocks.changed) { rdev->badblocks.changed = 0; @@ -2378,9 +2378,9 @@ repeat: mddev->utime = ktime_get_real_seconds(); - if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) + if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) force_change = 1; - if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) + if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) /* just a clean<-> dirty transition, possibly leave spares alone, * though if events isn't the right even/odd, we will have to do * spares after all @@ -2472,14 +2472,14 @@ rewrite: } if (md_super_wait(mddev) < 0) goto rewrite; - /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ + /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */ if (mddev_is_clustered(mddev) && ret == 0) md_cluster_ops->metadata_update_finish(mddev); if (mddev->in_sync != sync_req || - !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING), - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN))) + !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN))) /* have to write it out again */ 
goto repeat; wake_up(&mddev->sb_wait); @@ -2523,7 +2523,7 @@ static int add_bound_rdev(struct md_rdev *rdev) } sysfs_notify_dirent_safe(rdev->sysfs_state); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (mddev->degraded) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); @@ -2640,7 +2640,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) if (err == 0) { md_kick_rdev_from_array(rdev); if (mddev->pers) { - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_wakeup_thread(mddev->thread); } md_new_event(mddev); @@ -3651,7 +3651,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) } blk_set_stacking_limits(&mddev->queue->limits); pers->run(mddev); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev_resume(mddev); if (!mddev->thread) md_update_sb(mddev, 1); @@ -3846,7 +3846,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len) if (!err) { mddev->recovery_cp = n; if (mddev->pers) - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); } mddev_unlock(mddev); return err ?: len; @@ -3920,7 +3920,7 @@ array_state_show(struct mddev *mddev, char *page) st = read_auto; break; case 0: - if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) + if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) st = write_pending; else if (mddev->in_sync) st = clean; @@ -3958,7 +3958,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) spin_lock(&mddev->lock); if (st == active) { restart_array(mddev); - clear_bit(MD_CHANGE_PENDING, &mddev->flags); + clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); md_wakeup_thread(mddev->thread); wake_up(&mddev->sb_wait); err = 0; @@ -3969,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) mddev->in_sync = 1; if (mddev->safemode == 1) mddev->safemode = 0; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); } err = 0; } else @@ -4035,7 +4035,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) mddev->in_sync = 1; if (mddev->safemode == 1) mddev->safemode = 0; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); } err = 0; } else @@ -4049,7 +4049,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) err = restart_array(mddev); if (err) break; - clear_bit(MD_CHANGE_PENDING, &mddev->flags); + clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); wake_up(&mddev->sb_wait); err = 0; } else { @@ -5378,7 +5378,7 @@ int md_run(struct mddev *mddev) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - if (mddev->flags & MD_UPDATE_SB_FLAGS) + if (mddev->sb_flags) md_update_sb(mddev, 0); md_new_event(mddev); @@ -5473,6 +5473,7 @@ static void md_clean(struct mddev *mddev) mddev->level = LEVEL_NONE; mddev->clevel[0] = 0; mddev->flags = 0; + mddev->sb_flags = 0; mddev->ro = 0; mddev->metadata_type[0] = 0; mddev->chunk_sectors = 0; @@ -5525,7 +5526,7 @@ static void __md_stop_writes(struct mddev *mddev) if (mddev->ro == 0 && ((!mddev->in_sync && !mddev_is_clustered(mddev)) || - (mddev->flags & MD_UPDATE_SB_FLAGS))) { + mddev->sb_flags)) { /* mark array as shutdown cleanly */ if (!mddev_is_clustered(mddev)) mddev->in_sync = 1; @@ -5608,13 +5609,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) * which will now never 
happen */ wake_up_process(mddev->sync_thread->tsk); - if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags)) + if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) return -EBUSY; mddev_unlock(mddev); wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_PENDING, &mddev->flags)); + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); mddev_lock_nointr(mddev); mutex_lock(&mddev->open_mutex); @@ -6234,7 +6235,7 @@ kick_rdev: md_cluster_ops->remove_disk(mddev, rdev); md_kick_rdev_from_array(rdev); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (mddev->thread) md_wakeup_thread(mddev->thread); else @@ -6303,7 +6304,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) rdev->raid_disk = -1; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (!mddev->thread) md_update_sb(mddev, 1); /* @@ -6460,9 +6461,11 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) mddev->max_disks = MD_SB_DISKS; - if (mddev->persistent) + if (mddev->persistent) { mddev->flags = 0; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + mddev->sb_flags = 0; + } + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); @@ -7007,11 +7010,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, /* If a device failed while we were read-only, we * need to make sure the metadata is updated now. */ - if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) { + if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { mddev_unlock(mddev); wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_DEVS, &mddev->flags) && - !test_bit(MD_CHANGE_PENDING, &mddev->flags)); + !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); mddev_lock_nointr(mddev); } } else { @@ -7766,8 +7769,8 @@ void md_write_start(struct mddev *mddev, struct bio *bi) spin_lock(&mddev->lock); if (mddev->in_sync) { mddev->in_sync = 0; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); - set_bit(MD_CHANGE_PENDING, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); + set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); md_wakeup_thread(mddev->thread); did_change = 1; } @@ -7776,7 +7779,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi) if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_PENDING, &mddev->flags)); + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); } EXPORT_SYMBOL(md_write_start); @@ -7797,7 +7800,7 @@ EXPORT_SYMBOL(md_write_end); * attempting a GFP_KERNEL allocation while holding the mddev lock. * Must be called with mddev_lock held. * - * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock + * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock * is dropped, so return -EAGAIN after notifying userspace. 
*/ int md_allow_write(struct mddev *mddev) @@ -7812,8 +7815,8 @@ int md_allow_write(struct mddev *mddev) spin_lock(&mddev->lock); if (mddev->in_sync) { mddev->in_sync = 0; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); - set_bit(MD_CHANGE_PENDING, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); + set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); if (mddev->safemode_delay && mddev->safemode == 0) mddev->safemode = 1; @@ -7823,7 +7826,7 @@ int md_allow_write(struct mddev *mddev) } else spin_unlock(&mddev->lock); - if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) + if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) return -EAGAIN; else return 0; @@ -8058,7 +8061,7 @@ void md_do_sync(struct md_thread *thread) j > mddev->recovery_cp) mddev->recovery_cp = j; update_time = jiffies; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } @@ -8206,8 +8209,8 @@ void md_do_sync(struct md_thread *thread) /* set CHANGE_PENDING here since maybe another update is needed, * so other nodes are informed. It should be harmless for normal * raid */ - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); spin_lock(&mddev->lock); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { @@ -8307,12 +8310,12 @@ static int remove_and_add_spares(struct mddev *mddev, if (!test_bit(Journal, &rdev->flags)) spares++; md_new_event(mddev); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); } } no_add: if (removed) - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); return spares; } @@ -8385,7 +8388,7 @@ void md_check_recovery(struct mddev *mddev) if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) return; if ( ! 
( - (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) || + (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || test_bit(MD_RECOVERY_DONE, &mddev->recovery) || test_bit(MD_RELOAD_SB, &mddev->flags) || @@ -8423,7 +8426,7 @@ void md_check_recovery(struct mddev *mddev) md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - clear_bit(MD_CHANGE_PENDING, &mddev->flags); + clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); goto unlock; } @@ -8451,7 +8454,7 @@ void md_check_recovery(struct mddev *mddev) mddev->recovery_cp == MaxSector) { mddev->in_sync = 1; did_change = 1; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); } if (mddev->safemode == 1) mddev->safemode = 0; @@ -8460,7 +8463,7 @@ void md_check_recovery(struct mddev *mddev) sysfs_notify_dirent_safe(mddev->sysfs_state); } - if (mddev->flags & MD_UPDATE_SB_FLAGS) + if (mddev->sb_flags) md_update_sb(mddev, 0); if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && @@ -8556,7 +8559,7 @@ void md_reap_sync_thread(struct mddev *mddev) if (mddev->pers->spare_active(mddev)) { sysfs_notify(&mddev->kobj, NULL, "degraded"); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); } } if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && @@ -8571,7 +8574,7 @@ void md_reap_sync_thread(struct mddev *mddev) rdev->saved_raid_disk = -1; md_update_sb(mddev, 1); - /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can + /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by * clustered raid */ if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) @@ -8637,8 +8640,8 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, sysfs_notify(&rdev->kobj, NULL, "unacknowledged_bad_blocks"); sysfs_notify_dirent_safe(rdev->sysfs_state); - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING)); md_wakeup_thread(rdev->mddev->thread); return 1; } else diff --git a/drivers/md/md.h b/drivers/md/md.h index 5c08f84..e38936d 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -213,9 +213,6 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, struct md_cluster_info; enum mddev_flags { - MD_CHANGE_DEVS, /* Some device status has changed */ - MD_CHANGE_CLEAN, /* transition to or from 'clean' */ - MD_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */ MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */ MD_CLOSING, /* If set, we are closing the array, do not open * it then */ @@ -231,11 +228,15 @@ enum mddev_flags { * supported as calls to md_error() will * never cause the array to become failed.
*/ - MD_NEED_REWRITE, /* metadata write needs to be repeated */ }; -#define MD_UPDATE_SB_FLAGS (BIT(MD_CHANGE_DEVS) | \ - BIT(MD_CHANGE_CLEAN) | \ - BIT(MD_CHANGE_PENDING)) /* If these are set, md_update_sb needed */ + +enum mddev_sb_flags { + MD_SB_CHANGE_DEVS, /* Some device status has changed */ + MD_SB_CHANGE_CLEAN, /* transition to or from 'clean' */ + MD_SB_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */ + MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */ +}; + struct mddev { void *private; struct md_personality *pers; @@ -243,6 +244,7 @@ struct mddev { int md_minor; struct list_head disks; unsigned long flags; + unsigned long sb_flags; int suspended; atomic_t active_io; diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 589b807..9fa2d6c 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -208,7 +208,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev) spin_unlock_irqrestore(&conf->device_lock, flags); } set_bit(Faulty, &rdev->flags); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); pr_err("multipath: IO failure on %s, disabling IO path.\n" "multipath: Operation continuing on %d IO paths.\n", bdevname(rdev->bdev, b), diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index efc2e74..a1f3fbe 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1517,8 +1517,8 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) * if recovery is running, make sure it aborts. */ set_bit(MD_RECOVERY_INTR, &mddev->recovery); - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n" "md/raid1:%s: Operation continuing on %d devices.\n", mdname(mddev), bdevname(rdev->bdev, b), @@ -2464,10 +2464,10 @@ static void raid1d(struct md_thread *thread) md_check_recovery(mddev); if (!list_empty_careful(&conf->bio_end_io_list) && - !test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { LIST_HEAD(tmp); spin_lock_irqsave(&conf->device_lock, flags); - if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { while (!list_empty(&conf->bio_end_io_list)) { list_move(conf->bio_end_io_list.prev, &tmp); conf->nr_queued--; @@ -2521,7 +2521,7 @@ static void raid1d(struct md_thread *thread) generic_make_request(r1_bio->bios[r1_bio->read_disk]); cond_resched(); - if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) + if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) md_check_recovery(mddev); } blk_finish_plug(&plug); @@ ... @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); *skipped = 1; put_buf(r1_bio); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 525ca99..ab5e862 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1138,12 +1138,12 @@ static void __make_request(struct mddev *mddev, struct bio *bio) bio->bi_iter.bi_sector < conf->reshape_progress))) { /* Need to update reshape_position in metadata */ mddev->reshape_position = conf->reshape_progress; - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); md_wakeup_thread(mddev->thread); raid10_log(conf->mddev, "wait reshape metadata"); wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_PENDING, &mddev->flags)); + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); conf->reshape_safe = mddev->reshape_position; } @@ -1652,8 +1652,8 @@
static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(Blocked, &rdev->flags); set_bit(Faulty, &rdev->flags); - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); spin_unlock_irqrestore(&conf->device_lock, flags); pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n" "md/raid10:%s: Operation continuing on %d devices.\n", @@ -2761,10 +2761,10 @@ static void raid10d(struct md_thread *thread) md_check_recovery(mddev); if (!list_empty_careful(&conf->bio_end_io_list) && - !test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { LIST_HEAD(tmp); spin_lock_irqsave(&conf->device_lock, flags); - if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { while (!list_empty(&conf->bio_end_io_list)) { list_move(conf->bio_end_io_list.prev, &tmp); conf->nr_queued--; @@ -2822,7 +2822,7 @@ static void raid10d(struct md_thread *thread) } cond_resched(); - if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) + if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) md_check_recovery(mddev); } blk_finish_plug(&plug); @@ ... @@ static int raid10_start_reshape(struct mddev *mddev) spin_unlock_irq(&conf->device_lock); mddev->raid_disks = conf->geo.raid_disks; mddev->reshape_position = conf->reshape_progress; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); @@ -4404,9 +4404,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, else mddev->curr_resync_completed = conf->reshape_progress; conf->reshape_checkpoint = jiffies; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_wakeup_thread(mddev->thread); - wait_event(mddev->sb_wait, mddev->flags == 0 || + wait_event(mddev->sb_wait, mddev->sb_flags == 0 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { allow_barrier(conf); diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index de8a4ed..6d1a150 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1206,8 +1206,8 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log, * there is a deadlock. We workaround this issue with a trylock.
* FIXME: we could miss discard if we can't take reconfig mutex */ - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); if (!mddev_trylock(mddev)) return; md_update_sb(mddev, 1); @@ -2197,7 +2197,7 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp) struct mddev *mddev = log->rdev->mddev; log->rdev->journal_tail = cp; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); } static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3e6a2a0..d40e94d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -961,7 +961,7 @@ again: if (bad < 0) { set_bit(BlockedBadBlocks, &rdev->flags); if (!conf->mddev->external && - conf->mddev->flags) { + conf->mddev->sb_flags) { /* It is very unlikely, but we might * still need to write out the * bad block log - better give it @@ -2547,8 +2547,8 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) set_bit(Blocked, &rdev->flags); set_bit(Faulty, &rdev->flags); - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" "md/raid:%s: Operation continuing on %d devices.\n", mdname(mddev), @@ -4761,7 +4761,7 @@ finish: } if (!bio_list_empty(&s.return_bi)) { - if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) { + if (test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { spin_lock_irq(&conf->device_lock); bio_list_merge(&conf->return_bi, &s.return_bi); spin_unlock_irq(&conf->device_lock); @@ -5617,9 +5617,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk mddev->reshape_position = conf->reshape_progress; mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_wakeup_thread(mddev->thread); - wait_event(mddev->sb_wait, mddev->flags == 0 || + wait_event(mddev->sb_wait, mddev->sb_flags == 0 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) return 0; @@ -5715,10 +5715,10 @@ finish: mddev->reshape_position = conf->reshape_progress; mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_wakeup_thread(mddev->thread); wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_DEVS, &mddev->flags) + !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) goto ret; @@ -5993,10 +5993,10 @@ static void raid5d(struct md_thread *thread) md_check_recovery(mddev); if (!bio_list_empty(&conf->return_bi) && - !test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { struct bio_list tmp = BIO_EMPTY_LIST; spin_lock_irq(&conf->device_lock); - if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { bio_list_merge(&tmp, &conf->return_bi); bio_list_init(&conf->return_bi); } @@ -6043,7 +6043,7 @@ static void raid5d(struct md_thread *thread) break; handled += batch_size; - if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) { + if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
spin_unlock_irq(&conf->device_lock); md_check_recovery(mddev); spin_lock_irq(&conf->device_lock); @@ -7640,7 +7640,7 @@ static int raid5_start_reshape(struct mddev *mddev) } mddev->raid_disks = conf->raid_disks; mddev->reshape_position = conf->reshape_progress; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); @@ -7906,7 +7906,7 @@ static int raid5_check_reshape(struct mddev *mddev) conf->chunk_sectors = new_chunk ; mddev->chunk_sectors = new_chunk; } - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_wakeup_thread(mddev->thread); } return check_reshape(mddev); -- cgit v1.1
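The pattern the series converts callers to can be summarised outside the diff. The sketch below is not part of the patch; it is a minimal illustration (C, kernel style) of how a caller marks the superblock dirty after this change, mirroring the sequence visible in raid10's __make_request hunk above. It assumes the usual in-kernel md context (md.h included, a valid struct mddev) and uses only identifiers that appear in the diffs; the function name example_mark_sb_dirty is hypothetical.

	/*
	 * Illustrative only: record a device-state change in the new
	 * sb_flags word, let the md thread write the superblock, and
	 * wait until the clean->active transition has been committed.
	 */
	static void example_mark_sb_dirty(struct mddev *mddev)
	{
		/* Superblock-change state now lives in sb_flags, not flags. */
		set_mask_bits(&mddev->sb_flags, 0,
			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
		/* md_check_recovery() sees a non-zero sb_flags and calls md_update_sb(). */
		md_wakeup_thread(mddev->thread);
		/* Callers that must not proceed before the write completes wait here. */
		wait_event(mddev->sb_wait,
			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
	}

Because MD_SB_NEED_REWRITE also moved into sb_flags, a simple "does the superblock need writing" test becomes a plain non-zero check on mddev->sb_flags, as the md_run() and __md_stop_writes() hunks above show.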