Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--  drivers/md/raid1.c  37
1 file changed, 15 insertions(+), 22 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 55de4f6..40b35be 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -494,7 +494,6 @@ static void raid1_end_write_request(struct bio *bio, int error)
bio_put(to_put);
}
-
/*
* This routine returns the disk from which the requested read should
* be done. There is a per-array 'next expected sequential IO' sector
@@ -901,18 +900,18 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
* However if there are already pending
* requests (preventing the barrier from
* rising completely), and the
- * pre-process bio queue isn't empty,
+ * per-process bio queue isn't empty,
* then don't wait, as we need to empty
- * that queue to get the nr_pending
- * count down.
+ * that queue to allow conf->start_next_window
+ * to increase.
*/
wait_event_lock_irq(conf->wait_barrier,
!conf->array_frozen &&
(!conf->barrier ||
- ((conf->start_next_window <
- conf->next_resync + RESYNC_SECTORS) &&
- current->bio_list &&
- !bio_list_empty(current->bio_list))),
+ ((conf->start_next_window <
+ conf->next_resync + RESYNC_SECTORS) &&
+ current->bio_list &&
+ !bio_list_empty(current->bio_list))),
conf->resync_lock);
conf->nr_waiting--;
}
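
The condition in wait_event_lock_irq() above is re-evaluated with
conf->resync_lock held each time the task wakes, and the lock is dropped
while it sleeps. Below is a minimal userspace analogue of that
sleep/recheck pattern, assuming POSIX threads; the kernel primitive
additionally handles IRQ-safe locking and wait-queue bookkeeping, and all
names here are stand-ins, not the driver's.

/* Sketch: sleep until a condition holds, releasing the lock while
 * waiting and re-checking the condition with the lock held on wakeup. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t resync_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_barrier = PTHREAD_COND_INITIALIZER;
static bool array_frozen = false;
static bool barrier_raised = true;

static void *resync_done(void *arg)
{
	pthread_mutex_lock(&resync_lock);
	barrier_raised = false;                /* lower the barrier ... */
	pthread_cond_broadcast(&wait_barrier); /* ... and wake all waiters */
	pthread_mutex_unlock(&resync_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, resync_done, NULL);

	pthread_mutex_lock(&resync_lock);
	/* Re-check the condition with the lock held on every wakeup,
	 * as wait_event_lock_irq() does in the hunk above. */
	while (array_frozen || barrier_raised)
		pthread_cond_wait(&wait_barrier, &resync_lock);
	pthread_mutex_unlock(&resync_lock);

	pthread_join(t, NULL);
	puts("barrier passed");
	return 0;
}
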
@@ -1001,8 +1000,7 @@ static void unfreeze_array(struct r1conf *conf)
spin_unlock_irq(&conf->resync_lock);
}
-
-/* duplicate the data pages for behind I/O
+/* duplicate the data pages for behind I/O
*/
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
@@ -1471,7 +1469,6 @@ static void status(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "]");
}
-
static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
@@ -1565,7 +1562,7 @@ static int raid1_spare_active(struct mddev *mddev)
unsigned long flags;
/*
- * Find all failed disks within the RAID1 configuration
+ * Find all failed disks within the RAID1 configuration
* and mark them readable.
* Called under mddev lock, so rcu protection not needed.
*/
@@ -1606,7 +1603,6 @@ static int raid1_spare_active(struct mddev *mddev)
return count;
}
-
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r1conf *conf = mddev->private;
@@ -1735,7 +1731,6 @@ abort:
return err;
}
-
static void end_sync_read(struct bio *bio, int error)
{
struct r1bio *r1_bio = bio->bi_private;
@@ -1947,7 +1942,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
return 1;
}
-static int process_checks(struct r1bio *r1_bio)
+static void process_checks(struct r1bio *r1_bio)
{
/* We have read all readable devices. If we haven't
* got the block, then there is no hope left.
@@ -2039,7 +2034,6 @@ static int process_checks(struct r1bio *r1_bio)
bio_copy_data(sbio, pbio);
}
- return 0;
}
static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
@@ -2057,8 +2051,8 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
return;
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
- if (process_checks(r1_bio) < 0)
- return;
+ process_checks(r1_bio);
+
/*
* schedule writes
*/
@@ -2458,7 +2452,6 @@ static void raid1d(struct md_thread *thread)
blk_finish_plug(&plug);
}
-
static int init_resync(struct r1conf *conf)
{
int buffs;
@@ -2722,7 +2715,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
/* remove last page from this bio */
bio->bi_vcnt--;
bio->bi_iter.bi_size -= len;
- bio->bi_flags &= ~(1<< BIO_SEG_VALID);
+ __clear_bit(BIO_SEG_VALID, &bio->bi_flags);
}
goto bio_full;
}
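
The hunk above replaces an open-coded flag mask with __clear_bit(), which
clears a single bit of a word non-atomically. Below is a standalone sketch
of the equivalence; the stand-in __clear_bit() and the bit value used for
BIO_SEG_VALID are illustrative, not the kernel's definitions.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Stand-in for the kernel's non-atomic __clear_bit(). */
static inline void __clear_bit(unsigned long nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

int main(void)
{
	enum { BIO_SEG_VALID = 3 };     /* illustrative bit number */
	unsigned long a = 0xffUL, b = 0xffUL;

	a &= ~(1UL << BIO_SEG_VALID);   /* old open-coded form */
	__clear_bit(BIO_SEG_VALID, &b); /* new helper form */

	printf("open-coded: %#lx, helper: %#lx\n", a, b); /* both 0xf7 */
	return 0;
}
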
@@ -2947,9 +2940,9 @@ static int run(struct mddev *mddev)
printk(KERN_NOTICE "md/raid1:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
- printk(KERN_INFO
+ printk(KERN_INFO
"md/raid1:%s: active with %d out of %d mirrors\n",
- mdname(mddev), mddev->raid_disks - mddev->degraded,
+ mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
/*