Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c |  12
-rw-r--r--  drivers/md/dm-ioctl.c |  17
-rw-r--r--  drivers/md/dm.c       |  19
-rw-r--r--  drivers/md/md.c       |   4
-rw-r--r--  drivers/md/raid0.c    |   5
-rw-r--r--  drivers/md/raid5.c    | 123
6 files changed, 102 insertions, 78 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9eeea19..5503e43 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -925,10 +925,11 @@ static int crypt_convert(struct crypt_config *cc,
 
 		switch (r) {
 		/* async */
-		case -EINPROGRESS:
 		case -EBUSY:
 			wait_for_completion(&ctx->restart);
 			reinit_completion(&ctx->restart);
+			/* fall through*/
+		case -EINPROGRESS:
 			ctx->req = NULL;
 			ctx->cc_sector++;
 			continue;
@@ -1345,8 +1346,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
 	struct crypt_config *cc = io->cc;
 
-	if (error == -EINPROGRESS)
+	if (error == -EINPROGRESS) {
+		complete(&ctx->restart);
 		return;
+	}
 
 	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
 		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
@@ -1357,15 +1360,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
 
 	if (!atomic_dec_and_test(&ctx->cc_pending))
-		goto done;
+		return;
 
 	if (bio_data_dir(io->base_bio) == READ)
 		kcryptd_crypt_read_done(io);
 	else
 		kcryptd_crypt_write_io_submit(io, 1);
-done:
-	if (!completion_done(&ctx->restart))
-		complete(&ctx->restart);
 }
 
 static void kcryptd_crypt(struct work_struct *work)
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c8a18e4..720ceeb 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 		goto err_unlock_md_type;
 	}
 
-	if (dm_get_md_type(md) == DM_TYPE_NONE)
+	if (dm_get_md_type(md) == DM_TYPE_NONE) {
 		/* Initial table load: acquire type of table. */
 		dm_set_md_type(md, dm_table_get_type(t));
-	else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+
+		/* setup md->queue to reflect md's type (may block) */
+		r = dm_setup_md_queue(md);
+		if (r) {
+			DMWARN("unable to set up device queue for new table.");
+			goto err_unlock_md_type;
+		}
+	} else if (dm_get_md_type(md) != dm_table_get_type(t)) {
 		DMWARN("can't change device type after initial table load.");
 		r = -EINVAL;
 		goto err_unlock_md_type;
 	}
 
-	/* setup md->queue to reflect md's type (may block) */
-	r = dm_setup_md_queue(md);
-	if (r) {
-		DMWARN("unable to set up device queue for new table.");
-		goto err_unlock_md_type;
-	}
 	dm_unlock_md_type(md);
 
 	/* stage inactive table */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f8c7ca3..a930b72 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone)
+static void free_rq_clone(struct request *clone, bool must_be_mapped)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 
+	WARN_ON_ONCE(must_be_mapped && !clone->q);
+
 	blk_rq_unprep_clone(clone);
 
-	if (clone->q->mq_ops)
+	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
+		/* stacked on blk-mq queue(s) */
 		tio->ti->type->release_clone_rq(clone);
 	else if (!md->queue->mq_ops)
 		/* request_fn queue stacked on request_fn queue(s) */
 		free_clone_request(md, clone);
+	/*
+	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
+	 * no need to call free_clone_request() because we leverage blk-mq by
+	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
+	 */
 
 	if (!md->queue->mq_ops)
 		free_rq_tio(tio);
@@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error)
 		rq->sense_len = clone->sense_len;
 	}
 
-	free_rq_clone(clone);
+	free_rq_clone(clone, true);
 	if (!rq->q->mq_ops)
 		blk_end_request_all(rq, error);
 	else
@@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq)
 	}
 
 	if (clone)
-		free_rq_clone(clone);
+		free_rq_clone(clone, false);
 }
 
 /*
@@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 {
 	struct request_queue *q = NULL;
 
-	if (md->queue->elevator)
-		return 0;
-
 	/* Fully initialize the queue */
 	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
 	if (!q)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d4f31e1..593a024 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4818,12 +4818,12 @@ static void md_free(struct kobject *ko)
 	if (mddev->sysfs_state)
 		sysfs_put(mddev->sysfs_state);
 
+	if (mddev->queue)
+		blk_cleanup_queue(mddev->queue);
 	if (mddev->gendisk) {
 		del_gendisk(mddev->gendisk);
 		put_disk(mddev->gendisk);
 	}
-	if (mddev->queue)
-		blk_cleanup_queue(mddev->queue);
 
 	kfree(mddev);
 }
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2cb59a6..6a68ef5 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -188,8 +188,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 		}
 		dev[j] = rdev1;
 
-		disk_stack_limits(mddev->gendisk, rdev1->bdev,
-				  rdev1->data_offset << 9);
+		if (mddev->queue)
+			disk_stack_limits(mddev->gendisk, rdev1->bdev,
+					  rdev1->data_offset << 9);
 
 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
 			conf->has_merge_bvec = 1;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 77dfd72..1ba97fd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1078,9 +1078,6 @@ again:
 			pr_debug("skip op %ld on disc %d for sector %llu\n",
 				bi->bi_rw, i, (unsigned long long)sh->sector);
 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
-			if (sh->batch_head)
-				set_bit(STRIPE_BATCH_ERR,
-					&sh->batch_head->state);
 			set_bit(STRIPE_HANDLE, &sh->state);
 		}
 
@@ -1971,17 +1968,30 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 	put_cpu();
 }
 
+static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
+{
+	struct stripe_head *sh;
+
+	sh = kmem_cache_zalloc(sc, gfp);
+	if (sh) {
+		spin_lock_init(&sh->stripe_lock);
+		spin_lock_init(&sh->batch_lock);
+		INIT_LIST_HEAD(&sh->batch_list);
+		INIT_LIST_HEAD(&sh->lru);
+		atomic_set(&sh->count, 1);
+	}
+	return sh;
+}
 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 {
 	struct stripe_head *sh;
-	sh = kmem_cache_zalloc(conf->slab_cache, gfp);
+
+	sh = alloc_stripe(conf->slab_cache, gfp);
 	if (!sh)
 		return 0;
 
 	sh->raid_conf = conf;
 
-	spin_lock_init(&sh->stripe_lock);
-
 	if (grow_buffers(sh, gfp)) {
 		shrink_buffers(sh);
 		kmem_cache_free(conf->slab_cache, sh);
@@ -1990,13 +2000,8 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 	sh->hash_lock_index =
 		conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
 	/* we just created an active stripe so... */
-	atomic_set(&sh->count, 1);
 	atomic_inc(&conf->active_stripes);
-	INIT_LIST_HEAD(&sh->lru);
 
-	spin_lock_init(&sh->batch_lock);
-	INIT_LIST_HEAD(&sh->batch_list);
-	sh->batch_head = NULL;
 	release_stripe(sh);
 	conf->max_nr_stripes++;
 	return 1;
@@ -2060,6 +2065,35 @@ static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
 	return ret;
 }
 
+static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
+{
+	unsigned long cpu;
+	int err = 0;
+
+	mddev_suspend(conf->mddev);
+	get_online_cpus();
+	for_each_present_cpu(cpu) {
+		struct raid5_percpu *percpu;
+		struct flex_array *scribble;
+
+		percpu = per_cpu_ptr(conf->percpu, cpu);
+		scribble = scribble_alloc(new_disks,
+					  new_sectors / STRIPE_SECTORS,
+					  GFP_NOIO);
+
+		if (scribble) {
+			flex_array_free(percpu->scribble);
+			percpu->scribble = scribble;
+		} else {
+			err = -ENOMEM;
+			break;
+		}
+	}
+	put_online_cpus();
+	mddev_resume(conf->mddev);
+	return err;
+}
+
 static int resize_stripes(struct r5conf *conf, int newsize)
 {
 	/* Make all the stripes able to hold 'newsize' devices.
@@ -2088,7 +2122,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	struct stripe_head *osh, *nsh;
 	LIST_HEAD(newstripes);
 	struct disk_info *ndisks;
-	unsigned long cpu;
 	int err;
 	struct kmem_cache *sc;
 	int i;
@@ -2109,13 +2142,11 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 		return -ENOMEM;
 
 	for (i = conf->max_nr_stripes; i; i--) {
-		nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
+		nsh = alloc_stripe(sc, GFP_KERNEL);
 		if (!nsh)
 			break;
 
 		nsh->raid_conf = conf;
-		spin_lock_init(&nsh->stripe_lock);
-
 		list_add(&nsh->lru, &newstripes);
 	}
 	if (i) {
@@ -2142,13 +2173,11 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 				    lock_device_hash_lock(conf, hash));
 		osh = get_free_stripe(conf, hash);
 		unlock_device_hash_lock(conf, hash);
-		atomic_set(&nsh->count, 1);
+
 		for(i=0; i<conf->pool_size; i++) {
 			nsh->dev[i].page = osh->dev[i].page;
 			nsh->dev[i].orig_page = osh->dev[i].page;
 		}
-		for( ; i<newsize; i++)
-			nsh->dev[i].page = NULL;
 		nsh->hash_lock_index = hash;
 		kmem_cache_free(conf->slab_cache, osh);
 		cnt++;
@@ -2174,25 +2203,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	} else
 		err = -ENOMEM;
 
-	get_online_cpus();
-	for_each_present_cpu(cpu) {
-		struct raid5_percpu *percpu;
-		struct flex_array *scribble;
-
-		percpu = per_cpu_ptr(conf->percpu, cpu);
-		scribble = scribble_alloc(newsize, conf->chunk_sectors /
-			STRIPE_SECTORS, GFP_NOIO);
-
-		if (scribble) {
-			flex_array_free(percpu->scribble);
-			percpu->scribble = scribble;
-		} else {
-			err = -ENOMEM;
-			break;
-		}
-	}
-	put_online_cpus();
-
 	/* Step 4, return new stripes to service */
 	while(!list_empty(&newstripes)) {
 		nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2212,7 +2222,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 
 	conf->slab_cache = sc;
 	conf->active_name = 1-conf->active_name;
-	conf->pool_size = newsize;
+	if (!err)
+		conf->pool_size = newsize;
 	return err;
 }
 
@@ -2434,7 +2445,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
 	}
 	rdev_dec_pending(rdev, conf->mddev);
 
-	if (sh->batch_head && !uptodate)
+	if (sh->batch_head && !uptodate && !replacement)
 		set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
 	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
@@ -3278,7 +3289,9 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
 		/* reconstruct-write isn't being forced */
 		return 0;
 	for (i = 0; i < s->failed; i++) {
-		if (!test_bit(R5_UPTODATE, &fdev[i]->flags) &&
+		if (s->failed_num[i] != sh->pd_idx &&
+		    s->failed_num[i] != sh->qd_idx &&
+		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
 		    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
 			return 1;
 	}
@@ -3298,6 +3311,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
 	 */
 	BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
 	BUG_ON(test_bit(R5_Wantread, &dev->flags));
+	BUG_ON(sh->batch_head);
 	if ((s->uptodate == disks - 1) &&
 	    (s->failed && (disk_idx == s->failed_num[0] ||
 			   disk_idx == s->failed_num[1]))) {
@@ -3366,7 +3380,6 @@ static void handle_stripe_fill(struct stripe_head *sh,
 {
 	int i;
 
-	BUG_ON(sh->batch_head);
 	/* look for blocks to read/compute, skip this if a compute
 	 * is already in flight, or if the stripe contents are in the
 	 * midst of changing due to a write
@@ -4198,15 +4211,9 @@ static void check_break_stripe_batch_list(struct stripe_head *sh)
 		return;
 
 	head_sh = sh;
-	do {
-		sh = list_first_entry(&sh->batch_list,
-				      struct stripe_head, batch_list);
-		BUG_ON(sh == head_sh);
-	} while (!test_bit(STRIPE_DEGRADED, &sh->state));
 
-	while (sh != head_sh) {
-		next = list_first_entry(&sh->batch_list,
-					struct stripe_head, batch_list);
+	list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
+		list_del_init(&sh->batch_list);
 
 		set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
 			      head_sh->state & (1 << STRIPE_PREREAD_ACTIVE));
@@ -4226,8 +4233,6 @@ static void check_break_stripe_batch_list(struct stripe_head *sh)
 
 		set_bit(STRIPE_HANDLE, &sh->state);
 		release_stripe(sh);
-
-		sh = next;
 	}
 }
 
@@ -6221,8 +6226,11 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
 		percpu->spare_page = alloc_page(GFP_KERNEL);
 	if (!percpu->scribble)
 		percpu->scribble = scribble_alloc(max(conf->raid_disks,
-			conf->previous_raid_disks), conf->chunk_sectors /
-			STRIPE_SECTORS, GFP_KERNEL);
+					  conf->previous_raid_disks),
+					  max(conf->chunk_sectors,
+					      conf->prev_chunk_sectors)
+					  / STRIPE_SECTORS,
+					  GFP_KERNEL);
 
 	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
 		free_scratch_buffer(conf, percpu);
@@ -7198,6 +7206,15 @@ static int check_reshape(struct mddev *mddev)
 	if (!check_stripe_cache(mddev))
 		return -ENOSPC;
 
+	if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
+	    mddev->delta_disks > 0)
+		if (resize_chunks(conf,
+				  conf->previous_raid_disks
+				  + max(0, mddev->delta_disks),
+				  max(mddev->new_chunk_sectors,
+				      mddev->chunk_sectors)
+			) < 0)
+			return -ENOMEM;
 	return resize_stripes(conf, (conf->previous_raid_disks
 				     + mddev->delta_disks));
 }
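
The dm-crypt hunks revert an earlier -EBUSY deadlock fix: kcryptd_async_done() goes back to signalling ctx->restart unconditionally when it is called with -EINPROGRESS, and crypt_convert() waits on that completion after an -EBUSY return before falling through into the -EINPROGRESS bookkeeping. The following is a minimal userspace sketch of that wait/complete handshake, with the kernel's completion API reimplemented over pthreads purely for illustration (the names mirror the kernel's, but everything here is stand-in code, not the kernel implementation):

/*
 * Userspace model of the dm-crypt restart handshake: the submitter blocks
 * on a "restart" completion after -EBUSY, and the async callback signals
 * that completion unconditionally, as in the reverted code above.
 */
#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void reinit_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 0;
	pthread_mutex_unlock(&c->lock);
}

static struct completion restart;

/* Stands in for kcryptd_async_done(): always signals restart. */
static void *async_done(void *arg)
{
	(void)arg;
	complete(&restart);	/* unconditional, as in the revert */
	return NULL;
}

int main(void)
{
	pthread_t cb;

	init_completion(&restart);
	/* Submitter saw -EBUSY: the async callback will wake it. */
	pthread_create(&cb, NULL, async_done, NULL);
	wait_for_completion(&restart);
	reinit_completion(&restart);
	puts("restart signalled; submitter falls through to -EINPROGRESS handling");
	pthread_join(cb, NULL);
	return 0;
}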
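In dm.c, free_rq_clone() now dispatches on the mapped device's type instead of dereferencing clone->q, which is NULL when a request was never mapped and is merely being unprepped for requeue; dm_end_request() asserts the mapped case with must_be_mapped=true while dm_unprep_request() passes false. A self-contained sketch of that pattern with simplified stand-in types (this is not the kernel API):

/*
 * Dispatch cleanup on the device type recorded at setup time instead of
 * touching clone->q, which is legitimately NULL for an unmapped clone.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum dm_queue_type { DM_TYPE_REQUEST_BASED, DM_TYPE_MQ_REQUEST_BASED };

struct clone_req {
	void *q;			/* NULL if the clone was never mapped */
	enum dm_queue_type md_type;	/* type of the owning mapped device  */
};

static void release_clone_rq(struct clone_req *c)  { (void)c; puts("target releases clone"); }
static void free_clone_request(struct clone_req *c) { (void)c; puts("core frees clone"); }

static void free_rq_clone(struct clone_req *clone, bool must_be_mapped)
{
	/* Mirrors WARN_ON_ONCE(must_be_mapped && !clone->q). */
	assert(!(must_be_mapped && !clone->q));

	if (clone->md_type == DM_TYPE_MQ_REQUEST_BASED)
		release_clone_rq(clone);	/* stacked on blk-mq queue(s) */
	else
		free_clone_request(clone);	/* request_fn on request_fn   */
}

int main(void)
{
	int dummy_queue;
	struct clone_req mapped   = { .q = &dummy_queue, .md_type = DM_TYPE_MQ_REQUEST_BASED };
	struct clone_req unmapped = { .q = NULL,         .md_type = DM_TYPE_REQUEST_BASED };

	free_rq_clone(&mapped, true);	 /* dm_end_request() path   */
	free_rq_clone(&unmapped, false); /* dm_unprep_request() path */
	return 0;
}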
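On the raid5 side, the new resize_chunks() lets check_reshape() reallocate the per-CPU scribble flex_array before resize_stripes() whenever a reshape grows the disk count or the chunk size, and alloc_scratch_buffer() likewise sizes scribble for the worse of the current and previous geometry. A toy calculation of that sizing rule, using made-up numbers (STRIPE_SECTORS = 8 corresponds to 4KiB stripe units with 512-byte sectors):

/*
 * Sketch of the scribble sizing rule: while a reshape is in flight the
 * per-CPU scratch area must cover both the old and the new geometry.
 */
#include <stdio.h>

#define STRIPE_SECTORS 8

static int max(int a, int b) { return a > b ? a : b; }

/* One scribble element per device, one column per stripe unit in a chunk. */
static long scribble_entries(int disks, int chunk_sectors)
{
	return (long)disks * (chunk_sectors / STRIPE_SECTORS);
}

int main(void)
{
	int old_disks = 4, new_disks = 5;	/* grow by one disk (example) */
	int old_chunk = 128, new_chunk = 1024;	/* 64KiB -> 512KiB (example)  */

	long need = scribble_entries(max(old_disks, new_disks),
				     max(old_chunk, new_chunk));

	printf("old geometry needs %ld entries\n",
	       scribble_entries(old_disks, old_chunk));
	printf("new geometry needs %ld entries\n",
	       scribble_entries(new_disks, new_chunk));
	printf("resize_chunks()-style allocation covers %ld entries\n", need);
	return 0;
}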