author     Tejun Heo <tj@kernel.org>            2008-08-25 19:56:14 +0900
committer  Jens Axboe <jens.axboe@oracle.com>   2008-10-09 08:56:08 +0200
commit     074a7aca7afa6f230104e8e65eba3420263714a5 (patch)
tree       f418313e45bd55be8156c8a3e8f9a216cf63058d /drivers/md
parent     eddb2e26b5ee3c5da68ba4bf1921ba20e2097bff (diff)
block: move stats from disk to part0
Move stats related fields - stamp, in_flight, dkstats - from disk to
part0 and unify stat handling such that...

* part_stat_*() now updates part0 together if the specified partition
  is not part0.  i.e. part_stat_*() are now essentially all_stat_*().

* {disk|all}_stat_*() are gone.

* part_round_stats() is updated similarly.  It handles part0 stats
  automatically and disk_round_stats() is killed.

* part_{inc|dec}_in_flight() is implemented which automatically updates
  part0 stats for parts other than part0.

* disk_map_sector_rcu() is updated to return part0 if no part matches.
  Combined with the above changes, this makes NULL special case
  handling in callers unnecessary.

* Separate stats show code paths for disk are collapsed into part
  stats show code paths.

* Rename disk_stat_lock/unlock() to part_stat_lock/unlock().

While at it, reposition stat handling macros a bit and add missing
parentheses around macro parameters.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
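For reference, the per-bio accounting conversion in the md personalities below boils down to one pattern. The following is a minimal sketch, not part of the patch, assuming the part_stat_*() macros introduced by this series (declared alongside struct gendisk in <linux/genhd.h>) and the bio helpers already used in the hunks:

	#include <linux/genhd.h>
	#include <linux/bio.h>

	/*
	 * Sketch of the unified accounting pattern used by the converted
	 * drivers: all stats are charged to the whole-device partition,
	 * disk->part0, under part_stat_lock().
	 */
	static void account_bio_sketch(struct gendisk *disk, struct bio *bio)
	{
		const int rw = bio_data_dir(bio);
		int cpu;

		cpu = part_stat_lock();
		part_stat_inc(cpu, &disk->part0, ios[rw]);
		part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
		part_stat_unlock();
	}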
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm.c         27
-rw-r--r--  drivers/md/linear.c      9
-rw-r--r--  drivers/md/md.c          4
-rw-r--r--  drivers/md/multipath.c   9
-rw-r--r--  drivers/md/raid0.c       9
-rw-r--r--  drivers/md/raid1.c       9
-rw-r--r--  drivers/md/raid10.c      9
-rw-r--r--  drivers/md/raid5.c       9
8 files changed, 46 insertions, 39 deletions
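The dm.c hunks additionally show where the in-flight counter and the round-stats call moved: both now operate on dm_disk(md)->part0 rather than on the gendisk itself. A condensed sketch of that start/finish pairing, again not part of the patch and using only the helpers visible in the diff (dm_disk(), md->pending):

	/*
	 * Start/finish accounting as converted in dm.c:
	 * part_round_stats() folds the elapsed time into the partition's
	 * stats, and in_flight now lives in the hd_struct (disk->part0).
	 */
	static void start_io_acct_sketch(struct mapped_device *md)
	{
		int cpu = part_stat_lock();

		part_round_stats(cpu, &dm_disk(md)->part0);
		part_stat_unlock();
		dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
	}

	static int end_io_acct_sketch(struct mapped_device *md, int rw,
				      unsigned long duration)
	{
		int pending, cpu;

		cpu = part_stat_lock();
		part_round_stats(cpu, &dm_disk(md)->part0);
		part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
		part_stat_unlock();

		pending = atomic_dec_return(&md->pending);
		dm_disk(md)->part0.in_flight = pending;
		return !pending;
	}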
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6378066..327de03 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -381,10 +381,10 @@ static void start_io_acct(struct dm_io *io)
io->start_time = jiffies;
- cpu = disk_stat_lock();
- disk_round_stats(cpu, dm_disk(md));
- disk_stat_unlock();
- dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+ cpu = part_stat_lock();
+ part_round_stats(cpu, &dm_disk(md)->part0);
+ part_stat_unlock();
+ dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}
static int end_io_acct(struct dm_io *io)
@@ -395,12 +395,13 @@ static int end_io_acct(struct dm_io *io)
int pending, cpu;
int rw = bio_data_dir(bio);
- cpu = disk_stat_lock();
- disk_round_stats(cpu, dm_disk(md));
- disk_stat_add(cpu, dm_disk(md), ticks[rw], duration);
- disk_stat_unlock();
+ cpu = part_stat_lock();
+ part_round_stats(cpu, &dm_disk(md)->part0);
+ part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
+ part_stat_unlock();
- dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+ dm_disk(md)->part0.in_flight = pending =
+ atomic_dec_return(&md->pending);
return !pending;
}
@@ -899,10 +900,10 @@ static int dm_request(struct request_queue *q, struct bio *bio)
down_read(&md->io_lock);
- cpu = disk_stat_lock();
- disk_stat_inc(cpu, dm_disk(md), ios[rw]);
- disk_stat_add(cpu, dm_disk(md), sectors[rw], bio_sectors(bio));
- disk_stat_unlock();
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
+ part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
+ part_stat_unlock();
/*
* If we're suspended we have to queue
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 00cbc8e..c80ea90 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -325,10 +325,11 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
return 0;
}
- cpu = disk_stat_lock();
- disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
- disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
- disk_stat_unlock();
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
tmp_dev = which_dev(mddev, bio->bi_sector);
block = bio->bi_sector >> 1;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2bd9cf4..0a3a4bd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5546,8 +5546,8 @@ static int is_mddev_idle(mddev_t *mddev)
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
- curr_events = disk_stat_read(disk, sectors[0]) +
- disk_stat_read(disk, sectors[1]) -
+ curr_events = part_stat_read(&disk->part0, sectors[0]) +
+ part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 182f5a9..8bb8794 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -159,10 +159,11 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
mp_bh->master_bio = bio;
mp_bh->mddev = mddev;
- cpu = disk_stat_lock();
- disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
- disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
- disk_stat_unlock();
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e26030f..f52f442 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -406,10 +406,11 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
return 0;
}
- cpu = disk_stat_lock();
- disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
- disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
- disk_stat_unlock();
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
chunk_size = mddev->chunk_size >> 10;
chunk_sects = mddev->chunk_size >> 9;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index babb130..b976442 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -804,10 +804,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
bitmap = mddev->bitmap;
- cpu = disk_stat_lock();
- disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
- disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
- disk_stat_unlock();
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
/*
* make_request() can abort the operation when READA is being
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5ec80da..5f99013 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -844,10 +844,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
wait_barrier(conf);
- cpu = disk_stat_lock();
- disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
- disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio));
- disk_stat_unlock();
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5899f21..ae16794 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3396,10 +3396,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
md_write_start(mddev, bi);
- cpu = disk_stat_lock();
- disk_stat_inc(cpu, mddev->gendisk, ios[rw]);
- disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bi));
- disk_stat_unlock();
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bi));
+ part_stat_unlock();
if (rw == READ &&
mddev->reshape_position == MaxSector &&