author | Tejun Heo <tj@kernel.org> | 2008-08-25 19:47:21 +0900 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-10-09 08:56:06 +0200 |
commit | c9959059161ddd7bf4670cf47367033d6b2f79c4 (patch) |
tree | 6454db55f8e34361fe472358e10e0c5cfac1e366 /block/blk-core.c |
parent | e71bf0d0ee89e51b92776391c5634938236977d5 (diff) |
block: fix diskstats access
There are two variants of the stat functions: ones prefixed with double
underbars, which don't care about preemption, and ones without, which
disable preemption before manipulating per-cpu counters. It's unclear
whether the underbarred ones assume that preemption is already disabled
on entry, as some callers don't disable it.
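For context, the split boils down to whether the caller or the helper disables preemption around the per-cpu update. A minimal sketch of that pre-patch pattern follows; the names and the dkstats layout here are illustrative, not the exact genhd.h definitions:

```c
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/*
 * Illustrative only -- not the exact pre-patch genhd.h macros.  The
 * double-underbar form assumes preemption is already disabled so that
 * smp_processor_id() stays stable; the plain form disables it itself.
 */
#define __example_disk_stat_inc(disk, field) \
	(per_cpu_ptr((disk)->dkstats, smp_processor_id())->field++)

#define example_disk_stat_inc(disk, field) \
	do { \
		preempt_disable(); \
		__example_disk_stat_inc(disk, field); \
		preempt_enable(); \
	} while (0)
```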
This patch unifies diskstats access by implementing disk_stat_lock()
and disk_stat_unlock(), which take care of both RCU (for partition
access) and preemption (for per-cpu counter access). All diskstats
access should be enclosed between the two functions. With that in
place, the variants that disable preemption themselves are no longer
needed; they're removed, and the double-underbar ones are renamed to
drop the underbars. Because an extra argument is added, there's no
danger of an old call site being left unconverted.
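The helpers themselves are not part of the blk-core.c diff below; a plausible composition, consistent with the description above but not taken from this diff, would be:

```c
#include <linux/rcupdate.h>
#include <linux/smp.h>

/*
 * Sketch only: pairs RCU protection for the partition lookup with
 * get_cpu()/put_cpu() for the per-cpu counters.  The authoritative
 * definitions live in the genhd.h side of the patch, not shown here.
 */
#define example_disk_stat_lock()	({ rcu_read_lock(); get_cpu(); })
#define example_disk_stat_unlock()	do { put_cpu(); rcu_read_unlock(); } while (0)
```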
disk_stat_lock() uses get_cpu() and returns the CPU index, and all
diskstat functions that access per-cpu counters now take a @cpu
argument, which helps RT.
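Every accounting site then follows the same bracketed pattern; the diff below shows the real conversions, and the following condensed illustration (the function name is hypothetical) distills it:

```c
#include <linux/genhd.h>

/*
 * Hypothetical caller, condensing the pattern used throughout the diff
 * below: take the cpu index once, pass it to every per-cpu stat helper,
 * then drop preemption and RCU in one place.
 */
static void example_account_merge(struct gendisk *disk, sector_t sector, int rw)
{
	struct hd_struct *part;
	int cpu;

	cpu = disk_stat_lock();		/* rcu_read_lock() + get_cpu() */
	part = disk_map_sector_rcu(disk, sector);
	all_stat_inc(cpu, disk, part, merges[rw], sector);
	disk_stat_unlock();		/* put_cpu() + rcu_read_unlock() */
}
```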
This change adds RCU or preemption operations in some places, but it
also collapses several preemption operations into one elsewhere.
Overall, the performance difference should be negligible, as all the
involved operations are very lightweight per-cpu ones.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 52 |
1 file changed, 27 insertions, 25 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index d6128d9a..e0a5ee3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -56,25 +56,26 @@ static void drive_stat_acct(struct request *rq, int new_io)
 {
 	struct hd_struct *part;
 	int rw = rq_data_dir(rq);
+	int cpu;
 
 	if (!blk_fs_request(rq) || !rq->rq_disk)
 		return;
 
-	rcu_read_lock();
-
+	cpu = disk_stat_lock();
 	part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
+
 	if (!new_io)
-		__all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
+		all_stat_inc(cpu, rq->rq_disk, part, merges[rw], rq->sector);
 	else {
-		disk_round_stats(rq->rq_disk);
+		disk_round_stats(cpu, rq->rq_disk);
 		rq->rq_disk->in_flight++;
 		if (part) {
-			part_round_stats(part);
+			part_round_stats(cpu, part);
 			part->in_flight++;
 		}
 	}
 
-	rcu_read_unlock();
+	disk_stat_unlock();
 }
 
 void blk_queue_congestion_threshold(struct request_queue *q)
@@ -997,7 +998,7 @@ static inline void add_request(struct request_queue *q, struct request *req)
  * /proc/diskstats. This accounts immediately for all queue usage up to
  * the current jiffies and restarts the counters again.
  */
-void disk_round_stats(struct gendisk *disk)
+void disk_round_stats(int cpu, struct gendisk *disk)
 {
 	unsigned long now = jiffies;
 
@@ -1005,15 +1006,15 @@ void disk_round_stats(struct gendisk *disk)
 		return;
 
 	if (disk->in_flight) {
-		__disk_stat_add(disk, time_in_queue,
-				disk->in_flight * (now - disk->stamp));
-		__disk_stat_add(disk, io_ticks, (now - disk->stamp));
+		disk_stat_add(cpu, disk, time_in_queue,
+			      disk->in_flight * (now - disk->stamp));
+		disk_stat_add(cpu, disk, io_ticks, (now - disk->stamp));
 	}
 	disk->stamp = now;
 }
 EXPORT_SYMBOL_GPL(disk_round_stats);
 
-void part_round_stats(struct hd_struct *part)
+void part_round_stats(int cpu, struct hd_struct *part)
 {
 	unsigned long now = jiffies;
 
@@ -1021,9 +1022,9 @@ void part_round_stats(struct hd_struct *part)
 		return;
 
 	if (part->in_flight) {
-		__part_stat_add(part, time_in_queue,
-				part->in_flight * (now - part->stamp));
-		__part_stat_add(part, io_ticks, (now - part->stamp));
+		part_stat_add(cpu, part, time_in_queue,
+			      part->in_flight * (now - part->stamp));
+		part_stat_add(cpu, part, io_ticks, (now - part->stamp));
 	}
 	part->stamp = now;
 }
@@ -1563,12 +1564,13 @@ static int __end_that_request_first(struct request *req, int error,
 	if (blk_fs_request(req) && req->rq_disk) {
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part;
+		int cpu;
 
-		rcu_read_lock();
+		cpu = disk_stat_lock();
 		part = disk_map_sector_rcu(req->rq_disk, req->sector);
-		all_stat_add(req->rq_disk, part, sectors[rw],
-			     nr_bytes >> 9, req->sector);
-		rcu_read_unlock();
+		all_stat_add(cpu, req->rq_disk, part, sectors[rw],
+			     nr_bytes >> 9, req->sector);
+		disk_stat_unlock();
 	}
 
 	total_bytes = bio_nbytes = 0;
@@ -1753,21 +1755,21 @@ static void end_that_request_last(struct request *req, int error)
 		unsigned long duration = jiffies - req->start_time;
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part;
+		int cpu;
 
-		rcu_read_lock();
-
+		cpu = disk_stat_lock();
 		part = disk_map_sector_rcu(disk, req->sector);
 
-		__all_stat_inc(disk, part, ios[rw], req->sector);
-		__all_stat_add(disk, part, ticks[rw], duration, req->sector);
-		disk_round_stats(disk);
+		all_stat_inc(cpu, disk, part, ios[rw], req->sector);
+		all_stat_add(cpu, disk, part, ticks[rw], duration, req->sector);
+		disk_round_stats(cpu, disk);
 		disk->in_flight--;
 		if (part) {
-			part_round_stats(part);
+			part_round_stats(cpu, part);
 			part->in_flight--;
 		}
 
-		rcu_read_unlock();
+		disk_stat_unlock();
 	}
 
 	if (req->end_io)