author | Jens Axboe <axboe@suse.de> | 2005-06-27 10:55:12 +0200
---|---|---
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-27 14:33:29 -0700
commit | 22e2c507c301c3dbbcf91b4948b88f78842ee6c9 (patch) |
tree | 9a97c91d1362e69703aa286021daffb8a5456f4c /include |
parent | 020f46a39eb7b99a575b9f4d105fce2b142acdf1 (diff) |
[PATCH] Update cfq io scheduler to time sliced design
This updates the CFQ io scheduler to the new time-sliced design (CFQ v3).
It provides full process fairness while giving excellent aggregate system
throughput, even with many competing processes. It supports io priorities,
either inherited from the cpu nice value or set directly with the
ioprio_get/set syscalls; the latter closely mimic set/getpriority.
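As a rough userspace sketch of the new interface (illustrative only): the raw i386 syscall numbers 289/290 and the (class << IOPRIO_CLASS_SHIFT) | data encoding below are taken from the headers added by this patch, while the convention that who == 0 names the calling process is assumed by analogy with set/getpriority and is not visible in this diff.

/*
 * Illustrative sketch: put the calling process into the best-effort
 * class at priority level 2, then read the value back.
 * Assumptions: i386 syscall numbers from this patch (no glibc wrappers
 * existed yet); who == 0 selects the current process.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_ioprio_set
#define __NR_ioprio_set 289	/* i386 numbers from asm-i386/unistd.h below */
#define __NR_ioprio_get 290
#endif

#define IOPRIO_CLASS_SHIFT 13	/* from the new linux/ioprio.h below */
#define IOPRIO_CLASS_BE 2
#define IOPRIO_WHO_PROCESS 1

int main(void)
{
	int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;

	if (syscall(__NR_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
		perror("ioprio_set");

	prio = syscall(__NR_ioprio_get, IOPRIO_WHO_PROCESS, 0);
	printf("class %d, data %d\n", prio >> IOPRIO_CLASS_SHIFT,
	       prio & ((1 << IOPRIO_CLASS_SHIFT) - 1));
	return 0;
}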
This import is based on my latest from -mm.
Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-i386/unistd.h   |  4
-rw-r--r-- | include/asm-ia64/unistd.h   |  2
-rw-r--r-- | include/asm-ppc/unistd.h    |  4
-rw-r--r-- | include/asm-x86_64/unistd.h |  6
-rw-r--r-- | include/linux/bio.h         | 14
-rw-r--r-- | include/linux/blkdev.h      | 25
-rw-r--r-- | include/linux/elevator.h    |  8
-rw-r--r-- | include/linux/fs.h          | 19
-rw-r--r-- | include/linux/init_task.h   |  2
-rw-r--r-- | include/linux/ioprio.h      | 87
-rw-r--r-- | include/linux/sched.h       |  6
-rw-r--r-- | include/linux/writeback.h   |  6
12 files changed, 165 insertions, 18 deletions
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 176413f..e25e4c7 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -294,8 +294,10 @@
 #define __NR_add_key 286
 #define __NR_request_key 287
 #define __NR_keyctl 288
+#define __NR_ioprio_set 289
+#define __NR_ioprio_get 290

-#define NR_syscalls 289
+#define NR_syscalls 291

 /*
  * user-visible error numbers are in the range -1 - -128: see
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index f7f43ec..517f164 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -263,6 +263,8 @@
 #define __NR_add_key 1271
 #define __NR_request_key 1272
 #define __NR_keyctl 1273
+#define __NR_ioprio_set 1274
+#define __NR_ioprio_get 1275
 #define __NR_set_zone_reclaim 1276

 #ifdef __KERNEL__
diff --git a/include/asm-ppc/unistd.h b/include/asm-ppc/unistd.h
index cc51e5c..e8b7922 100644
--- a/include/asm-ppc/unistd.h
+++ b/include/asm-ppc/unistd.h
@@ -277,8 +277,10 @@
 #define __NR_request_key 270
 #define __NR_keyctl 271
 #define __NR_waitid 272
+#define __NR_ioprio_set 273
+#define __NR_ioprio_get 274

-#define __NR_syscalls 273
+#define __NR_syscalls 275

 #define __NR(n) #n
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index d767adc..6560439 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -561,8 +561,12 @@ __SYSCALL(__NR_add_key, sys_add_key)
 __SYSCALL(__NR_request_key, sys_request_key)
 #define __NR_keyctl 250
 __SYSCALL(__NR_keyctl, sys_keyctl)
+#define __NR_ioprio_set 251
+__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
+#define __NR_ioprio_get 252
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get)

-#define __NR_syscall_max __NR_keyctl
+#define __NR_syscall_max __NR_ioprio_get

 #ifndef __NO_STUBS
 /* user-visible error numbers are in the range -1 - -4095 */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0380227..36ef29f 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -22,6 +22,7 @@
 #include <linux/highmem.h>
 #include <linux/mempool.h>
+#include <linux/ioprio.h>

 /* Platforms may set this to teach the BIO layer about IOMMU hardware. */
 #include <asm/io.h>
@@ -150,6 +151,19 @@ struct bio {
 #define BIO_RW_SYNC 4

 /*
+ * upper 16 bits of bi_rw define the io priority of this bio
+ */
+#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS)
+#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT)
+#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio))
+
+#define bio_set_prio(bio, prio) do { \
+	WARN_ON(prio >= (1 << IOPRIO_BITS)); \
+	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \
+	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \
+} while (0)
+
+/*
  * various member access, note that bio_data should of course not be used
  * on highmem page vectors
  */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b54a034..21a8674 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -54,16 +54,23 @@ struct as_io_context {
 struct cfq_queue;

 struct cfq_io_context {
-	void (*dtor)(struct cfq_io_context *);
-	void (*exit)(struct cfq_io_context *);
-
-	struct io_context *ioc;
-
 	/*
 	 * circular list of cfq_io_contexts belonging to a process io context
 	 */
 	struct list_head list;
 	struct cfq_queue *cfqq;
+	void *key;
+
+	struct io_context *ioc;
+
+	unsigned long last_end_request;
+	unsigned long last_queue;
+	unsigned long ttime_total;
+	unsigned long ttime_samples;
+	unsigned long ttime_mean;
+
+	void (*dtor)(struct cfq_io_context *);
+	void (*exit)(struct cfq_io_context *);
 };

 /*
@@ -73,7 +80,9 @@ struct cfq_io_context {
  */
 struct io_context {
 	atomic_t refcount;
-	pid_t pid;
+	struct task_struct *task;
+
+	int (*set_ioprio)(struct io_context *, unsigned int);

 	/*
 	 * For request batching
@@ -81,8 +90,6 @@ struct io_context {
 	unsigned long last_waited; /* Time last woken after wait for request */
 	int nr_batch_requests; /* Number of requests left in the batch */

-	spinlock_t lock;
-
 	struct as_io_context *aic;
 	struct cfq_io_context *cic;
 };
@@ -134,6 +141,8 @@ struct request {
 	void *elevator_private;

+	unsigned short ioprio;
+
 	int rq_status; /* should split this into a few status bits */
 	struct gendisk *rq_disk;
 	int errors;
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ee54f81..ea6bbc2 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -16,9 +16,9 @@ typedef void (elevator_remove_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *);
 typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
 typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_may_queue_fn) (request_queue_t *, int);
+typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *);

-typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int);
+typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int);
 typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
@@ -96,9 +96,9 @@ extern struct request *elv_former_request(request_queue_t *, struct request *);
 extern struct request *elv_latter_request(request_queue_t *, struct request *);
 extern int elv_register_queue(request_queue_t *q);
 extern void elv_unregister_queue(request_queue_t *q);
-extern int elv_may_queue(request_queue_t *, int);
+extern int elv_may_queue(request_queue_t *, int, struct bio *);
 extern void elv_completed_request(request_queue_t *, struct request *);
-extern int elv_set_request(request_queue_t *, struct request *, int);
+extern int elv_set_request(request_queue_t *, struct request *, struct bio *, int);
 extern void elv_put_request(request_queue_t *, struct request *);

 /*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3ae8e37..047bde3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -213,6 +213,7 @@ extern int dir_notify_enable;
 #include <linux/radix-tree.h>
 #include <linux/prio_tree.h>
 #include <linux/init.h>
+#include <linux/sched.h>

 #include <asm/atomic.h>
 #include <asm/semaphore.h>
@@ -822,16 +823,34 @@ enum {
 #define vfs_check_frozen(sb, level) \
 	wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))

+static inline void get_fs_excl(void)
+{
+	atomic_inc(&current->fs_excl);
+}
+
+static inline void put_fs_excl(void)
+{
+	atomic_dec(&current->fs_excl);
+}
+
+static inline int has_fs_excl(void)
+{
+	return atomic_read(&current->fs_excl);
+}
+
+
 /*
  * Superblock locking.
  */
 static inline void lock_super(struct super_block * sb)
 {
+	get_fs_excl();
 	down(&sb->s_lock);
 }

 static inline void unlock_super(struct super_block * sb)
 {
+	put_fs_excl();
 	up(&sb->s_lock);
 }
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 03206a4..c727c195 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -81,6 +81,7 @@ extern struct group_info init_groups;
 	.mm = NULL, \
 	.active_mm = &init_mm, \
 	.run_list = LIST_HEAD_INIT(tsk.run_list), \
+	.ioprio = 0, \
 	.time_slice = HZ, \
 	.tasks = LIST_HEAD_INIT(tsk.tasks), \
 	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
@@ -110,6 +111,7 @@ extern struct group_info init_groups;
 	.proc_lock = SPIN_LOCK_UNLOCKED, \
 	.journal_info = NULL, \
 	.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+	.fs_excl = ATOMIC_INIT(0), \
 }
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
new file mode 100644
index 0000000..7811300
--- /dev/null
+++ b/include/linux/ioprio.h
@@ -0,0 +1,87 @@
+#ifndef IOPRIO_H
+#define IOPRIO_H
+
+#include <linux/sched.h>
+
+/*
+ * Gives us 8 prio classes with 13-bits of data for each class
+ */
+#define IOPRIO_BITS (16)
+#define IOPRIO_CLASS_SHIFT (13)
+#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
+
+#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
+#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
+
+#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
+
+/*
+ * These are the io priority groups as implemented by CFQ. RT is the realtime
+ * class, it always gets premium service. BE is the best-effort scheduling
+ * class, the default for any process. IDLE is the idle scheduling class, it
+ * is only served when no one else is using the disk.
+ */
+enum {
+	IOPRIO_CLASS_NONE,
+	IOPRIO_CLASS_RT,
+	IOPRIO_CLASS_BE,
+	IOPRIO_CLASS_IDLE,
+};
+
+/*
+ * 8 best effort priority levels are supported
+ */
+#define IOPRIO_BE_NR (8)
+
+asmlinkage int sys_ioprio_set(int, int, int);
+asmlinkage int sys_ioprio_get(int, int);
+
+enum {
+	IOPRIO_WHO_PROCESS = 1,
+	IOPRIO_WHO_PGRP,
+	IOPRIO_WHO_USER,
+};
+
+/*
+ * if process has set io priority explicitly, use that. if not, convert
+ * the cpu scheduler nice value to an io priority
+ */
+#define IOPRIO_NORM (4)
+static inline int task_ioprio(struct task_struct *task)
+{
+	WARN_ON(!ioprio_valid(task->ioprio));
+	return IOPRIO_PRIO_DATA(task->ioprio);
+}
+
+static inline int task_nice_ioprio(struct task_struct *task)
+{
+	return (task_nice(task) + 20) / 5;
+}
+
+/*
+ * For inheritance, return the highest of the two given priorities
+ */
+static inline int ioprio_best(unsigned short aprio, unsigned short bprio)
+{
+	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
+	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+
+	if (!ioprio_valid(aprio))
+		return bprio;
+	if (!ioprio_valid(bprio))
+		return aprio;
+
+	if (aclass == IOPRIO_CLASS_NONE)
+		aclass = IOPRIO_CLASS_BE;
+	if (bclass == IOPRIO_CLASS_NONE)
+		bclass = IOPRIO_CLASS_BE;
+
+	if (aclass == bclass)
+		return min(aprio, bprio);
+	if (aclass > bclass)
+		return bprio;
+	else
+		return aprio;
+}
+
+#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9530b19..ff48815 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -608,6 +608,8 @@ struct task_struct {
 	struct list_head run_list;
 	prio_array_t *array;

+	unsigned short ioprio;
+
 	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time; /* sched_clock time spent running */
@@ -763,6 +765,7 @@ struct task_struct {
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
 #endif
+	atomic_t fs_excl; /* holding fs exclusive resources */
 };

 static inline pid_t process_group(struct task_struct *tsk)
@@ -1112,7 +1115,8 @@ extern void unhash_process(struct task_struct *p);

 /*
  * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
- * subscriptions and synchronises with wait4(). Also used in procfs.
+ * subscriptions and synchronises with wait4(). Also used in procfs. Also
+ * pins the final release of task.io_context.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 1262cb4..d5c3fe1 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -14,11 +14,13 @@ extern struct list_head inode_unused;
  * Yes, writeback.h requires sched.h
  * No, sched.h is not included from here.
  */
-static inline int current_is_pdflush(void)
+static inline int task_is_pdflush(struct task_struct *task)
 {
-	return current->flags & PF_FLUSHER;
+	return task->flags & PF_FLUSHER;
 }

+#define current_is_pdflush() task_is_pdflush(current)
+
 /*
  * fs/fs-writeback.c
  */