author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-11-26 19:42:08 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-11-26 19:42:08 -0800
commit     0685ab4fb8e527639d9867df60d49dccba85d842 (patch)
tree       7d7db22548b95da9555afec2889a9e8efa2053d9 /kernel
parent     ff1ea52fa317a5658b6415b25169c5e531f54876 (diff)
parent     f7b9329e556a8bdb9e07292cddbbe484c7a2b8c5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
sched: bump version of kernel/sched_debug.c
sched: fix minimum granularity tunings
sched: fix RLIMIT_CPU comment
sched: fix kernel/acct.c comment
sched: fix prev_stime calculation
sched: don't forget to unlock uids_mutex on error paths
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c        |  2 +-
-rw-r--r--  kernel/sched_debug.c |  2 +-
-rw-r--r--  kernel/sched_fair.c  | 12 ++++++------
-rw-r--r--  kernel/user.c        |  7 ++++++-
4 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index fce53d8..cf19547 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -413,7 +413,7 @@ static u32 encode_float(u64 value)
  * The acct_process() call is the workhorse of the process
  * accounting system. The struct acct is built here and then written
  * into the accounting file. This function should only be called from
- * do_exit().
+ * do_exit() or when switching to a different output file.
  */
 
 /*
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ca198a7..5d0d623 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -199,7 +199,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Sched Debug Version: v0.06-v22, %s %.*s\n",
+	SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ee00da2..2f16e15 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,7 +22,7 @@
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
- * (default: 20ms * ilog(ncpus), units: nanoseconds)
+ * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * NOTE: this latency value is not the same as the concept of
  * 'timeslice length' - timeslices in CFS are of variable length
@@ -36,14 +36,14 @@ unsigned int sysctl_sched_latency = 20000000ULL;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
- * (default: 1 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 1000000ULL;
+unsigned int sysctl_sched_min_granularity = 4000000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-static unsigned int sched_nr_latency = 20;
+static unsigned int sched_nr_latency = 5;
 
 /*
  * After fork, child runs first. (default) If set to 0 then
@@ -61,7 +61,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
 
 /*
  * SCHED_BATCH wake-up granularity.
- * (default: 10 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
@@ -71,7 +71,7 @@ unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
 
 /*
  * SCHED_OTHER wake-up granularity.
- * (default: 10 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
diff --git a/kernel/user.c b/kernel/user.c
index 0f3aa02..8320a87 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -337,8 +337,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 		struct user_struct *new;
 
 		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
-		if (!new)
+		if (!new) {
+			uids_mutex_unlock();
 			return NULL;
+		}
+
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
 		atomic_set(&new->processes, 0);
@@ -355,6 +358,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 
 		if (alloc_uid_keyring(new, current) < 0) {
 			kmem_cache_free(uid_cachep, new);
+			uids_mutex_unlock();
 			return NULL;
 		}
 
@@ -362,6 +366,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 			key_put(new->uid_keyring);
 			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
+			uids_mutex_unlock();
 			return NULL;
 		}
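As a quick sanity check on the granularity tuning above: with the targeted latency left at 20 ms and the minimum granularity raised from 1 ms to 4 ms, their ratio drops from 20 to 5, which is why the patch also changes sched_nr_latency from 20 to 5, and the updated comments note that the effective defaults scale with (1 + ilog(ncpus)). The standalone C sketch below only reproduces that arithmetic; the ilog2_u() helper and the ncpus value are assumptions for the example, not kernel code.

#include <stdio.h>

/* Minimal stand-in for the kernel's ilog2(), used only by this sketch. */
static unsigned int ilog2_u(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	/* Base defaults from kernel/sched_fair.c after this merge, in ns. */
	unsigned long long sysctl_sched_latency         = 20000000ULL; /* 20 ms */
	unsigned long long sysctl_sched_min_granularity =  4000000ULL; /*  4 ms */

	/* Per the updated comments the effective values scale with
	 * (1 + ilog(ncpus)); ncpus = 4 is an arbitrary example here. */
	unsigned int ncpus  = 4;
	unsigned int factor = 1 + ilog2_u(ncpus);

	printf("effective latency         = %llu ns\n",
	       sysctl_sched_latency * factor);
	printf("effective min granularity = %llu ns\n",
	       sysctl_sched_min_granularity * factor);

	/* sched_nr_latency is kept at latency / min_granularity:
	 * 20 ms / 4 ms = 5, matching the new static default. */
	printf("sched_nr_latency          = %llu\n",
	       sysctl_sched_latency / sysctl_sched_min_granularity);
	return 0;
}

For the assumed 4-CPU case this prints 60000000 ns, 12000000 ns and 5; with the old 1 ms minimum granularity the ratio would have stayed at 20.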
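The kernel/user.c hunks all fix the same leak: alloc_uid() is holding uids_mutex when it reaches these error paths and previously returned NULL without dropping it. The patch unlocks explicitly before each early return; an equivalent style often used for this kind of fix is a single unlock site reached by goto. The sketch below illustrates that pattern in plain C with pthreads -- alloc_resource(), table_lock and register_resource() are invented names for the example, not code from the patch.

#include <pthread.h>
#include <stdlib.h>

struct resource {
	int id;
};

/* Hypothetical registration step that can fail, standing in for the
 * keyring setup that alloc_uid() performs while the mutex is held. */
static int register_resource(struct resource *r)
{
	return r->id >= 0 ? 0 : -1;
}

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct resource *alloc_resource(int id)
{
	struct resource *new;

	pthread_mutex_lock(&table_lock);

	new = malloc(sizeof(*new));
	if (!new)
		goto out_unlock;	/* lock is dropped at the single exit */

	new->id = id;
	if (register_resource(new) < 0) {
		free(new);
		new = NULL;
		goto out_unlock;	/* same exit on every failure path */
	}

out_unlock:
	pthread_mutex_unlock(&table_lock);
	return new;
}

Whether to unlock at each return, as the patch does, or to funnel every exit through one label is a style choice; the invariant both enforce is that no path leaves the function with the mutex still held.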