author	Mauro Carvalho Chehab <mchehab@infradead.org>	2006-01-31 18:49:28 -0200
committer	Mauro Carvalho Chehab <mchehab@infradead.org>	2006-01-31 18:49:28 -0200
commit	638e174688f58200d0deb7435093435e7d737b09 (patch)
tree	a2cd32dbb41daf0a5d4e69eff1805fd5574c292a /kernel
parent	8d58d773b745950ac912e028b3c81f4902fbf91d (diff)
parent	d195ea4b1456192abe780fd773778cbe9f6d77ea (diff)
Merge branch 'origin'
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcutorture.c	10
-rw-r--r--	kernel/sched.c		6
-rw-r--r--	kernel/time.c		2
-rw-r--r--	kernel/user.c		25
4 files changed, 26 insertions(+), 17 deletions(-)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 7732199..7712912 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -114,16 +114,16 @@ rcu_torture_alloc(void)
 {
 	struct list_head *p;
 
-	spin_lock(&rcu_torture_lock);
+	spin_lock_bh(&rcu_torture_lock);
 	if (list_empty(&rcu_torture_freelist)) {
 		atomic_inc(&n_rcu_torture_alloc_fail);
-		spin_unlock(&rcu_torture_lock);
+		spin_unlock_bh(&rcu_torture_lock);
 		return NULL;
 	}
 	atomic_inc(&n_rcu_torture_alloc);
 	p = rcu_torture_freelist.next;
 	list_del_init(p);
-	spin_unlock(&rcu_torture_lock);
+	spin_unlock_bh(&rcu_torture_lock);
 	return container_of(p, struct rcu_torture, rtort_free);
 }
 
@@ -134,9 +134,9 @@ static void
 rcu_torture_free(struct rcu_torture *p)
 {
 	atomic_inc(&n_rcu_torture_free);
-	spin_lock(&rcu_torture_lock);
+	spin_lock_bh(&rcu_torture_lock);
 	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
-	spin_unlock(&rcu_torture_lock);
+	spin_unlock_bh(&rcu_torture_lock);
 }
 
 static void
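Context for the rcutorture change: rcu_torture_lock is now also taken from softirq context (the RCU callback path), so every process-context acquisition must use the _bh variants, otherwise a softirq arriving on the same CPU could spin forever on a lock that CPU already holds. A minimal sketch of the pattern (hypothetical demo_* names, kernel-style C, builds only inside a kernel tree):

    #include <linux/spinlock.h>
    #include <linux/list.h>

    /* Lock shared between process context and softirq context. */
    static DEFINE_SPINLOCK(demo_lock);
    static LIST_HEAD(demo_freelist);

    /* Process-context path: spin_lock_bh() disables bottom halves for
     * the critical section, so a softirq cannot preempt us on this CPU
     * and deadlock trying to take demo_lock. */
    static struct list_head *demo_alloc(void)
    {
    	struct list_head *p = NULL;

    	spin_lock_bh(&demo_lock);
    	if (!list_empty(&demo_freelist)) {
    		p = demo_freelist.next;
    		list_del_init(p);
    	}
    	spin_unlock_bh(&demo_lock);
    	return p;
    }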
diff --git a/kernel/sched.c b/kernel/sched.c
index 3ee2ae4..ec7fd9c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5141,7 +5141,7 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span
 #define SEARCH_SCOPE		2
 #define MIN_CACHE_SIZE		(64*1024U)
 #define DEFAULT_CACHE_SIZE	(5*1024*1024U)
-#define ITERATIONS		2
+#define ITERATIONS		1
 #define SIZE_THRESH		130
 #define COST_THRESH		130
 
@@ -5480,9 +5480,9 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
 			break;
 		}
 		/*
-		 * Increase the cachesize in 5% steps:
+		 * Increase the cachesize in 10% steps:
 		 */
-		size = size * 20 / 19;
+		size = size * 10 / 9;
 	}
 
 	if (migration_debug)
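The two sched.c tweaks cut boot-time migration-cost calibration roughly in half twice over: one iteration instead of two, and ~10% cache-size steps instead of ~5%, so the sweep from MIN_CACHE_SIZE to DEFAULT_CACHE_SIZE needs about half as many probes. A userspace sketch (plain C, hypothetical, mirroring the loop's integer arithmetic) that counts the probes each step factor produces:

    #include <stdio.h>

    int main(void)
    {
    	const unsigned int min = 64 * 1024U;        /* MIN_CACHE_SIZE */
    	const unsigned int max = 5 * 1024 * 1024U;  /* DEFAULT_CACHE_SIZE */
    	unsigned int size;
    	int old_steps = 0, new_steps = 0;

    	for (size = min; size < max; size = size * 20 / 19)
    		old_steps++;                        /* ~5% growth per probe */
    	for (size = min; size < max; size = size * 10 / 9)
    		new_steps++;                        /* ~10% growth per probe */

    	printf("old: %d probes, new: %d probes\n", old_steps, new_steps);
    	return 0;
    }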
diff --git a/kernel/time.c b/kernel/time.c
index 7477b1d..1f23e68 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -155,7 +155,7 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
 	static int firsttime = 1;
 	int error = 0;
 
-	if (!timespec_valid(tv))
+	if (tv && !timespec_valid(tv))
 		return -EINVAL;
 
 	error = security_settime(tv, tz);
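The time.c fix matters because do_sys_settimeofday() accepts tv == NULL when the caller only wants to change the timezone; the unconditional timespec_valid(tv) check broke that case. The guard pattern as a standalone C sketch (ts_valid is a hypothetical stand-in for the kernel's timespec_valid()):

    #include <stdio.h>
    #include <time.h>

    /* Hypothetical stand-in for the kernel's timespec_valid(). */
    static int ts_valid(const struct timespec *ts)
    {
    	return ts->tv_nsec >= 0 && ts->tv_nsec < 1000000000L;
    }

    /* tv may legitimately be NULL (timezone-only update): validate it
     * only when the caller actually supplied a time value. */
    static int settime_check(const struct timespec *tv)
    {
    	if (tv && !ts_valid(tv))
    		return -1;	/* -EINVAL in the kernel */
    	return 0;
    }

    int main(void)
    {
    	struct timespec bad = { .tv_sec = 0, .tv_nsec = -1 };

    	printf("NULL tv: %d, bad tv: %d\n",
    	       settime_check(NULL), settime_check(&bad));
    	return 0;
    }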
diff --git a/kernel/user.c b/kernel/user.c
index 89e562f..d1ae234 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/key.h>
+#include <linux/interrupt.h>
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
@@ -27,6 +28,12 @@
 static kmem_cache_t *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ */
 static DEFINE_SPINLOCK(uidhash_lock);
 
 struct user_struct root_user = {
@@ -83,14 +90,15 @@ struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	ret = uid_hash_find(uid, uidhashentry(uid));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
+	local_bh_disable();
 	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
 		key_put(up->uid_keyring);
@@ -98,6 +106,7 @@ void free_uid(struct user_struct *up)
 		kmem_cache_free(uid_cachep, up);
 		spin_unlock(&uidhash_lock);
 	}
+	local_bh_enable();
 }
 
 struct user_struct * alloc_uid(uid_t uid)
@@ -105,9 +114,9 @@ struct user_struct * alloc_uid(uid_t uid)
 	struct list_head *hashent = uidhashentry(uid);
 	struct user_struct *up;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 
 	if (!up) {
 		struct user_struct *new;
@@ -137,7 +146,7 @@ struct user_struct * alloc_uid(uid_t uid)
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock(&uidhash_lock);
+		spin_lock_bh(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
 			key_put(new->uid_keyring);
@@ -147,7 +156,7 @@ struct user_struct * alloc_uid(uid_t uid)
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock(&uidhash_lock);
+		spin_unlock_bh(&uidhash_lock);
 	}
 
 	return up;
@@ -183,9 +192,9 @@ static int __init uid_cache_init(void)
 		INIT_LIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(0));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 
 	return 0;
 }
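The free_uid() change shows the one place where spin_lock_bh() cannot be used directly: atomic_dec_and_lock() takes the plain spin_lock() internally, so bottom halves are disabled by hand around the whole sequence to keep uidhash_lock softirq-safe. A kernel-style sketch of the same pattern (hypothetical struct demo_obj, builds only inside a kernel tree):

    #include <linux/atomic.h>
    #include <linux/interrupt.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct demo_obj {
    	atomic_t		refcount;
    	struct list_head	hash_entry;
    };

    static DEFINE_SPINLOCK(demo_hash_lock);	/* also taken from softirq */

    static void demo_put(struct demo_obj *obj)
    {
    	/* atomic_dec_and_lock() uses the plain spin_lock(), so disable
    	 * bottom halves manually for the lock to stay softirq-safe. */
    	local_bh_disable();
    	if (obj && atomic_dec_and_lock(&obj->refcount, &demo_hash_lock)) {
    		/* Last reference: unhash under the lock, then free. */
    		list_del(&obj->hash_entry);
    		spin_unlock(&demo_hash_lock);
    		kfree(obj);
    	}
    	local_bh_enable();
    }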