Diffstat (limited to 'kernel/pid.c')
-rw-r--r--  kernel/pid.c | 49
1 file changed, 34 insertions(+), 15 deletions(-)
diff --git a/kernel/pid.c b/kernel/pid.c
index c7346fb..8132a6d 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -29,7 +29,8 @@
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
-#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
+#define pid_hashfn(nr, ns) \
+ hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;
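The old hash keyed buckets on the numeric pid alone; that is no longer enough once the same number can be live in several namespaces at the same time. The new two-argument pid_hashfn() folds the namespace pointer into the key. The sketch below illustrates the idea in plain userspace C: toy_hash_long(), the multiplicative constant and TOY_PIDHASH_SHIFT are all inventions of this sketch, not the kernel's hash_long() or table size; the only point is that pid 1 seen through two different namespace objects normally lands in two different buckets.

#include <stdio.h>

#define TOY_PIDHASH_SHIFT 4     /* assumed: 16 buckets, illustration only */

/* Stand-in for the kernel's hash_long(): a Knuth-style multiplicative
 * hash reduced to the low TOY_PIDHASH_SHIFT bits. */
static unsigned long toy_hash_long(unsigned long val, unsigned int bits)
{
        return (val * 2654435761UL) & ((1UL << bits) - 1);
}

/* Mirrors the new pid_hashfn(nr, ns): the namespace pointer is added
 * to the numeric pid before hashing. */
static unsigned long toy_pid_hashfn(unsigned long nr, const void *ns)
{
        return toy_hash_long(nr + (unsigned long)ns, TOY_PIDHASH_SHIFT);
}

int main(void)
{
        static const int init_ns, child_ns;     /* dummy namespace objects */

        /* pid 1 exists once per namespace; mixing in the namespace
         * pointer normally separates the two in the hash table. */
        printf("pid 1, init ns : bucket %lu\n", toy_pid_hashfn(1, &init_ns));
        printf("pid 1, child ns: bucket %lu\n", toy_pid_hashfn(1, &child_ns));
        return 0;
}

In the kernel the second argument is the struct pid_namespace pointer itself, which is unique per namespace and costs nothing extra to mix into the key.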
@@ -186,11 +187,13 @@ fastcall void put_pid(struct pid *pid)
if (!pid)
return;
- /* FIXME - this must be the namespace this pid lives in */
- ns = &init_pid_ns;
+ ns = pid->numbers[pid->level].ns;
if ((atomic_read(&pid->count) == 1) ||
- atomic_dec_and_test(&pid->count))
+ atomic_dec_and_test(&pid->count)) {
kmem_cache_free(ns->pid_cachep, pid);
+ if (ns != &init_pid_ns)
+ put_pid_ns(ns);
+ }
}
EXPORT_SYMBOL_GPL(put_pid);
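put_pid() no longer hardcodes &init_pid_ns: it reads the owning namespace out of the pid itself and, on the final reference drop, releases that namespace's reference as well. The numbers[] array and level field come from the struct pid / struct upid layout introduced elsewhere in this patch series; the sketch below shows only the shape this function relies on, with the reference count simplified and the hash chain, task lists and RCU head omitted.

/* Simplified sketch of the layout put_pid() depends on -- not the
 * kernel's full definitions. */
struct pid_namespace;                   /* opaque for this sketch */

struct upid {
        int nr;                         /* pid number valid at this depth */
        struct pid_namespace *ns;       /* namespace that owns the number */
};

struct pid {
        int count;                      /* kernel: atomic_t refcount */
        unsigned int level;             /* depth of the creating namespace */
        /* ... per-type task lists, hash chain, RCU head ... */
        struct upid numbers[1];         /* in practice one entry per level 0..level */
};

numbers[pid->level] names the namespace the pid was created in; its pid_cachep is the slab cache the object came from, so it is looked up for the kmem_cache_free() before the namespace reference is finally dropped.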
@@ -203,45 +206,61 @@ static void delayed_put_pid(struct rcu_head *rhp)
fastcall void free_pid(struct pid *pid)
{
/* We can be called with write_lock_irq(&tasklist_lock) held */
+ int i;
unsigned long flags;
spin_lock_irqsave(&pidmap_lock, flags);
hlist_del_rcu(&pid->pid_chain);
spin_unlock_irqrestore(&pidmap_lock, flags);
- free_pidmap(&init_pid_ns, pid->nr);
+ for (i = 0; i <= pid->level; i++)
+ free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
+
call_rcu(&pid->rcu, delayed_put_pid);
}
-struct pid *alloc_pid(void)
+struct pid *alloc_pid(struct pid_namespace *ns)
{
struct pid *pid;
enum pid_type type;
- int nr = -1;
- struct pid_namespace *ns;
+ int i, nr;
+ struct pid_namespace *tmp;
- ns = task_active_pid_ns(current);
pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
if (!pid)
goto out;
- nr = alloc_pidmap(ns);
- if (nr < 0)
- goto out_free;
+ tmp = ns;
+ for (i = ns->level; i >= 0; i--) {
+ nr = alloc_pidmap(tmp);
+ if (nr < 0)
+ goto out_free;
+ pid->numbers[i].nr = nr;
+ pid->numbers[i].ns = tmp;
+ tmp = tmp->parent;
+ }
+
+ if (ns != &init_pid_ns)
+ get_pid_ns(ns);
+
+ pid->level = ns->level;
+ pid->nr = pid->numbers[0].nr;
atomic_set(&pid->count, 1);
- pid->nr = nr;
for (type = 0; type < PIDTYPE_MAX; ++type)
INIT_HLIST_HEAD(&pid->tasks[type]);
spin_lock_irq(&pidmap_lock);
- hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
+ hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr, ns)]);
spin_unlock_irq(&pidmap_lock);
out:
return pid;
out_free:
+ for (i++; i <= ns->level; i++)
+ free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
+
kmem_cache_free(ns->pid_cachep, pid);
pid = NULL;
goto out;
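The core of the change is the per-level loop in alloc_pid(): a pid created in a namespace at depth N gets one number in that namespace and one in every ancestor, filled into numbers[] from index N down to 0, so numbers[0] always carries the number visible in the initial namespace. Below is a minimal userspace model of that loop; every name, structure and starting number is invented for the sketch, and the counter-based alloc_pidmap() merely stands in for the kernel's bitmap allocator.

#include <stdio.h>

#define TOY_MAX_LEVEL 2                 /* assumed depth limit, sketch only */

struct pid_namespace {
        struct pid_namespace *parent;
        int level;
        int next_nr;                    /* toy replacement for the pidmap */
        const char *name;
};

struct upid {
        int nr;
        struct pid_namespace *ns;
};

struct pid {
        int level;
        int nr;                         /* global number, == numbers[0].nr */
        struct upid numbers[TOY_MAX_LEVEL + 1];
};

static int alloc_pidmap(struct pid_namespace *ns)
{
        return ns->next_nr++;           /* kernel: bitmap search per namespace */
}

int main(void)
{
        struct pid_namespace init_ns  = { NULL,     0, 300, "init"  };
        struct pid_namespace child_ns = { &init_ns, 1,   1, "child" };
        struct pid_namespace *ns = &child_ns, *tmp = ns;
        struct pid pid;
        int i;

        /* Mirrors the new loop in alloc_pid(): one number per level,
         * walking from the requested namespace up to the root. */
        for (i = ns->level; i >= 0; i--) {
                pid.numbers[i].nr = alloc_pidmap(tmp);
                pid.numbers[i].ns = tmp;
                tmp = tmp->parent;
        }
        pid.level = ns->level;
        pid.nr = pid.numbers[0].nr;

        for (i = 0; i <= pid.level; i++)
                printf("level %d: pid %d in %s namespace\n",
                       i, pid.numbers[i].nr, pid.numbers[i].ns->name);
        return 0;
}

Running this prints one task with two numbers, e.g. "level 0: pid 300 in init namespace" and "level 1: pid 1 in child namespace". The out_free path in the patch is the unwinding mirror of the same loop: if alloc_pidmap() fails at level i, the entries already taken are exactly numbers[i+1] .. numbers[ns->level], which is what the for (i++; i <= ns->level; i++) loop returns to their pidmaps. free_pid() does the same for all levels once a fully constructed pid dies.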
@@ -253,7 +272,7 @@ struct pid * fastcall find_pid(int nr)
struct pid *pid;
hlist_for_each_entry_rcu(pid, elem,
- &pid_hash[pid_hashfn(nr)], pid_chain) {
+ &pid_hash[pid_hashfn(nr, &init_pid_ns)], pid_chain) {
if (pid->nr == nr)
return pid;
}
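find_pid() is adjusted only as far as the new hash signature requires: the caller still passes a bare number, the bucket is chosen with &init_pid_ns, and the match is against pid->nr, which alloc_pid() above sets to numbers[0].nr, the init-namespace number. So at this point in the series the helper still resolves only globally visible pids; a number that exists only inside a child namespace is not reachable through it (the rest of the series reworks lookup to take an explicit namespace). The toy below illustrates that behaviour, with a flat array standing in for the hash table and both structures invented for the sketch.

#include <stddef.h>
#include <stdio.h>

/* Invented, minimal model: each entry carries only the fields the
 * lookup actually touches. */
struct toy_pid {
        int nr;                 /* global (init-namespace) number */
        int child_nr;           /* the same pid's number inside a child namespace */
};

static struct toy_pid table[] = {
        { .nr = 300, .child_nr = 1 },   /* one task, two numbers */
};

/* Mirrors find_pid()'s comparison: only the global number is checked. */
static struct toy_pid *find_pid_toy(int nr)
{
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].nr == nr)
                        return &table[i];
        return NULL;
}

int main(void)
{
        printf("find_pid_toy(300) -> %s\n", find_pid_toy(300) ? "found" : "miss");
        printf("find_pid_toy(1)   -> %s\n", find_pid_toy(1)   ? "found" : "miss");
        return 0;
}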