Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c        |  2 +-
-rw-r--r--  kernel/fork.c        |  2 +-
-rw-r--r--  kernel/irq/manage.c  |  3 +++
-rw-r--r--  kernel/kprobes.c     | 20 +++++++++++++-------
-rw-r--r--  kernel/pid.c         |  2 +-
-rw-r--r--  kernel/profile.c     |  3 ++-
-rw-r--r--  kernel/sys.c         | 15 +++++++++++----
7 files changed, 32 insertions(+), 15 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 3540172..fec12eb 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -938,8 +938,8 @@ fastcall NORET_TYPE void do_exit(long code)
tsk->exit_code = code;
proc_exit_connector(tsk);
- exit_notify(tsk);
exit_task_namespaces(tsk);
+ exit_notify(tsk);
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
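
A hedged reading of this reorder: after exit_notify() the task may be reaped by its parent (or reap itself), so teardown that still uses task state presumably wants to happen before that point. The resulting order, with the other do_exit() steps elided, looks like this; it also pairs with the kernel/pid.c hunk further down (see the note there):

        tsk->exit_code = code;
        proc_exit_connector(tsk);
        exit_task_namespaces(tsk);      /* drop the nsproxy reference first ... */
        exit_notify(tsk);               /* ... then allow the task to be reaped */
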
diff --git a/kernel/fork.c b/kernel/fork.c
index fc723e5..d57118d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1313,7 +1313,7 @@ noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_re
return regs;
}
-struct task_struct * __devinit fork_idle(int cpu)
+struct task_struct * __cpuinit fork_idle(int cpu)
{
struct task_struct *task;
struct pt_regs regs;
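
Hedged context for the annotation swap: fork_idle() is called when a CPU is brought up, so its lifetime should track CPU hotplug (__cpuinit, gated by CONFIG_HOTPLUG_CPU) rather than device hotplug (__devinit, gated by CONFIG_HOTPLUG). Roughly paraphrasing the init-section macros of this era (include/linux/init.h; not part of this diff):

        /* With the relevant hotplug option disabled, the section is freed after boot. */
        #ifdef CONFIG_HOTPLUG
        #define __devinit
        #else
        #define __devinit       __init
        #endif

        #ifdef CONFIG_HOTPLUG_CPU
        #define __cpuinit
        #else
        #define __cpuinit       __init
        #endif
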
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index b385878..8b961ad 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -315,6 +315,9 @@ int setup_irq(unsigned int irq, struct irqaction *new)
/* Undo nested disables: */
desc->depth = 1;
}
+ /* Reset broken irq detection when installing new handler */
+ desc->irq_count = 0;
+ desc->irqs_unhandled = 0;
spin_unlock_irqrestore(&desc->lock, flags);
new->irq = irq;
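
A simplified paraphrase of the detector these two counters feed (note_interrupt() in kernel/irq/spurious.c of this era; not part of this diff). Without the reset, counts accumulated before the new handler was installed could trip the "nobody cared" logic and disable the line right after setup:

        /* Sketch only: irq/desc/handled stand in for note_interrupt()'s arguments. */
        static void broken_irq_sketch(unsigned int irq, struct irq_desc *desc, int handled)
        {
                if (!handled)
                        desc->irqs_unhandled++;
                if (unlikely(++desc->irq_count >= 100000)) {
                        if (unlikely(desc->irqs_unhandled > 99900)) {
                                /* "nobody cared": kill the line so it cannot storm */
                                desc->status |= IRQ_DISABLED;
                                desc->depth = 1;
                                desc->chip->disable(irq);
                        }
                        desc->irq_count = 0;
                        desc->irqs_unhandled = 0;
                }
        }
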
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 17ec4af..6fcf8dd 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -87,6 +87,12 @@ struct kprobe_insn_page {
int ngarbage;
};
+enum kprobe_slot_state {
+ SLOT_CLEAN = 0,
+ SLOT_DIRTY = 1,
+ SLOT_USED = 2,
+};
+
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
@@ -130,8 +136,8 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
if (kip->nused < INSNS_PER_PAGE) {
int i;
for (i = 0; i < INSNS_PER_PAGE; i++) {
- if (!kip->slot_used[i]) {
- kip->slot_used[i] = 1;
+ if (kip->slot_used[i] == SLOT_CLEAN) {
+ kip->slot_used[i] = SLOT_USED;
kip->nused++;
return kip->insns + (i * MAX_INSN_SIZE);
}
@@ -163,8 +169,8 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
}
INIT_HLIST_NODE(&kip->hlist);
hlist_add_head(&kip->hlist, &kprobe_insn_pages);
- memset(kip->slot_used, 0, INSNS_PER_PAGE);
- kip->slot_used[0] = 1;
+ memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
+ kip->slot_used[0] = SLOT_USED;
kip->nused = 1;
kip->ngarbage = 0;
return kip->insns;
@@ -173,7 +179,7 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
/* Return 1 if all garbages are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
- kip->slot_used[idx] = 0;
+ kip->slot_used[idx] = SLOT_CLEAN;
kip->nused--;
if (kip->nused == 0) {
/*
@@ -212,7 +218,7 @@ static int __kprobes collect_garbage_slots(void)
continue;
kip->ngarbage = 0; /* we will collect all garbages */
for (i = 0; i < INSNS_PER_PAGE; i++) {
- if (kip->slot_used[i] == -1 &&
+ if (kip->slot_used[i] == SLOT_DIRTY &&
collect_one_slot(kip, i))
break;
}
@@ -232,7 +238,7 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
int i = (slot - kip->insns) / MAX_INSN_SIZE;
if (dirty) {
- kip->slot_used[i] = -1;
+ kip->slot_used[i] = SLOT_DIRTY;
kip->ngarbage++;
} else {
collect_one_slot(kip, i);
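
The enum replaces the bare 0 / 1 / -1 values previously stored in kip->slot_used[]. A summary of the slot lifecycle as it reads after this patch, with the one subtlety worth keeping in mind:

        /*
         * Slot lifecycle implied by the enum (sketch, derived from the hunks above):
         *
         *   SLOT_CLEAN --get_insn_slot()------------> SLOT_USED
         *   SLOT_USED  --free_insn_slot(slot, 0)----> SLOT_CLEAN   (reclaimed at once)
         *   SLOT_USED  --free_insn_slot(slot, 1)----> SLOT_DIRTY   (reclaim deferred)
         *   SLOT_DIRTY --collect_garbage_slots()----> SLOT_CLEAN
         *
         * memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE) still zeroes the array
         * only because SLOT_CLEAN is 0; the enum makes that assumption explicit.
         * The dirty marker also changes value (-1 before, SLOT_DIRTY == 1 now), which
         * is consistent because both the writer in free_insn_slot() and the reader in
         * collect_garbage_slots() are updated together in this patch.
         */
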
diff --git a/kernel/pid.c b/kernel/pid.c
index 2efe9d8..78f2aee 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -197,7 +197,7 @@ fastcall void free_pid(struct pid *pid)
hlist_del_rcu(&pid->pid_chain);
spin_unlock_irqrestore(&pidmap_lock, flags);
- free_pidmap(current->nsproxy->pid_ns, pid->nr);
+ free_pidmap(&init_pid_ns, pid->nr);
call_rcu(&pid->rcu, delayed_put_pid);
}
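
Hedged rationale, tied to the kernel/exit.c hunk above: once exit_task_namespaces() runs before exit_notify(), free_pid() can be reached from a task whose nsproxy has already been dropped, so current->nsproxy->pid_ns is no longer safe to dereference; with only the initial pid namespace in existence in this kernel, &init_pid_ns names the same map. A hypothetical illustration of the hazard (not in the patch):

        /* Illustration only: the dereference chain the old code implicitly relied on. */
        struct pid_namespace *ns = NULL;

        if (current->nsproxy)                   /* may already be NULL during late exit */
                ns = current->nsproxy->pid_ns;
        free_pidmap(ns ? ns : &init_pid_ns, pid->nr);   /* the patch simply uses &init_pid_ns */
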
diff --git a/kernel/profile.c b/kernel/profile.c
index a6574a1..d6579d5 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -331,7 +331,6 @@ out:
local_irq_restore(flags);
put_cpu();
}
-EXPORT_SYMBOL_GPL(profile_hits);
static int __devinit profile_cpu_callback(struct notifier_block *info,
unsigned long action, void *__cpu)
@@ -401,6 +400,8 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
}
#endif /* !CONFIG_SMP */
+EXPORT_SYMBOL_GPL(profile_hits);
+
void profile_tick(int type)
{
struct pt_regs *regs = get_irq_regs();
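
The export moves from inside the CONFIG_SMP branch to after the #endif, so it applies to whichever profile_hits() variant gets built. A structural sketch of the file after the change (function bodies elided):

        #ifdef CONFIG_SMP
        void profile_hits(int type, void *__pc, unsigned int nr_hits)
        {
                /* per-CPU buffered implementation */
        }
        #else /* !CONFIG_SMP */
        void profile_hits(int type, void *__pc, unsigned int nr_hits)
        {
                /* simple atomic-add implementation */
        }
        #endif /* !CONFIG_SMP */
        EXPORT_SYMBOL_GPL(profile_hits);        /* now covers both configurations */
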
diff --git a/kernel/sys.c b/kernel/sys.c
index c7675c1..6e2101d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -323,11 +323,18 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v)
{
- int ret;
+ int ret = NOTIFY_DONE;
- down_read(&nh->rwsem);
- ret = notifier_call_chain(&nh->head, val, v);
- up_read(&nh->rwsem);
+ /*
+ * We check the head outside the lock, but if this access is
+ * racy then it does not matter what the result of the test
+ * is, we re-check the list after having taken the lock anyway:
+ */
+ if (rcu_dereference(nh->head)) {
+ down_read(&nh->rwsem);
+ ret = notifier_call_chain(&nh->head, val, v);
+ up_read(&nh->rwsem);
+ }
return ret;
}
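
A hedged usage sketch of what the fast path buys: when nothing has ever registered on the chain, the call now returns NOTIFY_DONE without touching nh->rwsem, so firing an empty chain avoids the sleeping lock entirely. The names example_chain, example_event and example_nb below are invented for illustration:

        #include <linux/notifier.h>

        /* Hypothetical chain and callback, for illustration only. */
        static BLOCKING_NOTIFIER_HEAD(example_chain);

        static int example_event(struct notifier_block *nb, unsigned long val, void *data)
        {
                return NOTIFY_OK;               /* val/data are whatever the caller passed */
        }

        static struct notifier_block example_nb = {
                .notifier_call = example_event,
        };

        static void example_register(void)
        {
                blocking_notifier_chain_register(&example_chain, &example_nb);
        }

        static void example_fire(void)
        {
                /* With the change above, an empty chain returns NOTIFY_DONE
                 * here without taking example_chain.rwsem at all. */
                blocking_notifier_call_chain(&example_chain, 0, NULL);
        }
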