Diffstat (limited to 'kernel')
-rw-r--r--  kernel/capability.c        20
-rw-r--r--  kernel/cpuset.c            26
-rw-r--r--  kernel/crash_dump.c        11
-rw-r--r--  kernel/itimer.c            37
-rw-r--r--  kernel/panic.c              9
-rw-r--r--  kernel/power/disk.c         9
-rw-r--r--  kernel/power/poweroff.c     4
-rw-r--r--  kernel/power/smp.c          2
-rw-r--r--  kernel/sched.c              8
-rw-r--r--  kernel/sys.c              113
-rw-r--r--  kernel/sysctl.c            12
-rw-r--r--  kernel/time.c               2
12 files changed, 160 insertions, 93 deletions
diff --git a/kernel/capability.c b/kernel/capability.c
index 64db1ee..8986a37 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -31,8 +31,14 @@ static DEFINE_SPINLOCK(task_capability_lock);
* uninteresting and/or not to be changed.
*/
-/*
+/**
* sys_capget - get the capabilities of a given process.
+ * @header: pointer to struct that contains capability version and
+ * target pid data
+ * @dataptr: pointer to struct that contains the effective, permitted,
+ * and inheritable capabilities that are returned
+ *
+ * Returns 0 on success and < 0 on error.
*/
asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
{
@@ -141,8 +147,14 @@ static inline int cap_set_all(kernel_cap_t *effective,
return ret;
}
-/*
- * sys_capset - set capabilities for a given process, all processes, or all
+/**
+ * sys_capset - set capabilities for a process or a group of processes
+ * @header: pointer to struct that contains capability version and
+ * target pid data
+ * @data: pointer to struct that contains the effective, permitted,
+ * and inheritable capabilities
+ *
+ * Set capabilities for a given process, all processes, or all
* processes in a given process group.
*
* The restrictions on setting capabilities are specified as:
@@ -152,6 +164,8 @@ static inline int cap_set_all(kernel_cap_t *effective,
* I: any raised capabilities must be a subset of the (old current) permitted
* P: any raised capabilities must be a subset of the (old current) permitted
* E: must be set to a subset of (new target) permitted
+ *
+ * Returns 0 on success and < 0 on error.
*/
asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
{
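The kernel-doc blocks added above describe the capget()/capset() syscall pair as seen from userspace. Purely as a point of reference (not part of the patch), a minimal capget(2) caller using the raw syscall and the v1 capability ABI might look like the following; older headers spell the version macro _LINUX_CAPABILITY_VERSION rather than _LINUX_CAPABILITY_VERSION_1.

/* Hedged sketch: query the caller's own capability sets via capget(2). */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

int main(void)
{
	struct __user_cap_header_struct header = {
		.version = _LINUX_CAPABILITY_VERSION_1, /* v1 ABI assumed here */
		.pid = 0,                               /* 0 selects the calling task */
	};
	struct __user_cap_data_struct data;

	if (syscall(SYS_capget, &header, &data) < 0) {
		perror("capget");
		return 1;
	}
	printf("effective=%#x permitted=%#x inheritable=%#x\n",
	       data.effective, data.permitted, data.inheritable);
	return 0;
}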
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 984c0bf..805fb90 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1440,10 +1440,10 @@ void __init cpuset_init_smp(void)
/**
* cpuset_fork - attach newly forked task to its parents cpuset.
- * @p: pointer to task_struct of forking parent process.
+ * @tsk: pointer to task_struct of forking parent process.
*
* Description: By default, on fork, a task inherits its
- * parents cpuset. The pointer to the shared cpuset is
+ * parent's cpuset. The pointer to the shared cpuset is
* automatically copied in fork.c by dup_task_struct().
* This cpuset_fork() routine need only increment the usage
* counter in that cpuset.
@@ -1471,7 +1471,6 @@ void cpuset_fork(struct task_struct *tsk)
* by the cpuset_sem semaphore. If you don't hold cpuset_sem,
* then a zero cpuset use count is a license to any other task to
* nuke the cpuset immediately.
- *
**/
void cpuset_exit(struct task_struct *tsk)
@@ -1521,7 +1520,9 @@ void cpuset_init_current_mems_allowed(void)
current->mems_allowed = NODE_MASK_ALL;
}
-/*
+/**
+ * cpuset_update_current_mems_allowed - update mems parameters to new values
+ *
* If the current tasks cpusets mems_allowed changed behind our backs,
* update current->mems_allowed and mems_generation to the new value.
* Do not call this routine if in_interrupt().
@@ -1540,13 +1541,20 @@ void cpuset_update_current_mems_allowed(void)
}
}
+/**
+ * cpuset_restrict_to_mems_allowed - limit nodes to current mems_allowed
+ * @nodes: pointer to a node bitmap that is and-ed with mems_allowed
+ */
void cpuset_restrict_to_mems_allowed(unsigned long *nodes)
{
bitmap_and(nodes, nodes, nodes_addr(current->mems_allowed),
MAX_NUMNODES);
}
-/*
+/**
+ * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed
+ * @zl: the zonelist to be checked
+ *
* Are any of the nodes on zonelist zl allowed in current->mems_allowed?
*/
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
@@ -1562,8 +1570,12 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
return 0;
}
-/*
- * Is 'current' valid, and is zone z allowed in current->mems_allowed?
+/**
+ * cpuset_zone_allowed - is zone z allowed in current->mems_allowed
+ * @z: zone in question
+ *
+ * Is zone z allowed in current->mems_allowed, or is
+ * the CPU in interrupt context? (zone is always allowed in this case)
*/
int cpuset_zone_allowed(struct zone *z)
{
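The new comment for cpuset_restrict_to_mems_allowed() documents a plain bitwise AND of the caller's node mask with current->mems_allowed. As an illustration of that semantic only (ordinary C, not kernel code, with a made-up word count NWORDS):

/* Illustration: restrict a node mask to an "allowed" mask word by word,
 * which is what bitmap_and() does for mems_allowed above. */
#define NWORDS 4

static void restrict_to_allowed(unsigned long *nodes, const unsigned long *allowed)
{
	for (int i = 0; i < NWORDS; i++)
		nodes[i] &= allowed[i];  /* a node survives only if both masks have it */
}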
diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c
index 459ba49..334c37f 100644
--- a/kernel/crash_dump.c
+++ b/kernel/crash_dump.c
@@ -18,7 +18,16 @@
/* Stores the physical address of elf header of crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
-/*
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
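The parameters documented above are easiest to see from a caller's point of view. A hedged sketch of such a caller (names and surrounding context invented, not part of this patch) that copies a byte range from the old kernel's memory one page at a time:

/* Sketch: copy 'count' bytes starting at old-kernel physical address 'paddr'
 * into a user buffer, page by page, via the helper documented above. */
static ssize_t read_oldmem_range(char *ubuf, size_t count, unsigned long long paddr)
{
	size_t done = 0;

	while (count) {
		unsigned long pfn = paddr >> PAGE_SHIFT;
		unsigned long offset = paddr & (PAGE_SIZE - 1);
		size_t csize = PAGE_SIZE - offset;
		ssize_t ret;

		if (csize > count)
			csize = count;
		/* userbuf=1: the helper uses copy_to_user() for us */
		ret = copy_oldmem_page(pfn, ubuf + done, csize, offset, 1);
		if (ret < 0)
			return ret;
		paddr += csize;
		done  += csize;
		count -= csize;
	}
	return done;
}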
diff --git a/kernel/itimer.c b/kernel/itimer.c
index a72cb0e..7c1b25e 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -112,28 +112,11 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
return error;
}
-/*
- * Called with P->sighand->siglock held and P->signal->real_timer inactive.
- * If interval is nonzero, arm the timer for interval ticks from now.
- */
-static inline void it_real_arm(struct task_struct *p, unsigned long interval)
-{
- p->signal->it_real_value = interval; /* XXX unnecessary field?? */
- if (interval == 0)
- return;
- if (interval > (unsigned long) LONG_MAX)
- interval = LONG_MAX;
- /* the "+ 1" below makes sure that the timer doesn't go off before
- * the interval requested. This could happen if
- * time requested % (usecs per jiffy) is more than the usecs left
- * in the current jiffy */
- p->signal->real_timer.expires = jiffies + interval + 1;
- add_timer(&p->signal->real_timer);
-}
void it_real_fn(unsigned long __data)
{
struct task_struct * p = (struct task_struct *) __data;
+ unsigned long inc = p->signal->it_real_incr;
send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p);
@@ -141,14 +124,23 @@ void it_real_fn(unsigned long __data)
* Now restart the timer if necessary. We don't need any locking
* here because do_setitimer makes sure we have finished running
* before it touches anything.
+ * Note, we KNOW we are (or should be) at a jiffie edge here so
+ * we don't need the +1 stuff. Also, we want to use the prior
+ * expire value so as to not "slip" a jiffie if we are late.
+ * Deal with requesting a time prior to "now" here rather than
+ * in add_timer.
*/
- it_real_arm(p, p->signal->it_real_incr);
+ if (!inc)
+ return;
+ while (time_before_eq(p->signal->real_timer.expires, jiffies))
+ p->signal->real_timer.expires += inc;
+ add_timer(&p->signal->real_timer);
}
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
struct task_struct *tsk = current;
- unsigned long val, interval;
+ unsigned long val, interval, expires;
cputime_t cval, cinterval, nval, ninterval;
switch (which) {
@@ -164,7 +156,10 @@ again:
}
tsk->signal->it_real_incr =
timeval_to_jiffies(&value->it_interval);
- it_real_arm(tsk, timeval_to_jiffies(&value->it_value));
+ expires = timeval_to_jiffies(&value->it_value);
+ if (expires)
+ mod_timer(&tsk->signal->real_timer,
+ jiffies + 1 + expires);
spin_unlock_irq(&tsk->sighand->siglock);
if (ovalue) {
jiffies_to_timeval(val, &ovalue->it_value);
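The comment above is the whole story of this change: the interval timer is now re-armed from its previous expiry (catching up with "+= inc" if the handler ran late) instead of from jiffies + interval + 1, so the extra jiffy no longer accumulates every period. A throwaway userspace illustration with made-up jiffy values:

/* Illustration: the old it_real_arm() added "interval + 1" from 'now' at every
 * firing, so each period slipped by one jiffy; stepping from the previous
 * expiry keeps the period exact. */
#include <stdio.h>

int main(void)
{
	unsigned long inc = 100;         /* interval in jiffies */
	unsigned long old_style = 1000;  /* both timers armed at jiffies == 1000 */
	unsigned long new_style = 1000;

	for (int i = 0; i < 5; i++) {
		old_style += inc + 1;    /* old scheme: +1 safety margin every firing */
		new_style += inc;        /* new scheme: advance from the prior expiry */
	}
	printf("after 5 firings: old=%lu new=%lu, accumulated slip=%lu jiffies\n",
	       old_style, new_style, old_style - new_style);
	return 0;
}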
diff --git a/kernel/panic.c b/kernel/panic.c
index 74ba5f3..aabc5f8 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -111,12 +111,11 @@ NORET_TYPE void panic(const char * fmt, ...)
mdelay(1);
i++;
}
- /*
- * Should we run the reboot notifier. For the moment Im
- * choosing not too. It might crash, be corrupt or do
- * more harm than good for other reasons.
+ /* This will not be a clean reboot, with everything
+ * shutting down. But if there is a chance of
+ * rebooting the system it will be rebooted.
*/
- machine_restart(NULL);
+ emergency_restart();
}
#ifdef __sparc__
{
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 3ec789c..664eb04 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -59,16 +59,13 @@ static void power_down(suspend_disk_method_t mode)
error = pm_ops->enter(PM_SUSPEND_DISK);
break;
case PM_DISK_SHUTDOWN:
- printk("Powering off system\n");
- device_shutdown();
- machine_power_off();
+ kernel_power_off();
break;
case PM_DISK_REBOOT:
- device_shutdown();
- machine_restart(NULL);
+ kernel_restart(NULL);
break;
}
- machine_halt();
+ kernel_halt();
/* Valid image is on the disk, if we continue we risk serious data corruption
after resume. */
printk(KERN_CRIT "Please power me down manually\n");
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 715081b..7a4144b 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/workqueue.h>
+#include <linux/reboot.h>
/*
* When the user hits Sys-Rq o to power down the machine this is the
@@ -17,8 +18,7 @@
static void do_poweroff(void *dummy)
{
- if (pm_power_off)
- pm_power_off();
+ kernel_power_off();
}
static DECLARE_WORK(poweroff_work, do_poweroff, NULL);
diff --git a/kernel/power/smp.c b/kernel/power/smp.c
index bbe2307..911fc62 100644
--- a/kernel/power/smp.c
+++ b/kernel/power/smp.c
@@ -38,7 +38,7 @@ void disable_nonboot_cpus(void)
}
printk("Error taking cpu %d down: %d\n", cpu, error);
}
- BUG_ON(smp_processor_id() != 0);
+ BUG_ON(raw_smp_processor_id() != 0);
if (error)
panic("cpus not sleeping");
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 4107db0..a646e4f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3486,7 +3486,7 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
p->policy = policy;
p->rt_priority = prio;
if (policy != SCHED_NORMAL)
- p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
+ p->prio = MAX_RT_PRIO-1 - p->rt_priority;
else
p->prio = p->static_prio;
}
@@ -3518,7 +3518,8 @@ recheck:
* 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
*/
if (param->sched_priority < 0 ||
- param->sched_priority > MAX_USER_RT_PRIO-1)
+ (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
+ (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
return -EINVAL;
if ((policy == SCHED_NORMAL) != (param->sched_priority == 0))
return -EINVAL;
@@ -3528,7 +3529,8 @@ recheck:
*/
if (!capable(CAP_SYS_NICE)) {
/* can't change policy */
- if (policy != p->policy)
+ if (policy != p->policy &&
+ !p->signal->rlim[RLIMIT_RTPRIO].rlim_cur)
return -EPERM;
/* can't increase priority */
if (policy != SCHED_NORMAL &&
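From userspace, the permission paths touched in this hunk are exercised through sched_setscheduler(2). A minimal sketch (priority chosen arbitrarily; whether it succeeds without CAP_SYS_NICE depends on the checks being edited above, which this patch extends to honor a nonzero RLIMIT_RTPRIO for the policy change):

/* Hedged sketch: request SCHED_FIFO priority 10 for the calling task. */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("running SCHED_FIFO at priority %d\n", sp.sched_priority);
	return 0;
}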
diff --git a/kernel/sys.c b/kernel/sys.c
index 9a24374..a7403903 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -361,6 +361,68 @@ out_unlock:
return retval;
}
+void emergency_restart(void)
+{
+ machine_emergency_restart();
+}
+EXPORT_SYMBOL_GPL(emergency_restart);
+
+void kernel_restart(char *cmd)
+{
+ notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
+ system_state = SYSTEM_RESTART;
+ device_suspend(PMSG_FREEZE);
+ device_shutdown();
+ if (!cmd) {
+ printk(KERN_EMERG "Restarting system.\n");
+ } else {
+ printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
+ }
+ printk(".\n");
+ machine_restart(cmd);
+}
+EXPORT_SYMBOL_GPL(kernel_restart);
+
+void kernel_kexec(void)
+{
+#ifdef CONFIG_KEXEC
+ struct kimage *image;
+ image = xchg(&kexec_image, 0);
+ if (!image) {
+ return;
+ }
+ notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
+ system_state = SYSTEM_RESTART;
+ device_suspend(PMSG_FREEZE);
+ device_shutdown();
+ printk(KERN_EMERG "Starting new kernel\n");
+ machine_shutdown();
+ machine_kexec(image);
+#endif
+}
+EXPORT_SYMBOL_GPL(kernel_kexec);
+
+void kernel_halt(void)
+{
+ notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
+ system_state = SYSTEM_HALT;
+ device_suspend(PMSG_SUSPEND);
+ device_shutdown();
+ printk(KERN_EMERG "System halted.\n");
+ machine_halt();
+}
+EXPORT_SYMBOL_GPL(kernel_halt);
+
+void kernel_power_off(void)
+{
+ notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
+ system_state = SYSTEM_POWER_OFF;
+ device_suspend(PMSG_SUSPEND);
+ device_shutdown();
+ printk(KERN_EMERG "Power down.\n");
+ machine_power_off();
+}
+EXPORT_SYMBOL_GPL(kernel_power_off);
/*
* Reboot system call: for obvious reasons only root may call it,
@@ -389,11 +451,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
lock_kernel();
switch (cmd) {
case LINUX_REBOOT_CMD_RESTART:
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
- system_state = SYSTEM_RESTART;
- device_shutdown();
- printk(KERN_EMERG "Restarting system.\n");
- machine_restart(NULL);
+ kernel_restart(NULL);
break;
case LINUX_REBOOT_CMD_CAD_ON:
@@ -405,23 +463,13 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
break;
case LINUX_REBOOT_CMD_HALT:
- notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
- system_state = SYSTEM_HALT;
- device_suspend(PMSG_SUSPEND);
- device_shutdown();
- printk(KERN_EMERG "System halted.\n");
- machine_halt();
+ kernel_halt();
unlock_kernel();
do_exit(0);
break;
case LINUX_REBOOT_CMD_POWER_OFF:
- notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
- system_state = SYSTEM_POWER_OFF;
- device_suspend(PMSG_SUSPEND);
- device_shutdown();
- printk(KERN_EMERG "Power down.\n");
- machine_power_off();
+ kernel_power_off();
unlock_kernel();
do_exit(0);
break;
@@ -433,32 +481,14 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
}
buffer[sizeof(buffer) - 1] = '\0';
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, buffer);
- system_state = SYSTEM_RESTART;
- device_suspend(PMSG_FREEZE);
- device_shutdown();
- printk(KERN_EMERG "Restarting system with command '%s'.\n", buffer);
- machine_restart(buffer);
+ kernel_restart(buffer);
break;
-#ifdef CONFIG_KEXEC
case LINUX_REBOOT_CMD_KEXEC:
- {
- struct kimage *image;
- image = xchg(&kexec_image, 0);
- if (!image) {
- unlock_kernel();
- return -EINVAL;
- }
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
- system_state = SYSTEM_RESTART;
- device_shutdown();
- printk(KERN_EMERG "Starting new kernel\n");
- machine_shutdown();
- machine_kexec(image);
- break;
- }
-#endif
+ kernel_kexec();
+ unlock_kernel();
+ return -EINVAL;
+
#ifdef CONFIG_SOFTWARE_SUSPEND
case LINUX_REBOOT_CMD_SW_SUSPEND:
{
@@ -478,8 +508,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
static void deferred_cad(void *dummy)
{
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
- machine_restart(NULL);
+ kernel_restart(NULL);
}
/*
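The refactored sys_reboot() cases above are reached from userspace through reboot(2); the glibc wrapper hides the LINUX_REBOOT_MAGIC* numbers. A minimal power-off sketch, shown only to tie the LINUX_REBOOT_CMD_* cases to their caller (must run as root; the call does not return on success):

/* Hedged sketch: power the machine off, which after this patch lands in
 * kernel_power_off(). sync() first so dirty data reaches disk. */
#include <stdio.h>
#include <unistd.h>
#include <sys/reboot.h>

int main(void)
{
	sync();
	if (reboot(RB_POWER_OFF) != 0) {  /* glibc name for LINUX_REBOOT_CMD_POWER_OFF */
		perror("reboot");
		return 1;
	}
	return 0;
}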
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e60b9c3..3e0bbee 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -114,6 +114,7 @@ extern int unaligned_enabled;
extern int sysctl_ieee_emulation_warnings;
#endif
extern int sysctl_userprocess_debug;
+extern int spin_retry;
#endif
extern int sysctl_hz_timer;
@@ -647,7 +648,16 @@ static ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
-
+#if defined(CONFIG_ARCH_S390)
+ {
+ .ctl_name = KERN_SPIN_RETRY,
+ .procname = "spin_retry",
+ .data = &spin_retry,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
{ .ctl_name = 0 }
};
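The new kern_table entry is exported as /proc/sys/kernel/spin_retry on s390 only; reading or adjusting it from userspace needs nothing beyond ordinary file I/O, e.g.:

/* Hedged sketch: print the current spin_retry value. The file only exists on
 * CONFIG_ARCH_S390 kernels that carry this change. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/spin_retry", "r");
	int val;

	if (!f) {
		perror("/proc/sys/kernel/spin_retry");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("spin_retry = %d\n", val);
	fclose(f);
	return 0;
}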
diff --git a/kernel/time.c b/kernel/time.c
index d4335c1..dd5ae11 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -128,7 +128,7 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __us
* as real UNIX machines always do it. This avoids all headaches about
* daylight saving times and warping kernel clocks.
*/
-inline static void warp_clock(void)
+static inline void warp_clock(void)
{
write_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;