Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpuset.c        |  2
-rw-r--r--	kernel/exit.c          | 10
-rw-r--r--	kernel/posix-timers.c  |  8
-rw-r--r--	kernel/sched.c         | 16
-rw-r--r--	kernel/signal.c        |  4
-rw-r--r--	kernel/workqueue.c     |  2
6 files changed, 21 insertions, 21 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2a75e44..d4b6bd7 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1554,7 +1554,7 @@ struct ctr_struct {
  * when reading out p->cpuset, as we don't really care if it changes
  * on the next cycle, and we are not going to try to dereference it.
  */
-static inline int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
+static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
 {
 	int n = 0;
 	struct task_struct *g, *p;
diff --git a/kernel/exit.c b/kernel/exit.c
index 7fb541c..93cee36 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -193,7 +193,7 @@ int is_orphaned_pgrp(int pgrp)
 	return retval;
 }
 
-static inline int has_stopped_jobs(int pgrp)
+static int has_stopped_jobs(int pgrp)
 {
 	int retval = 0;
 	struct task_struct *p;
@@ -230,7 +230,7 @@ static inline int has_stopped_jobs(int pgrp)
  *
  * NOTE that reparent_to_init() gives the caller full capabilities.
  */
-static inline void reparent_to_init(void)
+static void reparent_to_init(void)
 {
 	write_lock_irq(&tasklist_lock);
 
@@ -369,7 +369,7 @@ void daemonize(const char *name, ...)
 
 EXPORT_SYMBOL(daemonize);
 
-static inline void close_files(struct files_struct * files)
+static void close_files(struct files_struct * files)
 {
 	int i, j;
 	struct fdtable *fdt;
@@ -543,7 +543,7 @@ static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_re
 	p->real_parent = reaper;
 }
 
-static inline void reparent_thread(task_t *p, task_t *father, int traced)
+static void reparent_thread(task_t *p, task_t *father, int traced)
 {
 	/* We don't want people slaying init. */
 	if (p->exit_signal != -1)
@@ -607,7 +607,7 @@ static inline void reparent_thread(task_t *p, task_t *father, int traced)
  * group, and if no such member exists, give it to
  * the global child reaper process (ie "init")
  */
-static inline void forget_original_parent(struct task_struct * father,
+static void forget_original_parent(struct task_struct * father,
 					  struct list_head *to_release)
 {
 	struct task_struct *p, *reaper = father;
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 9e66e61..197208b 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -192,7 +192,7 @@ static inline int common_clock_set(const clockid_t which_clock,
 	return do_sys_settimeofday(tp, NULL);
 }
 
-static inline int common_timer_create(struct k_itimer *new_timer)
+static int common_timer_create(struct k_itimer *new_timer)
 {
 	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock);
 	new_timer->it.real.timer.data = new_timer;
@@ -361,7 +361,7 @@ static int posix_timer_fn(void *data)
 	return ret;
 }
 
-static inline struct task_struct * good_sigevent(sigevent_t * event)
+static struct task_struct * good_sigevent(sigevent_t * event)
 {
 	struct task_struct *rtn = current->group_leader;
 
@@ -687,7 +687,7 @@ sys_timer_getoverrun(timer_t timer_id)
 
 /* Set a POSIX.1b interval timer. */
 /* timr->it_lock is taken. */
-static inline int
+static int
 common_timer_set(struct k_itimer *timr, int flags,
 		 struct itimerspec *new_setting, struct itimerspec *old_setting)
 {
@@ -829,7 +829,7 @@ retry_delete:
 /*
  * return timer owned by the process, used by exit_itimers
  */
-static inline void itimer_delete(struct k_itimer *timer)
+static void itimer_delete(struct k_itimer *timer)
 {
 	unsigned long flags;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index e1dc903..788ecce 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -521,7 +521,7 @@ static inline void sched_info_dequeued(task_t *t)
  * long it was waiting to run.  We also note when it began so that we
  * can keep stats on how long its timeslice is.
  */
-static inline void sched_info_arrive(task_t *t)
+static void sched_info_arrive(task_t *t)
 {
 	unsigned long now = jiffies, diff = 0;
 	struct runqueue *rq = task_rq(t);
@@ -1007,7 +1007,7 @@ void kick_process(task_t *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
+static unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
 	unsigned long running = rq->nr_running;
@@ -1870,7 +1870,7 @@ void sched_exec(void)
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
 */
-static inline
+static
 void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
 	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
 {
@@ -1892,7 +1892,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
-static inline
+static
 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
 		     struct sched_domain *sd, enum idle_type idle,
 		     int *all_pinned)
@@ -2378,7 +2378,7 @@ out_balanced:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
+static void idle_balance(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *sd;
 
@@ -2762,7 +2762,7 @@ static inline void wakeup_busy_runqueue(runqueue_t *rq)
 		resched_task(rq->idle);
 }
 
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
@@ -2816,7 +2816,7 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
 	return p->time_slice * (100 - sd->per_cpu_gain) / 100;
 }
 
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
@@ -6008,7 +6008,7 @@ next_sg:
  * Detach sched domains from a group of cpus specified in cpu_map
  * These cpus will now be attached to the NULL domain
 */
-static inline void detach_destroy_domains(const cpumask_t *cpu_map)
+static void detach_destroy_domains(const cpumask_t *cpu_map)
 {
 	int i;
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 1da2e74..5dafbd3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -476,7 +476,7 @@ unblock_all_signals(void)
 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }
 
-static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 {
 	struct sigqueue *q, *first = NULL;
 	int still_pending = 0;
@@ -1881,7 +1881,7 @@ do_signal_stop(int signr)
  * We return zero if we still hold the siglock and should look
  * for another signal without checking group_stop_count again.
  */
-static inline int handle_group_stop(void)
+static int handle_group_stop(void)
 {
 	int stop_count;
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 82c4fa7..b052e2c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -147,7 +147,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	return ret;
 }
 
-static inline void run_workqueue(struct cpu_workqueue_struct *cwq)
+static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	unsigned long flags;
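
Every hunk above makes the same one-line change: a function previously marked "static inline" becomes plain "static", so the compiler decides on its own whether each call site is worth inlining instead of being nudged to expand the body everywhere. A minimal sketch of the before/after shape; the helper name and body are illustrative only and not taken from the patch:

/* Illustrative only, not from the patch.  Before the change the
 * definition would have read "static inline int helper(int x)",
 * a strong hint to expand the body at every call site.  After the
 * change it is a plain file-local function; gcc may still inline
 * small, hot callers, but larger or rarely called helpers stay
 * out of line, which keeps the kernel image smaller. */
static int helper(int x)
{
	return x + 1;
}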