| author | Paul Gortmaker <paul.gortmaker@windriver.com> | 2013-04-19 15:10:50 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-05-07 13:14:51 +0200 |
| commit | 8527632dc95472adb571701e852479531c0567a2 | |
| tree | 658d9237beda390534cae46558b9c3e1adf4bf23 /kernel/sched | |
| parent | 45ceebf77653975815d82fcf7cec0a164215ae11 | |
sched: Move update_load_*() methods from sched.h to fair.c
These inlines are only used by kernel/sched/fair.c so they do
not need to be present in the main kernel/sched/sched.h file.
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/1366398650-31599-3-git-send-email-paul.gortmaker@windriver.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
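For context on the commit message's point that these inlines are only used by kernel/sched/fair.c: the sketch below (not part of this commit) approximates how fair.c's accounting paths call them when an entity's weight is added to or removed from a runqueue's aggregate load. The struct definitions are trimmed stand-ins for the kernel's, and account_entity_enqueue()/account_entity_dequeue() are simplified versions of the kernel functions of the same names.

```c
/* Trimmed stand-ins for the kernel structs; only the fields used are shown. */
struct load_weight {
	unsigned long weight;
	unsigned long inv_weight;	/* cached inverse, zeroed when stale */
};

struct sched_entity { struct load_weight load; };
struct cfs_rq       { struct load_weight load; };

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

/* Roughly what fair.c's accounting helpers do with cfs_rq->load: */
static void account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
}

static void account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
}
```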
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/fair.c | 18 |
-rw-r--r-- | kernel/sched/sched.h | 18 |
2 files changed, 18 insertions, 18 deletions
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c61a614..08a554d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -113,6 +113,24 @@ unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 #endif
 
+static inline void update_load_add(struct load_weight *lw, unsigned long inc)
+{
+	lw->weight += inc;
+	lw->inv_weight = 0;
+}
+
+static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
+{
+	lw->weight -= dec;
+	lw->inv_weight = 0;
+}
+
+static inline void update_load_set(struct load_weight *lw, unsigned long w)
+{
+	lw->weight = w;
+	lw->inv_weight = 0;
+}
+
 /*
  * Increase the granularity value when there are more CPUs,
  * because with more CPUs the 'effective latency' as visible
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a38ee0a..f1f6256 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -892,24 +892,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #define WF_FORK		0x02		/* child wakeup after fork */
 #define WF_MIGRATED	0x4		/* internal use, task got migrated */
 
-static inline void update_load_add(struct load_weight *lw, unsigned long inc)
-{
-	lw->weight += inc;
-	lw->inv_weight = 0;
-}
-
-static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
-{
-	lw->weight -= dec;
-	lw->inv_weight = 0;
-}
-
-static inline void update_load_set(struct load_weight *lw, unsigned long w)
-{
-	lw->weight = w;
-	lw->inv_weight = 0;
-}
-
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
```
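As for why each helper zeroes inv_weight: it caches a fixed-point inverse of the weight so hot paths can scale a time delta with a multiply and shift instead of a division, and zeroing it after a weight change marks the cache stale so it is recomputed lazily on the next use. The snippet below is a simplified, self-contained sketch of that pattern, not the kernel's actual scaling code; the WMULT_* values and the name scale_delta_by_inverse() are illustrative stand-ins.

```c
#include <stdint.h>

#define WMULT_SHIFT	32
#define WMULT_CONST	(~0U)		/* ~2^32, stand-in for the kernel's constant */

struct load_weight {
	unsigned long weight;
	uint32_t inv_weight;		/* cached ~(2^32 / weight), 0 when stale */
};

/* Approximate delta / weight as delta * (2^32 / weight) >> 32. */
static uint64_t scale_delta_by_inverse(uint32_t delta, struct load_weight *lw)
{
	if (!lw->inv_weight && lw->weight)	/* stale after update_load_*() */
		lw->inv_weight = WMULT_CONST / lw->weight;

	return ((uint64_t)delta * lw->inv_weight) >> WMULT_SHIFT;
}
```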