Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 552
1 file changed, 207 insertions(+), 345 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e27a536..ee61f45 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
@@ -119,8 +119,6 @@
*/
#define RUNTIME_INF ((u64)~0ULL)
-static void double_rq_lock(struct rq *rq1, struct rq *rq2);
-
static inline int rt_policy(int policy)
{
if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
@@ -295,12 +293,12 @@ struct task_group root_task_group;
/* Default task group's sched entity on each cpu */
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
/* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
#endif /* CONFIG_RT_GROUP_SCHED */
#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
@@ -378,13 +376,6 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
#else
-#ifdef CONFIG_SMP
-static int root_task_group_empty(void)
-{
- return 1;
-}
-#endif
-
static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
@@ -514,14 +505,6 @@ struct root_domain {
#ifdef CONFIG_SMP
struct cpupri cpupri;
#endif
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- /*
- * Preferred wake up cpu nominated by sched_mc balance that will be
- * used when most cpus are idle in the system indicating overall very
- * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2)
- */
- unsigned int sched_mc_preferred_wakeup_cpu;
-#endif
};
/*
@@ -646,9 +629,10 @@ struct rq {
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync)
+static inline
+void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
- rq->curr->sched_class->check_preempt_curr(rq, p, sync);
+ rq->curr->sched_class->check_preempt_curr(rq, p, flags);
}
static inline int cpu_of(struct rq *rq)
@@ -697,15 +681,9 @@ inline void update_rq_clock(struct rq *rq)
* This interface allows printk to be called with the runqueue lock
* held and know whether or not it is OK to wake up the klogd.
*/
-int runqueue_is_locked(void)
+int runqueue_is_locked(int cpu)
{
- int cpu = get_cpu();
- struct rq *rq = cpu_rq(cpu);
- int ret;
-
- ret = spin_is_locked(&rq->lock);
- put_cpu();
- return ret;
+ return spin_is_locked(&cpu_rq(cpu)->lock);
}
/*
@@ -1509,8 +1487,65 @@ static int tg_nop(struct task_group *tg, void *data)
#endif
#ifdef CONFIG_SMP
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
+/* Used instead of source_load when we know the type == 0 */
+static unsigned long weighted_cpuload(const int cpu)
+{
+ return cpu_rq(cpu)->load.weight;
+}
+
+/*
+ * Return a low guess at the load of a migration-source cpu weighted
+ * according to the scheduling class and "nice" value.
+ *
+ * We want to under-estimate the load of migration sources, to
+ * balance conservatively.
+ */
+static unsigned long source_load(int cpu, int type)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long total = weighted_cpuload(cpu);
+
+ if (type == 0 || !sched_feat(LB_BIAS))
+ return total;
+
+ return min(rq->cpu_load[type-1], total);
+}
+
+/*
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
+ */
+static unsigned long target_load(int cpu, int type)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long total = weighted_cpuload(cpu);
+
+ if (type == 0 || !sched_feat(LB_BIAS))
+ return total;
+
+ return max(rq->cpu_load[type-1], total);
+}
+
+static struct sched_group *group_of(int cpu)
+{
+ struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+ if (!sd)
+ return NULL;
+
+ return sd->groups;
+}
+
+static unsigned long power_of(int cpu)
+{
+ struct sched_group *group = group_of(cpu);
+
+ if (!group)
+ return SCHED_LOAD_SCALE;
+
+ return group->cpu_power;
+}
+
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
static unsigned long cpu_avg_load_per_task(int cpu)
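
The hunk above moves weighted_cpuload(), source_load() and target_load() earlier in the file; their comments explain the conservative biasing: under-estimate a migration source, over-estimate a migration target. Below is a standalone sketch of that idea, not the kernel code — the per-cpu load history array and the LB_BIAS stand-in are simplified assumptions for illustration.

/*
 * Standalone sketch of the source_load()/target_load() biasing idea.
 * The cpu_load[] history and the lb_bias toggle are simplified
 * stand-ins for the kernel's per-rq state and sched_feat(LB_BIAS).
 */
#include <stdio.h>

#define NR_CPUS   4
#define LOAD_IDX  2     /* which decayed-history slot to compare against */

static unsigned long cpu_load[NR_CPUS][3] = {
	/* slot [0] is the instantaneous weight, higher slots decay slower */
	{ 1024,  900,  800 },
	{  512,  700,  750 },
	{ 2048, 1500, 1200 },
	{    0,  100,  300 },
};

static int lb_bias = 1;

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

/* Low guess: never over-report a migration source's load. */
static unsigned long source_load(int cpu, int type)
{
	unsigned long total = cpu_load[cpu][0];

	if (type == 0 || !lb_bias)
		return total;
	return min_ul(cpu_load[cpu][type], total);
}

/* High guess: never under-report a migration target's load. */
static unsigned long target_load(int cpu, int type)
{
	unsigned long total = cpu_load[cpu][0];

	if (type == 0 || !lb_bias)
		return total;
	return max_ul(cpu_load[cpu][type], total);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: source=%lu target=%lu\n", cpu,
		       source_load(cpu, LOAD_IDX), target_load(cpu, LOAD_IDX));
	return 0;
}

Biasing both directions makes the balancer reluctant to migrate on a transient spike, since a source looks lighter and a target looks heavier than the raw instantaneous weight.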
@@ -1695,6 +1730,8 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
#ifdef CONFIG_PREEMPT
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
/*
* fair double_lock_balance: Safely acquires both rq->locks in a fair
* way at the expense of forcing extra atomic operations in all
@@ -1959,13 +1996,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
}
#ifdef CONFIG_SMP
-
-/* Used instead of source_load when we know the type == 0 */
-static unsigned long weighted_cpuload(const int cpu)
-{
- return cpu_rq(cpu)->load.weight;
-}
-
/*
* Is this task likely cache-hot:
*/
@@ -2023,7 +2053,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
if (task_hot(p, old_rq->clock, NULL))
schedstat_inc(p, se.nr_forced2_migrations);
#endif
- perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+ perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
1, 1, NULL, 0);
}
p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2239,185 +2269,6 @@ void kick_process(struct task_struct *p)
preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
-
-/*
- * Return a low guess at the load of a migration-source cpu weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu, int type)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long total = weighted_cpuload(cpu);
-
- if (type == 0 || !sched_feat(LB_BIAS))
- return total;
-
- return min(rq->cpu_load[type-1], total);
-}
-
-/*
- * Return a high guess at the load of a migration-target cpu weighted
- * according to the scheduling class and "nice" value.
- */
-static unsigned long target_load(int cpu, int type)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long total = weighted_cpuload(cpu);
-
- if (type == 0 || !sched_feat(LB_BIAS))
- return total;
-
- return max(rq->cpu_load[type-1], total);
-}
-
-/*
- * find_idlest_group finds and returns the least busy CPU group within the
- * domain.
- */
-static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
-{
- struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
- unsigned long min_load = ULONG_MAX, this_load = 0;
- int load_idx = sd->forkexec_idx;
- int imbalance = 100 + (sd->imbalance_pct-100)/2;
-
- do {
- unsigned long load, avg_load;
- int local_group;
- int i;
-
- /* Skip over this group if it has no CPUs allowed */
- if (!cpumask_intersects(sched_group_cpus(group),
- &p->cpus_allowed))
- continue;
-
- local_group = cpumask_test_cpu(this_cpu,
- sched_group_cpus(group));
-
- /* Tally up the load of all CPUs in the group */
- avg_load = 0;
-
- for_each_cpu(i, sched_group_cpus(group)) {
- /* Bias balancing toward cpus of our domain */
- if (local_group)
- load = source_load(i, load_idx);
- else
- load = target_load(i, load_idx);
-
- avg_load += load;
- }
-
- /* Adjust by relative CPU power of the group */
- avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
-
- if (local_group) {
- this_load = avg_load;
- this = group;
- } else if (avg_load < min_load) {
- min_load = avg_load;
- idlest = group;
- }
- } while (group = group->next, group != sd->groups);
-
- if (!idlest || 100*this_load < imbalance*min_load)
- return NULL;
- return idlest;
-}
-
-/*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
- */
-static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
-{
- unsigned long load, min_load = ULONG_MAX;
- int idlest = -1;
- int i;
-
- /* Traverse only the allowed CPUs */
- for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
- load = weighted_cpuload(i);
-
- if (load < min_load || (load == min_load && i == this_cpu)) {
- min_load = load;
- idlest = i;
- }
- }
-
- return idlest;
-}
-
-/*
- * sched_balance_self: balance the current task (running on cpu) in domains
- * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
- * SD_BALANCE_EXEC.
- *
- * Balance, ie. select the least loaded group.
- *
- * Returns the target CPU number, or the same CPU if no balancing is needed.
- *
- * preempt must be disabled.
- */
-static int sched_balance_self(int cpu, int flag)
-{
- struct task_struct *t = current;
- struct sched_domain *tmp, *sd = NULL;
-
- for_each_domain(cpu, tmp) {
- /*
- * If power savings logic is enabled for a domain, stop there.
- */
- if (tmp->flags & SD_POWERSAVINGS_BALANCE)
- break;
- if (tmp->flags & flag)
- sd = tmp;
- }
-
- if (sd)
- update_shares(sd);
-
- while (sd) {
- struct sched_group *group;
- int new_cpu, weight;
-
- if (!(sd->flags & flag)) {
- sd = sd->child;
- continue;
- }
-
- group = find_idlest_group(sd, t, cpu);
- if (!group) {
- sd = sd->child;
- continue;
- }
-
- new_cpu = find_idlest_cpu(group, t, cpu);
- if (new_cpu == -1 || new_cpu == cpu) {
- /* Now try balancing at a lower domain level of cpu */
- sd = sd->child;
- continue;
- }
-
- /* Now try balancing at a lower domain level of new_cpu */
- cpu = new_cpu;
- weight = cpumask_weight(sched_domain_span(sd));
- sd = NULL;
- for_each_domain(cpu, tmp) {
- if (weight <= cpumask_weight(sched_domain_span(tmp)))
- break;
- if (tmp->flags & flag)
- sd = tmp;
- }
- /* while loop will break here if sd == NULL */
- }
-
- return cpu;
-}
-
#endif /* CONFIG_SMP */
/**
@@ -2455,37 +2306,22 @@ void task_oncpu_function_call(struct task_struct *p,
*
* returns failure only if the task is already active.
*/
-static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
+static int try_to_wake_up(struct task_struct *p, unsigned int state,
+ int wake_flags)
{
int cpu, orig_cpu, this_cpu, success = 0;
unsigned long flags;
- long old_state;
struct rq *rq;
if (!sched_feat(SYNC_WAKEUPS))
- sync = 0;
-
-#ifdef CONFIG_SMP
- if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
- struct sched_domain *sd;
+ wake_flags &= ~WF_SYNC;
- this_cpu = raw_smp_processor_id();
- cpu = task_cpu(p);
-
- for_each_domain(this_cpu, sd) {
- if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
- update_shares(sd);
- break;
- }
- }
- }
-#endif
+ this_cpu = get_cpu();
smp_wmb();
rq = task_rq_lock(p, &flags);
update_rq_clock(rq);
- old_state = p->state;
- if (!(old_state & state))
+ if (!(p->state & state))
goto out;
if (p->se.on_rq)
@@ -2493,27 +2329,29 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
cpu = task_cpu(p);
orig_cpu = cpu;
- this_cpu = smp_processor_id();
#ifdef CONFIG_SMP
if (unlikely(task_running(rq, p)))
goto out_activate;
- cpu = p->sched_class->select_task_rq(p, sync);
- if (cpu != orig_cpu) {
+ /*
+ * In order to handle concurrent wakeups and release the rq->lock
+ * we put the task in TASK_WAKING state.
+ *
+ * First fix up the nr_uninterruptible count:
+ */
+ if (task_contributes_to_load(p))
+ rq->nr_uninterruptible--;
+ p->state = TASK_WAKING;
+ task_rq_unlock(rq, &flags);
+
+ cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+ if (cpu != orig_cpu)
set_task_cpu(p, cpu);
- task_rq_unlock(rq, &flags);
- /* might preempt at this point */
- rq = task_rq_lock(p, &flags);
- old_state = p->state;
- if (!(old_state & state))
- goto out;
- if (p->se.on_rq)
- goto out_running;
- this_cpu = smp_processor_id();
- cpu = task_cpu(p);
- }
+ rq = task_rq_lock(p, &flags);
+ WARN_ON(p->state != TASK_WAKING);
+ cpu = task_cpu(p);
#ifdef CONFIG_SCHEDSTATS
schedstat_inc(rq, ttwu_count);
@@ -2533,7 +2371,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
out_activate:
#endif /* CONFIG_SMP */
schedstat_inc(p, se.nr_wakeups);
- if (sync)
+ if (wake_flags & WF_SYNC)
schedstat_inc(p, se.nr_wakeups_sync);
if (orig_cpu != cpu)
schedstat_inc(p, se.nr_wakeups_migrate);
@@ -2562,7 +2400,7 @@ out_activate:
out_running:
trace_sched_wakeup(rq, p, success);
- check_preempt_curr(rq, p, sync);
+ check_preempt_curr(rq, p, wake_flags);
p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
@@ -2571,6 +2409,7 @@ out_running:
#endif
out:
task_rq_unlock(rq, &flags);
+ put_cpu();
return success;
}
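
The try_to_wake_up() changes above replace the single "int sync" argument with a wake_flags bitmask (WF_SYNC here, WF_FORK used later for new-task wakeups), and the new comment explains why the task is parked in TASK_WAKING while the rq->lock is dropped. The following is a minimal sketch of the bitmask pattern only; the flag values are illustrative, the real definitions live in the scheduler headers.

/*
 * Sketch of the wake_flags bitmask that replaces the old "int sync"
 * parameter.  Flag values are illustrative, not quoted from the patch.
 */
#include <stdio.h>

#define WF_SYNC  0x01   /* waker intends to sleep right after the wakeup */
#define WF_FORK  0x02   /* wakeup of a freshly forked child */

static int sync_wakeups_enabled;   /* stand-in for sched_feat(SYNC_WAKEUPS) */

static void check_preempt(int wake_flags)
{
	/* Consumers test individual bits instead of a lone boolean. */
	if (wake_flags & WF_SYNC)
		printf("  sync wakeup: be less eager to preempt the waker\n");
	if (wake_flags & WF_FORK)
		printf("  fork wakeup: apply new-task wakeup policy\n");
	if (!wake_flags)
		printf("  plain wakeup\n");
}

static void try_to_wake_up(int wake_flags)
{
	/* Mirrors "wake_flags &= ~WF_SYNC" when the feature is disabled. */
	if (!sync_wakeups_enabled)
		wake_flags &= ~WF_SYNC;

	check_preempt(wake_flags);
}

int main(void)
{
	try_to_wake_up(WF_SYNC);
	try_to_wake_up(WF_FORK);
	try_to_wake_up(0);
	return 0;
}

Packing the hints into one flags word lets new wakeup types be added without growing every function signature along the wakeup path again.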
@@ -2613,6 +2452,7 @@ static void __sched_fork(struct task_struct *p)
p->se.avg_overlap = 0;
p->se.start_runtime = 0;
p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
+ p->se.avg_running = 0;
#ifdef CONFIG_SCHEDSTATS
p->se.wait_start = 0;
@@ -2674,11 +2514,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
__sched_fork(p);
-#ifdef CONFIG_SMP
- cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
-#endif
- set_task_cpu(p, cpu);
-
/*
* Make sure we do not leak PI boosting priority to the child.
*/
@@ -2709,6 +2544,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
if (!rt_prio(p->prio))
p->sched_class = &fair_sched_class;
+#ifdef CONFIG_SMP
+ cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+#endif
+ set_task_cpu(p, cpu);
+
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
if (likely(sched_info_on()))
memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -2754,7 +2594,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
inc_nr_running(rq);
}
trace_sched_wakeup_new(rq, p, 1);
- check_preempt_curr(rq, p, 0);
+ check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
if (p->sched_class->task_wake_up)
p->sched_class->task_wake_up(rq, p);
@@ -2878,7 +2718,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
*/
prev_state = prev->state;
finish_arch_switch(prev);
- perf_counter_task_sched_in(current, cpu_of(rq));
+ perf_event_task_sched_in(current, cpu_of(rq));
finish_lock_switch(rq, prev);
fire_sched_in_preempt_notifiers(current);
@@ -3064,6 +2904,19 @@ unsigned long nr_iowait(void)
return sum;
}
+unsigned long nr_iowait_cpu(void)
+{
+ struct rq *this = this_rq();
+ return atomic_read(&this->nr_iowait);
+}
+
+unsigned long this_cpu_load(void)
+{
+ struct rq *this = this_rq();
+ return this->cpu_load[0];
+}
+
+
/* Variables and functions for calc_load */
static atomic_long_t calc_load_tasks;
static unsigned long calc_load_update;
@@ -3263,7 +3116,7 @@ out:
void sched_exec(void)
{
int new_cpu, this_cpu = get_cpu();
- new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
+ new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
put_cpu();
if (new_cpu != this_cpu)
sched_migrate_task(current, new_cpu);
@@ -3683,11 +3536,6 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
*imbalance = sds->min_load_per_task;
sds->busiest = sds->group_min;
- if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
- cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
- group_first_cpu(sds->group_leader);
- }
-
return 1;
}
@@ -3711,7 +3559,18 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
-unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+
+unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+ return SCHED_LOAD_SCALE;
+}
+
+unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+ return default_scale_freq_power(sd, cpu);
+}
+
+unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
{
unsigned long weight = cpumask_weight(sched_domain_span(sd));
unsigned long smt_gain = sd->smt_gain;
@@ -3721,6 +3580,11 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
return smt_gain;
}
+unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+{
+ return default_scale_smt_power(sd, cpu);
+}
+
unsigned long scale_rt_power(int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -3745,10 +3609,19 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
unsigned long power = SCHED_LOAD_SCALE;
struct sched_group *sdg = sd->groups;
- /* here we could scale based on cpufreq */
+ if (sched_feat(ARCH_POWER))
+ power *= arch_scale_freq_power(sd, cpu);
+ else
+ power *= default_scale_freq_power(sd, cpu);
+
+ power >>= SCHED_LOAD_SHIFT;
if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
- power *= arch_scale_smt_power(sd, cpu);
+ if (sched_feat(ARCH_POWER))
+ power *= arch_scale_smt_power(sd, cpu);
+ else
+ power *= default_scale_smt_power(sd, cpu);
+
power >>= SCHED_LOAD_SHIFT;
}
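
update_cpu_power() now chains two fixed-point scaling factors, frequency and SMT, each expressed relative to SCHED_LOAD_SCALE and folded in with a multiply followed by a right shift. A small arithmetic sketch of that chain follows; the example factors are invented.

/*
 * Fixed-point sketch of the update_cpu_power() scaling chain.
 * SCHED_LOAD_SCALE (1 << 10) acts as "1.0"; every factor is a fraction
 * of it and is folded in with a multiply + right shift.
 */
#include <stdio.h>

#define SCHED_LOAD_SHIFT 10
#define SCHED_LOAD_SCALE (1UL << SCHED_LOAD_SHIFT)

static unsigned long scale(unsigned long power, unsigned long factor)
{
	power *= factor;
	power >>= SCHED_LOAD_SHIFT;     /* divide by SCHED_LOAD_SCALE */
	return power;
}

int main(void)
{
	unsigned long power = SCHED_LOAD_SCALE;   /* start at 1.0 */
	unsigned long freq_factor = 768;          /* ~0.75: frequency-capped cpu */
	unsigned long smt_factor  = 589;          /* ~0.57: sibling shares the core */

	power = scale(power, freq_factor);
	power = scale(power, smt_factor);

	printf("effective cpu_power = %lu (out of %lu)\n",
	       power, SCHED_LOAD_SCALE);
	return 0;
}

The ARCH_POWER feature bit simply decides whether the architecture hook or the default_scale_*() fallback supplies each factor; the arithmetic is identical either way.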
@@ -4161,26 +4034,6 @@ ret:
return NULL;
}
-static struct sched_group *group_of(int cpu)
-{
- struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
-
- if (!sd)
- return NULL;
-
- return sd->groups;
-}
-
-static unsigned long power_of(int cpu)
-{
- struct sched_group *group = group_of(cpu);
-
- if (!group)
- return SCHED_LOAD_SCALE;
-
- return group->cpu_power;
-}
-
/*
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
@@ -5239,17 +5092,16 @@ void account_idle_time(cputime_t cputime)
*/
void account_process_tick(struct task_struct *p, int user_tick)
{
- cputime_t one_jiffy = jiffies_to_cputime(1);
- cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+ cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
struct rq *rq = this_rq();
if (user_tick)
- account_user_time(p, one_jiffy, one_jiffy_scaled);
+ account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
- account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+ account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
one_jiffy_scaled);
else
- account_idle_time(one_jiffy);
+ account_idle_time(cputime_one_jiffy);
}
/*
@@ -5353,7 +5205,7 @@ void scheduler_tick(void)
curr->sched_class->task_tick(rq, curr, 0);
spin_unlock(&rq->lock);
- perf_counter_task_tick(curr, cpu);
+ perf_event_task_tick(curr, cpu);
#ifdef CONFIG_SMP
rq->idle_at_tick = idle_cpu(cpu);
@@ -5465,14 +5317,13 @@ static inline void schedule_debug(struct task_struct *prev)
#endif
}
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
+static void put_prev_task(struct rq *rq, struct task_struct *p)
{
- if (prev->state == TASK_RUNNING) {
- u64 runtime = prev->se.sum_exec_runtime;
+ u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
- runtime -= prev->se.prev_sum_exec_runtime;
- runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+ update_avg(&p->se.avg_running, runtime);
+ if (p->state == TASK_RUNNING) {
/*
* In order to avoid avg_overlap growing stale when we are
* indeed overlapping and hence not getting put to sleep, grow
@@ -5482,9 +5333,12 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev)
* correlates to the amount of cache footprint a task can
* build up.
*/
- update_avg(&prev->se.avg_overlap, runtime);
+ runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+ update_avg(&p->se.avg_overlap, runtime);
+ } else {
+ update_avg(&p->se.avg_running, 0);
}
- prev->sched_class->put_prev_task(rq, prev);
+ p->sched_class->put_prev_task(rq, p);
}
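
put_prev_task() now feeds the raw per-slice runtime into the new se.avg_running average and only clamps it for avg_overlap; a sleeping task decays avg_running with a zero sample. update_avg() itself is not part of this hunk, so the exponentially-decaying form below (new sample weighted 1/8th) is an assumption for illustration, not quoted from the patch.

/*
 * Sketch of an exponentially-decaying running average like the one
 * put_prev_task() feeds with per-slice runtimes.  The >>3 weight is
 * an assumed value; update_avg() is not shown in this hunk.
 */
#include <stdio.h>
#include <stdint.h>

static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)sample - (int64_t)*avg;

	*avg += diff >> 3;      /* move 1/8th of the way toward the sample */
}

int main(void)
{
	uint64_t avg_running = 0;
	uint64_t runtimes_ns[] = { 4000000, 3500000, 0, 0, 5000000 };

	for (size_t i = 0; i < sizeof(runtimes_ns) / sizeof(runtimes_ns[0]); i++) {
		/* A zero sample (task slept) pulls the average back down,
		 * mirroring the "update_avg(&p->se.avg_running, 0)" branch. */
		update_avg(&avg_running, runtimes_ns[i]);
		printf("after slice %zu: avg_running = %llu ns\n",
		       i, (unsigned long long)avg_running);
	}
	return 0;
}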
/*
@@ -5567,7 +5421,7 @@ need_resched_nonpreemptible:
if (likely(prev != next)) {
sched_info_switch(prev, next);
- perf_counter_task_sched_out(prev, next, cpu);
+ perf_event_task_sched_out(prev, next, cpu);
rq->nr_switches++;
rq->curr = next;
@@ -5716,10 +5570,10 @@ asmlinkage void __sched preempt_schedule_irq(void)
#endif /* CONFIG_PREEMPT */
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
void *key)
{
- return try_to_wake_up(curr->private, mode, sync);
+ return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);
@@ -5733,14 +5587,14 @@ EXPORT_SYMBOL(default_wake_function);
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive, int sync, void *key)
+ int nr_exclusive, int wake_flags, void *key)
{
wait_queue_t *curr, *next;
list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
unsigned flags = curr->flags;
- if (curr->func(curr, mode, sync, key) &&
+ if (curr->func(curr, mode, wake_flags, key) &&
(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
break;
}
@@ -5801,16 +5655,16 @@ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, void *key)
{
unsigned long flags;
- int sync = 1;
+ int wake_flags = WF_SYNC;
if (unlikely(!q))
return;
if (unlikely(!nr_exclusive))
- sync = 0;
+ wake_flags = 0;
spin_lock_irqsave(&q->lock, flags);
- __wake_up_common(q, mode, nr_exclusive, sync, key);
+ __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
@@ -6977,23 +6831,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
if (retval)
goto out_unlock;
- /*
- * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
- * tasks that are on an otherwise idle runqueue:
- */
- time_slice = 0;
- if (p->policy == SCHED_RR) {
- time_slice = DEF_TIMESLICE;
- } else if (p->policy != SCHED_FIFO) {
- struct sched_entity *se = &p->se;
- unsigned long flags;
- struct rq *rq;
+ time_slice = p->sched_class->get_rr_interval(p);
- rq = task_rq_lock(p, &flags);
- if (rq->cfs.load.weight)
- time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
- task_rq_unlock(rq, &flags);
- }
read_unlock(&tasklist_lock);
jiffies_to_timespec(time_slice, &t);
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
@@ -7844,7 +7683,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/*
* Register at high priority so that task migration (migrate_all_tasks)
* happens before everything else. This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
+ * the notifier in the perf_event subsystem, though.
*/
static struct notifier_block __cpuinitdata migration_notifier = {
.notifier_call = migration_call,
@@ -8000,9 +7839,7 @@ static int sd_degenerate(struct sched_domain *sd)
}
/* Following flags don't use groups */
- if (sd->flags & (SD_WAKE_IDLE |
- SD_WAKE_AFFINE |
- SD_WAKE_BALANCE))
+ if (sd->flags & (SD_WAKE_AFFINE))
return 0;
return 1;
@@ -8019,10 +7856,6 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
return 0;
- /* Does parent contain flags not in child? */
- /* WAKE_BALANCE is a subset of WAKE_AFFINE */
- if (cflags & SD_WAKE_AFFINE)
- pflags &= ~SD_WAKE_BALANCE;
/* Flags needing groups don't count if only 1 group in parent */
if (parent->groups == parent->groups->next) {
pflags &= ~(SD_LOAD_BALANCE |
@@ -8708,10 +8541,10 @@ static void set_domain_attribute(struct sched_domain *sd,
request = attr->relax_domain_level;
if (request < sd->level) {
/* turn off idle balance on this domain */
- sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
+ sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
} else {
/* turn on idle balance on this domain */
- sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
+ sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
}
}
@@ -9329,6 +9162,7 @@ void __init sched_init_smp(void)
cpumask_var_t non_isolated_cpus;
alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
+ alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
#if defined(CONFIG_NUMA)
sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
@@ -9360,7 +9194,6 @@ void __init sched_init_smp(void)
sched_init_granularity();
free_cpumask_var(non_isolated_cpus);
- alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
init_sched_rt_class();
}
#else
@@ -9707,7 +9540,7 @@ void __init sched_init(void)
alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* SMP */
- perf_counter_init();
+ perf_event_init();
scheduler_running = 1;
}
@@ -10479,7 +10312,7 @@ static int sched_rt_global_constraints(void)
#endif /* CONFIG_RT_GROUP_SCHED */
int sched_rt_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
@@ -10490,7 +10323,7 @@ int sched_rt_handler(struct ctl_table *table, int write,
old_period = sysctl_sched_rt_period;
old_runtime = sysctl_sched_rt_runtime;
- ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
ret = sched_rt_global_constraints();
@@ -10544,8 +10377,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
}
static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10555,15 +10387,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
if (tsk->sched_class != &fair_sched_class)
return -EINVAL;
#endif
+ return 0;
+}
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct task_struct *tsk, bool threadgroup)
+{
+ int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
+ if (retval)
+ return retval;
+ if (threadgroup) {
+ struct task_struct *c;
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+ retval = cpu_cgroup_can_attach_task(cgrp, c);
+ if (retval) {
+ rcu_read_unlock();
+ return retval;
+ }
+ }
+ rcu_read_unlock();
+ }
return 0;
}
static void
cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct cgroup *old_cont, struct task_struct *tsk)
+ struct cgroup *old_cont, struct task_struct *tsk,
+ bool threadgroup)
{
sched_move_task(tsk);
+ if (threadgroup) {
+ struct task_struct *c;
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+ sched_move_task(c);
+ }
+ rcu_read_unlock();
+ }
}
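
The cgroup callbacks above gain a threadgroup flag: can_attach validates every thread in the group before anything is moved, and attach then walks the thread_group list a second time to actually move them. A userspace-flavoured sketch of that "validate all, then commit all" pattern follows; the task list and the policy check are invented.

/*
 * "Validate all, then commit all" sketch of the threadgroup attach flow.
 * The thread list and the policy check are invented for illustration.
 */
#include <stdio.h>

struct task {
	int pid;
	int is_realtime;        /* stand-in for the RT-group bandwidth check */
};

static int can_attach_task(const struct task *t)
{
	return t->is_realtime ? -1 : 0;     /* refuse, like -EINVAL */
}

static void move_task(const struct task *t)
{
	printf("moved pid %d\n", t->pid);
}

static int attach_threadgroup(struct task *group, int nr, int threadgroup)
{
	int i;

	/* Pass 1: refuse the whole operation if any member would fail. */
	if (can_attach_task(&group[0]))
		return -1;
	if (threadgroup)
		for (i = 1; i < nr; i++)
			if (can_attach_task(&group[i]))
				return -1;

	/* Pass 2: only now is it safe to move everyone. */
	move_task(&group[0]);
	if (threadgroup)
		for (i = 1; i < nr; i++)
			move_task(&group[i]);
	return 0;
}

int main(void)
{
	struct task grp[] = { { 100, 0 }, { 101, 0 }, { 102, 0 } };

	return attach_threadgroup(grp, 3, 1) ? 1 : 0;
}

Splitting the check from the move is what lets the whole attach fail cleanly with no thread migrated when any single member is ineligible.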
#ifdef CONFIG_FAIR_GROUP_SCHED