Diffstat (limited to 'include')
-rw-r--r--  include/linux/sched.h | 59
1 file changed, 30 insertions(+), 29 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c5d3f84..f6cd60f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -134,7 +134,6 @@ extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
-extern unsigned long weighted_cpuload(const int cpu);
struct seq_file;
struct cfs_rq;
@@ -784,6 +783,8 @@ struct sched_domain {
unsigned int balance_interval; /* initialise to 1. units in ms. */
unsigned int nr_balance_failed; /* initialise to 0 */
+ u64 last_update;
+
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
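The new last_update field pairs with the sysctl_sched_shares_ratelimit knob added at the end of this diff: per-domain recomputation is gated on a timestamp so it cannot run arbitrarily often. A minimal sketch of that timestamp-gating pattern, with all names (demo_domain, demo_update_shares, DEMO_RATELIMIT_NS) as illustrative stand-ins for the kernel's internals:

/*
 * Hypothetical sketch of the timestamp-gating suggested by the new
 * sched_domain->last_update field: skip an expensive recomputation
 * unless enough time has passed since the last one.
 */
#include <stdint.h>

struct demo_domain {
	uint64_t last_update;		/* time of last recomputation, in ns */
};

#define DEMO_RATELIMIT_NS 250000ULL	/* assumed 250us minimum interval */

static void demo_update_shares(struct demo_domain *sd, uint64_t now_ns)
{
	if (now_ns - sd->last_update < DEMO_RATELIMIT_NS)
		return;			/* rate-limited: keep cached values */
	sd->last_update = now_ns;
	/* ... expensive per-domain recomputation would run here ... */
}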
@@ -823,23 +824,6 @@ extern int arch_reinit_sched_domains(void);
#endif /* CONFIG_SMP */
-/*
- * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
- * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
- * task of nice 0 or enough lower priority tasks to bring up the
- * weighted_cpuload
- */
-static inline int above_background_load(void)
-{
- unsigned long cpu;
-
- for_each_online_cpu(cpu) {
- if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
- return 1;
- }
- return 0;
-}
-
struct io_context; /* See blkdev.h */
#define NGROUPS_SMALL 32
#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
@@ -921,8 +905,8 @@ struct sched_class {
void (*set_cpus_allowed)(struct task_struct *p,
const cpumask_t *newmask);
- void (*join_domain)(struct rq *rq);
- void (*leave_domain)(struct rq *rq);
+ void (*rq_online)(struct rq *rq);
+ void (*rq_offline)(struct rq *rq);
void (*switched_from) (struct rq *this_rq, struct task_struct *task,
int running);
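The join_domain/leave_domain pair is renamed to rq_online/rq_offline, naming the hooks after what actually happens: a runqueue being attached to or detached from the scheduler domains. A hedged sketch of the ops-table shape, where the demo_* bodies are stand-ins rather than the kernel's real rt/fair class implementations:

struct rq;					/* opaque runqueue */

struct demo_sched_class {
	void (*rq_online)(struct rq *rq);	/* rq attached to a sched domain */
	void (*rq_offline)(struct rq *rq);	/* rq detached from its domains */
};

static void demo_rq_online(struct rq *rq)
{
	/* e.g. resume pushing overloaded RT tasks to this runqueue */
}

static void demo_rq_offline(struct rq *rq)
{
	/* e.g. stop considering this runqueue for task placement */
}

static const struct demo_sched_class demo_class = {
	.rq_online	= demo_rq_online,
	.rq_offline	= demo_rq_offline,
};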
@@ -1039,6 +1023,7 @@ struct task_struct {
#endif
int prio, static_prio, normal_prio;
+ unsigned int rt_priority;
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
@@ -1122,7 +1107,6 @@ struct task_struct {
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- unsigned int rt_priority;
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
cputime_t prev_utime, prev_stime;
@@ -1141,12 +1125,12 @@ struct task_struct {
gid_t gid,egid,sgid,fsgid;
struct group_info *group_info;
kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
- unsigned securebits;
struct user_struct *user;
+ unsigned securebits;
#ifdef CONFIG_KEYS
+ unsigned char jit_keyring; /* default keyring to attach requested keys to */
struct key *request_key_auth; /* assumed request_key authority */
struct key *thread_keyring; /* keyring private to this thread */
- unsigned char jit_keyring; /* default keyring to attach requested keys to */
#endif
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
@@ -1233,8 +1217,8 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
- struct held_lock held_locks[MAX_LOCK_DEPTH];
unsigned int lockdep_recursion;
+ struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif
/* journalling filesystem info */
@@ -1262,10 +1246,6 @@ struct task_struct {
u64 acct_vm_mem1; /* accumulated virtual memory usage */
cputime_t acct_stimexpd;/* stime since last update */
#endif
-#ifdef CONFIG_NUMA
- struct mempolicy *mempolicy;
- short il_next;
-#endif
#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed;
int cpuset_mems_generation;
@@ -1285,6 +1265,10 @@ struct task_struct {
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
#endif
+#ifdef CONFIG_NUMA
+ struct mempolicy *mempolicy;
+ short il_next;
+#endif
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;
@@ -1504,6 +1488,7 @@ static inline void put_task_struct(struct task_struct *t)
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
+#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
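PF_THREAD_BOUND marks kernel threads pinned to a specific CPU; a plausible consumer is the affinity-setting path, which can then refuse to migrate such threads. A simplified sketch under that assumption, where demo_task and its toy cpumask stand in for task_struct and cpumask_t, and demo_set_cpus_allowed is illustrative rather than the kernel's set_cpus_allowed():

#include <errno.h>

#define DEMO_PF_THREAD_BOUND 0x04000000	/* flag value from the hunk above */

struct demo_task {
	unsigned int flags;
	unsigned long cpus_allowed;	/* toy cpumask: one bit per cpu */
};

static int demo_set_cpus_allowed(struct demo_task *p, unsigned long newmask)
{
	/* a thread bound to one cpu must not be moved by affinity requests */
	if ((p->flags & DEMO_PF_THREAD_BOUND) && newmask != p->cpus_allowed)
		return -EINVAL;
	p->cpus_allowed = newmask;
	return 0;
}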
@@ -1573,13 +1558,28 @@ static inline void sched_clock_idle_sleep_event(void)
static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
-#else
+
+#ifdef CONFIG_NO_HZ
+static inline void sched_clock_tick_stop(int cpu)
+{
+}
+
+static inline void sched_clock_tick_start(int cpu)
+{
+}
+#endif
+
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
extern void sched_clock_init(void);
extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#ifdef CONFIG_NO_HZ
+extern void sched_clock_tick_stop(int cpu);
+extern void sched_clock_tick_start(int cpu);
#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
/*
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
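The restructured #ifdef block above follows a common header pattern: configurations without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK get empty static inline stubs so callers never need conditional compilation, while configurations with it see extern declarations backed by real implementations. A standalone sketch of the same pattern, where DEMO_HAVE_UNSTABLE_CLOCK and the demo_* functions are assumptions, not kernel API:

#ifndef DEMO_HAVE_UNSTABLE_CLOCK
/* stable clock: callers may call unconditionally, the stubs compile away */
static inline void demo_clock_tick_stop(int cpu) { }
static inline void demo_clock_tick_start(int cpu) { }
#else
/* unstable clock: real implementations live in a .c file */
extern void demo_clock_tick_stop(int cpu);
extern void demo_clock_tick_start(int cpu);
#endif

Keeping the stubs inline means the NO_HZ tick code can call them on every path with zero cost when the architecture's clock is already stable.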
@@ -1622,6 +1622,7 @@ extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_shares_ratelimit;
int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length,