Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  27
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c62a9f8..115af05 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -190,6 +190,7 @@ extern unsigned long long time_sync_thresh;
/* in tsk->state again */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
+#define TASK_WAKING 256
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
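
TASK_WAKING marks a task that is in the middle of being woken up (and possibly migrated). A minimal sketch of testing for it, using a hypothetical task_is_waking() helper that is not part of this patch:

/* Hypothetical helper (assumption, not in this patch): TASK_WAKING is a
 * transient state, so a plain equality test on ->state is sufficient. */
#define task_is_waking(tsk)	((tsk)->state == TASK_WAKING)
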
@@ -256,7 +257,7 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
-extern int runqueue_is_locked(void);
+extern int runqueue_is_locked(int cpu);
extern void task_rq_unlock_wait(struct task_struct *p);
extern cpumask_var_t nohz_cpu_mask;
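
runqueue_is_locked() now takes the CPU whose runqueue should be checked rather than implying the current one. A hedged sketch of a caller under that assumption:

/* Illustrative caller: bail out if this CPU's runqueue lock is already
 * held, e.g. before doing work that would need to take it. */
if (runqueue_is_locked(raw_smp_processor_id()))
	return;
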
@@ -802,14 +803,14 @@ enum cpu_idle_type {
#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
-#define SD_WAKE_IDLE 0x0010 /* Wake to idle CPU on task wakeup */
+#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
-#define SD_WAKE_BALANCE 0x0040 /* Perform balancing at task wakeup */
+#define SD_PREFER_LOCAL 0x0040 /* Prefer to keep tasks local to this domain */
#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
-#define SD_WAKE_IDLE_FAR 0x0800 /* Gain latency sacrificing cache hit */
+
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
enum powersavings_balance_level {
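
With SD_WAKE_IDLE, SD_WAKE_BALANCE and SD_WAKE_IDLE_FAR gone, wakeup placement is expressed through SD_BALANCE_WAKE together with SD_WAKE_AFFINE and SD_PREFER_LOCAL. An illustrative (not authoritative) domain-flag initialization under the new names:

/* Illustrative only: a domain that balances on exec, fork and wakeup,
 * allows affine wakeups and prefers to keep tasks local. */
sd->flags = SD_LOAD_BALANCE | SD_BALANCE_EXEC | SD_BALANCE_FORK |
	    SD_BALANCE_WAKE  | SD_WAKE_AFFINE  | SD_PREFER_LOCAL;
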
@@ -991,6 +992,9 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag)
return 0;
}
+unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
+unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
+
#else /* CONFIG_SMP */
struct sched_domain_attr;
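
Exporting default_scale_freq_power() and default_scale_smt_power() lets architecture code reuse the generic cpu-power scaling from its own hooks. A sketch of such an override, assuming the arch_scale_freq_power() hook it is meant to pair with:

/* Sketch (assumption): an architecture override that simply falls back
 * to the generic frequency-based cpu_power scaling. */
unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return default_scale_freq_power(sd, cpu);
}
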
@@ -1002,6 +1006,7 @@ partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
}
#endif /* !CONFIG_SMP */
+
struct io_context; /* See blkdev.h */
@@ -1019,6 +1024,12 @@ struct uts_namespace;
struct rq;
struct sched_domain;
+/*
+ * wake flags
+ */
+#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
+#define WF_FORK 0x02 /* child wakeup after fork */
+
struct sched_class {
const struct sched_class *next;
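
The WF_* bits replace the old boolean sync argument, so a wakeup can be described more precisely by the time it reaches the sched_class callbacks below. A simplified, illustrative call:

/* Illustrative call (not from this patch): a synchronous waker passes
 * WF_SYNC, the fork path passes WF_FORK, an ordinary wakeup passes 0. */
check_preempt_curr(rq, p, WF_SYNC);
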
@@ -1026,13 +1037,13 @@ struct sched_class {
void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
void (*yield_task) (struct rq *rq);
- void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
+ void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
struct task_struct * (*pick_next_task) (struct rq *rq);
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
- int (*select_task_rq)(struct task_struct *p, int sync);
+ int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
struct rq *busiest, unsigned long max_load_move,
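
select_task_rq() now receives both the SD_BALANCE_* policy being applied and the WF_* wakeup bits. A sketch of how the core scheduler might invoke the hook at the three balancing points (variable names are illustrative):

/* Illustrative core-scheduler calls: sd_flag selects the balancing
 * policy, the last argument carries the WF_* wakeup description. */
cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);	/* wakeup */
cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);		/* fork   */
cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);		/* exec   */
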
@@ -1064,6 +1075,8 @@ struct sched_class {
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
int oldprio, int running);
+ unsigned int (*get_rr_interval) (struct task_struct *task);
+
#ifdef CONFIG_FAIR_GROUP_SCHED
void (*moved_group) (struct task_struct *p);
#endif
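
get_rr_interval() lets each scheduling class report its round-robin timeslice. A simplified sketch of a sched_rr_get_interval()-style caller (locals are illustrative):

/* Simplified sketch: ask the task's class for its timeslice in jiffies
 * and convert it for return to userspace. */
time_slice = p->sched_class->get_rr_interval(p);
jiffies_to_timespec(time_slice, &t);
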
@@ -1102,6 +1115,8 @@ struct sched_entity {
u64 start_runtime;
u64 avg_wakeup;
+ u64 avg_running;
+
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
u64 wait_max;
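
avg_running joins avg_wakeup as another decayed average kept per sched_entity. A sketch of the kind of update such a field typically receives; the helper name and shift are assumptions, not part of this header:

/* Assumed-style helper: fold a new sample into an exponentially
 * decayed average. */
static inline void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;

	*avg += diff >> 3;
}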