author    | Ralf Baechle <ralf@linux-mips.org> | 2007-07-19 21:28:35 +0200
----------|------------------------------------|--------------------------
committer | Ingo Molnar <mingo@elte.hu>        | 2007-07-19 21:28:35 +0200
commit    | c41917df8a1adde34864116ce2231a7fe308d2ff (patch) |
tree      | f0dd28ffef93117bacfbf8657ede880071de8e5d |
parent    | ce8c2293be47999584908069e78bf6d94beadc53 (diff) |
download  | op-kernel-dev-c41917df8a1adde34864116ce2231a7fe308d2ff.zip op-kernel-dev-c41917df8a1adde34864116ce2231a7fe308d2ff.tar.gz |
[PATCH] sched: sched_cacheflush is now unused
Since Ingo's recent scheduler rewrite, which was merged as commit
0437e109e1841607f2988891eaa36c531c6aa6ac, sched_cacheflush is unused.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
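For context, the removed hook existed only so that the old scheduler's migration-cost autodetection could time cache-cold memory accesses after flushing the CPU caches. The sketch below illustrates that kind of caller; it is an approximation for illustration, not the actual pre-rewrite kernel/sched.c code, and the touch_cache() helper is assumed.

```c
/*
 * Hypothetical sketch of the sole former caller: measure how long a
 * pass over a buffer takes when the CPU caches have just been flushed.
 * touch_cache() is an assumed helper; sched_clock() returns nanoseconds.
 */
static unsigned long long measure_cold_access(void *buf, unsigned long size)
{
	unsigned long long t0, t1;

	sched_cacheflush();		/* empty the CPU caches first */

	t0 = sched_clock();
	touch_cache(buf, size);		/* timed, cache-cold pass over buf */
	t1 = sched_clock();

	return t1 - t0;
}
```

With that measurement code gone from the scheduler, every per-architecture sched_cacheflush() implementation below is dead code.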
-rw-r--r-- | arch/ia64/kernel/setup.c      |  9
-rw-r--r-- | include/asm-alpha/system.h    | 10
-rw-r--r-- | include/asm-arm/system.h      | 10
-rw-r--r-- | include/asm-arm26/system.h    | 10
-rw-r--r-- | include/asm-i386/system.h     |  9
-rw-r--r-- | include/asm-ia64/system.h     |  1
-rw-r--r-- | include/asm-m32r/system.h     | 10
-rw-r--r-- | include/asm-mips/system.h     | 10
-rw-r--r-- | include/asm-parisc/system.h   | 11
-rw-r--r-- | include/asm-powerpc/system.h  | 10
-rw-r--r-- | include/asm-ppc/system.h      | 10
-rw-r--r-- | include/asm-s390/system.h     | 10
-rw-r--r-- | include/asm-sh/system.h       | 10
-rw-r--r-- | include/asm-sparc/system.h    | 10
-rw-r--r-- | include/asm-sparc64/system.h  | 10
-rw-r--r-- | include/asm-x86_64/system.h   |  9
16 files changed, 0 insertions, 149 deletions
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4d9864c..cf06fe79 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -980,15 +980,6 @@ cpu_init (void)
 	pm_idle = default_idle;
 }

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-void sched_cacheflush(void)
-{
-	ia64_sal_cache_flush(3);
-}
-
 void __init
 check_bugs (void)
 {
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index cf1021a..620c4d8 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -139,16 +139,6 @@ extern void halt(void) __attribute__((noreturn));
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 6f8e6a6..94ea8c6 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -254,16 +254,6 @@ do { \
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 } while (0)

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
 /*
  * On the StrongARM, "swp" is terminally broken since it bypasses the
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index 4703593..e09da5f 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -110,16 +110,6 @@ do { \
 } while (0)

 /*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-/*
  * Save the current interrupt enable state & disable IRQs
  */
 #define local_irq_save(x) \
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 94ed3686..609756c 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -310,15 +310,6 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- */
-static inline void sched_cacheflush(void)
-{
-	wbinvd();
-}
-
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 384fbf7..91bb8e0 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -259,7 +259,6 @@ extern void ia64_load_extra (struct task_struct *task);
 #define ia64_platform_is(x)	(strcmp(x, platform_name) == 0)

 void cpu_idle_wait(void);
-void sched_cacheflush(void);

 #define arch_align_stack(x) (x)
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 8ee73d3..2365de5 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -54,16 +54,6 @@
 	); \
 } while(0)

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 46bdb3f..7633916 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -71,16 +71,6 @@ do { \
 		write_c0_userlocal(task_thread_info(current)->tp_value);\
 } while(0)

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index 21fbfc5..ee80c92 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -48,17 +48,6 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 	(last) = _switch_to(prev, next);	\
 } while(0)

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-
 /* interrupt control */
 #define local_save_flags(x)	__asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
 #define local_irq_disable()	__asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 32aa42b..41520b7 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -184,16 +184,6 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index d84a3cf..f1311a8 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -129,16 +129,6 @@ extern struct task_struct *__switch_to(struct task_struct *,
 	struct task_struct *);
 #define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index bbe137c..64a3cd0 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -97,16 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next);					\
 } while (0)

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_vtime(struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 7c75045..2450425 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -64,16 +64,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	last = __last;						\
 } while (0)

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #ifdef CONFIG_CPU_SH4A
 #define __icbi()					\
 {							\
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 8b4e23b..d1a2572 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -165,16 +165,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 	} while(0)

 /*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-/*
  * Changing the IRQ level on the Sparc.
  */
 extern void local_irq_restore(unsigned long);
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 8ba380e..4090674 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -204,16 +204,6 @@ do {	if (test_thread_flag(TIF_PERFCTR)) {		\
 	}							\
 } while(0)

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index ead9f9a..e4f246d 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -111,15 +111,6 @@ static inline void write_cr4(unsigned long val)
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");

-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-static inline void sched_cacheflush(void)
-{
-	wbinvd();
-}
-
 #endif /* __KERNEL__ */

 #define nop() __asm__ __volatile__ ("nop")
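Note that on i386 and x86-64 the deleted helper was only a thin wrapper around wbinvd(), which the headers above still provide, so a future caller that genuinely needs a full cache flush could open-code the same thing. A minimal, hypothetical sketch (not part of this patch):

```c
/* Hypothetical x86-64 replacement: write back and invalidate all CPU caches. */
static inline void flush_whole_cache(void)
{
	wbinvd();	/* same primitive the removed sched_cacheflush() used */
}
```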