author      Paul Mackerras <paulus@samba.org>        2006-03-27 15:03:03 +1100
committer   Paul Mackerras <paulus@samba.org>        2006-03-27 15:03:03 +1100
commit      a0652fc9a28c3ef8cd59264bfcb089c44d1b0e06 (patch)
tree        a28527b65237b3067553a993f5ad06dfb24df044 /arch
parent      55aab8cd3a498201b769a19de861c77516bdfd45 (diff)
powerpc: Unify the 32 and 64 bit idle loops
This unifies the 32-bit (ARCH=ppc and ARCH=powerpc) and 64-bit idle
loops.  It brings over the concept of having a ppc_md.power_save
function from 32-bit to ARCH=powerpc, which lets us get rid of
native_idle().  With this we will also be able to simplify the idle
handling for pSeries and cell.

Signed-off-by: Paul Mackerras <paulus@samba.org>
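The resulting contract is small: a platform fills in ppc_md.power_save with
its low-power entry point (for example power4_idle in the maple and powermac
changes below) instead of supplying a whole idle loop, and the common
cpu_idle() in arch/powerpc/kernel/idle.c decides when it is safe to call it.
The self-contained userspace sketch below only illustrates that hook shape;
the fake_* names are invented for the example, and the real loop additionally
drops thread priority, re-checks need_resched() with interrupts disabled
before napping, and handles CPU hotplug (see the idle.c hunk below).

    #include <stdio.h>
    #include <stdbool.h>

    struct machdep_calls {
            void (*power_save)(void);       /* optional low-power hook */
    };

    static void fake_power4_nap(void)
    {
            puts("nap until the next interrupt");
    }

    static struct machdep_calls ppc_md = {
            .power_save = fake_power4_nap,
    };

    static bool fake_need_resched(void)
    {
            static int polls;
            return ++polls > 3;             /* pretend work arrives eventually */
    }

    /*
     * Shape of the unified loop: call the platform hook when one was
     * registered, otherwise just poll.
     */
    static void cpu_idle_once(void)
    {
            while (!fake_need_resched()) {
                    if (ppc_md.power_save)
                            ppc_md.power_save();
                    /* else: low-priority polling */
            }
            puts("resched requested, leaving idle");
    }

    int main(void)
    {
            cpu_idle_once();
            return 0;
    }

The point of the design is that sleeping versus polling becomes a per-platform
detail behind a single function pointer, which is what will let pSeries and
cell drop their private idle loops later, as the commit message notes.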
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/Makefile                                            |   6
-rw-r--r--  arch/powerpc/kernel/entry_32.S                                          |   8
-rw-r--r--  arch/powerpc/kernel/idle.c (renamed from arch/powerpc/kernel/idle_64.c) |  79
-rw-r--r--  arch/powerpc/kernel/idle_6xx.S                                          |  15
-rw-r--r--  arch/powerpc/kernel/idle_power4.S                                       |  15
-rw-r--r--  arch/powerpc/kernel/setup_32.c                                          |   7
-rw-r--r--  arch/powerpc/kernel/setup_64.c                                          |   6
-rw-r--r--  arch/powerpc/platforms/maple/setup.c                                    |   2
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c                                 |   2
-rw-r--r--  arch/ppc/Makefile                                                       |   2
-rw-r--r--  arch/ppc/kernel/Makefile                                                |   4
-rw-r--r--  arch/ppc/kernel/entry.S                                                 |   8
-rw-r--r--  arch/ppc/kernel/idle.c                                                  | 112
-rw-r--r--  arch/ppc/kernel/idle_6xx.S                                              | 233
-rw-r--r--  arch/ppc/kernel/idle_power4.S                                           |  91
15 files changed, 61 insertions(+), 529 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 80e9fe2..f2c47e90 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -12,12 +12,12 @@ endif
obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
- init_task.o process.o systbl.o
+ init_task.o process.o systbl.o idle.o
obj-y += vdso32/
obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o cpu_setup_power4.o \
- firmware.o sysfs.o idle_64.o
+ firmware.o sysfs.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
obj-$(CONFIG_POWER4) += idle_power4.o
@@ -34,6 +34,7 @@ obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_6xx) += idle_6xx.o
ifeq ($(CONFIG_PPC_MERGE),y)
@@ -51,7 +52,6 @@ obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o
obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
-obj-$(CONFIG_6xx) += idle_6xx.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4827ca1..b3a9794 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -135,10 +135,10 @@ transfer_to_handler:
mfspr r11,SPRN_HID0
mtcr r11
BEGIN_FTR_SECTION
- bt- 8,power_save_6xx_restore /* Check DOZE */
+ bt- 8,4f /* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
- bt- 9,power_save_6xx_restore /* Check NAP */
+ bt- 9,4f /* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
.globl transfer_to_handler_cont
@@ -157,6 +157,10 @@ transfer_to_handler_cont:
SYNC
RFI /* jump to handler, enable MMU */
+#ifdef CONFIG_6xx
+4: b power_save_6xx_restore
+#endif
+
/*
* On kernel stack overflow, load up an initial stack pointer
* and call StackOverflow(regs), which should not return.
diff --git a/arch/powerpc/kernel/idle_64.c b/arch/powerpc/kernel/idle.c
index b879d30..e9f321d 100644
--- a/arch/powerpc/kernel/idle_64.c
+++ b/arch/powerpc/kernel/idle.c
@@ -2,13 +2,17 @@
* Idle daemon for PowerPC. Idle daemon will handle any action
* that needs to be taken when the system becomes idle.
*
- * Originally Written by Cort Dougan (cort@cs.nmt.edu)
+ * Originally written by Cort Dougan (cort@cs.nmt.edu).
+ * Subsequent 32-bit hacking by Tom Rini, Armin Kuster,
+ * Paul Mackerras and others.
*
* iSeries supported added by Mike Corrigan <mikejc@us.ibm.com>
*
* Additional shared processor, SMT, and firmware support
* Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com>
*
+ * 32-bit and 64-bit versions merged by Paul Mackerras <paulus@samba.org>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -29,18 +33,43 @@
#include <asm/machdep.h>
#include <asm/smp.h>
-extern void power4_idle(void);
+#ifdef CONFIG_HOTPLUG_CPU
+#define cpu_should_die() (cpu_is_offline(smp_processor_id()) && \
+ system_state == SYSTEM_RUNNING)
+#else
+#define cpu_should_die() 0
+#endif
-void default_idle(void)
+/*
+ * The body of the idle task.
+ */
+void cpu_idle(void)
{
- unsigned int cpu = smp_processor_id();
- set_thread_flag(TIF_POLLING_NRFLAG);
+ if (ppc_md.idle_loop)
+ ppc_md.idle_loop(); /* doesn't return */
+ set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
- if (!need_resched()) {
- while (!need_resched() && !cpu_is_offline(cpu)) {
- ppc64_runlatch_off();
+ ppc64_runlatch_off();
+ while (!need_resched() && !cpu_should_die()) {
+ if (ppc_md.power_save) {
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ /*
+ * smp_mb is so clearing of TIF_POLLING_NRFLAG
+ * is ordered w.r.t. need_resched() test.
+ */
+ smp_mb();
+ local_irq_disable();
+
+ /* check again after disabling irqs */
+ if (!need_resched() && !cpu_should_die())
+ ppc_md.power_save();
+
+ local_irq_enable();
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ } else {
/*
* Go into low thread priority and possibly
* low power mode.
@@ -48,46 +77,18 @@ void default_idle(void)
HMT_low();
HMT_very_low();
}
-
- HMT_medium();
}
+ HMT_medium();
ppc64_runlatch_on();
+ if (cpu_should_die())
+ cpu_die();
preempt_enable_no_resched();
schedule();
preempt_disable();
- if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
- cpu_die();
}
}
-void native_idle(void)
-{
- while (1) {
- ppc64_runlatch_off();
-
- if (!need_resched())
- power4_idle();
-
- if (need_resched()) {
- ppc64_runlatch_on();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
-
- if (cpu_is_offline(smp_processor_id()) &&
- system_state == SYSTEM_RUNNING)
- cpu_die();
- }
-}
-
-void cpu_idle(void)
-{
- BUG_ON(NULL == ppc_md.idle_loop);
- ppc_md.idle_loop();
-}
-
int powersave_nap;
#ifdef CONFIG_SYSCTL
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 444fdcc..1647ea3 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -87,19 +87,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
cmpwi 0,r3,0
beqlr
- /* Clear MSR:EE */
- mfmsr r7
- rlwinm r0,r7,0,17,15
- mtmsr r0
-
- /* Check current_thread_info()->flags */
- rlwinm r4,r1,0,0,18
- lwz r4,TI_FLAGS(r4)
- andi. r0,r4,_TIF_NEED_RESCHED
- beq 1f
- mtmsr r7 /* out of line this ? */
- blr
-1:
/* Some pre-nap cleanups needed on some CPUs */
andis. r0,r3,HID0_NAP@h
beq 2f
@@ -220,8 +207,6 @@ _GLOBAL(nap_save_msscr0)
_GLOBAL(nap_save_hid1)
.space 4*NR_CPUS
-_GLOBAL(powersave_nap)
- .long 0
_GLOBAL(powersave_lowspeed)
.long 0
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index c16b4af..692cf2e 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -49,21 +49,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
cmpwi 0,r4,0
beqlr
- /* Clear MSR:EE */
- mfmsr r7
- li r4,0
- ori r4,r4,MSR_EE
- andc r0,r7,r4
- mtmsrd r0
-
- /* Check current_thread_info()->flags */
- clrrdi r4,r1,THREAD_SHIFT
- ld r4,TI_FLAGS(r4)
- andi. r0,r4,_TIF_NEED_RESCHED
- beq 1f
- mtmsrd r7 /* out of line this ? */
- blr
-1:
/* Go to NAP now */
BEGIN_FTR_SECTION
DSSALL
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 676f894..e39f830 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -53,9 +53,6 @@
extern void platform_init(void);
extern void bootx_init(unsigned long r4, unsigned long phys);
-extern void ppc6xx_idle(void);
-extern void power4_idle(void);
-
boot_infos_t *boot_infos;
struct ide_machdep_calls ppc_ide_md;
@@ -194,7 +191,9 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
platform_init();
#ifdef CONFIG_6xx
- ppc_md.power_save = ppc6xx_idle;
+ if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
+ cpu_has_feature(CPU_FTR_CAN_NAP))
+ ppc_md.power_save = ppc6xx_idle;
#endif
if (ppc_md.progress)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 6c9b093..5b63a86 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -607,12 +607,6 @@ void __init setup_arch(char **cmdline_p)
ppc_md.setup_arch();
- /* Use the default idle loop if the platform hasn't provided one. */
- if (NULL == ppc_md.idle_loop) {
- ppc_md.idle_loop = default_idle;
- printk(KERN_INFO "Using default idle loop\n");
- }
-
paging_init();
ppc64_boot_msg(0x15, "Setup Done");
}
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index ec5c1e1..137d606 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -290,7 +290,7 @@ struct machdep_calls __initdata maple_md = {
.get_rtc_time = maple_get_rtc_time,
.calibrate_decr = generic_calibrate_decr,
.progress = maple_progress,
- .idle_loop = native_idle,
+ .power_save = power4_idle,
#ifdef CONFIG_KEXEC
.machine_kexec = default_machine_kexec,
.machine_kexec_prepare = default_machine_kexec_prepare,
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 385aab9..c2696d0 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -733,7 +733,7 @@ struct machdep_calls __initdata pmac_md = {
.progress = udbg_progress,
#ifdef CONFIG_PPC64
.pci_probe_mode = pmac_pci_probe_mode,
- .idle_loop = native_idle,
+ .power_save = power4_idle,
.enable_pmcs = power4_enable_pmcs,
#ifdef CONFIG_KEXEC
.machine_kexec = default_machine_kexec,
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index 9fbdf54..cde5fa8 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -59,8 +59,6 @@ head-$(CONFIG_4xx) := arch/ppc/kernel/head_4xx.o
head-$(CONFIG_44x) := arch/ppc/kernel/head_44x.o
head-$(CONFIG_FSL_BOOKE) := arch/ppc/kernel/head_fsl_booke.o
-head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o
-head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o
head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
core-y += arch/ppc/kernel/ arch/powerpc/kernel/ \
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index e399bbb..1b2c745 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -8,10 +8,9 @@ extra-$(CONFIG_40x) := head_4xx.o
extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
-extra-$(CONFIG_6xx) += idle_6xx.o
extra-y += vmlinux.lds
-obj-y := entry.o traps.o idle.o time.o misc.o \
+obj-y := entry.o traps.o time.o misc.o \
setup.o \
ppc_htab.o
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
@@ -35,7 +34,6 @@ endif
# These are here while we do the architecture merge
else
-obj-y := idle.o
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 3a28159..fa8d497 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -135,10 +135,10 @@ transfer_to_handler:
mfspr r11,SPRN_HID0
mtcr r11
BEGIN_FTR_SECTION
- bt- 8,power_save_6xx_restore /* Check DOZE */
+ bt- 8,4f /* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
- bt- 9,power_save_6xx_restore /* Check NAP */
+ bt- 9,4f /* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
.globl transfer_to_handler_cont
@@ -157,6 +157,10 @@ transfer_to_handler_cont:
SYNC
RFI /* jump to handler, enable MMU */
+#ifdef CONFIG_6xx
+4: b power_save_6xx_restore
+#endif
+
/*
* On kernel stack overflow, load up an initial stack pointer
* and call StackOverflow(regs), which should not return.
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
deleted file mode 100644
index 1be3ca5..0000000
--- a/arch/ppc/kernel/idle.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Idle daemon for PowerPC. Idle daemon will handle any action
- * that needs to be taken when the system becomes idle.
- *
- * Written by Cort Dougan (cort@cs.nmt.edu). Subsequently hacked
- * on by Tom Rini, Armin Kuster, Paul Mackerras and others.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/sysctl.h>
-#include <linux/cpu.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/mmu.h>
-#include <asm/cache.h>
-#include <asm/cputable.h>
-#include <asm/machdep.h>
-#include <asm/smp.h>
-
-void default_idle(void)
-{
- void (*powersave)(void);
-
- powersave = ppc_md.power_save;
-
- if (!need_resched()) {
- if (powersave != NULL)
- powersave();
-#ifdef CONFIG_SMP
- else {
- set_thread_flag(TIF_POLLING_NRFLAG);
- while (!need_resched() &&
- !cpu_is_offline(smp_processor_id()))
- barrier();
- clear_thread_flag(TIF_POLLING_NRFLAG);
- }
-#endif
- }
-}
-
-/*
- * The body of the idle task.
- */
-void cpu_idle(void)
-{
- int cpu = smp_processor_id();
-
- for (;;) {
- while (!need_resched()) {
- if (ppc_md.idle != NULL)
- ppc_md.idle();
- else
- default_idle();
- }
-
- if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
- cpu_die();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
-}
-
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_6xx)
-/*
- * Register the sysctl to set/clear powersave_nap.
- */
-extern int powersave_nap;
-
-static ctl_table powersave_nap_ctl_table[]={
- {
- .ctl_name = KERN_PPC_POWERSAVE_NAP,
- .procname = "powersave-nap",
- .data = &powersave_nap,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- { 0, },
-};
-static ctl_table powersave_nap_sysctl_root[] = {
- { 1, "kernel", NULL, 0, 0755, powersave_nap_ctl_table, },
- { 0,},
-};
-
-static int __init
-register_powersave_nap_sysctl(void)
-{
- register_sysctl_table(powersave_nap_sysctl_root, 0);
-
- return 0;
-}
-
-__initcall(register_powersave_nap_sysctl);
-#endif
diff --git a/arch/ppc/kernel/idle_6xx.S b/arch/ppc/kernel/idle_6xx.S
deleted file mode 100644
index 1a2194c..0000000
--- a/arch/ppc/kernel/idle_6xx.S
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * This file contains the power_save function for 6xx & 7xxx CPUs
- * rewritten in assembler
- *
- * Warning ! This code assumes that if your machine has a 750fx
- * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
- * if this is not the case some additional changes will have to
- * be done to check a runtime var (a bit like powersave-nap)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-
-#undef DEBUG
-
- .text
-
-/*
- * Init idle, called at early CPU setup time from head.S for each CPU
- * Make sure no rest of NAP mode remains in HID0, save default
- * values for some CPU specific registers. Called with r24
- * containing CPU number and r3 reloc offset
- */
-_GLOBAL(init_idle_6xx)
-BEGIN_FTR_SECTION
- mfspr r4,SPRN_HID0
- rlwinm r4,r4,0,10,8 /* Clear NAP */
- mtspr SPRN_HID0, r4
- b 1f
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
- blr
-1:
- slwi r5,r24,2
- add r5,r5,r3
-BEGIN_FTR_SECTION
- mfspr r4,SPRN_MSSCR0
- addis r6,r5, nap_save_msscr0@ha
- stw r4,nap_save_msscr0@l(r6)
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-BEGIN_FTR_SECTION
- mfspr r4,SPRN_HID1
- addis r6,r5,nap_save_hid1@ha
- stw r4,nap_save_hid1@l(r6)
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
- blr
-
-/*
- * Here is the power_save_6xx function. This could eventually be
- * split into several functions & changing the function pointer
- * depending on the various features.
- */
-_GLOBAL(ppc6xx_idle)
- /* Check if we can nap or doze, put HID0 mask in r3
- */
- lis r3, 0
-BEGIN_FTR_SECTION
- lis r3,HID0_DOZE@h
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
- /* We must dynamically check for the NAP feature as it
- * can be cleared by CPU init after the fixups are done
- */
- lis r4,cur_cpu_spec@ha
- lwz r4,cur_cpu_spec@l(r4)
- lwz r4,CPU_SPEC_FEATURES(r4)
- andi. r0,r4,CPU_FTR_CAN_NAP
- beq 1f
- /* Now check if user or arch enabled NAP mode */
- lis r4,powersave_nap@ha
- lwz r4,powersave_nap@l(r4)
- cmpwi 0,r4,0
- beq 1f
- lis r3,HID0_NAP@h
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
- cmpwi 0,r3,0
- beqlr
-
- /* Clear MSR:EE */
- mfmsr r7
- rlwinm r0,r7,0,17,15
- mtmsr r0
-
- /* Check current_thread_info()->flags */
- rlwinm r4,r1,0,0,18
- lwz r4,TI_FLAGS(r4)
- andi. r0,r4,_TIF_NEED_RESCHED
- beq 1f
- mtmsr r7 /* out of line this ? */
- blr
-1:
- /* Some pre-nap cleanups needed on some CPUs */
- andis. r0,r3,HID0_NAP@h
- beq 2f
-BEGIN_FTR_SECTION
- /* Disable L2 prefetch on some 745x and try to ensure
- * L2 prefetch engines are idle. As explained by errata
- * text, we can't be sure they are, we just hope very hard
- * that well be enough (sic !). At least I noticed Apple
- * doesn't even bother doing the dcbf's here...
- */
- mfspr r4,SPRN_MSSCR0
- rlwinm r4,r4,0,0,29
- sync
- mtspr SPRN_MSSCR0,r4
- sync
- isync
- lis r4,KERNELBASE@h
- dcbf 0,r4
- dcbf 0,r4
- dcbf 0,r4
- dcbf 0,r4
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-#ifdef DEBUG
- lis r6,nap_enter_count@ha
- lwz r4,nap_enter_count@l(r6)
- addi r4,r4,1
- stw r4,nap_enter_count@l(r6)
-#endif
-2:
-BEGIN_FTR_SECTION
- /* Go to low speed mode on some 750FX */
- lis r4,powersave_lowspeed@ha
- lwz r4,powersave_lowspeed@l(r4)
- cmpwi 0,r4,0
- beq 1f
- mfspr r4,SPRN_HID1
- oris r4,r4,0x0001
- mtspr SPRN_HID1,r4
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
-
- /* Go to NAP or DOZE now */
- mfspr r4,SPRN_HID0
- lis r5,(HID0_NAP|HID0_SLEEP)@h
-BEGIN_FTR_SECTION
- oris r5,r5,HID0_DOZE@h
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
- andc r4,r4,r5
- or r4,r4,r3
-BEGIN_FTR_SECTION
- oris r4,r4,HID0_DPM@h /* that should be done once for all */
-END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
- mtspr SPRN_HID0,r4
-BEGIN_FTR_SECTION
- DSSALL
- sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- ori r7,r7,MSR_EE /* Could be ommited (already set) */
- oris r7,r7,MSR_POW@h
- sync
- isync
- mtmsr r7
- isync
- sync
- blr
-
-/*
- * Return from NAP/DOZE mode, restore some CPU specific registers,
- * we are called with DR/IR still off and r2 containing physical
- * address of current.
- */
-_GLOBAL(power_save_6xx_restore)
- mfspr r11,SPRN_HID0
- rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
- cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
-BEGIN_FTR_SECTION
- rlwinm r11,r11,0,9,7 /* Clear DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
- mtspr SPRN_HID0, r11
-
-#ifdef DEBUG
- beq cr1,1f
- lis r11,(nap_return_count-KERNELBASE)@ha
- lwz r9,nap_return_count@l(r11)
- addi r9,r9,1
- stw r9,nap_return_count@l(r11)
-1:
-#endif
-
- rlwinm r9,r1,0,0,18
- tophys(r9,r9)
- lwz r11,TI_CPU(r9)
- slwi r11,r11,2
- /* Todo make sure all these are in the same page
- * and load r22 (@ha part + CPU offset) only once
- */
-BEGIN_FTR_SECTION
- beq cr1,1f
- addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
- lwz r9,nap_save_msscr0@l(r9)
- mtspr SPRN_MSSCR0, r9
- sync
- isync
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
-BEGIN_FTR_SECTION
- addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
- lwz r9,nap_save_hid1@l(r9)
- mtspr SPRN_HID1, r9
-END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
- b transfer_to_handler_cont
-
- .data
-
-_GLOBAL(nap_save_msscr0)
- .space 4*NR_CPUS
-
-_GLOBAL(nap_save_hid1)
- .space 4*NR_CPUS
-
-_GLOBAL(powersave_nap)
- .long 0
-_GLOBAL(powersave_lowspeed)
- .long 0
-
-#ifdef DEBUG
-_GLOBAL(nap_enter_count)
- .space 4
-_GLOBAL(nap_return_count)
- .space 4
-#endif
diff --git a/arch/ppc/kernel/idle_power4.S b/arch/ppc/kernel/idle_power4.S
deleted file mode 100644
index cc0d535..0000000
--- a/arch/ppc/kernel/idle_power4.S
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * This file contains the power_save function for 6xx & 7xxx CPUs
- * rewritten in assembler
- *
- * Warning ! This code assumes that if your machine has a 750fx
- * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
- * if this is not the case some additional changes will have to
- * be done to check a runtime var (a bit like powersave-nap)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-
-#undef DEBUG
-
- .text
-
-/*
- * Init idle, called at early CPU setup time from head.S for each CPU
- * So nothing for now. Called with r24 containing CPU number and r3
- * reloc offset
- */
- .globl init_idle_power4
-init_idle_power4:
- blr
-
-/*
- * Here is the power_save_6xx function. This could eventually be
- * split into several functions & changing the function pointer
- * depending on the various features.
- */
- .globl power4_idle
-power4_idle:
-BEGIN_FTR_SECTION
- blr
-END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
- /* We must dynamically check for the NAP feature as it
- * can be cleared by CPU init after the fixups are done
- */
- lis r4,cur_cpu_spec@ha
- lwz r4,cur_cpu_spec@l(r4)
- lwz r4,CPU_SPEC_FEATURES(r4)
- andi. r0,r4,CPU_FTR_CAN_NAP
- beqlr
- /* Now check if user or arch enabled NAP mode */
- lis r4,powersave_nap@ha
- lwz r4,powersave_nap@l(r4)
- cmpwi 0,r4,0
- beqlr
-
- /* Clear MSR:EE */
- mfmsr r7
- rlwinm r0,r7,0,17,15
- mtmsr r0
-
- /* Check current_thread_info()->flags */
- rlwinm r4,r1,0,0,18
- lwz r4,TI_FLAGS(r4)
- andi. r0,r4,_TIF_NEED_RESCHED
- beq 1f
- mtmsr r7 /* out of line this ? */
- blr
-1:
- /* Go to NAP now */
-BEGIN_FTR_SECTION
- DSSALL
- sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- ori r7,r7,MSR_EE /* Could be ommited (already set) */
- oris r7,r7,MSR_POW@h
- sync
- isync
- mtmsr r7
- isync
- sync
- blr
-
- .globl powersave_nap
-powersave_nap:
- .long 0