From 9420dc65ff9e6b67c032286efde823aeb8684670 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Mon, 30 Jul 2007 08:18:25 +1000 Subject: [POWERPC] Clean out a bunch of duplicate includes This removes several duplicate includes from arch/powerpc/. Signed-off-by: Jesper Juhl Acked-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/btext.c | 1 - arch/powerpc/kernel/crash.c | 1 - arch/powerpc/kernel/iommu.c | 1 - arch/powerpc/kernel/prom.c | 1 - arch/powerpc/kernel/time.c | 1 - 5 files changed, 5 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index e7b6846..3ef51fb 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 37658ea..77c749a 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index c08ceca..e4ec6eee 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index a38197b..0028fe6 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -52,7 +52,6 @@ #include #include #include -#include #ifdef DEBUG #define DBG(fmt...) printk(KERN_ERR fmt) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 727a669..c85d9b0 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -72,7 +72,6 @@ #include #include #endif -#include /* keep track of when we need to update the rtc */ time_t last_rtc_update; -- cgit v1.1 From 72755f44075d34cdb9bc467c6cd9a229292b5aff Mon Sep 17 00:00:00 2001 From: Linas Vepstas Date: Thu, 9 Aug 2007 06:04:48 +1000 Subject: [POWERPC] Remove nvram forward declarations Forward declarations serve no purpose in this file. Signed-off-by: Linas Vepstas ---- arch/powerpc/kernel/nvram_64.c | 5 ----- 1 file changed, 5 deletions(-) Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/nvram_64.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index f9676f5..ba97990 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -34,11 +34,6 @@ #undef DEBUG_NVRAM -static int nvram_scan_partitions(void); -static int nvram_setup_partition(void); -static int nvram_create_os_partition(void); -static int nvram_remove_os_partition(void); - static struct nvram_partition * nvram_part; static long nvram_error_log_index = -1; static long nvram_error_log_size = 0; -- cgit v1.1 From 79c0108d1b9db4864ab77b2a95dfa04f2dcf264c Mon Sep 17 00:00:00 2001 From: Linas Vepstas Date: Thu, 9 Aug 2007 06:06:15 +1000 Subject: [POWERPC] pseries: Fix jumbled no_logging flag Get rid of the jumbled usage of the no_logging flag. Its use spans several directories, and is incorrectly/misleadingly documented. Instead, two changes: 1) nvram will accept error log as soon as its ready. 2) logging to nvram stops on the first fatal error reported. 
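A minimal userspace sketch of point 1), assuming only the nvram_error_log_index guard that the hunk below leaves in place; the stub, main() and printed values are illustrative and are not kernel code:

#include <errno.h>
#include <stdio.h>

static long nvram_error_log_index = -1;	/* set once the partition scan finds the error-log partition */

static int nvram_write_error_log_stub(const char *buf, int length)
{
	/* no separate no_logging flag: refuse writes until nvram is ready */
	if (nvram_error_log_index == -1)
		return -ESPIPE;
	printf("would log %d bytes at partition index %ld\n",
	       length, nvram_error_log_index);
	return 0;
}

int main(void)
{
	char dummy[1] = { 0 };

	printf("before scan: %d\n", nvram_write_error_log_stub(dummy, 1));
	nvram_error_log_index = 0;	/* pretend the partition scan succeeded */
	printf("after scan:  %d\n", nvram_write_error_log_stub(dummy, 1));
	return 0;
}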
Signed-off-by: Linas Vepstas ---- arch/powerpc/kernel/nvram_64.c | 8 -------- arch/powerpc/platforms/pseries/rtasd.c | 14 ++++++-------- 2 files changed, 6 insertions(+), 16 deletions(-) Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/nvram_64.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index ba97990..4a4d785 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -38,10 +38,6 @@ static struct nvram_partition * nvram_part; static long nvram_error_log_index = -1; static long nvram_error_log_size = 0; -int no_logging = 1; /* Until we initialize everything, - * make sure we don't try logging - * anything */ - extern volatile int error_log_cnt; struct err_log_info { @@ -637,10 +633,6 @@ int nvram_write_error_log(char * buff, int length, unsigned int err_type) loff_t tmp_index; struct err_log_info info; - if (no_logging) { - return -EPERM; - } - if (nvram_error_log_index == -1) { return -ESPIPE; } -- cgit v1.1 From 0f2342c85df4248bc1cd72421b13969a0782ed6a Mon Sep 17 00:00:00 2001 From: Linas Vepstas Date: Fri, 10 Aug 2007 06:56:41 +1000 Subject: [POWERPC] pseries: Eliminate global error_log_cnt variable Eliminate the use of error_log_cnt as a global var shared across different directories. Pass it as a parameter instead. Signed-off-by: Linas Vepstas ---- Respin of earlier patch, with the CONFIG_PSERIES junk removed from the header file. arch/powerpc/kernel/nvram_64.c | 10 +++++----- arch/powerpc/platforms/pseries/rtasd.c | 7 ++++--- include/asm-powerpc/nvram.h | 6 ++++-- 3 files changed, 13 insertions(+), 10 deletions(-) Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/nvram_64.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 4a4d785..0ed31f2 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -38,8 +38,6 @@ static struct nvram_partition * nvram_part; static long nvram_error_log_index = -1; static long nvram_error_log_size = 0; -extern volatile int error_log_cnt; - struct err_log_info { int error_type; unsigned int seq_num; @@ -627,7 +625,8 @@ void __exit nvram_cleanup(void) * sequence #: The unique sequence # for each event. 
(until it wraps) * error log: The error log from event_scan */ -int nvram_write_error_log(char * buff, int length, unsigned int err_type) +int nvram_write_error_log(char * buff, int length, + unsigned int err_type, unsigned int error_log_cnt) { int rc; loff_t tmp_index; @@ -665,7 +664,8 @@ int nvram_write_error_log(char * buff, int length, unsigned int err_type) * * Reads nvram for error log for at most 'length' */ -int nvram_read_error_log(char * buff, int length, unsigned int * err_type) +int nvram_read_error_log(char * buff, int length, + unsigned int * err_type, unsigned int * error_log_cnt) { int rc; loff_t tmp_index; @@ -691,7 +691,7 @@ int nvram_read_error_log(char * buff, int length, unsigned int * err_type) return rc; } - error_log_cnt = info.seq_num; + *error_log_cnt = info.seq_num; *err_type = info.error_type; return 0; -- cgit v1.1 From 8674e0c9e570fcce948518e8fe518f4b61ac91fe Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 10 Aug 2007 05:18:36 +1000 Subject: [POWERPC] rtas_pci_ops: Use named structure member initializers Signed-off-by: Nathan Lynch Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/rtas_pci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index a5de621..21f14e5 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -171,8 +171,8 @@ static int rtas_pci_write_config(struct pci_bus *bus, } struct pci_ops rtas_pci_ops = { - rtas_pci_read_config, - rtas_pci_write_config + .read = rtas_pci_read_config, + .write = rtas_pci_write_config, }; int is_python(struct device_node *dev) -- cgit v1.1 From 6127d1c0b097b2e9acc83dede1c1cb64ce76e7d5 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 10 Aug 2007 05:18:42 +1000 Subject: [POWERPC] null_pci_ops: Use named structure member initializers Signed-off-by: Nathan Lynch Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/pci_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 04a3109..0e2bee4 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -1457,8 +1457,8 @@ null_write_config(struct pci_bus *bus, unsigned int devfn, int offset, static struct pci_ops null_pci_ops = { - null_read_config, - null_write_config + .read = null_read_config, + .write = null_write_config, }; /* -- cgit v1.1 From c6d4267eced79775399f256fbb4adb671e9b597e Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 10 Aug 2007 14:07:38 +1000 Subject: [POWERPC] Handle alignment faults on new FP load/store instructions This adds code to handle alignment traps generated by the following new floating-point load/store instructions, by emulating the instruction in the kernel (as is done for other instructions that generate alignment traps): lfiwax load floating-point as integer word algebraic indexed stfiwx store floating-point as integer word indexed lfdp load floating-point double pair lfdpx load floating-point double pair indexed stfdp store floating-point double pair stfdpx store floating-point double pair indexed All these except stfiwx are new in POWER6. lfdp/lfdpx/stfdp/stfdpx load and store 16 bytes of memory into an even/odd FP register pair. In little-endian mode each 8-byte value is byte-reversed separately (i.e. not as a 16-byte unit). 
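A small userspace illustration of that byte ordering, assuming nothing beyond the ptr[i ^ 7] indexing used by emulate_fp_pair() in the hunk below (the buffers and output here are purely demonstrative): byte i of storage maps to byte i ^ 7 of the register image, which reverses bytes within each 8-byte half of the 16-byte pair but never across the boundary between the two halves.

#include <stdio.h>

int main(void)
{
	unsigned char mem[16], reg[16];
	int i;

	for (i = 0; i < 16; i++)
		mem[i] = i;			/* storage bytes 00 01 ... 0f */

	for (i = 0; i < 16; i++)
		reg[i ^ 7] = mem[i];		/* per-doubleword byte reverse */

	for (i = 0; i < 16; i++)
		printf("%02x%s", reg[i], i == 15 ? "\n" : " ");
	/* prints: 07 06 05 04 03 02 01 00 0f 0e 0d 0c 0b 0a 09 08 */
	return 0;
}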
lfiwax/stfiwx load or store the lower 4 bytes of a floating-point register from/to memory; lfiwax sets the upper 4 bytes of the FP register to the sign extension of the value loaded. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/align.c | 57 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 8 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 5c9ff7f..4c47f9c 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -38,7 +38,7 @@ struct aligninfo { /* Bits in the flags field */ #define LD 0 /* load */ #define ST 1 /* store */ -#define SE 2 /* sign-extend value */ +#define SE 2 /* sign-extend value, or FP ld/st as word */ #define F 4 /* to/from fp regs */ #define U 8 /* update index register */ #define M 0x10 /* multiple load/store */ @@ -87,9 +87,9 @@ static struct aligninfo aligninfo[128] = { { 8, LD+F+U }, /* 00 1 1001: lfdu */ { 4, ST+F+S+U }, /* 00 1 1010: stfsu */ { 8, ST+F+U }, /* 00 1 1011: stfdu */ - INVALID, /* 00 1 1100 */ + { 16, LD+F }, /* 00 1 1100: lfdp */ INVALID, /* 00 1 1101 */ - INVALID, /* 00 1 1110 */ + { 16, ST+F }, /* 00 1 1110: stfdp */ INVALID, /* 00 1 1111 */ { 8, LD }, /* 01 0 0000: ldx */ INVALID, /* 01 0 0001 */ @@ -167,10 +167,10 @@ static struct aligninfo aligninfo[128] = { { 8, LD+F }, /* 11 0 1001: lfdx */ { 4, ST+F+S }, /* 11 0 1010: stfsx */ { 8, ST+F }, /* 11 0 1011: stfdx */ - INVALID, /* 11 0 1100 */ - { 8, LD+M }, /* 11 0 1101: lmd */ - INVALID, /* 11 0 1110 */ - { 8, ST+M }, /* 11 0 1111: stmd */ + { 16, LD+F }, /* 11 0 1100: lfdpx */ + { 4, LD+F+SE }, /* 11 0 1101: lfiwax */ + { 16, ST+F }, /* 11 0 1110: stfdpx */ + { 4, ST+F }, /* 11 0 1111: stfiwx */ { 4, LD+U }, /* 11 1 0000: lwzux */ INVALID, /* 11 1 0001 */ { 4, ST+U }, /* 11 1 0010: stwux */ @@ -356,6 +356,42 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, return 1; } +/* + * Emulate floating-point pair loads and stores. + * Only POWER6 has these instructions, and it does true little-endian, + * so we don't need the address swizzling. + */ +static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr, + unsigned int reg, unsigned int flags) +{ + char *ptr = (char *) ¤t->thread.fpr[reg]; + int i, ret; + + if (!(flags & F)) + return 0; + if (reg & 1) + return 0; /* invalid form: FRS/FRT must be even */ + if (!(flags & SW)) { + /* not byte-swapped - easy */ + if (!(flags & ST)) + ret = __copy_from_user(ptr, addr, 16); + else + ret = __copy_to_user(addr, ptr, 16); + } else { + /* each FPR value is byte-swapped separately */ + ret = 0; + for (i = 0; i < 16; ++i) { + if (!(flags & ST)) + ret |= __get_user(ptr[i^7], addr + i); + else + ret |= __put_user(ptr[i^7], addr + i); + } + } + if (ret) + return -EFAULT; + return 1; /* exception handled and fixed up */ +} + /* * Called on alignment exception. 
Attempts to fixup @@ -471,6 +507,10 @@ int fix_alignment(struct pt_regs *regs) flush_fp_to_thread(current); } + /* Special case for 16-byte FP loads and stores */ + if (nb == 16) + return emulate_fp_pair(regs, addr, reg, flags); + /* If we are loading, get the data from user space, else * get it from register values */ @@ -531,7 +571,8 @@ int fix_alignment(struct pt_regs *regs) * or floating point single precision conversion */ switch (flags & ~(U|SW)) { - case LD+SE: /* sign extend */ + case LD+SE: /* sign extending integer loads */ + case LD+F+SE: /* sign extend for lfiwax */ if ( nb == 2 ) data.ll = data.x16.low16; else /* nb must be 4 */ -- cgit v1.1 From d56c3aaa9f660e5f5f295da74f5d3db510de0ede Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 15 Aug 2007 20:53:26 +1000 Subject: [POWERPC] Fix section mismatch in crash_dump.c WARNING: vmlinux.o(.text+0x23258): Section mismatch: reference to .init.text:.lmb_reserve (between '.reserve_kdump_trampoline' and '.restore_processor_state') Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/crash_dump.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 2f6f5a7..ffa91d6 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -25,7 +25,7 @@ #define DBG(fmt...) #endif -void reserve_kdump_trampoline(void) +void __init reserve_kdump_trampoline(void) { lmb_reserve(0, KDUMP_RESERVE_LIMIT); } -- cgit v1.1 From 15f6527e8e63e793f8ab1ddce4ed3c487ebd0d42 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Mon, 20 Aug 2007 07:27:07 -0500 Subject: [POWERPC] Rename 4xx paths to 40x 4xx is a bit of a misnomer for certain things, as they really apply to PowerPC 40x only. Rename some of the files to clean this up. Signed-off-by: Josh Boyer Acked-by: David Gibson --- arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/head_40x.S | 1021 ++++++++++++++++++++++++++++++++++++++++ arch/powerpc/kernel/head_4xx.S | 1021 ---------------------------------------- 3 files changed, 1022 insertions(+), 1022 deletions(-) create mode 100644 arch/powerpc/kernel/head_40x.S delete mode 100644 arch/powerpc/kernel/head_4xx.S (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index b0cb2e6..a4850dd 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -46,7 +46,7 @@ ifeq ($(CONFIG_PPC_MERGE),y) extra-$(CONFIG_PPC_STD_MMU) := head_32.o extra-$(CONFIG_PPC64) := head_64.o -extra-$(CONFIG_40x) := head_4xx.o +extra-$(CONFIG_40x) := head_40x.o extra-$(CONFIG_44x) := head_44x.o extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o extra-$(CONFIG_8xx) := head_8xx.o diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S new file mode 100644 index 0000000..adc7f80 --- /dev/null +++ b/arch/powerpc/kernel/head_40x.S @@ -0,0 +1,1021 @@ +/* + * Copyright (c) 1995-1996 Gary Thomas + * Initial PowerPC version. + * Copyright (c) 1996 Cort Dougan + * Rewritten for PReP + * Copyright (c) 1996 Paul Mackerras + * Low-level exception handers, MMU support, and rewrite. + * Copyright (c) 1997 Dan Malek + * PowerPC 8xx modifications. + * Copyright (c) 1998-1999 TiVo, Inc. + * PowerPC 403GCX modifications. + * Copyright (c) 1999 Grant Erickson + * PowerPC 403GCX/405GP modifications. + * Copyright 2000 MontaVista Software Inc. + * PPC405 modifications + * PowerPC 403GCX/405GP modifications. 
+ * Author: MontaVista Software, Inc. + * frank_rowand@mvista.com or source@mvista.com + * debbie_chu@mvista.com + * + * + * Module name: head_4xx.S + * + * Description: + * Kernel execution entry point code. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* As with the other PowerPC ports, it is expected that when code + * execution begins here, the following registers contain valid, yet + * optional, information: + * + * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) + * r4 - Starting address of the init RAM disk + * r5 - Ending address of the init RAM disk + * r6 - Start of kernel command line string (e.g. "mem=96m") + * r7 - End of kernel command line string + * + * This is all going to change RSN when we add bi_recs....... -- Dan + */ + .text +_GLOBAL(_stext) +_GLOBAL(_start) + + /* Save parameters we are passed. + */ + mr r31,r3 + mr r30,r4 + mr r29,r5 + mr r28,r6 + mr r27,r7 + + /* We have to turn on the MMU right away so we get cache modes + * set correctly. + */ + bl initial_mmu + +/* We now have the lower 16 Meg mapped into TLB entries, and the caches + * ready to work. + */ +turn_on_mmu: + lis r0,MSR_KERNEL@h + ori r0,r0,MSR_KERNEL@l + mtspr SPRN_SRR1,r0 + lis r0,start_here@h + ori r0,r0,start_here@l + mtspr SPRN_SRR0,r0 + SYNC + rfi /* enables MMU */ + b . /* prevent prefetch past rfi */ + +/* + * This area is used for temporarily saving registers during the + * critical exception prolog. + */ + . = 0xc0 +crit_save: +_GLOBAL(crit_r10) + .space 4 +_GLOBAL(crit_r11) + .space 4 + +/* + * Exception vector entry code. This code runs with address translation + * turned off (i.e. using physical addresses). We assume SPRG3 has the + * physical address of the current task thread_struct. + * Note that we have to have decremented r1 before we write to any fields + * of the exception frame, since a critical interrupt could occur at any + * time, and it will write to the area immediately below the current r1. + */ +#define NORMAL_EXCEPTION_PROLOG \ + mtspr SPRN_SPRG0,r10; /* save two registers to work with */\ + mtspr SPRN_SPRG1,r11; \ + mtspr SPRN_SPRG2,r1; \ + mfcr r10; /* save CR in r10 for now */\ + mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ + andi. r11,r11,MSR_PR; \ + beq 1f; \ + mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\ + lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ + addi r1,r1,THREAD_SIZE; \ +1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ + tophys(r11,r1); \ + stw r10,_CCR(r11); /* save various registers */\ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mfspr r10,SPRN_SPRG0; \ + stw r10,GPR10(r11); \ + mfspr r12,SPRN_SPRG1; \ + stw r12,GPR11(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r10,SPRN_SPRG2; \ + mfspr r12,SPRN_SRR0; \ + stw r10,GPR1(r11); \ + mfspr r9,SPRN_SRR1; \ + stw r10,0(r11); \ + rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + +/* + * Exception prolog for critical exceptions. This is a little different + * from the normal exception prolog above since a critical exception + * can potentially occur at any point during normal exception processing. 
+ * Thus we cannot use the same SPRG registers as the normal prolog above. + * Instead we use a couple of words of memory at low physical addresses. + * This is OK since we don't support SMP on these processors. + */ +#define CRITICAL_EXCEPTION_PROLOG \ + stw r10,crit_r10@l(0); /* save two registers to work with */\ + stw r11,crit_r11@l(0); \ + mfcr r10; /* save CR in r10 for now */\ + mfspr r11,SPRN_SRR3; /* check whether user or kernel */\ + andi. r11,r11,MSR_PR; \ + lis r11,critical_stack_top@h; \ + ori r11,r11,critical_stack_top@l; \ + beq 1f; \ + /* COMING FROM USER MODE */ \ + mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ + lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ + addi r11,r11,THREAD_SIZE; \ +1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ + tophys(r11,r11); \ + stw r10,_CCR(r11); /* save various registers */\ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ + stw r12,_DEAR(r11); /* since they may have had stuff */\ + mfspr r9,SPRN_ESR; /* in them at the point where the */\ + stw r9,_ESR(r11); /* exception was taken */\ + mfspr r12,SPRN_SRR2; \ + stw r1,GPR1(r11); \ + mfspr r9,SPRN_SRR3; \ + stw r1,0(r11); \ + tovirt(r1,r11); \ + rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + + /* + * State at this point: + * r9 saved in stack frame, now saved SRR3 & ~MSR_WE + * r10 saved in crit_r10 and in stack frame, trashed + * r11 saved in crit_r11 and in stack frame, + * now phys stack/exception frame pointer + * r12 saved in stack frame, now saved SRR2 + * CR saved in stack frame, CR0.EQ = !SRR3.PR + * LR, DEAR, ESR in stack frame + * r1 saved in stack frame, now virt stack/excframe pointer + * r0, r3-r8 saved in stack frame + */ + +/* + * Exception vectors. + */ +#define START_EXCEPTION(n, label) \ + . = n; \ +label: + +#define EXCEPTION(n, label, hdlr, xfer) \ + START_EXCEPTION(n, label); \ + NORMAL_EXCEPTION_PROLOG; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + xfer(n, hdlr) + +#define CRITICAL_EXCEPTION(n, label, hdlr) \ + START_EXCEPTION(n, label); \ + CRITICAL_EXCEPTION_PROLOG; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ + NOCOPY, crit_transfer_to_handler, \ + ret_from_crit_exc) + +#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \ + li r10,trap; \ + stw r10,_TRAP(r11); \ + lis r10,msr@h; \ + ori r10,r10,msr@l; \ + copyee(r10, r9); \ + bl tfer; \ + .long hdlr; \ + .long ret + +#define COPY_EE(d, s) rlwimi d,s,0,16,16 +#define NOCOPY(d, s) + +#define EXC_XFER_STD(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \ + ret_from_except) + +#define EXC_XFER_EE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_EE_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \ + ret_from_except) + + +/* + * 0x0100 - Critical Interrupt Exception + */ + CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception) + +/* + * 0x0200 - Machine Check Exception + */ + CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) + +/* + * 0x0300 - Data Storage Exception + * This happens for just a few reasons. 
U0 set (but we don't do that), + * or zone protection fault (user violation, write to protected page). + * If this is just an update of modified status, we do that quickly + * and exit. Otherwise, we call heavywight functions to do the work. + */ + START_EXCEPTION(0x0300, DataStorage) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 +#ifdef CONFIG_403GCX + stw r12, 0(r0) + stw r9, 4(r0) + mfcr r11 + mfspr r12, SPRN_PID + stw r11, 8(r0) + stw r12, 12(r0) +#else + mtspr SPRN_SPRG4, r12 + mtspr SPRN_SPRG5, r9 + mfcr r11 + mfspr r12, SPRN_PID + mtspr SPRN_SPRG7, r11 + mtspr SPRN_SPRG6, r12 +#endif + + /* First, check if it was a zone fault (which means a user + * tried to access a kernel or read-protected page - always + * a SEGV). All other faults here must be stores, so no + * need to check ESR_DST as well. */ + mfspr r10, SPRN_ESR + andis. r10, r10, ESR_DIZ@h + bne 2f + + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + cmplw r10, r11 + blt+ 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + li r9, 0 + mtspr SPRN_PID, r9 /* TLB will have 0 TID */ + b 4f + + /* Get the PGD for the current thread. + */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) +4: + tophys(r11, r11) + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r11, 0(r11) /* Get L1 entry */ + rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + + andi. r9, r11, _PAGE_RW /* Is it writeable? */ + beq 2f /* Bail if not */ + + /* Update 'changed'. + */ + ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE + stw r11, 0(r12) /* Update Linux page table */ + + /* Most of the Linux PTE is ready to load into the TLB LO. + * We set ZSEL, where only the LS-bit determines user access. + * We set execute, because we don't have the granularity to + * properly set this at the page level (Linux problem). + * If shared is set, we cause a zero PID->TID load. + * Many of these bits are software only. Bits we don't set + * here we (properly should) assume have the appropriate value. + */ + li r12, 0x0ce2 + andc r11, r11, r12 /* Make sure 20, 21 are zero */ + + /* find the TLB index that caused the fault. It has to be here. + */ + tlbsx r9, 0, r10 + + tlbwe r11, r9, TLB_DATA /* Load TLB LO */ + + /* Done...restore registers and get out of here. + */ +#ifdef CONFIG_403GCX + lwz r12, 12(r0) + lwz r11, 8(r0) + mtspr SPRN_PID, r12 + mtcr r11 + lwz r9, 4(r0) + lwz r12, 0(r0) +#else + mfspr r12, SPRN_SPRG6 + mfspr r11, SPRN_SPRG7 + mtspr SPRN_PID, r12 + mtcr r11 + mfspr r9, SPRN_SPRG5 + mfspr r12, SPRN_SPRG4 +#endif + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + PPC405_ERR77_SYNC + rfi /* Should sync shadow TLBs */ + b . /* prevent prefetch past rfi */ + +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ +#ifdef CONFIG_403GCX + lwz r12, 12(r0) + lwz r11, 8(r0) + mtspr SPRN_PID, r12 + mtcr r11 + lwz r9, 4(r0) + lwz r12, 0(r0) +#else + mfspr r12, SPRN_SPRG6 + mfspr r11, SPRN_SPRG7 + mtspr SPRN_PID, r12 + mtcr r11 + mfspr r9, SPRN_SPRG5 + mfspr r12, SPRN_SPRG4 +#endif + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b DataAccess + +/* + * 0x0400 - Instruction Storage Exception + * This is caused by a fetch from non-execute or guarded pages. 
+ */ + START_EXCEPTION(0x0400, InstructionAccess) + NORMAL_EXCEPTION_PROLOG + mr r4,r12 /* Pass SRR0 as arg2 */ + li r5,0 /* Pass zero as arg3 */ + EXC_XFER_EE_LITE(0x400, handle_page_fault) + +/* 0x0500 - External Interrupt Exception */ + EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) + +/* 0x0600 - Alignment Exception */ + START_EXCEPTION(0x0600, Alignment) + NORMAL_EXCEPTION_PROLOG + mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ + stw r4,_DEAR(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE(0x600, alignment_exception) + +/* 0x0700 - Program Exception */ + START_EXCEPTION(0x0700, ProgramCheck) + NORMAL_EXCEPTION_PROLOG + mfspr r4,SPRN_ESR /* Grab the ESR and save it */ + stw r4,_ESR(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_STD(0x700, program_check_exception) + + EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE) + +/* 0x0C00 - System Call Exception */ + START_EXCEPTION(0x0C00, SystemCall) + NORMAL_EXCEPTION_PROLOG + EXC_XFER_EE_LITE(0xc00, DoSyscall) + + EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE) + +/* 0x1000 - Programmable Interval Timer (PIT) Exception */ + START_EXCEPTION(0x1000, Decrementer) + NORMAL_EXCEPTION_PROLOG + lis r0,TSR_PIS@h + mtspr SPRN_TSR,r0 /* Clear the PIT exception */ + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_LITE(0x1000, timer_interrupt) + +#if 0 +/* NOTE: + * FIT and WDT handlers are not implemented yet. + */ + +/* 0x1010 - Fixed Interval Timer (FIT) Exception +*/ + STND_EXCEPTION(0x1010, FITException, unknown_exception) + +/* 0x1020 - Watchdog Timer (WDT) Exception +*/ +#ifdef CONFIG_BOOKE_WDT + CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException) +#else + CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception) +#endif +#endif + +/* 0x1100 - Data TLB Miss Exception + * As the name implies, translation is not in the MMU, so search the + * page tables and fix it. The only purpose of this function is to + * load TLB entries from the page table if they exist. + */ + START_EXCEPTION(0x1100, DTLBMiss) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 +#ifdef CONFIG_403GCX + stw r12, 0(r0) + stw r9, 4(r0) + mfcr r11 + mfspr r12, SPRN_PID + stw r11, 8(r0) + stw r12, 12(r0) +#else + mtspr SPRN_SPRG4, r12 + mtspr SPRN_SPRG5, r9 + mfcr r11 + mfspr r12, SPRN_PID + mtspr SPRN_SPRG7, r11 + mtspr SPRN_SPRG6, r12 +#endif + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + cmplw r10, r11 + blt+ 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + li r9, 0 + mtspr SPRN_PID, r9 /* TLB will have 0 TID */ + b 4f + + /* Get the PGD for the current thread. + */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) +4: + tophys(r11, r11) + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r12, 0(r11) /* Get L1 entry */ + andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + andi. r9, r11, _PAGE_PRESENT + beq 5f + + ori r11, r11, _PAGE_ACCESSED + stw r11, 0(r12) + + /* Create TLB tag. 
This is the faulting address plus a static + * set of bits. These are size, valid, E, U0. + */ + li r12, 0x00c0 + rlwimi r10, r12, 0, 20, 31 + + b finish_tlb_load + +2: /* Check for possible large-page pmd entry */ + rlwinm. r9, r12, 2, 22, 24 + beq 5f + + /* Create TLB tag. This is the faulting address, plus a static + * set of bits (valid, E, U0) plus the size from the PMD. + */ + ori r9, r9, 0x40 + rlwimi r10, r9, 0, 20, 31 + mr r11, r12 + + b finish_tlb_load + +5: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ +#ifdef CONFIG_403GCX + lwz r12, 12(r0) + lwz r11, 8(r0) + mtspr SPRN_PID, r12 + mtcr r11 + lwz r9, 4(r0) + lwz r12, 0(r0) +#else + mfspr r12, SPRN_SPRG6 + mfspr r11, SPRN_SPRG7 + mtspr SPRN_PID, r12 + mtcr r11 + mfspr r9, SPRN_SPRG5 + mfspr r12, SPRN_SPRG4 +#endif + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b DataAccess + +/* 0x1200 - Instruction TLB Miss Exception + * Nearly the same as above, except we get our information from different + * registers and bailout to a different point. + */ + START_EXCEPTION(0x1200, ITLBMiss) + mtspr SPRN_SPRG0, r10 /* Save some working registers */ + mtspr SPRN_SPRG1, r11 +#ifdef CONFIG_403GCX + stw r12, 0(r0) + stw r9, 4(r0) + mfcr r11 + mfspr r12, SPRN_PID + stw r11, 8(r0) + stw r12, 12(r0) +#else + mtspr SPRN_SPRG4, r12 + mtspr SPRN_SPRG5, r9 + mfcr r11 + mfspr r12, SPRN_PID + mtspr SPRN_SPRG7, r11 + mtspr SPRN_SPRG6, r12 +#endif + mfspr r10, SPRN_SRR0 /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + cmplw r10, r11 + blt+ 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + li r9, 0 + mtspr SPRN_PID, r9 /* TLB will have 0 TID */ + b 4f + + /* Get the PGD for the current thread. + */ +3: + mfspr r11,SPRN_SPRG3 + lwz r11,PGDIR(r11) +4: + tophys(r11, r11) + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r12, 0(r11) /* Get L1 entry */ + andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + andi. r9, r11, _PAGE_PRESENT + beq 5f + + ori r11, r11, _PAGE_ACCESSED + stw r11, 0(r12) + + /* Create TLB tag. This is the faulting address plus a static + * set of bits. These are size, valid, E, U0. + */ + li r12, 0x00c0 + rlwimi r10, r12, 0, 20, 31 + + b finish_tlb_load + +2: /* Check for possible large-page pmd entry */ + rlwinm. r9, r12, 2, 22, 24 + beq 5f + + /* Create TLB tag. This is the faulting address, plus a static + * set of bits (valid, E, U0) plus the size from the PMD. + */ + ori r9, r9, 0x40 + rlwimi r10, r9, 0, 20, 31 + mr r11, r12 + + b finish_tlb_load + +5: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. 
+ */ +#ifdef CONFIG_403GCX + lwz r12, 12(r0) + lwz r11, 8(r0) + mtspr SPRN_PID, r12 + mtcr r11 + lwz r9, 4(r0) + lwz r12, 0(r0) +#else + mfspr r12, SPRN_SPRG6 + mfspr r11, SPRN_SPRG7 + mtspr SPRN_PID, r12 + mtcr r11 + mfspr r9, SPRN_SPRG5 + mfspr r12, SPRN_SPRG4 +#endif + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + b InstructionAccess + + EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE) +#ifdef CONFIG_IBM405_ERR51 + /* 405GP errata 51 */ + START_EXCEPTION(0x1700, Trap_17) + b DTLBMiss +#else + EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE) +#endif + EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE) + EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE) + +/* Check for a single step debug exception while in an exception + * handler before state has been saved. This is to catch the case + * where an instruction that we are trying to single step causes + * an exception (eg ITLB/DTLB miss) and thus the first instruction of + * the exception handler generates a single step debug exception. + * + * If we get a debug trap on the first instruction of an exception handler, + * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is + * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR). + * The exception handler was handling a non-critical interrupt, so it will + * save (and later restore) the MSR via SPRN_SRR1, which will still have + * the MSR_DE bit set. + */ + /* 0x2000 - Debug Exception */ + START_EXCEPTION(0x2000, DebugTrap) + CRITICAL_EXCEPTION_PROLOG + + /* + * If this is a single step or branch-taken exception in an + * exception entry sequence, it was probably meant to apply to + * the code where the exception occurred (since exception entry + * doesn't turn off DE automatically). We simulate the effect + * of turning off DE on entry to an exception handler by turning + * off DE in the SRR3 value and clearing the debug status. + */ + mfspr r10,SPRN_DBSR /* check single-step/branch taken */ + andis. r10,r10,DBSR_IC@h + beq+ 2f + + andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */ + beq 1f /* branch and fix it up */ + + mfspr r10,SPRN_SRR2 /* Faulting instruction address */ + cmplwi r10,0x2100 + bgt+ 2f /* address above exception vectors */ + + /* here it looks like we got an inappropriate debug exception. */ +1: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */ + lis r10,DBSR_IC@h /* clear the IC event */ + mtspr SPRN_DBSR,r10 + /* restore state and get out */ + lwz r10,_CCR(r11) + lwz r0,GPR0(r11) + lwz r1,GPR1(r11) + mtcrf 0x80,r10 + mtspr SPRN_SRR2,r12 + mtspr SPRN_SRR3,r9 + lwz r9,GPR9(r11) + lwz r12,GPR12(r11) + lwz r10,crit_r10@l(0) + lwz r11,crit_r11@l(0) + PPC405_ERR77_SYNC + rfci + b . + + /* continue normal handling for a critical exception... 
*/ +2: mfspr r4,SPRN_DBSR + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_TEMPLATE(DebugException, 0x2002, \ + (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ + NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) + +/* + * The other Data TLB exceptions bail out to this point + * if they can't resolve the lightweight TLB fault. + */ +DataAccess: + NORMAL_EXCEPTION_PROLOG + mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ + stw r5,_ESR(r11) + mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ + EXC_XFER_EE_LITE(0x300, handle_page_fault) + +/* Other PowerPC processors, namely those derived from the 6xx-series + * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved. + * However, for the 4xx-series processors these are neither defined nor + * reserved. + */ + + /* Damn, I came up one instruction too many to fit into the + * exception space :-). Both the instruction and data TLB + * miss get to this point to load the TLB. + * r10 - TLB_TAG value + * r11 - Linux PTE + * r12, r9 - avilable to use + * PID - loaded with proper value when we get here + * Upon exit, we reload everything and RFI. + * Actually, it will fit now, but oh well.....a common place + * to load the TLB. + */ +tlb_4xx_index: + .long 0 +finish_tlb_load: + /* load the next available TLB index. + */ + lwz r9, tlb_4xx_index@l(0) + addi r9, r9, 1 + andi. r9, r9, (PPC4XX_TLB_SIZE-1) + stw r9, tlb_4xx_index@l(0) + +6: + /* + * Clear out the software-only bits in the PTE to generate the + * TLB_DATA value. These are the bottom 2 bits of the RPM, the + * top 3 bits of the zone field, and M. + */ + li r12, 0x0ce2 + andc r11, r11, r12 + + tlbwe r11, r9, TLB_DATA /* Load TLB LO */ + tlbwe r10, r9, TLB_TAG /* Load TLB HI */ + + /* Done...restore registers and get out of here. + */ +#ifdef CONFIG_403GCX + lwz r12, 12(r0) + lwz r11, 8(r0) + mtspr SPRN_PID, r12 + mtcr r11 + lwz r9, 4(r0) + lwz r12, 0(r0) +#else + mfspr r12, SPRN_SPRG6 + mfspr r11, SPRN_SPRG7 + mtspr SPRN_PID, r12 + mtcr r11 + mfspr r9, SPRN_SPRG5 + mfspr r12, SPRN_SPRG4 +#endif + mfspr r11, SPRN_SPRG1 + mfspr r10, SPRN_SPRG0 + PPC405_ERR77_SYNC + rfi /* Should sync shadow TLBs */ + b . /* prevent prefetch past rfi */ + +/* extern void giveup_fpu(struct task_struct *prev) + * + * The PowerPC 4xx family of processors do not have an FPU, so this just + * returns. + */ +_GLOBAL(giveup_fpu) + blr + +/* This is where the main kernel code starts. + */ +start_here: + + /* ptr to current */ + lis r2,init_task@h + ori r2,r2,init_task@l + + /* ptr to phys current thread */ + tophys(r4,r2) + addi r4,r4,THREAD /* init task's THREAD */ + mtspr SPRN_SPRG3,r4 + + /* stack */ + lis r1,init_thread_union@ha + addi r1,r1,init_thread_union@l + li r0,0 + stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + + bl early_init /* We have to do this with MMU on */ + +/* + * Decide what sort of machine this is and initialize the MMU. + */ + mr r3,r31 + mr r4,r30 + mr r5,r29 + mr r6,r28 + mr r7,r27 + bl machine_init + bl MMU_init + +/* Go back to running unmapped so we can load up new values + * and change to using our exception vectors. + * On the 4xx, all we have to do is invalidate the TLB to clear + * the old 16M byte TLB mappings. + */ + lis r4,2f@h + ori r4,r4,2f@l + tophys(r4,r4) + lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h + ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l + mtspr SPRN_SRR0,r4 + mtspr SPRN_SRR1,r3 + rfi + b . 
/* prevent prefetch past rfi */ + +/* Load up the kernel context */ +2: + sync /* Flush to memory before changing TLB */ + tlbia + isync /* Flush shadow TLBs */ + + /* set up the PTE pointers for the Abatron bdiGDB. + */ + lis r6, swapper_pg_dir@h + ori r6, r6, swapper_pg_dir@l + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + stw r5, 0xf0(r0) /* Must match your Abatron config file */ + tophys(r5,r5) + stw r6, 0(r5) + +/* Now turn on the MMU for real! */ + lis r4,MSR_KERNEL@h + ori r4,r4,MSR_KERNEL@l + lis r3,start_kernel@h + ori r3,r3,start_kernel@l + mtspr SPRN_SRR0,r3 + mtspr SPRN_SRR1,r4 + rfi /* enable MMU and jump to start_kernel */ + b . /* prevent prefetch past rfi */ + +/* Set up the initial MMU state so we can do the first level of + * kernel initialization. This maps the first 16 MBytes of memory 1:1 + * virtual to physical and more importantly sets the cache mode. + */ +initial_mmu: + tlbia /* Invalidate all TLB entries */ + isync + + /* We should still be executing code at physical address 0x0000xxxx + * at this point. However, start_here is at virtual address + * 0xC000xxxx. So, set up a TLB mapping to cover this once + * translation is enabled. + */ + + lis r3,KERNELBASE@h /* Load the kernel virtual address */ + ori r3,r3,KERNELBASE@l + tophys(r4,r3) /* Load the kernel physical address */ + + iccci r0,r3 /* Invalidate the i-cache before use */ + + /* Load the kernel PID. + */ + li r0,0 + mtspr SPRN_PID,r0 + sync + + /* Configure and load two entries into TLB slots 62 and 63. + * In case we are pinning TLBs, these are reserved in by the + * other TLB functions. If not reserving, then it doesn't + * matter where they are loaded. + */ + clrrwi r4,r4,10 /* Mask off the real page number */ + ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ + + clrrwi r3,r3,10 /* Mask off the effective page number */ + ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) + + li r0,63 /* TLB slot 63 */ + + tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ + tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ + +#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE) + + /* Load a TLB entry for the UART, so that ppc4xx_progress() can use + * the UARTs nice and early. We use a 4k real==virtual mapping. */ + + lis r3,SERIAL_DEBUG_IO_BASE@h + ori r3,r3,SERIAL_DEBUG_IO_BASE@l + mr r4,r3 + clrrwi r4,r4,12 + ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G) + + clrrwi r3,r3,12 + ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) + + li r0,0 /* TLB slot 0 */ + tlbwe r4,r0,TLB_DATA + tlbwe r3,r0,TLB_TAG +#endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */ + + isync + + /* Establish the exception vector base + */ + lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */ + tophys(r0,r4) /* Use the physical address */ + mtspr SPRN_EVPR,r0 + + blr + +_GLOBAL(abort) + mfspr r13,SPRN_DBCR0 + oris r13,r13,DBCR0_RST_SYSTEM@h + mtspr SPRN_DBCR0,r13 + +_GLOBAL(set_context) + +#ifdef CONFIG_BDI_SWITCH + /* Context switch the PTE pointer for the Abatron BDI2000. + * The PGDIR is the second parameter. + */ + lis r5, KERNELBASE@h + lwz r5, 0xf0(r5) + stw r4, 0x4(r5) +#endif + sync + mtspr SPRN_PID,r3 + isync /* Need an isync to flush shadow */ + /* TLBs after changing PID */ + blr + +/* We put a few things here that have to be page-aligned. This stuff + * goes at the beginning of the data segment, which is page-aligned. 
+ */ + .data + .align 12 + .globl sdata +sdata: + .globl empty_zero_page +empty_zero_page: + .space 4096 + .globl swapper_pg_dir +swapper_pg_dir: + .space 4096 + + +/* Stack for handling critical exceptions from kernel mode */ + .section .bss + .align 12 +exception_stack_bottom: + .space 4096 +critical_stack_top: + .globl exception_stack_top +exception_stack_top: + +/* This space gets a copy of optional info passed to us by the bootstrap + * which is used to pass parameters into the kernel like root=/dev/sda1, etc. + */ + .globl cmd_line +cmd_line: + .space 512 + +/* Room for two PTE pointers, usually the kernel and current user pointers + * to their respective root page table. + */ +abatron_pteptrs: + .space 8 diff --git a/arch/powerpc/kernel/head_4xx.S b/arch/powerpc/kernel/head_4xx.S deleted file mode 100644 index adc7f80..0000000 --- a/arch/powerpc/kernel/head_4xx.S +++ /dev/null @@ -1,1021 +0,0 @@ -/* - * Copyright (c) 1995-1996 Gary Thomas - * Initial PowerPC version. - * Copyright (c) 1996 Cort Dougan - * Rewritten for PReP - * Copyright (c) 1996 Paul Mackerras - * Low-level exception handers, MMU support, and rewrite. - * Copyright (c) 1997 Dan Malek - * PowerPC 8xx modifications. - * Copyright (c) 1998-1999 TiVo, Inc. - * PowerPC 403GCX modifications. - * Copyright (c) 1999 Grant Erickson - * PowerPC 403GCX/405GP modifications. - * Copyright 2000 MontaVista Software Inc. - * PPC405 modifications - * PowerPC 403GCX/405GP modifications. - * Author: MontaVista Software, Inc. - * frank_rowand@mvista.com or source@mvista.com - * debbie_chu@mvista.com - * - * - * Module name: head_4xx.S - * - * Description: - * Kernel execution entry point code. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* As with the other PowerPC ports, it is expected that when code - * execution begins here, the following registers contain valid, yet - * optional, information: - * - * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) - * r4 - Starting address of the init RAM disk - * r5 - Ending address of the init RAM disk - * r6 - Start of kernel command line string (e.g. "mem=96m") - * r7 - End of kernel command line string - * - * This is all going to change RSN when we add bi_recs....... -- Dan - */ - .text -_GLOBAL(_stext) -_GLOBAL(_start) - - /* Save parameters we are passed. - */ - mr r31,r3 - mr r30,r4 - mr r29,r5 - mr r28,r6 - mr r27,r7 - - /* We have to turn on the MMU right away so we get cache modes - * set correctly. - */ - bl initial_mmu - -/* We now have the lower 16 Meg mapped into TLB entries, and the caches - * ready to work. - */ -turn_on_mmu: - lis r0,MSR_KERNEL@h - ori r0,r0,MSR_KERNEL@l - mtspr SPRN_SRR1,r0 - lis r0,start_here@h - ori r0,r0,start_here@l - mtspr SPRN_SRR0,r0 - SYNC - rfi /* enables MMU */ - b . /* prevent prefetch past rfi */ - -/* - * This area is used for temporarily saving registers during the - * critical exception prolog. - */ - . = 0xc0 -crit_save: -_GLOBAL(crit_r10) - .space 4 -_GLOBAL(crit_r11) - .space 4 - -/* - * Exception vector entry code. This code runs with address translation - * turned off (i.e. using physical addresses). We assume SPRG3 has the - * physical address of the current task thread_struct. 
- * Note that we have to have decremented r1 before we write to any fields - * of the exception frame, since a critical interrupt could occur at any - * time, and it will write to the area immediately below the current r1. - */ -#define NORMAL_EXCEPTION_PROLOG \ - mtspr SPRN_SPRG0,r10; /* save two registers to work with */\ - mtspr SPRN_SPRG1,r11; \ - mtspr SPRN_SPRG2,r1; \ - mfcr r10; /* save CR in r10 for now */\ - mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ - andi. r11,r11,MSR_PR; \ - beq 1f; \ - mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\ - lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ - addi r1,r1,THREAD_SIZE; \ -1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ - tophys(r11,r1); \ - stw r10,_CCR(r11); /* save various registers */\ - stw r12,GPR12(r11); \ - stw r9,GPR9(r11); \ - mfspr r10,SPRN_SPRG0; \ - stw r10,GPR10(r11); \ - mfspr r12,SPRN_SPRG1; \ - stw r12,GPR11(r11); \ - mflr r10; \ - stw r10,_LINK(r11); \ - mfspr r10,SPRN_SPRG2; \ - mfspr r12,SPRN_SRR0; \ - stw r10,GPR1(r11); \ - mfspr r9,SPRN_SRR1; \ - stw r10,0(r11); \ - rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ - stw r0,GPR0(r11); \ - SAVE_4GPRS(3, r11); \ - SAVE_2GPRS(7, r11) - -/* - * Exception prolog for critical exceptions. This is a little different - * from the normal exception prolog above since a critical exception - * can potentially occur at any point during normal exception processing. - * Thus we cannot use the same SPRG registers as the normal prolog above. - * Instead we use a couple of words of memory at low physical addresses. - * This is OK since we don't support SMP on these processors. - */ -#define CRITICAL_EXCEPTION_PROLOG \ - stw r10,crit_r10@l(0); /* save two registers to work with */\ - stw r11,crit_r11@l(0); \ - mfcr r10; /* save CR in r10 for now */\ - mfspr r11,SPRN_SRR3; /* check whether user or kernel */\ - andi. r11,r11,MSR_PR; \ - lis r11,critical_stack_top@h; \ - ori r11,r11,critical_stack_top@l; \ - beq 1f; \ - /* COMING FROM USER MODE */ \ - mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ - lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ - addi r11,r11,THREAD_SIZE; \ -1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ - tophys(r11,r11); \ - stw r10,_CCR(r11); /* save various registers */\ - stw r12,GPR12(r11); \ - stw r9,GPR9(r11); \ - mflr r10; \ - stw r10,_LINK(r11); \ - mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ - stw r12,_DEAR(r11); /* since they may have had stuff */\ - mfspr r9,SPRN_ESR; /* in them at the point where the */\ - stw r9,_ESR(r11); /* exception was taken */\ - mfspr r12,SPRN_SRR2; \ - stw r1,GPR1(r11); \ - mfspr r9,SPRN_SRR3; \ - stw r1,0(r11); \ - tovirt(r1,r11); \ - rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ - stw r0,GPR0(r11); \ - SAVE_4GPRS(3, r11); \ - SAVE_2GPRS(7, r11) - - /* - * State at this point: - * r9 saved in stack frame, now saved SRR3 & ~MSR_WE - * r10 saved in crit_r10 and in stack frame, trashed - * r11 saved in crit_r11 and in stack frame, - * now phys stack/exception frame pointer - * r12 saved in stack frame, now saved SRR2 - * CR saved in stack frame, CR0.EQ = !SRR3.PR - * LR, DEAR, ESR in stack frame - * r1 saved in stack frame, now virt stack/excframe pointer - * r0, r3-r8 saved in stack frame - */ - -/* - * Exception vectors. - */ -#define START_EXCEPTION(n, label) \ - . 
= n; \ -label: - -#define EXCEPTION(n, label, hdlr, xfer) \ - START_EXCEPTION(n, label); \ - NORMAL_EXCEPTION_PROLOG; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - xfer(n, hdlr) - -#define CRITICAL_EXCEPTION(n, label, hdlr) \ - START_EXCEPTION(n, label); \ - CRITICAL_EXCEPTION_PROLOG; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ - NOCOPY, crit_transfer_to_handler, \ - ret_from_crit_exc) - -#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \ - li r10,trap; \ - stw r10,_TRAP(r11); \ - lis r10,msr@h; \ - ori r10,r10,msr@l; \ - copyee(r10, r9); \ - bl tfer; \ - .long hdlr; \ - .long ret - -#define COPY_EE(d, s) rlwimi d,s,0,16,16 -#define NOCOPY(d, s) - -#define EXC_XFER_STD(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \ - ret_from_except) - -#define EXC_XFER_EE(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \ - ret_from_except_full) - -#define EXC_XFER_EE_LITE(n, hdlr) \ - EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \ - ret_from_except) - - -/* - * 0x0100 - Critical Interrupt Exception - */ - CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception) - -/* - * 0x0200 - Machine Check Exception - */ - CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) - -/* - * 0x0300 - Data Storage Exception - * This happens for just a few reasons. U0 set (but we don't do that), - * or zone protection fault (user violation, write to protected page). - * If this is just an update of modified status, we do that quickly - * and exit. Otherwise, we call heavywight functions to do the work. - */ - START_EXCEPTION(0x0300, DataStorage) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 -#ifdef CONFIG_403GCX - stw r12, 0(r0) - stw r9, 4(r0) - mfcr r11 - mfspr r12, SPRN_PID - stw r11, 8(r0) - stw r12, 12(r0) -#else - mtspr SPRN_SPRG4, r12 - mtspr SPRN_SPRG5, r9 - mfcr r11 - mfspr r12, SPRN_PID - mtspr SPRN_SPRG7, r11 - mtspr SPRN_SPRG6, r12 -#endif - - /* First, check if it was a zone fault (which means a user - * tried to access a kernel or read-protected page - always - * a SEGV). All other faults here must be stores, so no - * need to check ESR_DST as well. */ - mfspr r10, SPRN_ESR - andis. r10, r10, ESR_DIZ@h - bne 2f - - mfspr r10, SPRN_DEAR /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, TASK_SIZE@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - li r9, 0 - mtspr SPRN_PID, r9 /* TLB will have 0 TID */ - b 4f - - /* Get the PGD for the current thread. - */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) -4: - tophys(r11, r11) - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ - lwz r11, 0(r11) /* Get L1 entry */ - rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ - beq 2f /* Bail if no table */ - - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ - lwz r11, 0(r12) /* Get Linux PTE */ - - andi. r9, r11, _PAGE_RW /* Is it writeable? */ - beq 2f /* Bail if not */ - - /* Update 'changed'. - */ - ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE - stw r11, 0(r12) /* Update Linux page table */ - - /* Most of the Linux PTE is ready to load into the TLB LO. 
- * We set ZSEL, where only the LS-bit determines user access. - * We set execute, because we don't have the granularity to - * properly set this at the page level (Linux problem). - * If shared is set, we cause a zero PID->TID load. - * Many of these bits are software only. Bits we don't set - * here we (properly should) assume have the appropriate value. - */ - li r12, 0x0ce2 - andc r11, r11, r12 /* Make sure 20, 21 are zero */ - - /* find the TLB index that caused the fault. It has to be here. - */ - tlbsx r9, 0, r10 - - tlbwe r11, r9, TLB_DATA /* Load TLB LO */ - - /* Done...restore registers and get out of here. - */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG6 - mfspr r11, SPRN_SPRG7 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG5 - mfspr r12, SPRN_SPRG4 -#endif - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - PPC405_ERR77_SYNC - rfi /* Should sync shadow TLBs */ - b . /* prevent prefetch past rfi */ - -2: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG6 - mfspr r11, SPRN_SPRG7 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG5 - mfspr r12, SPRN_SPRG4 -#endif - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b DataAccess - -/* - * 0x0400 - Instruction Storage Exception - * This is caused by a fetch from non-execute or guarded pages. - */ - START_EXCEPTION(0x0400, InstructionAccess) - NORMAL_EXCEPTION_PROLOG - mr r4,r12 /* Pass SRR0 as arg2 */ - li r5,0 /* Pass zero as arg3 */ - EXC_XFER_EE_LITE(0x400, handle_page_fault) - -/* 0x0500 - External Interrupt Exception */ - EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) - -/* 0x0600 - Alignment Exception */ - START_EXCEPTION(0x0600, Alignment) - NORMAL_EXCEPTION_PROLOG - mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ - stw r4,_DEAR(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_EE(0x600, alignment_exception) - -/* 0x0700 - Program Exception */ - START_EXCEPTION(0x0700, ProgramCheck) - NORMAL_EXCEPTION_PROLOG - mfspr r4,SPRN_ESR /* Grab the ESR and save it */ - stw r4,_ESR(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_STD(0x700, program_check_exception) - - EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE) - -/* 0x0C00 - System Call Exception */ - START_EXCEPTION(0x0C00, SystemCall) - NORMAL_EXCEPTION_PROLOG - EXC_XFER_EE_LITE(0xc00, DoSyscall) - - EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE) - -/* 0x1000 - Programmable Interval Timer (PIT) Exception */ - START_EXCEPTION(0x1000, Decrementer) - NORMAL_EXCEPTION_PROLOG - lis r0,TSR_PIS@h - mtspr SPRN_TSR,r0 /* Clear the PIT exception */ - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_LITE(0x1000, timer_interrupt) - -#if 0 -/* NOTE: - * FIT and WDT handlers are not implemented yet. 
- */ - -/* 0x1010 - Fixed Interval Timer (FIT) Exception -*/ - STND_EXCEPTION(0x1010, FITException, unknown_exception) - -/* 0x1020 - Watchdog Timer (WDT) Exception -*/ -#ifdef CONFIG_BOOKE_WDT - CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException) -#else - CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception) -#endif -#endif - -/* 0x1100 - Data TLB Miss Exception - * As the name implies, translation is not in the MMU, so search the - * page tables and fix it. The only purpose of this function is to - * load TLB entries from the page table if they exist. - */ - START_EXCEPTION(0x1100, DTLBMiss) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 -#ifdef CONFIG_403GCX - stw r12, 0(r0) - stw r9, 4(r0) - mfcr r11 - mfspr r12, SPRN_PID - stw r11, 8(r0) - stw r12, 12(r0) -#else - mtspr SPRN_SPRG4, r12 - mtspr SPRN_SPRG5, r9 - mfcr r11 - mfspr r12, SPRN_PID - mtspr SPRN_SPRG7, r11 - mtspr SPRN_SPRG6, r12 -#endif - mfspr r10, SPRN_DEAR /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. - */ - lis r11, TASK_SIZE@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - li r9, 0 - mtspr SPRN_PID, r9 /* TLB will have 0 TID */ - b 4f - - /* Get the PGD for the current thread. - */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) -4: - tophys(r11, r11) - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ - lwz r12, 0(r11) /* Get L1 entry */ - andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ - beq 2f /* Bail if no table */ - - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ - lwz r11, 0(r12) /* Get Linux PTE */ - andi. r9, r11, _PAGE_PRESENT - beq 5f - - ori r11, r11, _PAGE_ACCESSED - stw r11, 0(r12) - - /* Create TLB tag. This is the faulting address plus a static - * set of bits. These are size, valid, E, U0. - */ - li r12, 0x00c0 - rlwimi r10, r12, 0, 20, 31 - - b finish_tlb_load - -2: /* Check for possible large-page pmd entry */ - rlwinm. r9, r12, 2, 22, 24 - beq 5f - - /* Create TLB tag. This is the faulting address, plus a static - * set of bits (valid, E, U0) plus the size from the PMD. - */ - ori r9, r9, 0x40 - rlwimi r10, r9, 0, 20, 31 - mr r11, r12 - - b finish_tlb_load - -5: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG6 - mfspr r11, SPRN_SPRG7 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG5 - mfspr r12, SPRN_SPRG4 -#endif - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b DataAccess - -/* 0x1200 - Instruction TLB Miss Exception - * Nearly the same as above, except we get our information from different - * registers and bailout to a different point. - */ - START_EXCEPTION(0x1200, ITLBMiss) - mtspr SPRN_SPRG0, r10 /* Save some working registers */ - mtspr SPRN_SPRG1, r11 -#ifdef CONFIG_403GCX - stw r12, 0(r0) - stw r9, 4(r0) - mfcr r11 - mfspr r12, SPRN_PID - stw r11, 8(r0) - stw r12, 12(r0) -#else - mtspr SPRN_SPRG4, r12 - mtspr SPRN_SPRG5, r9 - mfcr r11 - mfspr r12, SPRN_PID - mtspr SPRN_SPRG7, r11 - mtspr SPRN_SPRG6, r12 -#endif - mfspr r10, SPRN_SRR0 /* Get faulting address */ - - /* If we are faulting a kernel address, we have to use the - * kernel page tables. 
- */ - lis r11, TASK_SIZE@h - cmplw r10, r11 - blt+ 3f - lis r11, swapper_pg_dir@h - ori r11, r11, swapper_pg_dir@l - li r9, 0 - mtspr SPRN_PID, r9 /* TLB will have 0 TID */ - b 4f - - /* Get the PGD for the current thread. - */ -3: - mfspr r11,SPRN_SPRG3 - lwz r11,PGDIR(r11) -4: - tophys(r11, r11) - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ - lwz r12, 0(r11) /* Get L1 entry */ - andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ - beq 2f /* Bail if no table */ - - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ - lwz r11, 0(r12) /* Get Linux PTE */ - andi. r9, r11, _PAGE_PRESENT - beq 5f - - ori r11, r11, _PAGE_ACCESSED - stw r11, 0(r12) - - /* Create TLB tag. This is the faulting address plus a static - * set of bits. These are size, valid, E, U0. - */ - li r12, 0x00c0 - rlwimi r10, r12, 0, 20, 31 - - b finish_tlb_load - -2: /* Check for possible large-page pmd entry */ - rlwinm. r9, r12, 2, 22, 24 - beq 5f - - /* Create TLB tag. This is the faulting address, plus a static - * set of bits (valid, E, U0) plus the size from the PMD. - */ - ori r9, r9, 0x40 - rlwimi r10, r9, 0, 20, 31 - mr r11, r12 - - b finish_tlb_load - -5: - /* The bailout. Restore registers to pre-exception conditions - * and call the heavyweights to help us out. - */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG6 - mfspr r11, SPRN_SPRG7 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG5 - mfspr r12, SPRN_SPRG4 -#endif - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - b InstructionAccess - - EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE) -#ifdef CONFIG_IBM405_ERR51 - /* 405GP errata 51 */ - START_EXCEPTION(0x1700, Trap_17) - b DTLBMiss -#else - EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE) -#endif - EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE) - EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE) - -/* Check for a single step debug exception while in an exception - * handler before state has been saved. This is to catch the case - * where an instruction that we are trying to single step causes - * an exception (eg ITLB/DTLB miss) and thus the first instruction of - * the exception handler generates a single step debug exception. - * - * If we get a debug trap on the first instruction of an exception handler, - * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is - * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR). - * The exception handler was handling a non-critical interrupt, so it will - * save (and later restore) the MSR via SPRN_SRR1, which will still have - * the MSR_DE bit set. 
- */ - /* 0x2000 - Debug Exception */ - START_EXCEPTION(0x2000, DebugTrap) - CRITICAL_EXCEPTION_PROLOG - - /* - * If this is a single step or branch-taken exception in an - * exception entry sequence, it was probably meant to apply to - * the code where the exception occurred (since exception entry - * doesn't turn off DE automatically). We simulate the effect - * of turning off DE on entry to an exception handler by turning - * off DE in the SRR3 value and clearing the debug status. - */ - mfspr r10,SPRN_DBSR /* check single-step/branch taken */ - andis. r10,r10,DBSR_IC@h - beq+ 2f - - andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */ - beq 1f /* branch and fix it up */ - - mfspr r10,SPRN_SRR2 /* Faulting instruction address */ - cmplwi r10,0x2100 - bgt+ 2f /* address above exception vectors */ - - /* here it looks like we got an inappropriate debug exception. */ -1: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */ - lis r10,DBSR_IC@h /* clear the IC event */ - mtspr SPRN_DBSR,r10 - /* restore state and get out */ - lwz r10,_CCR(r11) - lwz r0,GPR0(r11) - lwz r1,GPR1(r11) - mtcrf 0x80,r10 - mtspr SPRN_SRR2,r12 - mtspr SPRN_SRR3,r9 - lwz r9,GPR9(r11) - lwz r12,GPR12(r11) - lwz r10,crit_r10@l(0) - lwz r11,crit_r11@l(0) - PPC405_ERR77_SYNC - rfci - b . - - /* continue normal handling for a critical exception... */ -2: mfspr r4,SPRN_DBSR - addi r3,r1,STACK_FRAME_OVERHEAD - EXC_XFER_TEMPLATE(DebugException, 0x2002, \ - (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ - NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) - -/* - * The other Data TLB exceptions bail out to this point - * if they can't resolve the lightweight TLB fault. - */ -DataAccess: - NORMAL_EXCEPTION_PROLOG - mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ - stw r5,_ESR(r11) - mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ - EXC_XFER_EE_LITE(0x300, handle_page_fault) - -/* Other PowerPC processors, namely those derived from the 6xx-series - * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved. - * However, for the 4xx-series processors these are neither defined nor - * reserved. - */ - - /* Damn, I came up one instruction too many to fit into the - * exception space :-). Both the instruction and data TLB - * miss get to this point to load the TLB. - * r10 - TLB_TAG value - * r11 - Linux PTE - * r12, r9 - avilable to use - * PID - loaded with proper value when we get here - * Upon exit, we reload everything and RFI. - * Actually, it will fit now, but oh well.....a common place - * to load the TLB. - */ -tlb_4xx_index: - .long 0 -finish_tlb_load: - /* load the next available TLB index. - */ - lwz r9, tlb_4xx_index@l(0) - addi r9, r9, 1 - andi. r9, r9, (PPC4XX_TLB_SIZE-1) - stw r9, tlb_4xx_index@l(0) - -6: - /* - * Clear out the software-only bits in the PTE to generate the - * TLB_DATA value. These are the bottom 2 bits of the RPM, the - * top 3 bits of the zone field, and M. - */ - li r12, 0x0ce2 - andc r11, r11, r12 - - tlbwe r11, r9, TLB_DATA /* Load TLB LO */ - tlbwe r10, r9, TLB_TAG /* Load TLB HI */ - - /* Done...restore registers and get out of here. - */ -#ifdef CONFIG_403GCX - lwz r12, 12(r0) - lwz r11, 8(r0) - mtspr SPRN_PID, r12 - mtcr r11 - lwz r9, 4(r0) - lwz r12, 0(r0) -#else - mfspr r12, SPRN_SPRG6 - mfspr r11, SPRN_SPRG7 - mtspr SPRN_PID, r12 - mtcr r11 - mfspr r9, SPRN_SPRG5 - mfspr r12, SPRN_SPRG4 -#endif - mfspr r11, SPRN_SPRG1 - mfspr r10, SPRN_SPRG0 - PPC405_ERR77_SYNC - rfi /* Should sync shadow TLBs */ - b . 
/* prevent prefetch past rfi */ - -/* extern void giveup_fpu(struct task_struct *prev) - * - * The PowerPC 4xx family of processors do not have an FPU, so this just - * returns. - */ -_GLOBAL(giveup_fpu) - blr - -/* This is where the main kernel code starts. - */ -start_here: - - /* ptr to current */ - lis r2,init_task@h - ori r2,r2,init_task@l - - /* ptr to phys current thread */ - tophys(r4,r2) - addi r4,r4,THREAD /* init task's THREAD */ - mtspr SPRN_SPRG3,r4 - - /* stack */ - lis r1,init_thread_union@ha - addi r1,r1,init_thread_union@l - li r0,0 - stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) - - bl early_init /* We have to do this with MMU on */ - -/* - * Decide what sort of machine this is and initialize the MMU. - */ - mr r3,r31 - mr r4,r30 - mr r5,r29 - mr r6,r28 - mr r7,r27 - bl machine_init - bl MMU_init - -/* Go back to running unmapped so we can load up new values - * and change to using our exception vectors. - * On the 4xx, all we have to do is invalidate the TLB to clear - * the old 16M byte TLB mappings. - */ - lis r4,2f@h - ori r4,r4,2f@l - tophys(r4,r4) - lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h - ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l - mtspr SPRN_SRR0,r4 - mtspr SPRN_SRR1,r3 - rfi - b . /* prevent prefetch past rfi */ - -/* Load up the kernel context */ -2: - sync /* Flush to memory before changing TLB */ - tlbia - isync /* Flush shadow TLBs */ - - /* set up the PTE pointers for the Abatron bdiGDB. - */ - lis r6, swapper_pg_dir@h - ori r6, r6, swapper_pg_dir@l - lis r5, abatron_pteptrs@h - ori r5, r5, abatron_pteptrs@l - stw r5, 0xf0(r0) /* Must match your Abatron config file */ - tophys(r5,r5) - stw r6, 0(r5) - -/* Now turn on the MMU for real! */ - lis r4,MSR_KERNEL@h - ori r4,r4,MSR_KERNEL@l - lis r3,start_kernel@h - ori r3,r3,start_kernel@l - mtspr SPRN_SRR0,r3 - mtspr SPRN_SRR1,r4 - rfi /* enable MMU and jump to start_kernel */ - b . /* prevent prefetch past rfi */ - -/* Set up the initial MMU state so we can do the first level of - * kernel initialization. This maps the first 16 MBytes of memory 1:1 - * virtual to physical and more importantly sets the cache mode. - */ -initial_mmu: - tlbia /* Invalidate all TLB entries */ - isync - - /* We should still be executing code at physical address 0x0000xxxx - * at this point. However, start_here is at virtual address - * 0xC000xxxx. So, set up a TLB mapping to cover this once - * translation is enabled. - */ - - lis r3,KERNELBASE@h /* Load the kernel virtual address */ - ori r3,r3,KERNELBASE@l - tophys(r4,r3) /* Load the kernel physical address */ - - iccci r0,r3 /* Invalidate the i-cache before use */ - - /* Load the kernel PID. - */ - li r0,0 - mtspr SPRN_PID,r0 - sync - - /* Configure and load two entries into TLB slots 62 and 63. - * In case we are pinning TLBs, these are reserved in by the - * other TLB functions. If not reserving, then it doesn't - * matter where they are loaded. - */ - clrrwi r4,r4,10 /* Mask off the real page number */ - ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ - - clrrwi r3,r3,10 /* Mask off the effective page number */ - ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) - - li r0,63 /* TLB slot 63 */ - - tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ - tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ - -#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE) - - /* Load a TLB entry for the UART, so that ppc4xx_progress() can use - * the UARTs nice and early. We use a 4k real==virtual mapping. 
*/ - - lis r3,SERIAL_DEBUG_IO_BASE@h - ori r3,r3,SERIAL_DEBUG_IO_BASE@l - mr r4,r3 - clrrwi r4,r4,12 - ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G) - - clrrwi r3,r3,12 - ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) - - li r0,0 /* TLB slot 0 */ - tlbwe r4,r0,TLB_DATA - tlbwe r3,r0,TLB_TAG -#endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */ - - isync - - /* Establish the exception vector base - */ - lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */ - tophys(r0,r4) /* Use the physical address */ - mtspr SPRN_EVPR,r0 - - blr - -_GLOBAL(abort) - mfspr r13,SPRN_DBCR0 - oris r13,r13,DBCR0_RST_SYSTEM@h - mtspr SPRN_DBCR0,r13 - -_GLOBAL(set_context) - -#ifdef CONFIG_BDI_SWITCH - /* Context switch the PTE pointer for the Abatron BDI2000. - * The PGDIR is the second parameter. - */ - lis r5, KERNELBASE@h - lwz r5, 0xf0(r5) - stw r4, 0x4(r5) -#endif - sync - mtspr SPRN_PID,r3 - isync /* Need an isync to flush shadow */ - /* TLBs after changing PID */ - blr - -/* We put a few things here that have to be page-aligned. This stuff - * goes at the beginning of the data segment, which is page-aligned. - */ - .data - .align 12 - .globl sdata -sdata: - .globl empty_zero_page -empty_zero_page: - .space 4096 - .globl swapper_pg_dir -swapper_pg_dir: - .space 4096 - - -/* Stack for handling critical exceptions from kernel mode */ - .section .bss - .align 12 -exception_stack_bottom: - .space 4096 -critical_stack_top: - .globl exception_stack_top -exception_stack_top: - -/* This space gets a copy of optional info passed to us by the bootstrap - * which is used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space 512 - -/* Room for two PTE pointers, usually the kernel and current user pointers - * to their respective root page table. - */ -abatron_pteptrs: - .space 8 -- cgit v1.1 From 4d922c8dc332f4c7bc156fa832187661d4899cee Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Mon, 20 Aug 2007 07:28:48 -0500 Subject: [POWERPC] 40x MMU Add MMU definitions for 40x platforms. Also fixes two warnings in 40x_mmu.c. Signed-off-by: Josh Boyer Acked-by: David Gibson --- arch/powerpc/kernel/head_40x.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index adc7f80..fe8afb6 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -772,7 +772,7 @@ finish_tlb_load: */ lwz r9, tlb_4xx_index@l(0) addi r9, r9, 1 - andi. r9, r9, (PPC4XX_TLB_SIZE-1) + andi. r9, r9, (PPC40X_TLB_SIZE-1) stw r9, tlb_4xx_index@l(0) 6: -- cgit v1.1 From aab69292e4efd38181cd300d9b83b12592643d6c Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Mon, 20 Aug 2007 07:29:11 -0500 Subject: [POWERPC] 40x decrementer fixes Allow generic_calibrate_decr to work for 40x platforms. Given that the hardware behavior is identical, this also changes the set_dec function to reload the PIT on 40x to match the behavior 44x currently has. 
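For readers unfamiliar with the 40x timer: the PIT is the 40x equivalent of the decrementer, and when auto-reload is enabled it restarts from the last value written to it. A rough C sketch of what "reload the PIT on 40x" means is below; this is an illustration of the idea only, not the actual include/asm-powerpc/time.h code (set_dec_sketch is a made-up name, mtspr()/SPRN_* come from asm/reg.h):

	static inline void set_dec_sketch(unsigned int val)
	{
	#if defined(CONFIG_40x)
		/* PIT: writing the register also sets the auto-reload value */
		mtspr(SPRN_PIT, val);
	#else
		/* classic one-shot decrementer */
		mtspr(SPRN_DEC, val);
	#endif
	}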
Signed-off-by: Josh Boyer --- arch/powerpc/kernel/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index c85d9b0..b5944d8 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -866,7 +866,7 @@ void __init generic_calibrate_decr(void) "(not found)\n"); } -#ifdef CONFIG_BOOKE +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) /* Set the time base to zero */ mtspr(SPRN_TBWL, 0); mtspr(SPRN_TBWU, 0); -- cgit v1.1 From 1daa194a87b3ea77760b4b5361eb47900dfc00a9 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Mon, 20 Aug 2007 07:29:36 -0500 Subject: [POWERPC] Fix 40x build Remove inclusion of __res on 40x. We don't need it in arch/powerpc Signed-off-by: Josh Boyer Acked-by: David Gibson --- arch/powerpc/kernel/head_40x.S | 1 - arch/powerpc/kernel/ppc_ksyms.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index fe8afb6..a8e0457 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index a20f195..430c502 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -180,7 +180,7 @@ EXPORT_SYMBOL(cacheable_memcpy); EXPORT_SYMBOL(cpm_install_handler); EXPORT_SYMBOL(cpm_free_handler); #endif /* CONFIG_8xx */ -#if defined(CONFIG_8xx) || defined(CONFIG_40x) +#if defined(CONFIG_8xx) EXPORT_SYMBOL(__res); #endif -- cgit v1.1 From 556ecf9be66f4d493e19bc71a7ce84366d512b71 Mon Sep 17 00:00:00 2001 From: Olaf Hering Date: Sat, 18 Aug 2007 04:27:17 +1000 Subject: [POWERPC] Advertise correct IDE mode on Pegasos2 The built-in IDE controller is configured in legacy mode, but the PCI registers advertise native mode. Force the PCI class into legacy mode. This allows pata_via to access two drives. The Pegasos specific irq enforcement in the via82cxxx driver must stay because there is apparently no generic way to setup irq per channel. Tested on Pegasos2 with firmware version 20040810, and two IDE disks. 
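The class-code fixup relies on the standard PCI programming-interface bits for IDE controllers: prog-if bit 0 selects native mode for the primary channel and bit 2 for the secondary channel, so masking off 0x5 makes both channels advertise legacy mode. A small stand-alone illustration (the initial value is hypothetical, chosen to look like an Open Firmware 24-bit class-code cell):

	#include <stdio.h>

	int main(void)
	{
		/* class 0x01 (storage), subclass 0x01 (IDE), prog-if 0x8f (native) */
		unsigned int class_code = 0x0001018f;

		class_code &= ~0x5;	/* clear both native-mode bits */
		printf("class-code now 0x%06x\n", class_code);	/* prints 01018a */
		return 0;
	}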
Signed-off-by: Olaf Hering Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom_init.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index a1d582e..29c2160 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2046,6 +2046,7 @@ static void __init fixup_device_tree_maple(void) /* * Pegasos and BriQ lacks the "ranges" property in the isa node * Pegasos needs decimal IRQ 14/15, not hexadecimal + * Pegasos has the IDE configured in legacy mode, but advertised as native */ static void __init fixup_device_tree_chrp(void) { @@ -2083,9 +2084,13 @@ static void __init fixup_device_tree_chrp(void) prom_printf("Fixing up IDE interrupt on Pegasos...\n"); prop[0] = 14; prop[1] = 0x0; - prop[2] = 15; - prop[3] = 0x0; - prom_setprop(ph, name, "interrupts", prop, 4*sizeof(u32)); + prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32)); + prom_printf("Fixing up IDE class-code on Pegasos...\n"); + rc = prom_getprop(ph, "class-code", prop, sizeof(u32)); + if (rc == sizeof(u32)) { + prop[0] &= ~0x5; + prom_setprop(ph, name, "class-code", prop, sizeof(u32)); + } } } #else -- cgit v1.1 From 16a15a30f8a09af6ce2dc4fd6eec9b454c1fe488 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 20 Aug 2007 14:58:36 +1000 Subject: [POWERPC] iSeries: Clean up lparmap mess We need to have xLparMap in head_64.S so that it is at a fixed address (because the linker will not resolve (address & 0xffffffff) for us). But the assembler miscalculates the KERNEL_VSID() expressions. So put the confusing expressions into asm-offsets.c. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 7 ------- arch/powerpc/kernel/asm-offsets.c | 8 ++++++++ arch/powerpc/kernel/head_64.S | 27 +++++++++++++++++++-------- arch/powerpc/kernel/lparmap.c | 32 -------------------------------- 4 files changed, 27 insertions(+), 47 deletions(-) delete mode 100644 arch/powerpc/kernel/lparmap.c (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index a4850dd..967afc5 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -80,13 +80,6 @@ ifneq ($(CONFIG_PPC_INDIRECT_IO),y) obj-y += iomap.o endif -ifeq ($(CONFIG_PPC_ISERIES),y) -CFLAGS_lparmap.s += -g0 -extra-y += lparmap.s -$(obj)/head_64.o: $(obj)/lparmap.s -AFLAGS_head_64.o += -I$(obj) -endif - else # stuff used from here for ARCH=ppc smpobj-$(CONFIG_SMP) += smp.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 2cb1d948..a408053 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -312,5 +312,13 @@ int main(void) #ifdef CONFIG_BUG DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); #endif + +#ifdef CONFIG_PPC_ISERIES + /* the assembler miscalculates the VSID values */ + DEFINE(PAGE_OFFSET_ESID, GET_ESID(PAGE_OFFSET)); + DEFINE(PAGE_OFFSET_VSID, KERNEL_VSID(PAGE_OFFSET)); + DEFINE(VMALLOC_START_ESID, GET_ESID(VMALLOC_START)); + DEFINE(VMALLOC_START_VSID, KERNEL_VSID(VMALLOC_START)); +#endif return 0; } diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 1718000..1e6d9cc 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -34,6 +34,7 @@ #include #include #include +#include #define DO_SOFT_DISABLE @@ -1519,8 +1520,8 @@ _GLOBAL(do_stab_bolted) * Space for CPU0's segment table. 
* * On iSeries, the hypervisor must fill in at least one entry before - * we get control (with relocate on). The address is give to the hv - * as a page number (see xLparMap in lpardata.c), so this must be at a + * we get control (with relocate on). The address is given to the hv + * as a page number (see xLparMap below), so this must be at a * fixed address (the linker can't compute (u64)&initial_stab >> * PAGE_SHIFT). */ @@ -1542,12 +1543,22 @@ fwnmi_data_area: * both pSeries and iSeries */ #ifdef CONFIG_PPC_ISERIES . = LPARMAP_PHYS -#include "lparmap.s" -/* - * This ".text" is here for old compilers that generate a trailing - * .note section when compiling .c files to .s - */ - .text + .globl xLparMap +xLparMap: + .quad HvEsidsToMap /* xNumberEsids */ + .quad HvRangesToMap /* xNumberRanges */ + .quad STAB0_PAGE /* xSegmentTableOffs */ + .zero 40 /* xRsvd */ + /* xEsids (HvEsidsToMap entries of 2 quads) */ + .quad PAGE_OFFSET_ESID /* xKernelEsid */ + .quad PAGE_OFFSET_VSID /* xKernelVsid */ + .quad VMALLOC_START_ESID /* xKernelEsid */ + .quad VMALLOC_START_VSID /* xKernelVsid */ + /* xRanges (HvRangesToMap entries of 3 quads) */ + .quad HvPagesToMap /* xPages */ + .quad 0 /* xOffset */ + .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */ + #endif /* CONFIG_PPC_ISERIES */ . = 0x8000 diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c deleted file mode 100644 index af11285..0000000 --- a/arch/powerpc/kernel/lparmap.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (C) 2005 Stephen Rothwell IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#include -#include -#include - -/* The # is to stop gcc trying to make .text nonexecutable */ -const struct LparMap __attribute__((__section__(".text #"))) xLparMap = { - .xNumberEsids = HvEsidsToMap, - .xNumberRanges = HvRangesToMap, - .xSegmentTableOffs = STAB0_PAGE, - - .xEsids = { - { .xKernelEsid = GET_ESID(PAGE_OFFSET), - .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), }, - { .xKernelEsid = GET_ESID(VMALLOC_START), - .xKernelVsid = KERNEL_VSID(VMALLOC_START), }, - }, - - .xRanges = { - { .xPages = HvPagesToMap, - .xOffset = 0, - .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT), - }, - }, -}; -- cgit v1.1 From 4b218e9bb2fbbc57b5a05de41d77c056a134528c Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Tue, 21 Aug 2007 02:36:19 +1000 Subject: [POWERPC] Whitespace cleanup in arch/powerpc Signed-off-by: Scott Wood Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 24bea97..dfad0e4 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -272,7 +272,7 @@ void do_IRQ(struct pt_regs *regs) struct thread_info *curtp, *irqtp; #endif - irq_enter(); + irq_enter(); #ifdef CONFIG_DEBUG_STACKOVERFLOW /* Debugging check for stack overflow: is there less than 2KB free? */ @@ -321,7 +321,7 @@ void do_IRQ(struct pt_regs *regs) /* That's not SMP safe ... but who cares ? 
*/ ppc_spurious_interrupts++; - irq_exit(); + irq_exit(); set_irq_regs(old_regs); #ifdef CONFIG_PPC_ISERIES -- cgit v1.1 From fc68e8699f1f987060ef817cff6a13a7cd7d4c8a Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 22 Aug 2007 13:44:58 +1000 Subject: [POWERPC] Move iSeries startup code out of head_64.S Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_64.S | 86 +------------------------------------------ 1 file changed, 2 insertions(+), 84 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 1e6d9cc..97f089b 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -802,56 +802,6 @@ system_call_iSeries: STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN) STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN) - .globl system_reset_iSeries -system_reset_iSeries: - mfspr r13,SPRN_SPRG3 /* Get paca address */ - mfmsr r24 - ori r24,r24,MSR_RI - mtmsrd r24 /* RI on */ - lhz r24,PACAPACAINDEX(r13) /* Get processor # */ - cmpwi 0,r24,0 /* Are we processor 0? */ - bne 1f - b .__start_initialization_iSeries /* Start up the first processor */ -1: mfspr r4,SPRN_CTRLF - li r5,CTRL_RUNLATCH /* Turn off the run light */ - andc r4,r4,r5 - mtspr SPRN_CTRLT,r4 - -1: - HMT_LOW -#ifdef CONFIG_SMP - lbz r23,PACAPROCSTART(r13) /* Test if this processor - * should start */ - sync - LOAD_REG_IMMEDIATE(r3,current_set) - sldi r28,r24,3 /* get current_set[cpu#] */ - ldx r3,r3,r28 - addi r1,r3,THREAD_SIZE - subi r1,r1,STACK_FRAME_OVERHEAD - - cmpwi 0,r23,0 - beq iSeries_secondary_smp_loop /* Loop until told to go */ - bne __secondary_start /* Loop until told to go */ -iSeries_secondary_smp_loop: - /* Let the Hypervisor know we are alive */ - /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ - lis r3,0x8002 - rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ -#else /* CONFIG_SMP */ - /* Yield the processor. This is required for non-SMP kernels - which are running on multi-threaded machines. */ - lis r3,0x8000 - rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ - addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ - li r4,0 /* "yield timed" */ - li r5,-1 /* "yield forever" */ -#endif /* CONFIG_SMP */ - li r0,-1 /* r0=-1 indicates a Hypervisor call */ - sc /* Invoke the hypervisor via a system call */ - mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */ - b 1b /* If SMP not configured, secondaries - * loop forever */ - decrementer_iSeries_masked: /* We may not have a valid TOC pointer in here. */ li r11,1 @@ -1622,39 +1572,6 @@ _GLOBAL(generic_secondary_smp_init) b __secondary_start #endif -#ifdef CONFIG_PPC_ISERIES -_INIT_STATIC(__start_initialization_iSeries) - /* Clear out the BSS */ - LOAD_REG_IMMEDIATE(r11,__bss_stop) - LOAD_REG_IMMEDIATE(r8,__bss_start) - sub r11,r11,r8 /* bss size */ - addi r11,r11,7 /* round up to an even double word */ - rldicl. r11,r11,61,3 /* shift right by 3 */ - beq 4f - addi r8,r8,-8 - li r0,0 - mtctr r11 /* zero this many doublewords */ -3: stdu r0,8(r8) - bdnz 3b -4: - LOAD_REG_IMMEDIATE(r1,init_thread_union) - addi r1,r1,THREAD_SIZE - li r0,0 - stdu r0,-STACK_FRAME_OVERHEAD(r1) - - LOAD_REG_IMMEDIATE(r2,__toc_start) - addi r2,r2,0x4000 - addi r2,r2,0x4000 - - bl .iSeries_early_setup - bl .early_setup - - /* relocation is on at this point */ - - b .start_here_common -#endif /* CONFIG_PPC_ISERIES */ - - _STATIC(__mmu_off) mfmsr r3 andi. 
r0,r3,MSR_IR|MSR_DR @@ -1902,6 +1819,7 @@ _GLOBAL(pmac_secondary_start) * r13 = paca virtual address * SPRG3 = paca virtual address */ + .globl __secondary_start __secondary_start: /* Set thread priority to MEDIUM */ HMT_MEDIUM @@ -2032,7 +1950,7 @@ _INIT_STATIC(start_here_multiplatform) b . /* prevent speculative execution */ /* This is where all platforms converge execution */ -_INIT_STATIC(start_here_common) +_INIT_GLOBAL(start_here_common) /* relocation is on at this point */ /* The following code sets up the SP and TOC now that we are */ -- cgit v1.1 From f9ff0f304833be9a6a605c84e24d630d5aef2230 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 22 Aug 2007 13:46:44 +1000 Subject: [POWERPC] Move the exception macros into a header file It makes head_64.S a bit more readable and will allow us to move the iSeries exceptions elsewhere. This also removes the last line of the comment: * The following macros define the code that appears as * the prologue to each of the exception handlers. They * are split into two parts to allow a single kernel binary * to be used for pSeries and iSeries. * LOL. One day... - paulus Anything is possible. :-) Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_64.S | 336 +----------------------------------------- 1 file changed, 1 insertion(+), 335 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 97f089b..fe6122b 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -35,6 +35,7 @@ #include #include #include +#include #define DO_SOFT_DISABLE @@ -145,344 +146,9 @@ exception_marker: .text /* - * The following macros define the code that appears as - * the prologue to each of the exception handlers. They - * are split into two parts to allow a single kernel binary - * to be used for pSeries and iSeries. - * LOL. One day... - paulus - */ - -/* - * We make as much of the exception code common between native - * exception handlers (including pSeries LPAR) and iSeries LPAR - * implementations as possible. - */ - -/* * This is the start of the interrupt handlers for pSeries * This code runs with relocation off. */ -#define EX_R9 0 -#define EX_R10 8 -#define EX_R11 16 -#define EX_R12 24 -#define EX_R13 32 -#define EX_SRR0 40 -#define EX_DAR 48 -#define EX_DSISR 56 -#define EX_CCR 60 -#define EX_R3 64 -#define EX_LR 72 - -/* - * We're short on space and time in the exception prolog, so we can't - * use the normal SET_REG_IMMEDIATE macro. Normally we just need the - * low halfword of the address, but for Kdump we need the whole low - * word. - */ -#ifdef CONFIG_CRASH_DUMP -#define LOAD_HANDLER(reg, label) \ - oris reg,reg,(label)@h; /* virt addr of handler ... */ \ - ori reg,reg,(label)@l; /* .. and the rest */ -#else -#define LOAD_HANDLER(reg, label) \ - ori reg,reg,(label)@l; /* virt addr of handler ... */ -#endif - -/* - * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode. - * The firmware calls the registered system_reset_fwnmi and - * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run - * a 32bit application at the time of the event. - * This firmware bug is present on POWER4 and JS20. 
- */ -#define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label) \ - mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ - std r9,area+EX_R9(r13); /* save r9 - r12 */ \ - std r10,area+EX_R10(r13); \ - std r11,area+EX_R11(r13); \ - std r12,area+EX_R12(r13); \ - mfspr r9,SPRN_SPRG1; \ - std r9,area+EX_R13(r13); \ - mfcr r9; \ - clrrdi r12,r13,32; /* get high part of &label */ \ - mfmsr r10; \ - /* force 64bit mode */ \ - li r11,5; /* MSR_SF_LG|MSR_ISF_LG */ \ - rldimi r10,r11,61,0; /* insert into top 3 bits */ \ - /* done 64bit mode */ \ - mfspr r11,SPRN_SRR0; /* save SRR0 */ \ - LOAD_HANDLER(r12,label) \ - ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ - mtspr SPRN_SRR0,r12; \ - mfspr r12,SPRN_SRR1; /* and SRR1 */ \ - mtspr SPRN_SRR1,r10; \ - rfid; \ - b . /* prevent speculative execution */ - -#define EXCEPTION_PROLOG_PSERIES(area, label) \ - mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ - std r9,area+EX_R9(r13); /* save r9 - r12 */ \ - std r10,area+EX_R10(r13); \ - std r11,area+EX_R11(r13); \ - std r12,area+EX_R12(r13); \ - mfspr r9,SPRN_SPRG1; \ - std r9,area+EX_R13(r13); \ - mfcr r9; \ - clrrdi r12,r13,32; /* get high part of &label */ \ - mfmsr r10; \ - mfspr r11,SPRN_SRR0; /* save SRR0 */ \ - LOAD_HANDLER(r12,label) \ - ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ - mtspr SPRN_SRR0,r12; \ - mfspr r12,SPRN_SRR1; /* and SRR1 */ \ - mtspr SPRN_SRR1,r10; \ - rfid; \ - b . /* prevent speculative execution */ - -/* - * This is the start of the interrupt handlers for iSeries - * This code runs with relocation on. - */ -#define EXCEPTION_PROLOG_ISERIES_1(area) \ - mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ - std r9,area+EX_R9(r13); /* save r9 - r12 */ \ - std r10,area+EX_R10(r13); \ - std r11,area+EX_R11(r13); \ - std r12,area+EX_R12(r13); \ - mfspr r9,SPRN_SPRG1; \ - std r9,area+EX_R13(r13); \ - mfcr r9 - -#define EXCEPTION_PROLOG_ISERIES_2 \ - mfmsr r10; \ - ld r12,PACALPPACAPTR(r13); \ - ld r11,LPPACASRR0(r12); \ - ld r12,LPPACASRR1(r12); \ - ori r10,r10,MSR_RI; \ - mtmsrd r10,1 - -/* - * The common exception prolog is used for all except a few exceptions - * such as a segment miss on a kernel address. We have to be prepared - * to take another exception from the point where we first touch the - * kernel stack onwards. - * - * On entry r13 points to the paca, r9-r13 are saved in the paca, - * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and - * SRR1, and relocation is on. - */ -#define EXCEPTION_PROLOG_COMMON(n, area) \ - andi. 
r10,r12,MSR_PR; /* See if coming from user */ \ - mr r10,r1; /* Save r1 */ \ - subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ - beq- 1f; \ - ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ -1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ - bge- cr1,2f; /* abort if it is */ \ - b 3f; \ -2: li r1,(n); /* will be reloaded later */ \ - sth r1,PACA_TRAP_SAVE(r13); \ - b bad_stack; \ -3: std r9,_CCR(r1); /* save CR in stackframe */ \ - std r11,_NIP(r1); /* save SRR0 in stackframe */ \ - std r12,_MSR(r1); /* save SRR1 in stackframe */ \ - std r10,0(r1); /* make stack chain pointer */ \ - std r0,GPR0(r1); /* save r0 in stackframe */ \ - std r10,GPR1(r1); /* save r1 in stackframe */ \ - ACCOUNT_CPU_USER_ENTRY(r9, r10); \ - std r2,GPR2(r1); /* save r2 in stackframe */ \ - SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ - SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ - ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ - ld r10,area+EX_R10(r13); \ - std r9,GPR9(r1); \ - std r10,GPR10(r1); \ - ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \ - ld r10,area+EX_R12(r13); \ - ld r11,area+EX_R13(r13); \ - std r9,GPR11(r1); \ - std r10,GPR12(r1); \ - std r11,GPR13(r1); \ - ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ - mflr r9; /* save LR in stackframe */ \ - std r9,_LINK(r1); \ - mfctr r10; /* save CTR in stackframe */ \ - std r10,_CTR(r1); \ - lbz r10,PACASOFTIRQEN(r13); \ - mfspr r11,SPRN_XER; /* save XER in stackframe */ \ - std r10,SOFTE(r1); \ - std r11,_XER(r1); \ - li r9,(n)+1; \ - std r9,_TRAP(r1); /* set trap number */ \ - li r10,0; \ - ld r11,exception_marker@toc(r2); \ - std r10,RESULT(r1); /* clear regs->result */ \ - std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ - -/* - * Exception vectors. - */ -#define STD_EXCEPTION_PSERIES(n, label) \ - . = n; \ - .globl label##_pSeries; \ -label##_pSeries: \ - HMT_MEDIUM; \ - mtspr SPRN_SPRG1,r13; /* save r13 */ \ - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) - -#define HSTD_EXCEPTION_PSERIES(n, label) \ - . = n; \ - .globl label##_pSeries; \ -label##_pSeries: \ - HMT_MEDIUM; \ - mtspr SPRN_SPRG1,r20; /* save r20 */ \ - mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \ - mtspr SPRN_SRR0,r20; \ - mfspr r20,SPRN_HSRR1; /* copy HSRR0 to SRR0 */ \ - mtspr SPRN_SRR1,r20; \ - mfspr r20,SPRN_SPRG1; /* restore r20 */ \ - mtspr SPRN_SPRG1,r13; /* save r13 */ \ - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) - - -#define MASKABLE_EXCEPTION_PSERIES(n, label) \ - . = n; \ - .globl label##_pSeries; \ -label##_pSeries: \ - HMT_MEDIUM; \ - mtspr SPRN_SPRG1,r13; /* save r13 */ \ - mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ - std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \ - std r10,PACA_EXGEN+EX_R10(r13); \ - lbz r10,PACASOFTIRQEN(r13); \ - mfcr r9; \ - cmpwi r10,0; \ - beq masked_interrupt; \ - mfspr r10,SPRN_SPRG1; \ - std r10,PACA_EXGEN+EX_R13(r13); \ - std r11,PACA_EXGEN+EX_R11(r13); \ - std r12,PACA_EXGEN+EX_R12(r13); \ - clrrdi r12,r13,32; /* get high part of &label */ \ - mfmsr r10; \ - mfspr r11,SPRN_SRR0; /* save SRR0 */ \ - LOAD_HANDLER(r12,label##_common) \ - ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ - mtspr SPRN_SRR0,r12; \ - mfspr r12,SPRN_SRR1; /* and SRR1 */ \ - mtspr SPRN_SRR1,r10; \ - rfid; \ - b . 
/* prevent speculative execution */ - -#define STD_EXCEPTION_ISERIES(n, label, area) \ - .globl label##_iSeries; \ -label##_iSeries: \ - HMT_MEDIUM; \ - mtspr SPRN_SPRG1,r13; /* save r13 */ \ - EXCEPTION_PROLOG_ISERIES_1(area); \ - EXCEPTION_PROLOG_ISERIES_2; \ - b label##_common - -#define MASKABLE_EXCEPTION_ISERIES(n, label) \ - .globl label##_iSeries; \ -label##_iSeries: \ - HMT_MEDIUM; \ - mtspr SPRN_SPRG1,r13; /* save r13 */ \ - EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ - lbz r10,PACASOFTIRQEN(r13); \ - cmpwi 0,r10,0; \ - beq- label##_iSeries_masked; \ - EXCEPTION_PROLOG_ISERIES_2; \ - b label##_common; \ - -#ifdef CONFIG_PPC_ISERIES -#define DISABLE_INTS \ - li r11,0; \ - stb r11,PACASOFTIRQEN(r13); \ -BEGIN_FW_FTR_SECTION; \ - stb r11,PACAHARDIRQEN(r13); \ -END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \ -BEGIN_FW_FTR_SECTION; \ - mfmsr r10; \ - ori r10,r10,MSR_EE; \ - mtmsrd r10,1; \ -END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) - -#else -#define DISABLE_INTS \ - li r11,0; \ - stb r11,PACASOFTIRQEN(r13); \ - stb r11,PACAHARDIRQEN(r13) - -#endif /* CONFIG_PPC_ISERIES */ - -#define ENABLE_INTS \ - ld r12,_MSR(r1); \ - mfmsr r11; \ - rlwimi r11,r12,0,MSR_EE; \ - mtmsrd r11,1 - -#define STD_EXCEPTION_COMMON(trap, label, hdlr) \ - .align 7; \ - .globl label##_common; \ -label##_common: \ - EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ - DISABLE_INTS; \ - bl .save_nvgprs; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - bl hdlr; \ - b .ret_from_except - -/* - * Like STD_EXCEPTION_COMMON, but for exceptions that can occur - * in the idle task and therefore need the special idle handling. - */ -#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \ - .align 7; \ - .globl label##_common; \ -label##_common: \ - EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ - FINISH_NAP; \ - DISABLE_INTS; \ - bl .save_nvgprs; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - bl hdlr; \ - b .ret_from_except - -#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \ - .align 7; \ - .globl label##_common; \ -label##_common: \ - EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ - FINISH_NAP; \ - DISABLE_INTS; \ - bl .ppc64_runlatch_on; \ - addi r3,r1,STACK_FRAME_OVERHEAD; \ - bl hdlr; \ - b .ret_from_except_lite - -/* - * When the idle code in power4_idle puts the CPU into NAP mode, - * it has to do so in a loop, and relies on the external interrupt - * and decrementer interrupt entry code to get it out of the loop. - * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags - * to signal that it is in the loop and needs help to get out. - */ -#ifdef CONFIG_PPC_970_NAP -#define FINISH_NAP \ -BEGIN_FTR_SECTION \ - clrrdi r11,r1,THREAD_SHIFT; \ - ld r9,TI_LOCAL_FLAGS(r11); \ - andi. r10,r9,_TLF_NAPPING; \ - bnel power4_fixup_nap; \ -END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) -#else -#define FINISH_NAP -#endif - -/* - * Start of pSeries system interrupt routines - */ . 
= 0x100 .globl __start_interrupts __start_interrupts: -- cgit v1.1 From dc8f571a26689102f6abe2565a84226edeaacc61 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 22 Aug 2007 13:47:24 +1000 Subject: [POWERPC] Move the iSeries exception vectors out of head_64.S and into platforms/iseries/exception.S Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_64.S | 136 ------------------------------------------ 1 file changed, 136 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index fe6122b..33c4e8c 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -358,142 +358,6 @@ machine_check_fwnmi: mtspr SPRN_SPRG1,r13 /* save r13 */ EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common) -#ifdef CONFIG_PPC_ISERIES -/*** ISeries-LPAR interrupt handlers ***/ - - STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC) - - .globl data_access_iSeries -data_access_iSeries: - mtspr SPRN_SPRG1,r13 -BEGIN_FTR_SECTION - mtspr SPRN_SPRG2,r12 - mfspr r13,SPRN_DAR - mfspr r12,SPRN_DSISR - srdi r13,r13,60 - rlwimi r13,r12,16,0x20 - mfcr r12 - cmpwi r13,0x2c - beq .do_stab_bolted_iSeries - mtcrf 0x80,r12 - mfspr r12,SPRN_SPRG2 -END_FTR_SECTION_IFCLR(CPU_FTR_SLB) - EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN) - EXCEPTION_PROLOG_ISERIES_2 - b data_access_common - -.do_stab_bolted_iSeries: - mtcrf 0x80,r12 - mfspr r12,SPRN_SPRG2 - EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) - EXCEPTION_PROLOG_ISERIES_2 - b .do_stab_bolted - - .globl data_access_slb_iSeries -data_access_slb_iSeries: - mtspr SPRN_SPRG1,r13 /* save r13 */ - mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ - std r3,PACA_EXSLB+EX_R3(r13) - mfspr r3,SPRN_DAR - std r9,PACA_EXSLB+EX_R9(r13) - mfcr r9 -#ifdef __DISABLED__ - cmpdi r3,0 - bge slb_miss_user_iseries -#endif - std r10,PACA_EXSLB+EX_R10(r13) - std r11,PACA_EXSLB+EX_R11(r13) - std r12,PACA_EXSLB+EX_R12(r13) - mfspr r10,SPRN_SPRG1 - std r10,PACA_EXSLB+EX_R13(r13) - ld r12,PACALPPACAPTR(r13) - ld r12,LPPACASRR1(r12) - b .slb_miss_realmode - - STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) - - .globl instruction_access_slb_iSeries -instruction_access_slb_iSeries: - mtspr SPRN_SPRG1,r13 /* save r13 */ - mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ - std r3,PACA_EXSLB+EX_R3(r13) - ld r3,PACALPPACAPTR(r13) - ld r3,LPPACASRR0(r3) /* get SRR0 value */ - std r9,PACA_EXSLB+EX_R9(r13) - mfcr r9 -#ifdef __DISABLED__ - cmpdi r3,0 - bge .slb_miss_user_iseries -#endif - std r10,PACA_EXSLB+EX_R10(r13) - std r11,PACA_EXSLB+EX_R11(r13) - std r12,PACA_EXSLB+EX_R12(r13) - mfspr r10,SPRN_SPRG1 - std r10,PACA_EXSLB+EX_R13(r13) - ld r12,PACALPPACAPTR(r13) - ld r12,LPPACASRR1(r12) - b .slb_miss_realmode - -#ifdef __DISABLED__ -slb_miss_user_iseries: - std r10,PACA_EXGEN+EX_R10(r13) - std r11,PACA_EXGEN+EX_R11(r13) - std r12,PACA_EXGEN+EX_R12(r13) - mfspr r10,SPRG1 - ld r11,PACA_EXSLB+EX_R9(r13) - ld r12,PACA_EXSLB+EX_R3(r13) - std r10,PACA_EXGEN+EX_R13(r13) - std r11,PACA_EXGEN+EX_R9(r13) - std r12,PACA_EXGEN+EX_R3(r13) - EXCEPTION_PROLOG_ISERIES_2 - b slb_miss_user_common -#endif - - MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt) - STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN) - STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN) - STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN) - MASKABLE_EXCEPTION_ISERIES(0x900, decrementer) - STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN) - STD_EXCEPTION_ISERIES(0xb00, trap_0b, 
PACA_EXGEN) - - .globl system_call_iSeries -system_call_iSeries: - mr r9,r13 - mfspr r13,SPRN_SPRG3 - EXCEPTION_PROLOG_ISERIES_2 - b system_call_common - - STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN) - STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN) - STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN) - -decrementer_iSeries_masked: - /* We may not have a valid TOC pointer in here. */ - li r11,1 - ld r12,PACALPPACAPTR(r13) - stb r11,LPPACADECRINT(r12) - LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy) - lwz r12,0(r12) - mtspr SPRN_DEC,r12 - /* fall through */ - -hardware_interrupt_iSeries_masked: - mtcrf 0x80,r9 /* Restore regs */ - ld r12,PACALPPACAPTR(r13) - ld r11,LPPACASRR0(r12) - ld r12,LPPACASRR1(r12) - mtspr SPRN_SRR0,r11 - mtspr SPRN_SRR1,r12 - ld r9,PACA_EXGEN+EX_R9(r13) - ld r10,PACA_EXGEN+EX_R10(r13) - ld r11,PACA_EXGEN+EX_R11(r13) - ld r12,PACA_EXGEN+EX_R12(r13) - ld r13,PACA_EXGEN+EX_R13(r13) - rfid - b . /* prevent speculative execution */ -#endif /* CONFIG_PPC_ISERIES */ - /*** Common interrupt handlers ***/ STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) -- cgit v1.1 From ed16c20da6f500bc2dfad933078d2987636a7b60 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Fri, 17 Aug 2007 01:47:39 -0500 Subject: [POWERPC] Remove old includes from arch/ppc Remove includes of files that existed in arch/ppc that we dont need in arch/powerpc anymore. The following includes were removed: This also caused platforms/embedded6xx/mpc7448_hpc2.h to no longer be needed and thus removed. Signed-off-by: Kumar Gala --- arch/powerpc/kernel/setup_32.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 7ec6ba5..a288a5f 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -18,13 +18,11 @@ #include #include -#include #include #include #include #include #include -#include #include #include #include -- cgit v1.1 From 15fc993e31293f9b179eb5f08b18a4a4f2ca648a Mon Sep 17 00:00:00 2001 From: Valentine Barshak Date: Wed, 29 Aug 2007 17:40:30 +0400 Subject: [POWERPC] PowerPC 440EPx: Sequoia board support AMCC PPC440EPx Sequoia board support. Signed-off-by: Valentine Barshak Acked-by: David Gibson Signed-off-by: Josh Boyer --- arch/powerpc/kernel/cputable.c | 18 ++++++++++++++++++ arch/powerpc/kernel/head_44x.S | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index b1f8000..5873073 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1132,6 +1132,24 @@ static struct cpu_spec cpu_specs[] = { .dcache_bsize = 32, .platform = "ppc440", }, + { /* 440EPX */ + .pvr_mask = 0xf0000ffb, + .pvr_value = 0x200008D0, + .cpu_name = "440EPX", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .icache_bsize = 32, + .dcache_bsize = 32, + }, + { /* 440GRX */ + .pvr_mask = 0xf0000ffb, + .pvr_value = 0x200008D8, + .cpu_name = "440GRX", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .icache_bsize = 32, + .dcache_bsize = 32, + }, { /* 440GP Rev. 
B */ .pvr_mask = 0xf0000fff, .pvr_value = 0x40000440, diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 8869596..e26d26e 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -217,7 +217,7 @@ skpinv: addi r4,r4,1 /* Increment */ lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ mtspr SPRN_IVPR,r4 -#ifdef CONFIG_440EP +#if defined(CONFIG_440EP) || defined(CONFIG_440EPX) /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */ mfspr r2,SPRN_CCR0 lis r3,0xffef -- cgit v1.1 From 52964f87c64e6c6ea671b5bf3030fb1494090a48 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 28 Aug 2007 18:47:54 +1000 Subject: [POWERPC] Add an optional device_node pointer to the irq_host The majority of irq_host implementations (3 out of 4) are associated with a device_node, and need to stash it somewhere. Rather than having it somewhere different for each host, add an optional device_node pointer to the irq_host structure. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/irq.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index dfad0e4..79b4512 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -418,10 +418,11 @@ irq_hw_number_t virq_to_hw(unsigned int virq) } EXPORT_SYMBOL_GPL(virq_to_hw); -__init_refok struct irq_host *irq_alloc_host(unsigned int revmap_type, - unsigned int revmap_arg, - struct irq_host_ops *ops, - irq_hw_number_t inval_irq) +__init_refok struct irq_host *irq_alloc_host(struct device_node *of_node, + unsigned int revmap_type, + unsigned int revmap_arg, + struct irq_host_ops *ops, + irq_hw_number_t inval_irq) { struct irq_host *host; unsigned int size = sizeof(struct irq_host); @@ -446,6 +447,7 @@ __init_refok struct irq_host *irq_alloc_host(unsigned int revmap_type, host->revmap_type = revmap_type; host->inval_irq = inval_irq; host->ops = ops; + host->of_node = of_node; spin_lock_irqsave(&irq_big_lock, flags); -- cgit v1.1 From 8528ab84ebe7a1eeed9b0acc808df86663d506c0 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 28 Aug 2007 18:47:55 +1000 Subject: [POWERPC] Invert null match behaviour for irq_hosts Currently if you don't specify a match callback for your irq_host it's assumed you match everything. This is a kind of opt-out approach, and turns out to be the exception rather than the rule. So change the semantics to be opt-in, ie. you don't match anything unless you provide a match callback. This in itself isn't very useful, but will allow us to provide a default match implementation in a subsequent patch. 
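Under the opt-in rule, a host that really does want the old "match everything" behaviour must now say so explicitly. A hypothetical example of such a callback (the prototypes follow the irq_host/irq_host_ops declarations used here; .map, .unmap and .xlate are omitted for brevity):

	/* claim every device node, i.e. reproduce the old opt-out behaviour */
	static int my_match_any(struct irq_host *h, struct device_node *np)
	{
		return 1;
	}

	static struct irq_host_ops my_host_ops = {
		.match	= my_match_any,
		/* .map, .unmap, .xlate as before */
	};

In practice few hosts should need this, since the next patch in the series adds a default match that compares the host's of_node against the node being looked up.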
Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 79b4512..30fb8e2 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -523,7 +523,7 @@ struct irq_host *irq_find_host(struct device_node *node) */ spin_lock_irqsave(&irq_big_lock, flags); list_for_each_entry(h, &irq_hosts, link) - if (h->ops->match == NULL || h->ops->match(h, node)) { + if (h->ops->match != NULL && h->ops->match(h, node)) { found = h; break; } -- cgit v1.1 From 6815800601d3e46b976c868e4e85fb6de32b9133 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 28 Aug 2007 18:47:55 +1000 Subject: [POWERPC] Provide a default irq_host match, which matches on an exact of_node The most common match semantic is an exact match based on the device node. So provide a default implementation that does this, and hook it up if no match routine is specified. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/irq.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 30fb8e2..d5c7e4c 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -418,6 +418,11 @@ irq_hw_number_t virq_to_hw(unsigned int virq) } EXPORT_SYMBOL_GPL(virq_to_hw); +static int default_irq_host_match(struct irq_host *h, struct device_node *np) +{ + return h->of_node != NULL && h->of_node == np; +} + __init_refok struct irq_host *irq_alloc_host(struct device_node *of_node, unsigned int revmap_type, unsigned int revmap_arg, @@ -449,6 +454,9 @@ __init_refok struct irq_host *irq_alloc_host(struct device_node *of_node, host->ops = ops; host->of_node = of_node; + if (host->ops->match == NULL) + host->ops->match = default_irq_host_match; + spin_lock_irqsave(&irq_big_lock, flags); /* If it's a legacy controller, check for duplicates and @@ -523,7 +531,7 @@ struct irq_host *irq_find_host(struct device_node *node) */ spin_lock_irqsave(&irq_big_lock, flags); list_for_each_entry(h, &irq_hosts, link) - if (h->ops->match != NULL && h->ops->match(h, node)) { + if (h->ops->match(h, node)) { found = h; break; } -- cgit v1.1 From 7866291d4cabf5491d4ecb62787308f8b8958f59 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 28 Aug 2007 18:47:56 +1000 Subject: [POWERPC] Initialise hwirq for legacy irqs Although no one uses the hwirq value for legacy irqs at the moment, we should really setup the correct value in the irq_map. 
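One visible effect is that virq_to_hw() now returns the identity mapping for the legacy range instead of 0. A hypothetical sanity check (not part of this patch) would be:

	/* legacy virqs map 1:1 onto hardware irq numbers */
	unsigned int i;

	for (i = 1; i < NUM_ISA_INTERRUPTS; i++)
		WARN_ON(virq_to_hw(i) != i);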
Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index d5c7e4c..1339f32 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -487,7 +487,7 @@ __init_refok struct irq_host *irq_alloc_host(struct device_node *of_node, host->inval_irq = 0; /* setup us as the host for all legacy interrupts */ for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { - irq_map[i].hwirq = 0; + irq_map[i].hwirq = i; smp_wmb(); irq_map[i].host = host; smp_wmb(); -- cgit v1.1 From 60b332e755da7dbf32f1660973ce4f97ebf05d05 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 28 Aug 2007 18:47:57 +1000 Subject: [POWERPC] Export virq mapping via debugfs This adds a debugfs file "powerpc/virq_mapping", which shows the virtual to real mapping of irq numbers. Enable it with CONFIG_VIRQ_DEBUG. Signed-off-by: Zhang Wei Signed-off-by: Chen Gong Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/irq.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 1339f32..0e47c8c 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include @@ -1006,6 +1007,68 @@ static int irq_late_init(void) } arch_initcall(irq_late_init); +#ifdef CONFIG_VIRQ_DEBUG +static int virq_debug_show(struct seq_file *m, void *private) +{ + unsigned long flags; + irq_desc_t *desc; + const char *p; + char none[] = "none"; + int i; + + seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", + "chip name", "host name"); + + for (i = 1; i < NR_IRQS; i++) { + desc = get_irq_desc(i); + spin_lock_irqsave(&desc->lock, flags); + + if (desc->action && desc->action->handler) { + seq_printf(m, "%5d ", i); + seq_printf(m, "0x%05lx ", virq_to_hw(i)); + + if (desc->chip && desc->chip->typename) + p = desc->chip->typename; + else + p = none; + seq_printf(m, "%-15s ", p); + + if (irq_map[i].host && irq_map[i].host->of_node) + p = irq_map[i].host->of_node->full_name; + else + p = none; + seq_printf(m, "%s\n", p); + } + + spin_unlock_irqrestore(&desc->lock, flags); + } + + return 0; +} + +static int virq_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, virq_debug_show, inode->i_private); +} + +static const struct file_operations virq_debug_fops = { + .open = virq_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init irq_debugfs_init(void) +{ + if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, + NULL, &virq_debug_fops)) + return -ENOMEM; + + return 0; +} +__initcall(irq_debugfs_init); +#endif /* CONFIG_VIRQ_DEBUG */ + #endif /* CONFIG_PPC_MERGE */ #ifdef CONFIG_PPC64 -- cgit v1.1 From 2e1957fd47b9d4b7bf35be2ec3d4b5e3eefe5cc0 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 5 Sep 2007 12:09:06 +1000 Subject: [POWERPC] pasemi: Export more SPRs to sysfs when CONFIG_DEBUG_KERNEL=y Export some of the implementation-specific registers via sysfs. Useful when debugging, etc. 
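These attributes hang off the per-cpu sysdev, so with CONFIG_DEBUG_KERNEL set they can be read and written from user space. A hypothetical reader, assuming the usual /sys/devices/system/cpu/cpuN/ location for cpu sysdev attributes (the exact path is an assumption, not spelled out by the patch):

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/hid0", "r");

		if (!f) {
			perror("hid0");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("cpu0 HID0: %s", buf);
		fclose(f);
		return 0;
	}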
Signed-off-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/sysfs.c | 60 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 55d29ed..d7835ba 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -197,6 +197,36 @@ SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3); SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4); SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5); +#ifdef CONFIG_DEBUG_KERNEL +SYSFS_PMCSETUP(hid0, SPRN_HID0); +SYSFS_PMCSETUP(hid1, SPRN_HID1); +SYSFS_PMCSETUP(hid4, SPRN_HID4); +SYSFS_PMCSETUP(hid5, SPRN_HID5); +SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0); +SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1); +SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2); +SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3); +SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4); +SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5); +SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6); +SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7); +SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8); +SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9); +SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT); +SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR); +SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR); +SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR); +SYSFS_PMCSETUP(der, SPRN_PA6T_DER); +SYSFS_PMCSETUP(mer, SPRN_PA6T_MER); +SYSFS_PMCSETUP(ber, SPRN_PA6T_BER); +SYSFS_PMCSETUP(ier, SPRN_PA6T_IER); +SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER); +SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR); +SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0); +SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1); +SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2); +SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3); +#endif /* CONFIG_DEBUG_KERNEL */ static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra); static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL); @@ -228,6 +258,36 @@ static struct sysdev_attribute pa6t_attrs[] = { _SYSDEV_ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3), _SYSDEV_ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4), _SYSDEV_ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5), +#ifdef CONFIG_DEBUG_KERNEL + _SYSDEV_ATTR(hid0, 0600, show_hid0, store_hid0), + _SYSDEV_ATTR(hid1, 0600, show_hid1, store_hid1), + _SYSDEV_ATTR(hid4, 0600, show_hid4, store_hid4), + _SYSDEV_ATTR(hid5, 0600, show_hid5, store_hid5), + _SYSDEV_ATTR(ima0, 0600, show_ima0, store_ima0), + _SYSDEV_ATTR(ima1, 0600, show_ima1, store_ima1), + _SYSDEV_ATTR(ima2, 0600, show_ima2, store_ima2), + _SYSDEV_ATTR(ima3, 0600, show_ima3, store_ima3), + _SYSDEV_ATTR(ima4, 0600, show_ima4, store_ima4), + _SYSDEV_ATTR(ima5, 0600, show_ima5, store_ima5), + _SYSDEV_ATTR(ima6, 0600, show_ima6, store_ima6), + _SYSDEV_ATTR(ima7, 0600, show_ima7, store_ima7), + _SYSDEV_ATTR(ima8, 0600, show_ima8, store_ima8), + _SYSDEV_ATTR(ima9, 0600, show_ima9, store_ima9), + _SYSDEV_ATTR(imaat, 0600, show_imaat, store_imaat), + _SYSDEV_ATTR(btcr, 0600, show_btcr, store_btcr), + _SYSDEV_ATTR(pccr, 0600, show_pccr, store_pccr), + _SYSDEV_ATTR(rpccr, 0600, show_rpccr, store_rpccr), + _SYSDEV_ATTR(der, 0600, show_der, store_der), + _SYSDEV_ATTR(mer, 0600, show_mer, store_mer), + _SYSDEV_ATTR(ber, 0600, show_ber, store_ber), + _SYSDEV_ATTR(ier, 0600, show_ier, store_ier), + _SYSDEV_ATTR(sier, 0600, show_sier, store_sier), + _SYSDEV_ATTR(siar, 0600, show_siar, store_siar), + _SYSDEV_ATTR(tsr0, 0600, show_tsr0, store_tsr0), + _SYSDEV_ATTR(tsr1, 0600, show_tsr1, store_tsr1), + _SYSDEV_ATTR(tsr2, 0600, show_tsr2, store_tsr2), + _SYSDEV_ATTR(tsr3, 0600, show_tsr3, store_tsr3), +#endif /* CONFIG_DEBUG_KERNEL */ }; -- cgit v1.1 From 
01f1c735f57548e6b862e815cc845e452405643d Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 5 Sep 2007 12:41:09 +1000 Subject: [POWERPC] Remove unused platform_machine_check() Remove leftover cruft from ARCH=ppc. There are no users of platform_machine_check() in ARCH=powerpc, and none should be added (they should use ppc_md.machine_check_handler instead). Signed-off-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/traps.c | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index d8502e3..ccfc99d 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -324,15 +324,6 @@ static inline int check_io_access(struct pt_regs *regs) #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) #endif -/* - * This is "fall-back" implementation for configurations - * which don't provide platform-specific machine check info - */ -void __attribute__ ((weak)) -platform_machine_check(struct pt_regs *regs) -{ -} - void machine_check_exception(struct pt_regs *regs) { int recover = 0; @@ -480,12 +471,6 @@ void machine_check_exception(struct pt_regs *regs) } #endif /* CONFIG_4xx */ - /* - * Optional platform-provided routine to print out - * additional info, e.g. bus error registers. - */ - platform_machine_check(regs); - if (debugger_fault_handler(regs)) return; die("Machine check", regs, SIGBUS); -- cgit v1.1 From a416561bf790d55db68b2980c2a6951981018041 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 5 Sep 2007 12:42:30 +1000 Subject: [POWERPC] Move lowlevel runlatch calls under cpu feature control There's no need to call the runlatch on functions on processors that don't implement them (CPU_FTR_CTRL). Signed-off-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_64.S | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 33c4e8c..cec5908 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -656,7 +656,9 @@ hardware_interrupt_common: FINISH_NAP hardware_interrupt_entry: DISABLE_INTS +BEGIN_FTR_SECTION bl .ppc64_runlatch_on +END_FTR_SECTION_IFSET(CPU_FTR_CTRL) addi r3,r1,STACK_FRAME_OVERHEAD bl .do_IRQ b .ret_from_except_lite -- cgit v1.1 From 6bcc4c01755fd2066bc374193cf3b0849cbabe47 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 5 Sep 2007 12:43:17 +1000 Subject: [POWERPC] Remove warning in arch/powerpc/kernel/sysfs.c Fixes: arch/powerpc/kernel/sysfs.c: In function 'cpu_add_sysdev_attr_group': arch/powerpc/kernel/sysfs.c:388: warning: ignoring return value of 'sysfs_create_group', declared with attribute warn_unused_result Signed-off-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/sysfs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index d7835ba..25d9a96 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -440,12 +440,14 @@ int cpu_add_sysdev_attr_group(struct attribute_group *attrs) { int cpu; struct sys_device *sysdev; + int ret; mutex_lock(&cpu_mutex); for_each_possible_cpu(cpu) { sysdev = get_cpu_sysdev(cpu); - sysfs_create_group(&sysdev->kobj, attrs); + ret = sysfs_create_group(&sysdev->kobj, attrs); + WARN_ON(ret != 0); } mutex_unlock(&cpu_mutex); -- cgit v1.1 From bde6c6e16aa489ea76c762fb7ffb0abb48660dd8 Mon 
Sep 17 00:00:00 2001 From: Scott Wood Date: Thu, 6 Sep 2007 08:04:38 +1000 Subject: [POWERPC] Check _PAGE_RW and _PAGE_PRESENT on kernel addresses Previously, the TLB miss handlers assumed that pages above KERNELBASE are always present and read/write. This assumption is false in the case of CONFIG_DEBUG_PAGEALLOC. Signed-off-by: Scott Wood Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_32.S | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 7d73a13..0e3df1f 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -475,10 +475,10 @@ InstructionTLBMiss: li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ lwz r2,PGDIR(r2) blt+ 112f + mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ + rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ lis r2,swapper_pg_dir@ha /* if kernel address, use */ addi r2,r2,swapper_pg_dir@l /* kernel page table */ - mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ - rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ 112: tophys(r2,r2) rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ @@ -549,10 +549,10 @@ DataLoadTLBMiss: li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ lwz r2,PGDIR(r2) blt+ 112f + mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ + rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ lis r2,swapper_pg_dir@ha /* if kernel address, use */ addi r2,r2,swapper_pg_dir@l /* kernel page table */ - mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ - rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ 112: tophys(r2,r2) rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ @@ -621,10 +621,10 @@ DataStoreTLBMiss: li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ lwz r2,PGDIR(r2) blt+ 112f + mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ + rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ lis r2,swapper_pg_dir@ha /* if kernel address, use */ addi r2,r2,swapper_pg_dir@l /* kernel page table */ - mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ - rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ 112: tophys(r2,r2) rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ -- cgit v1.1 From e788ff13be03c2cc4055d5569b7b218dc3f2cb7b Mon Sep 17 00:00:00 2001 From: Linas Vepstas Date: Fri, 7 Sep 2007 03:45:21 +1000 Subject: [POWERPC] prom_init whitespace cleanup, typo fix Whitespace cleanup: badly indented lines. Typo in comment. Signed-off-by: Linas Vepstas Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom_init.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 29c2160..1db10f7 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1199,7 +1199,7 @@ static void __init prom_initialize_tce_table(void) if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL)) continue; - /* Keep the old logic in tack to avoid regression. */ + /* Keep the old logic intact to avoid regression. 
*/ if (compatible[0] != 0) { if ((strstr(compatible, RELOC("python")) == NULL) && (strstr(compatible, RELOC("Speedwagon")) == NULL) && @@ -2231,7 +2231,7 @@ static void __init fixup_device_tree(void) static void __init prom_find_boot_cpu(void) { - struct prom_t *_prom = &RELOC(prom); + struct prom_t *_prom = &RELOC(prom); u32 getprop_rval; ihandle prom_cpu; phandle cpu_pkg; @@ -2251,7 +2251,7 @@ static void __init prom_find_boot_cpu(void) static void __init prom_check_initrd(unsigned long r3, unsigned long r4) { #ifdef CONFIG_BLK_DEV_INITRD - struct prom_t *_prom = &RELOC(prom); + struct prom_t *_prom = &RELOC(prom); if (r3 && r4 && r4 != 0xdeadbeef) { unsigned long val; @@ -2284,7 +2284,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long pp, unsigned long r6, unsigned long r7) { - struct prom_t *_prom; + struct prom_t *_prom; unsigned long hdr; unsigned long offset = reloc_offset(); @@ -2343,8 +2343,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, /* * Copy the CPU hold code */ - if (RELOC(of_platform) != PLATFORM_POWERMAC) - copy_and_flush(0, KERNELBASE + offset, 0x100, 0); + if (RELOC(of_platform) != PLATFORM_POWERMAC) + copy_and_flush(0, KERNELBASE + offset, 0x100, 0); /* * Do early parsing of command line -- cgit v1.1 From 70c6cc37db342d9f970884e12744ab5ee290d4ad Mon Sep 17 00:00:00 2001 From: Linas Vepstas Date: Fri, 7 Sep 2007 03:46:15 +1000 Subject: [POWERPC] prom.c whitespace cleanup Whitespace cleanup: badly indented lines. Signed-off-by: Linas Vepstas Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 0028fe6..b081413 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -779,13 +779,13 @@ static int __init early_init_dt_scan_chosen(unsigned long node, #endif #ifdef CONFIG_KEXEC - lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); - if (lprop) - crashk_res.start = *lprop; + lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); + if (lprop) + crashk_res.start = *lprop; - lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL); - if (lprop) - crashk_res.end = crashk_res.start + *lprop - 1; + lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL); + if (lprop) + crashk_res.end = crashk_res.start + *lprop - 1; #endif early_init_dt_check_for_initrd(node); -- cgit v1.1 From 3c607ce2a3213f33b8b6b854b5f7db876021e466 Mon Sep 17 00:00:00 2001 From: Linas Vepstas Date: Fri, 7 Sep 2007 03:47:29 +1000 Subject: [POWERPC] setup_64.c and prom.c comment cleanup Grammatical corrections to comments. Signed-off-by: Linas Vepstas Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 8 +++++--- arch/powerpc/kernel/setup_64.c | 6 +++--- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index b081413..172dcc3 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -430,9 +430,11 @@ static int __init early_parse_mem(char *p) } early_param("mem", early_parse_mem); -/* - * The device tree may be allocated below our memory limit, or inside the - * crash kernel region for kdump. If so, move it out now. +/** + * move_device_tree - move tree to an unused area, if needed. 
+ * + * The device tree may be allocated beyond our memory limit, or inside the + * crash kernel region for kdump. If so, move it out of the way. */ static void move_device_tree(void) { diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 6018178..3089eae 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -181,9 +181,9 @@ void __init early_setup(unsigned long dt_ptr) DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr); /* - * Do early initializations using the flattened device - * tree, like retreiving the physical memory map or - * calculating/retreiving the hash table size + * Do early initialization using the flattened device + * tree, such as retrieving the physical memory map or + * calculating/retrieving the hash table size. */ early_init_devtree(__va(dt_ptr)); -- cgit v1.1 From 26caeb2ee1924d564e8d8190aa783a569532f81a Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Fri, 24 Aug 2007 16:42:53 -0500 Subject: [POWERPC] Handle alignment faults on SPE load/store instructions This adds code to handle alignment traps generated by the following SPE (signal processing engine) load/store instructions, by emulating the instruction in the kernel (as is done for other instructions that generate alignment traps): evldd[x] Vector Load Double Word into Double Word [Indexed] evldw[x] Vector Load Double into Two Words [Indexed] evldh[x] Vector Load Double into Four Half Words [Indexed] evlhhesplat[x] Vector Load Half Word into Half Words Even and Splat [Indexed] evlhhousplat[x] Vector Load Half Word into Half Word Odd Unsigned and Splat [Indexed] evlhhossplat[x] Vector Load Half Word into Half Word Odd Signed and Splat [Indexed] evlwhe[x] Vector Load Word into Two Half Words Even [Indexed] evlwhou[x] Vector Load Word into Two Half Words Odd Unsigned (zero-extended) [Indexed] evlwhos[x] Vector Load Word into Two Half Words Odd Signed (with sign extension) [Indexed] evlwwsplat[x] Vector Load Word into Word and Splat [Indexed] evlwhsplat[x] Vector Load Word into Two Half Words and Splat [Indexed] evstdd[x] Vector Store Double of Double [Indexed] evstdw[x] Vector Store Double of Two Words [Indexed] evstdh[x] Vector Store Double of Four Half Words [Indexed] evstwhe[x] Vector Store Word of Two Half Words from Even [Indexed] evstwho[x] Vector Store Word of Two Half Words from Odd [Indexed] evstwwe[x] Vector Store Word of Word from Even [Indexed] evstwwo[x] Vector Store Word of Word from Odd [Indexed] Signed-off-by: Kumar Gala --- arch/powerpc/kernel/align.c | 250 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 250 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 4c47f9c..e06f75d 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -46,6 +46,8 @@ struct aligninfo { #define S 0x40 /* single-precision fp or... */ #define SX 0x40 /* ... byte count in XER */ #define HARD 0x80 /* string, stwcx. 
*/ +#define E4 0x40 /* SPE endianness is word */ +#define E8 0x80 /* SPE endianness is double word */ /* DSISR bits reported for a DCBZ instruction: */ #define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ @@ -392,6 +394,248 @@ static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr, return 1; /* exception handled and fixed up */ } +#ifdef CONFIG_SPE + +static struct aligninfo spe_aligninfo[32] = { + { 8, LD+E8 }, /* 0 00 00: evldd[x] */ + { 8, LD+E4 }, /* 0 00 01: evldw[x] */ + { 8, LD }, /* 0 00 10: evldh[x] */ + INVALID, /* 0 00 11 */ + { 2, LD }, /* 0 01 00: evlhhesplat[x] */ + INVALID, /* 0 01 01 */ + { 2, LD }, /* 0 01 10: evlhhousplat[x] */ + { 2, LD+SE }, /* 0 01 11: evlhhossplat[x] */ + { 4, LD }, /* 0 10 00: evlwhe[x] */ + INVALID, /* 0 10 01 */ + { 4, LD }, /* 0 10 10: evlwhou[x] */ + { 4, LD+SE }, /* 0 10 11: evlwhos[x] */ + { 4, LD+E4 }, /* 0 11 00: evlwwsplat[x] */ + INVALID, /* 0 11 01 */ + { 4, LD }, /* 0 11 10: evlwhsplat[x] */ + INVALID, /* 0 11 11 */ + + { 8, ST+E8 }, /* 1 00 00: evstdd[x] */ + { 8, ST+E4 }, /* 1 00 01: evstdw[x] */ + { 8, ST }, /* 1 00 10: evstdh[x] */ + INVALID, /* 1 00 11 */ + INVALID, /* 1 01 00 */ + INVALID, /* 1 01 01 */ + INVALID, /* 1 01 10 */ + INVALID, /* 1 01 11 */ + { 4, ST }, /* 1 10 00: evstwhe[x] */ + INVALID, /* 1 10 01 */ + { 4, ST }, /* 1 10 10: evstwho[x] */ + INVALID, /* 1 10 11 */ + { 4, ST+E4 }, /* 1 11 00: evstwwe[x] */ + INVALID, /* 1 11 01 */ + { 4, ST+E4 }, /* 1 11 10: evstwwo[x] */ + INVALID, /* 1 11 11 */ +}; + +#define EVLDD 0x00 +#define EVLDW 0x01 +#define EVLDH 0x02 +#define EVLHHESPLAT 0x04 +#define EVLHHOUSPLAT 0x06 +#define EVLHHOSSPLAT 0x07 +#define EVLWHE 0x08 +#define EVLWHOU 0x0A +#define EVLWHOS 0x0B +#define EVLWWSPLAT 0x0C +#define EVLWHSPLAT 0x0E +#define EVSTDD 0x10 +#define EVSTDW 0x11 +#define EVSTDH 0x12 +#define EVSTWHE 0x18 +#define EVSTWHO 0x1A +#define EVSTWWE 0x1C +#define EVSTWWO 0x1E + +/* + * Emulate SPE loads and stores. + * Only Book-E has these instructions, and it does true little-endian, + * so we don't need the address swizzling. + */ +static int emulate_spe(struct pt_regs *regs, unsigned int reg, + unsigned int instr) +{ + int t, ret; + union { + u64 ll; + u32 w[2]; + u16 h[4]; + u8 v[8]; + } data, temp; + unsigned char __user *p, *addr; + unsigned long *evr = ¤t->thread.evr[reg]; + unsigned int nb, flags; + + instr = (instr >> 1) & 0x1f; + + /* DAR has the operand effective address */ + addr = (unsigned char __user *)regs->dar; + + nb = spe_aligninfo[instr].len; + flags = spe_aligninfo[instr].flags; + + /* Verify the address of the operand */ + if (unlikely(user_mode(regs) && + !access_ok((flags & ST ? 
VERIFY_WRITE : VERIFY_READ), + addr, nb))) + return -EFAULT; + + /* userland only */ + if (unlikely(!user_mode(regs))) + return 0; + + flush_spe_to_thread(current); + + /* If we are loading, get the data from user space, else + * get it from register values + */ + if (flags & ST) { + data.ll = 0; + switch (instr) { + case EVSTDD: + case EVSTDW: + case EVSTDH: + data.w[0] = *evr; + data.w[1] = regs->gpr[reg]; + break; + case EVSTWHE: + data.h[2] = *evr >> 16; + data.h[3] = regs->gpr[reg] >> 16; + break; + case EVSTWHO: + data.h[2] = *evr & 0xffff; + data.h[3] = regs->gpr[reg] & 0xffff; + break; + case EVSTWWE: + data.w[1] = *evr; + break; + case EVSTWWO: + data.w[1] = regs->gpr[reg]; + break; + default: + return -EINVAL; + } + } else { + temp.ll = data.ll = 0; + ret = 0; + p = addr; + + switch (nb) { + case 8: + ret |= __get_user_inatomic(temp.v[0], p++); + ret |= __get_user_inatomic(temp.v[1], p++); + ret |= __get_user_inatomic(temp.v[2], p++); + ret |= __get_user_inatomic(temp.v[3], p++); + case 4: + ret |= __get_user_inatomic(temp.v[4], p++); + ret |= __get_user_inatomic(temp.v[5], p++); + case 2: + ret |= __get_user_inatomic(temp.v[6], p++); + ret |= __get_user_inatomic(temp.v[7], p++); + if (unlikely(ret)) + return -EFAULT; + } + + switch (instr) { + case EVLDD: + case EVLDW: + case EVLDH: + data.ll = temp.ll; + break; + case EVLHHESPLAT: + data.h[0] = temp.h[3]; + data.h[2] = temp.h[3]; + break; + case EVLHHOUSPLAT: + case EVLHHOSSPLAT: + data.h[1] = temp.h[3]; + data.h[3] = temp.h[3]; + break; + case EVLWHE: + data.h[0] = temp.h[2]; + data.h[2] = temp.h[3]; + break; + case EVLWHOU: + case EVLWHOS: + data.h[1] = temp.h[2]; + data.h[3] = temp.h[3]; + break; + case EVLWWSPLAT: + data.w[0] = temp.w[1]; + data.w[1] = temp.w[1]; + break; + case EVLWHSPLAT: + data.h[0] = temp.h[2]; + data.h[1] = temp.h[2]; + data.h[2] = temp.h[3]; + data.h[3] = temp.h[3]; + break; + default: + return -EINVAL; + } + } + + if (flags & SW) { + switch (flags & 0xf0) { + case E8: + SWAP(data.v[0], data.v[7]); + SWAP(data.v[1], data.v[6]); + SWAP(data.v[2], data.v[5]); + SWAP(data.v[3], data.v[4]); + break; + case E4: + + SWAP(data.v[0], data.v[3]); + SWAP(data.v[1], data.v[2]); + SWAP(data.v[4], data.v[7]); + SWAP(data.v[5], data.v[6]); + break; + /* Its half word endian */ + default: + SWAP(data.v[0], data.v[1]); + SWAP(data.v[2], data.v[3]); + SWAP(data.v[4], data.v[5]); + SWAP(data.v[6], data.v[7]); + break; + } + } + + if (flags & SE) { + data.w[0] = (s16)data.h[1]; + data.w[1] = (s16)data.h[3]; + } + + /* Store result to memory or update registers */ + if (flags & ST) { + ret = 0; + p = addr; + switch (nb) { + case 8: + ret |= __put_user_inatomic(data.v[0], p++); + ret |= __put_user_inatomic(data.v[1], p++); + ret |= __put_user_inatomic(data.v[2], p++); + ret |= __put_user_inatomic(data.v[3], p++); + case 4: + ret |= __put_user_inatomic(data.v[4], p++); + ret |= __put_user_inatomic(data.v[5], p++); + case 2: + ret |= __put_user_inatomic(data.v[6], p++); + ret |= __put_user_inatomic(data.v[7], p++); + } + if (unlikely(ret)) + return -EFAULT; + } else { + *evr = data.w[0]; + regs->gpr[reg] = data.w[1]; + } + + return 1; +} +#endif /* CONFIG_SPE */ /* * Called on alignment exception. 
Attempts to fixup @@ -450,6 +694,12 @@ int fix_alignment(struct pt_regs *regs) /* extract the operation and registers from the dsisr */ reg = (dsisr >> 5) & 0x1f; /* source/dest register */ areg = dsisr & 0x1f; /* register to update */ + +#ifdef CONFIG_SPE + if ((instr >> 26) == 0x4) + return emulate_spe(regs, reg, instr); +#endif + instr = (dsisr >> 10) & 0x7f; instr |= (dsisr >> 13) & 0x60; -- cgit v1.1 From 5e14d21e3f28a4181dacff0336040e30942f4921 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Thu, 13 Sep 2007 01:44:20 -0500 Subject: [POWERPC] Add cpu feature for SPE handling Make it so that SPE support can be determined at runtime. This is similiar to how we handle AltiVec. This allows us to have SPE support built in and work on processors with and without SPE. Signed-off-by: Kumar Gala --- arch/powerpc/kernel/cputable.c | 23 +++++++---------------- arch/powerpc/kernel/entry_32.S | 4 ++++ arch/powerpc/kernel/process.c | 15 +++++++++++---- arch/powerpc/kernel/ptrace.c | 6 ++---- 4 files changed, 24 insertions(+), 24 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 5873073..8eb8087 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -68,15 +68,6 @@ extern void __restore_cpu_ppc970(void); #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ PPC_FEATURE_BOOKE) -/* We only set the spe features if the kernel was compiled with - * spe support - */ -#ifdef CONFIG_SPE -#define PPC_FEATURE_SPE_COMP PPC_FEATURE_HAS_SPE -#else -#define PPC_FEATURE_SPE_COMP 0 -#endif - static struct cpu_spec cpu_specs[] = { #ifdef CONFIG_PPC64 { /* Power3 */ @@ -1261,8 +1252,8 @@ static struct cpu_spec cpu_specs[] = { /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ .cpu_features = CPU_FTRS_E200, .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_SPE_COMP | - PPC_FEATURE_HAS_EFP_SINGLE | + PPC_FEATURE_HAS_SPE_COMP | + PPC_FEATURE_HAS_EFP_SINGLE_COMP | PPC_FEATURE_UNIFIED_CACHE, .dcache_bsize = 32, .platform = "ppc5554", @@ -1274,8 +1265,8 @@ static struct cpu_spec cpu_specs[] = { /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ .cpu_features = CPU_FTRS_E500, .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_SPE_COMP | - PPC_FEATURE_HAS_EFP_SINGLE, + PPC_FEATURE_HAS_SPE_COMP | + PPC_FEATURE_HAS_EFP_SINGLE_COMP, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, @@ -1290,9 +1281,9 @@ static struct cpu_spec cpu_specs[] = { /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ .cpu_features = CPU_FTRS_E500_2, .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_SPE_COMP | - PPC_FEATURE_HAS_EFP_SINGLE | - PPC_FEATURE_HAS_EFP_DOUBLE, + PPC_FEATURE_HAS_SPE_COMP | + PPC_FEATURE_HAS_EFP_SINGLE_COMP | + PPC_FEATURE_HAS_EFP_DOUBLE_COMP, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 4074c0b..21d889e 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -504,9 +504,11 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_SPE +BEGIN_FTR_SECTION oris r0,r0,MSR_SPE@h /* Disable SPE */ mfspr r12,SPRN_SPEFSCR /* save spefscr register value */ stw r12,THREAD+THREAD_SPEFSCR(r2) +END_FTR_SECTION_IFSET(CPU_FTR_SPE) #endif /* CONFIG_SPE */ and. r0,r0,r11 /* FP or altivec or SPE enabled? 
*/ beq+ 1f @@ -542,8 +544,10 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_SPE +BEGIN_FTR_SECTION lwz r0,THREAD+THREAD_SPEFSCR(r2) mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */ +END_FTR_SECTION_IFSET(CPU_FTR_SPE) #endif /* CONFIG_SPE */ lwz r0,_CCR(r1) diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index e477c9d..57c589c 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -669,9 +669,13 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val) * mode (asyn, precise, disabled) for 'Classic' FP. */ if (val & PR_FP_EXC_SW_ENABLE) { #ifdef CONFIG_SPE - tsk->thread.fpexc_mode = val & - (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); - return 0; + if (cpu_has_feature(CPU_FTR_SPE)) { + tsk->thread.fpexc_mode = val & + (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); + return 0; + } else { + return -EINVAL; + } #else return -EINVAL; #endif @@ -697,7 +701,10 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) #ifdef CONFIG_SPE - val = tsk->thread.fpexc_mode; + if (cpu_has_feature(CPU_FTR_SPE)) + val = tsk->thread.fpexc_mode; + else + return -EINVAL; #else return -EINVAL; #endif diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 8a177bd..fb8866e 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -576,8 +576,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) #ifdef CONFIG_SPE case PTRACE_GETEVRREGS: /* Get the child spe register state. */ - if (child->thread.regs->msr & MSR_SPE) - giveup_spe(child); + flush_spe_to_thread(child); ret = get_evrregs((unsigned long __user *)data, child); break; @@ -585,8 +584,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) /* Set the child spe register state. */ /* this is to clear the MSR_SPE bit to force a reload * of register state from memory */ - if (child->thread.regs->msr & MSR_SPE) - giveup_spe(child); + flush_spe_to_thread(child); ret = set_evrregs(child, (unsigned long __user *)data); break; #endif -- cgit v1.1 From 748a768384e05c021ea6be221b80c62a83d7b520 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Thu, 13 Sep 2007 15:42:35 -0500 Subject: [POWERPC] Fix modpost warnings from head*.S on ppc32 We get warnings like the following from the various ppc32 head*.S files: WARNING: vmlinux.o(.text+0x358): Section mismatch: reference to .init.text:early_init (between 'skpinv' and 'interrupt_base') WARNING: vmlinux.o(.text+0x380): Section mismatch: reference to .init.text:machine_init (between 'skpinv' and 'interrupt_base') WARNING: vmlinux.o(.text+0x384): Section mismatch: reference to .init.text:MMU_init (between 'skpinv' and 'interrupt_base') WARNING: vmlinux.o(.text+0x3aa): Section mismatch: reference to .init.text:start_kernel (between 'skpinv' and 'interrupt_base') WARNING: vmlinux.o(.text+0x3ae): Section mismatch: reference to .init.text:start_kernel (between 'skpinv' and 'interrupt_base') Added a .text.head section simliar to what other architectures do since modpost already excludes this from its warnings. 
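As a reminder of what this class of warning means, modpost flags references from ordinary text into sections that are discarded after boot; a hypothetical illustration (not code from this patch):

#include <linux/init.h>

/* Hypothetical example of what triggers a modpost section mismatch:
 * regular .text code keeping a reference to __init code, which is
 * freed once boot is complete. */
static void __init early_setup_thing(void)
{
	/* runs exactly once during boot */
}

void some_regular_function(void)
{
	early_setup_thing();	/* modpost: Section mismatch warning */
}

The head_*.S entry code legitimately references __init routines exactly once at boot, so moving it into .text.head (which modpost does not check) silences the false positives without hiding real bugs.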
Signed-off-by: Kumar Gala --- arch/powerpc/kernel/head_32.S | 17 +++++++---------- arch/powerpc/kernel/head_40x.S | 12 ++++++------ arch/powerpc/kernel/head_44x.S | 6 +++--- arch/powerpc/kernel/head_8xx.S | 9 +++------ arch/powerpc/kernel/head_fsl_booke.S | 6 +++--- arch/powerpc/kernel/vmlinux.lds.S | 2 ++ 6 files changed, 24 insertions(+), 28 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 0e3df1f..12febfe 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -48,20 +48,17 @@ mtspr SPRN_DBAT##n##L,RB; \ 1: - .text + .section .text.head, "ax" .stabs "arch/powerpc/kernel/",N_SO,0,0,0f .stabs "head_32.S",N_SO,0,0,0f 0: - .globl _stext -_stext: +_ENTRY(_stext); /* * _start is defined this way because the XCOFF loader in the OpenFirmware * on the powermac expects the entry point to be a procedure descriptor. */ - .text - .globl _start -_start: +_ENTRY(_start); /* * These are here for legacy reasons, the kernel used to * need to look like a coff function entry for the pmac @@ -841,7 +838,7 @@ relocate_kernel: * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. */ -_GLOBAL(copy_and_flush) +_ENTRY(copy_and_flush) addi r5,r5,-4 addi r6,r6,-4 4: li r0,L1_CACHE_BYTES/4 @@ -954,9 +951,9 @@ __secondary_start: * included in CONFIG_6xx */ #if !defined(CONFIG_6xx) -_GLOBAL(__save_cpu_setup) +_ENTRY(__save_cpu_setup) blr -_GLOBAL(__restore_cpu_setup) +_ENTRY(__restore_cpu_setup) blr #endif /* !defined(CONFIG_6xx) */ @@ -1080,7 +1077,7 @@ start_here: /* * Set up the segment registers for a new context. */ -_GLOBAL(set_context) +_ENTRY(set_context) mulli r3,r3,897 /* multiply context by skew factor */ rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ addis r3,r3,0x6000 /* Set Ks, Ku bits */ diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index a8e0457..00bdb6d 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -52,9 +52,9 @@ * * This is all going to change RSN when we add bi_recs....... -- Dan */ - .text -_GLOBAL(_stext) -_GLOBAL(_start) + .section .text.head, "ax" +_ENTRY(_stext); +_ENTRY(_start); /* Save parameters we are passed. */ @@ -89,9 +89,9 @@ turn_on_mmu: */ . = 0xc0 crit_save: -_GLOBAL(crit_r10) +_ENTRY(crit_r10) .space 4 -_GLOBAL(crit_r11) +_ENTRY(crit_r11) .space 4 /* @@ -814,7 +814,7 @@ finish_tlb_load: * The PowerPC 4xx family of processors do not have an FPU, so this just * returns. */ -_GLOBAL(giveup_fpu) +_ENTRY(giveup_fpu) blr /* This is where the main kernel code starts. diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index e26d26e..a3dc0e4 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -50,9 +50,9 @@ * r7 - End of kernel command line string * */ - .text -_GLOBAL(_stext) -_GLOBAL(_start) + .section .text.head, "ax" +_ENTRY(_stext); +_ENTRY(_start); /* * Reserve a word at a fixed location to store the address * of abatron_pteptrs diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 901be47..a6ecdd6 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -38,12 +38,9 @@ #else #define DO_8xx_CPU6(val, reg) #endif - .text - .globl _stext -_stext: - .text - .globl _start -_start: + .section .text.head, "ax" +_ENTRY(_stext); +_ENTRY(_start); /* MPC8xx * This port was done on an MBX board with an 860. 
Right now I only diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 1f155d3..d83cbbb 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -52,9 +52,9 @@ * r7 - End of kernel command line string * */ - .text -_GLOBAL(_stext) -_GLOBAL(_start) + .section .text.head, "ax" +_ENTRY(_stext); +_ENTRY(_start); /* * Reserve a word at a fixed location to store the address * of abatron_pteptrs diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 0c45855..823a8cb 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -34,6 +34,8 @@ SECTIONS /* Text and gots */ .text : { + ALIGN_FUNCTION(); + *(.text.head) _text = .; TEXT_TEXT SCHED_TEXT -- cgit v1.1 From 61a564fd2e7ab13ab11a6ce8305433baacf344ef Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Fri, 24 Aug 2007 09:45:08 +1000 Subject: [POWERPC] Don't cast kmalloc return value in ibmebus.c kmalloc() returns a void pointer so there is absolutely no need to cast it in ibmebus_chomp(). Signed-off-by: Jesper Juhl Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/ibmebus.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index d6a38cd..7697d5b 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -371,7 +371,8 @@ static int ibmebus_match_path(struct device *dev, void *data) static char *ibmebus_chomp(const char *in, size_t count) { - char *out = (char*)kmalloc(count + 1, GFP_KERNEL); + char *out = kmalloc(count + 1, GFP_KERNEL); + if (!out) return NULL; -- cgit v1.1 From 00efee7d5d0d7888aafbf0d2de76943ee8aca47a Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Fri, 24 Aug 2007 16:58:37 +1000 Subject: [POWERPC] Remove barriers from the SLB shadow buffer update After talking to an IBM POWER hypervisor (PHYP) design and development guy, there seems to be no need for memory barriers when updating the SLB shadow buffer provided we only update it from the current CPU, which we do. Also, these guys see no need in the future for these barriers. Signed-off-by: Michael Neuling Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/entry_64.S | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 952eba6..fbbd3f6 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -385,15 +385,15 @@ BEGIN_FTR_SECTION oris r0,r6,(SLB_ESID_V)@h ori r0,r0,(SLB_NUM_BOLTED-1)@l - /* Update the last bolted SLB */ + /* Update the last bolted SLB. No write barriers are needed + * here, provided we only update the current CPU's SLB shadow + * buffer. + */ ld r9,PACA_SLBSHADOWPTR(r13) li r12,0 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */ - eieio std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ - eieio std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ - eieio slbie r6 slbie r6 /* Workaround POWER5 < DD2.1 issue */ -- cgit v1.1 From 7b2c3c5b1d6dd77d7bb5a7d57ab7280e051c59bc Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 17 Sep 2007 14:08:06 +1000 Subject: [POWERPC] Fix section mismatch in PCI code Create a helper function (alloc_maybe_bootmem) that is marked __init_refok to limit the chances of mistakenly referring to other __init routines. 
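Judging from the pci_dn.c code it replaces in the diff below, alloc_maybe_bootmem() presumably wraps the usual "kmalloc once the allocator is up, bootmem before that" pattern in a single __init_refok function; an assumed sketch, not necessarily the exact helper:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/bootmem.h>

extern int mem_init_done;	/* set once the normal allocator is usable */

void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
{
	if (mem_init_done)
		return kmalloc(size, mask);
	else
		return alloc_bootmem(size);
}

The section mismatch warnings listed next are the symptom being silenced.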
WARNING: vmlinux.o(.text+0x2a9c4): Section mismatch: reference to .init.text:.__alloc_bootmem (between '.update_dn_pci_info' and '.pci_dn_reconfig_notifier') WARNING: vmlinux.o(.text+0x36430): Section mismatch: reference to .init.text:.__alloc_bootmem (between '.mpic_msi_init_allocator' and '.find_ht_magic_addr') WARNING: vmlinux.o(.text+0x5e804): Section mismatch: reference to .init.text:.__alloc_bootmem (between '.celleb_setup_phb' and '.celleb_fake_pci_write_config') WARNING: vmlinux.o(.text+0x5e8e8): Section mismatch: reference to .init.text:.__alloc_bootmem (between '.celleb_setup_phb' and '.celleb_fake_pci_write_config') WARNING: vmlinux.o(.text+0x5e968): Section mismatch: reference to .init.text:.__alloc_bootmem (between '.celleb_setup_phb' and '.celleb_fake_pci_write_config') Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/pci_dn.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index d7d36df..b483903 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -23,8 +23,6 @@ #include #include #include -#include -#include #include #include @@ -45,10 +43,7 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data) const u32 *regs; struct pci_dn *pdn; - if (mem_init_done) - pdn = kmalloc(sizeof(*pdn), GFP_KERNEL); - else - pdn = alloc_bootmem(sizeof(*pdn)); + pdn = alloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); if (pdn == NULL) return NULL; memset(pdn, 0, sizeof(*pdn)); -- cgit v1.1 From 19a8d97d89442e2bda6245b8a3de2c1fec69a7ad Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 17 Sep 2007 14:41:38 +1000 Subject: [POWERPC] Remove cmd_line from head*.S It is just a C char array, so declare it thusly. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_32.S | 8 -------- arch/powerpc/kernel/head_40x.S | 7 ------- arch/powerpc/kernel/head_44x.S | 8 -------- arch/powerpc/kernel/head_64.S | 8 -------- arch/powerpc/kernel/head_8xx.S | 8 -------- arch/powerpc/kernel/head_fsl_booke.S | 8 -------- arch/powerpc/kernel/setup-common.c | 2 ++ 7 files changed, 2 insertions(+), 47 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 12febfe..c86c626 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -1297,14 +1297,6 @@ empty_zero_page: swapper_pg_dir: .space 4096 -/* - * This space gets a copy of optional info passed to us by the bootstrap - * Used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space 512 - .globl intercept_table intercept_table: .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700 diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 00bdb6d..e312824 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -1006,13 +1006,6 @@ critical_stack_top: .globl exception_stack_top exception_stack_top: -/* This space gets a copy of optional info passed to us by the bootstrap - * which is used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space 512 - /* Room for two PTE pointers, usually the kernel and current user pointers * to their respective root page table. 
*/ diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index a3dc0e4..c6a510b 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -744,14 +744,6 @@ exception_stack_bottom: exception_stack_top: /* - * This space gets a copy of optional info passed to us by the bootstrap - * which is used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space 512 - -/* * Room for two PTE pointers, usually the kernel and current user pointers * to their respective root page table. */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index cec5908..f4ae82e 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -1540,11 +1540,3 @@ empty_zero_page: .globl swapper_pg_dir swapper_pg_dir: .space PAGE_SIZE - -/* - * This space gets a copy of optional info passed to us by the bootstrap - * Used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space COMMAND_LINE_SIZE diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index a6ecdd6..96cea8e 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -832,14 +832,6 @@ empty_zero_page: swapper_pg_dir: .space 4096 -/* - * This space gets a copy of optional info passed to us by the bootstrap - * Used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space 512 - /* Room for two PTE table poiners, usually the kernel and current user * pointer to their respective root page table (pgdir). */ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index d83cbbb..bfc3870 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -1050,14 +1050,6 @@ exception_stack_bottom: exception_stack_top: /* - * This space gets a copy of optional info passed to us by the bootstrap - * which is used to pass parameters into the kernel like root=/dev/sda1, etc. - */ - .globl cmd_line -cmd_line: - .space 512 - -/* * Room for two PTE pointers, usually the kernel and current user pointers * to their respective root page table. */ diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 50ef38c..36c90ba 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -76,6 +76,8 @@ EXPORT_SYMBOL(machine_id); unsigned long klimit = (unsigned long) _end; +char cmd_line[COMMAND_LINE_SIZE]; + /* * This still seems to be needed... -- paulus */ -- cgit v1.1 From ee7a76da1ef5e3e5e0e54e84319e435ea25c267c Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 18 Sep 2007 17:22:59 +1000 Subject: [POWERPC] Size swapper_pg_dir correctly David Gibson pointed out that swapper_pg_dir actually need to be PGD_TABLE_SIZE bytes long not PAGE_SIZE. This actually saves 64k in the bss for a kernel ppc64_defconfig built with CONFIG_PPC_64K_PAGES. 
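The asm-offsets.c hunk below is what lets head_64.S consume a C-level constant; the underlying mechanism is roughly the standard kbuild trick (assumed sketch):

/* DEFINE() emits the value of a C expression as a marker line in the
 * generated assembly; kbuild then turns those markers into
 * asm-offsets.h entries, so assembly such as ".space PGD_TABLE_SIZE"
 * can use the constant directly. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

For a 64K-page kernel, PGD_TABLE_SIZE is only a small fraction of PAGE_SIZE, which is where the roughly 64k of .bss comes back.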
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/asm-offsets.c | 4 ++++ arch/powerpc/kernel/head_64.S | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index a408053..0ae5d57 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -320,5 +320,9 @@ int main(void) DEFINE(VMALLOC_START_ESID, GET_ESID(VMALLOC_START)); DEFINE(VMALLOC_START_VSID, KERNEL_VSID(VMALLOC_START)); #endif + +#ifdef CONFIG_PPC64 + DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE); +#endif return 0; } diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index f4ae82e..384cc75 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -1539,4 +1539,4 @@ empty_zero_page: .globl swapper_pg_dir swapper_pg_dir: - .space PAGE_SIZE + .space PGD_TABLE_SIZE -- cgit v1.1 From 9e4859ef5462193643fd2b3c8ffb298e5a4a4319 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 18 Sep 2007 17:25:12 +1000 Subject: [POWERPC] FWNMI is only used on pSeries This saves 4k on non pSeries builds (except for iSeries where it saves almost 4k). Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/crash_dump.c | 2 ++ arch/powerpc/kernel/head_64.S | 7 +++++++ 2 files changed, 9 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index ffa91d6..29ff77c 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -54,8 +54,10 @@ void __init setup_kdump_trampoline(void) create_trampoline(i); } +#ifdef CONFIG_PPC_PSERIES create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START); create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START); +#endif /* CONFIG_PPC_PSERIES */ DBG(" <- setup_kdump_trampoline()\n"); } diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 384cc75..22ac245 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -341,6 +341,7 @@ slb_miss_user_pseries: b . /* prevent spec. execution */ #endif /* __DISABLED__ */ +#ifdef CONFIG_PPC_PSERIES /* * Vectors for the FWNMI option. Share common code. */ @@ -358,6 +359,8 @@ machine_check_fwnmi: mtspr SPRN_SPRG1,r13 /* save r13 */ EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common) +#endif /* CONFIG_PPC_PSERIES */ + /*** Common interrupt handlers ***/ STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) @@ -1012,6 +1015,7 @@ _GLOBAL(do_stab_bolted) initial_stab: .space 4096 +#ifdef CONFIG_PPC_PSERIES /* * Data area reserved for FWNMI option. * This address (0x7000) is fixed by the RPA. @@ -1019,6 +1023,7 @@ initial_stab: .= 0x7000 .globl fwnmi_data_area fwnmi_data_area: +#endif /* CONFIG_PPC_PSERIES */ /* iSeries does not use the FWNMI stuff, so it is safe to put * this here, even if we later allow kernels that will boot on @@ -1043,7 +1048,9 @@ xLparMap: #endif /* CONFIG_PPC_ISERIES */ +#ifdef CONFIG_PPC_PSERIES . = 0x8000 +#endif /* CONFIG_PPC_PSERIES */ /* * On pSeries and most other platforms, secondary processors spin -- cgit v1.1 From 70dea47da12932cc512e09124a836ddd3499ab39 Mon Sep 17 00:00:00 2001 From: Hollis Blanchard Date: Mon, 17 Sep 2007 05:56:47 -0500 Subject: [POWERPC] 4xx: Implement udbg_getc() for 440 Implement udbg_getc() for 440, which fixes xmon input. 
Signed-off-by: Hollis Blanchard Acked-by: David Gibson Signed-off-by: Josh Boyer --- arch/powerpc/kernel/udbg_16550.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index 7afab5b..833a3d0 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -206,11 +206,22 @@ static void udbg_44x_as1_putc(char c) } } +static int udbg_44x_as1_getc(void) +{ + if (udbg_comport) { + while ((as1_readb(&udbg_comport->lsr) & LSR_DR) == 0) + ; /* wait for char */ + return as1_readb(&udbg_comport->rbr); + } + return -1; +} + void __init udbg_init_44x_as1(void) { udbg_comport = (volatile struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR; udbg_putc = udbg_44x_as1_putc; + udbg_getc = udbg_44x_as1_getc; } #endif /* CONFIG_PPC_EARLY_DEBUG_44x */ -- cgit v1.1 From 472b5b43bec45e7e79f86094ca9091bd9eb474ed Mon Sep 17 00:00:00 2001 From: Valentine Barshak Date: Thu, 6 Sep 2007 03:30:16 +1000 Subject: [POWERPC] Add 64-bit resources support to pci_iomap The patch adds support for the 64-bit resources to the PCI iomap code. Signed-off-by: Valentine Barshak Acked-by: David Gibson Signed-off-by: Josh Boyer --- arch/powerpc/kernel/iomap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index 2a5cf86..1577434 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c @@ -119,8 +119,8 @@ EXPORT_SYMBOL(ioport_unmap); void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) { - unsigned long start = pci_resource_start(dev, bar); - unsigned long len = pci_resource_len(dev, bar); + resource_size_t start = pci_resource_start(dev, bar); + resource_size_t len = pci_resource_len(dev, bar); unsigned long flags = pci_resource_flags(dev, bar); if (!len) -- cgit v1.1 From 8fd7675c092f79f240246c76728477ec4e7f7f09 Mon Sep 17 00:00:00 2001 From: Satyam Sharma Date: Tue, 18 Sep 2007 09:43:40 +1000 Subject: [POWERPC] Avoid pointless WARN_ON(irqs_disabled()) from panic codepath > ------------[ cut here ]------------ > Badness at arch/powerpc/kernel/smp.c:202 comes when smp_call_function_map() has been called with irqs disabled, which is illegal. However, there is a special case, the panic() codepath, when we do not want to warn about this -- warning at that time is pointless anyway, and only serves to scroll away the *real* cause of the panic and distracts from the real bug. * So let's extract the WARN_ON() from smp_call_function_map() into all its callers -- smp_call_function() and smp_call_function_single() * Also, introduce another caller of smp_call_function_map(), namely __smp_call_function() (and make smp_call_function() a wrapper over this) which does *not* warn about disabled irqs * Use this __smp_call_function() from the panic codepath's smp_send_stop() We also end having to move code of smp_send_stop() below the definition of __smp_call_function(). 
Signed-off-by: Satyam Sharma Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/smp.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 1ea4316..b24dcba 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -152,11 +152,6 @@ static void stop_this_cpu(void *dummy) ; } -void smp_send_stop(void) -{ - smp_call_function(stop_this_cpu, NULL, 1, 0); -} - /* * Structure and data for smp_call_function(). This is designed to minimise * static memory requirements. It also looks cleaner. @@ -198,9 +193,6 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic, int cpu; u64 timeout; - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - if (unlikely(smp_ops == NULL)) return ret; @@ -270,10 +262,19 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic, return ret; } +static int __smp_call_function(void (*func)(void *info), void *info, + int nonatomic, int wait) +{ + return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map); +} + int smp_call_function(void (*func) (void *info), void *info, int nonatomic, int wait) { - return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map); + /* Can deadlock when called with interrupts disabled */ + WARN_ON(irqs_disabled()); + + return __smp_call_function(func, info, nonatomic, wait); } EXPORT_SYMBOL(smp_call_function); @@ -283,6 +284,9 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int cpumask_t map = CPU_MASK_NONE; int ret = 0; + /* Can deadlock when called with interrupts disabled */ + WARN_ON(irqs_disabled()); + if (!cpu_online(cpu)) return -EINVAL; @@ -299,6 +303,11 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int } EXPORT_SYMBOL(smp_call_function_single); +void smp_send_stop(void) +{ + __smp_call_function(stop_this_cpu, NULL, 1, 0); +} + void smp_call_function_interrupt(void) { void (*func) (void *info); -- cgit v1.1 From 576e393e74e58bd4c949d551a3340accc8dbab0f Mon Sep 17 00:00:00 2001 From: Emil Medve Date: Thu, 20 Sep 2007 12:25:17 +1000 Subject: [POWERPC] Fix build errors when BLOCK=n These are the symptom error messages: CC arch/powerpc/kernel/setup_32.o In file included from include/linux/blkdev.h:17, from include/linux/ide.h:13, from arch/powerpc/kernel/setup_32.c:13: include/linux/bsg.h:67: warning: 'struct request_queue' declared inside parameter list include/linux/bsg.h:67: warning: its scope is only this definition or declaration, which is probably not what you want include/linux/bsg.h:71: warning: 'struct request_queue' declared inside parameter list In file included from arch/powerpc/kernel/setup_32.c:13: include/linux/ide.h:857: error: field 'wrq' has incomplete type CC arch/powerpc/kernel/ppc_ksyms.o In file included from include/linux/blkdev.h:17, from include/linux/ide.h:13, from arch/powerpc/kernel/ppc_ksyms.c:15: include/linux/bsg.h:67: warning: 'struct request_queue' declared inside parameter list include/linux/bsg.h:67: warning: its scope is only this definition or declaration, which is probably not what you want include/linux/bsg.h:71: warning: 'struct request_queue' declared inside parameter list In file included from arch/powerpc/kernel/ppc_ksyms.c:15: include/linux/ide.h:857: error: field 'wrq' has incomplete type The fix tries to use the smallest scope CONFIG_* symbols that will fix the build problem. 
In this case needs to be included only if IDE=y or IDE=m were selected. Also, ppc_ide_md is needed only if BLK_DEV_IDE=y or BLK_DEV_IDE=m Moved the EXPORT_SYMBOL(ppc_ide_md) from ppc_ksysms.c next to its declaration in setup_32.c which made not needed. With gone from ppc_ksyms.c, is needed to address the following warnings and errors: CC arch/powerpc/kernel/ppc_ksyms.o arch/powerpc/kernel/ppc_ksyms.c:122: error: '__flush_icache_range' undeclared here (not in a function) arch/powerpc/kernel/ppc_ksyms.c:122: warning: type defaults to 'int' in declaration of '__flush_icache_range' arch/powerpc/kernel/ppc_ksyms.c:123: error: 'flush_dcache_range' undeclared here (not in a function) arch/powerpc/kernel/ppc_ksyms.c:123: warning: type defaults to 'int' in declaration of 'flush_dcache_range' Signed-off-by: Emil Medve Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/ppc_ksyms.c | 6 +----- arch/powerpc/kernel/setup_32.c | 5 +++++ 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 430c502..c6b1aa3 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -12,12 +12,12 @@ #include #include #include -#include #include #include #include #include +#include #include #include #include @@ -95,10 +95,6 @@ EXPORT_SYMBOL(__strnlen_user); EXPORT_SYMBOL(copy_4K_page); #endif -#if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)) -EXPORT_SYMBOL(ppc_ide_md); -#endif - #if defined(CONFIG_PCI) && defined(CONFIG_PPC32) EXPORT_SYMBOL(isa_io_base); EXPORT_SYMBOL(isa_mem_base); diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index a288a5f..7474502 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -10,7 +10,9 @@ #include #include #include +#if defined(CONFIG_IDE) || defined(CONFIG_IDE_MODULE) #include +#endif #include #include #include @@ -49,7 +51,10 @@ extern void bootx_init(unsigned long r4, unsigned long phys); +#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) struct ide_machdep_calls ppc_ide_md; +EXPORT_SYMBOL(ppc_ide_md); +#endif int boot_cpuid; EXPORT_SYMBOL_GPL(boot_cpuid); -- cgit v1.1 From 6f6682809b994fd9a61081fa0410df31481d5f7f Mon Sep 17 00:00:00 2001 From: Domen Puncer Date: Fri, 21 Sep 2007 00:00:11 +1000 Subject: [POWERPC] clk.h interface for platforms This provides an implementation of the interface for arch/powerpc using a set of function pointers in clk_functions. Platforms that want to support this interface should fill clk_functions and select CONFIG_PPC_CLOCK in Kconfig. 
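For a platform wiring itself up, filling the dispatch table might look roughly like the following (hypothetical platform code: every myplat_* name and the include list are invented for illustration, only struct clk_interface and clk_functions come from the patch below):

#include <linux/init.h>
#include <linux/clk.h>

/* Hypothetical platform-side registration.  The ops structure mirrors
 * the calls dispatched by arch/powerpc/kernel/clock.c in the diff
 * below. */
static struct clk *myplat_clk_get(struct device *dev, const char *id)
{
	/* look the clock up in platform data */
	return NULL;
}

static int myplat_clk_enable(struct clk *clk)
{
	/* ungate the clock in the platform clock controller */
	return 0;
}

static struct clk_interface myplat_clk_ops = {
	.clk_get	= myplat_clk_get,
	.clk_enable	= myplat_clk_enable,
	/* remaining hooks (clk_put, clk_disable, clk_get_rate,
	 * clk_set_rate, ...) filled in the same way */
};

static int __init myplat_clk_init(void)
{
	clk_functions = myplat_clk_ops;	/* table consumed by clock.c */
	return 0;
}
arch_initcall(myplat_clk_init);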
Signed-off-by: Domen Puncer Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 1 + arch/powerpc/kernel/clock.c | 82 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) create mode 100644 arch/powerpc/kernel/clock.c (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 967afc5..b37165e 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_PPC64) += vdso64/ obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o obj-$(CONFIG_PPC_970_NAP) += idle_power4.o obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o +obj-$(CONFIG_PPC_CLOCK) += clock.o procfs-$(CONFIG_PPC64) := proc_ppc64.o obj-$(CONFIG_PROC_FS) += $(procfs-y) rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o diff --git a/arch/powerpc/kernel/clock.c b/arch/powerpc/kernel/clock.c new file mode 100644 index 0000000..ce668f5 --- /dev/null +++ b/arch/powerpc/kernel/clock.c @@ -0,0 +1,82 @@ +/* + * Dummy clk implementations for powerpc. + * These need to be overridden in platform code. + */ + +#include +#include +#include +#include +#include + +struct clk_interface clk_functions; + +struct clk *clk_get(struct device *dev, const char *id) +{ + if (clk_functions.clk_get) + return clk_functions.clk_get(dev, id); + return ERR_PTR(-ENOSYS); +} +EXPORT_SYMBOL(clk_get); + +void clk_put(struct clk *clk) +{ + if (clk_functions.clk_put) + clk_functions.clk_put(clk); +} +EXPORT_SYMBOL(clk_put); + +int clk_enable(struct clk *clk) +{ + if (clk_functions.clk_enable) + return clk_functions.clk_enable(clk); + return -ENOSYS; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ + if (clk_functions.clk_disable) + clk_functions.clk_disable(clk); +} +EXPORT_SYMBOL(clk_disable); + +unsigned long clk_get_rate(struct clk *clk) +{ + if (clk_functions.clk_get_rate) + return clk_functions.clk_get_rate(clk); + return 0; +} +EXPORT_SYMBOL(clk_get_rate); + +long clk_round_rate(struct clk *clk, unsigned long rate) +{ + if (clk_functions.clk_round_rate) + return clk_functions.clk_round_rate(clk, rate); + return -ENOSYS; +} +EXPORT_SYMBOL(clk_round_rate); + +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + if (clk_functions.clk_set_rate) + return clk_functions.clk_set_rate(clk, rate); + return -ENOSYS; +} +EXPORT_SYMBOL(clk_set_rate); + +struct clk *clk_get_parent(struct clk *clk) +{ + if (clk_functions.clk_get_parent) + return clk_functions.clk_get_parent(clk); + return ERR_PTR(-ENOSYS); +} +EXPORT_SYMBOL(clk_get_parent); + +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + if (clk_functions.clk_set_parent) + return clk_functions.clk_set_parent(clk, parent); + return -ENOSYS; +} +EXPORT_SYMBOL(clk_set_parent); -- cgit v1.1 From 75918a4b5998c93ee1ab131fbe64b97b5d0d2315 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Fri, 21 Sep 2007 05:11:20 +1000 Subject: [POWERPC] Separate out legacy machine check exception parsers Move out the old-style exception parsers to a separate function, and don't call it on platforms that have a platform-specific handler. It would make sense to move out the generic versions into their platforms instead, but that can be done gradually down the road. 
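A platform opts out of the legacy parser simply by providing its own hook; hypothetically (names invented, other mandatory machdep callbacks omitted):

#include <asm/machdep.h>
#include <asm/ptrace.h>

/* Hypothetical platform hook: when ppc_md.machine_check_exception is
 * non-NULL, machine_check_exception() in the diff below calls it
 * instead of the legacy generic_machine_check_exception() parser.
 * A non-zero return means the event was recovered. */
static int myplat_machine_check(struct pt_regs *regs)
{
	/* decode platform-specific error registers here */
	return 1;	/* recovered */
}

define_machine(myplat) {
	.name				= "myplat",
	.machine_check_exception	= myplat_machine_check,
};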
Signed-off-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/traps.c | 66 +++++++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 29 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index ccfc99d..5a49eab 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -324,38 +324,10 @@ static inline int check_io_access(struct pt_regs *regs) #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) #endif -void machine_check_exception(struct pt_regs *regs) +static int generic_machine_check_exception(struct pt_regs *regs) { - int recover = 0; unsigned long reason = get_mc_reason(regs); - /* See if any machine dependent calls */ - if (ppc_md.machine_check_exception) - recover = ppc_md.machine_check_exception(regs); - - if (recover) - return; - - if (user_mode(regs)) { - regs->msr |= MSR_RI; - _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); - return; - } - -#if defined(CONFIG_8xx) && defined(CONFIG_PCI) - /* the qspan pci read routines can cause machine checks -- Cort */ - bad_page_fault(regs, regs->dar, SIGBUS); - return; -#endif - - if (debugger_fault_handler(regs)) { - regs->msr |= MSR_RI; - return; - } - - if (check_io_access(regs)) - return; - #if defined(CONFIG_4xx) && !defined(CONFIG_440A) if (reason & ESR_IMCP) { printk("Instruction"); @@ -471,6 +443,42 @@ void machine_check_exception(struct pt_regs *regs) } #endif /* CONFIG_4xx */ + return 0; +} + +void machine_check_exception(struct pt_regs *regs) +{ + int recover = 0; + + /* See if any machine dependent calls */ + if (ppc_md.machine_check_exception) + recover = ppc_md.machine_check_exception(regs); + else + recover = generic_machine_check_exception(regs); + + if (recover) + return; + + if (user_mode(regs)) { + regs->msr |= MSR_RI; + _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); + return; + } + +#if defined(CONFIG_8xx) && defined(CONFIG_PCI) + /* the qspan pci read routines can cause machine checks -- Cort */ + bad_page_fault(regs, regs->dar, SIGBUS); + return; +#endif + + if (debugger_fault_handler(regs)) { + regs->msr |= MSR_RI; + return; + } + + if (check_io_access(regs)) + return; + if (debugger_fault_handler(regs)) return; die("Machine check", regs, SIGBUS); -- cgit v1.1 From 2578bfae84a78bd46fdbc0d2f9d39e9fbc9c8a3f Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Fri, 21 Sep 2007 10:16:20 +1000 Subject: [POWERPC] Create and use CONFIG_WORD_SIZE Linus made this suggestion for the x86 merge and this starts the process for powerpc. We assume that CONFIG_PPC64 implies CONFIG_PPC_MERGE and CONFIG_PPC_STD_MMU_32 implies CONFIG_PPC_STD_MMU. 
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index b37165e..fb33a7e 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -38,10 +38,10 @@ obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o obj-$(CONFIG_TAU) += tau_6xx.o -obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o -obj32-$(CONFIG_HIBERNATION) += swsusp_32.o -obj64-$(CONFIG_HIBERNATION) += swsusp_64.o swsusp_asm64.o -obj32-$(CONFIG_MODULES) += module_32.o +obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o \ + swsusp_$(CONFIG_WORD_SIZE).o +obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o +obj-$(CONFIG_MODULES) += module_$(CONFIG_WORD_SIZE).o ifeq ($(CONFIG_PPC_MERGE),y) @@ -54,9 +54,10 @@ extra-$(CONFIG_8xx) := head_8xx.o extra-y += vmlinux.lds obj-y += time.o prom.o traps.o setup-common.o \ - udbg.o misc.o io.o -obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o -obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o + udbg.o misc.o io.o \ + misc_$(CONFIG_WORD_SIZE).o +obj-$(CONFIG_PPC32) += entry_32.o setup_32.o +obj-$(CONFIG_PPC64) += dma_64.o iommu.o obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o obj-$(CONFIG_MODULES) += ppc_ksyms.o obj-$(CONFIG_BOOTX_TEXT) += btext.o @@ -64,16 +65,12 @@ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o -module-$(CONFIG_PPC64) += module_64.o -obj-$(CONFIG_MODULES) += $(module-y) - -pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o isa-bridge.o -pci32-$(CONFIG_PPC32) := pci_32.o -obj-$(CONFIG_PCI) += $(pci64-y) $(pci32-y) pci-common.o +pci64-$(CONFIG_PPC64) += pci_dn.o isa-bridge.o +obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \ + pci-common.o obj-$(CONFIG_PCI_MSI) += msi.o -kexec-$(CONFIG_PPC64) := machine_kexec_64.o -kexec-$(CONFIG_PPC32) := machine_kexec_32.o -obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o $(kexec-y) +obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \ + machine_kexec_$(CONFIG_WORD_SIZE).o obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o @@ -87,7 +84,6 @@ smpobj-$(CONFIG_SMP) += smp.o endif -obj-$(CONFIG_PPC32) += $(obj32-y) obj-$(CONFIG_PPC64) += $(obj64-y) extra-$(CONFIG_PPC_FPU) += fpu.o -- cgit v1.1 From aa3be5f32db137bc4404f32a24b36fb47d48d260 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Fri, 21 Sep 2007 13:26:02 +1000 Subject: [POWERPC] Implement {read,update}_persistent_clock With these functions implemented we cooperate better with the generic timekeeping code. This obsoletes the need for the timer sysdev as a bonus. Signed-off-by: Tony Breeds Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/time.c | 85 ++++++++++++++-------------------------------- 1 file changed, 26 insertions(+), 59 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index d95e68c..b94e4df 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -73,16 +73,11 @@ #include #endif -/* keep track of when we need to update the rtc */ -time_t last_rtc_update; #ifdef CONFIG_PPC_ISERIES static unsigned long __initdata iSeries_recal_titan; static signed long __initdata iSeries_recal_tb; #endif -/* The decrementer counts down by 128 every 128ns on a 601. 
*/ -#define DECREMENTER_COUNT_601 (1000000000 / HZ) - #define XSEC_PER_SEC (1024*1024) #ifdef CONFIG_PPC64 @@ -348,39 +343,6 @@ void udelay(unsigned long usecs) } EXPORT_SYMBOL(udelay); -static __inline__ void timer_check_rtc(void) -{ - /* - * update the rtc when needed, this should be performed on the - * right fraction of a second. Half or full second ? - * Full second works on mk48t59 clocks, others need testing. - * Note that this update is basically only used through - * the adjtimex system calls. Setting the HW clock in - * any other way is a /dev/rtc and userland business. - * This is still wrong by -0.5/+1.5 jiffies because of the - * timer interrupt resolution and possible delay, but here we - * hit a quantization limit which can only be solved by higher - * resolution timers and decoupling time management from timer - * interrupts. This is also wrong on the clocks - * which require being written at the half second boundary. - * We should have an rtc call that only sets the minutes and - * seconds like on Intel to avoid problems with non UTC clocks. - */ - if (ppc_md.set_rtc_time && ntp_synced() && - xtime.tv_sec - last_rtc_update >= 659 && - abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) { - struct rtc_time tm; - to_tm(xtime.tv_sec + 1 + timezone_offset, &tm); - tm.tm_year -= 1900; - tm.tm_mon -= 1; - if (ppc_md.set_rtc_time(&tm) == 0) - last_rtc_update = xtime.tv_sec + 1; - else - /* Try again one minute later */ - last_rtc_update += 60; - } -} - /* * This version of gettimeofday has microsecond resolution. */ @@ -689,7 +651,6 @@ void timer_interrupt(struct pt_regs * regs) tb_last_jiffy = tb_next_jiffy; do_timer(1); timer_recalc_offset(tb_last_jiffy); - timer_check_rtc(); } write_sequnlock(&xtime_lock); } @@ -801,11 +762,6 @@ int do_settimeofday(struct timespec *tv) set_normalized_timespec(&xtime, new_sec, new_nsec); set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); - /* In case of a large backwards jump in time with NTP, we want the - * clock to be updated as soon as the PLL is again in lock. 
- */ - last_rtc_update = new_sec - 658; - ntp_clear(); new_xsec = xtime.tv_nsec; @@ -881,12 +837,35 @@ void __init generic_calibrate_decr(void) #endif } -unsigned long get_boot_time(void) +int update_persistent_clock(struct timespec now) +{ + struct rtc_time tm; + + if (!ppc_md.set_rtc_time) + return 0; + + to_tm(now.tv_sec + 1 + timezone_offset, &tm); + tm.tm_year -= 1900; + tm.tm_mon -= 1; + + return ppc_md.set_rtc_time(&tm); +} + +unsigned long read_persistent_clock(void) { struct rtc_time tm; + static int first = 1; + + /* XXX this is a litle fragile but will work okay in the short term */ + if (first) { + first = 0; + if (ppc_md.time_init) + timezone_offset = ppc_md.time_init(); - if (ppc_md.get_boot_time) - return ppc_md.get_boot_time(); + /* get_boot_time() isn't guaranteed to be safe to call late */ + if (ppc_md.get_boot_time) + return ppc_md.get_boot_time() -timezone_offset; + } if (!ppc_md.get_rtc_time) return 0; ppc_md.get_rtc_time(&tm); @@ -898,14 +877,10 @@ unsigned long get_boot_time(void) void __init time_init(void) { unsigned long flags; - unsigned long tm = 0; struct div_result res; u64 scale, x; unsigned shift; - if (ppc_md.time_init != NULL) - timezone_offset = ppc_md.time_init(); - if (__USE_RTC()) { /* 601 processor: dec counts down by 128 every 128ns */ ppc_tb_freq = 1000000000; @@ -980,19 +955,14 @@ void __init time_init(void) /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */ boot_tb = get_tb_or_rtc(); - tm = get_boot_time(); - write_seqlock_irqsave(&xtime_lock, flags); /* If platform provided a timezone (pmac), we correct the time */ if (timezone_offset) { sys_tz.tz_minuteswest = -timezone_offset / 60; sys_tz.tz_dsttime = 0; - tm -= timezone_offset; } - xtime.tv_sec = tm; - xtime.tv_nsec = 0; do_gtod.varp = &do_gtod.vars[0]; do_gtod.var_idx = 0; do_gtod.varp->tb_orig_stamp = tb_last_jiffy; @@ -1010,9 +980,6 @@ void __init time_init(void) time_freq = 0; - last_rtc_update = xtime.tv_sec; - set_normalized_timespec(&wall_to_monotonic, - -xtime.tv_sec, -xtime.tv_nsec); write_sequnlock_irqrestore(&xtime_lock, flags); /* Not exact, but the timer interrupt takes care of this */ -- cgit v1.1 From 4a4cfe3836916e12282ceb5c4bdd799dc71af567 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Sat, 22 Sep 2007 07:35:52 +1000 Subject: [POWERPC] Implement generic time of day clocksource for powerpc Signed-off-by: Tony Breeds Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/time.c | 270 ++++++++++++++++----------------------------- 1 file changed, 98 insertions(+), 172 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index b94e4df..e71a0d8 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -65,17 +65,44 @@ #include #include #include -#ifdef CONFIG_PPC64 #include -#endif #ifdef CONFIG_PPC_ISERIES #include #include #endif +/* powerpc clocksource/clockevent code */ + +#include + +static cycle_t rtc_read(void); +static struct clocksource clocksource_rtc = { + .name = "rtc", + .rating = 400, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 22, + .mult = 0, /* To be filled in */ + .read = rtc_read, +}; + +static cycle_t timebase_read(void); +static struct clocksource clocksource_timebase = { + .name = "timebase", + .rating = 400, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 22, + .mult = 0, /* To be filled in */ + .read = timebase_read, +}; + #ifdef CONFIG_PPC_ISERIES static unsigned long __initdata 
iSeries_recal_titan; static signed long __initdata iSeries_recal_tb; + +/* Forward declaration is only needed for iSereis compiles */ +void __init clocksource_init(void); #endif #define XSEC_PER_SEC (1024*1024) @@ -343,65 +370,6 @@ void udelay(unsigned long usecs) } EXPORT_SYMBOL(udelay); -/* - * This version of gettimeofday has microsecond resolution. - */ -static inline void __do_gettimeofday(struct timeval *tv) -{ - unsigned long sec, usec; - u64 tb_ticks, xsec; - struct gettimeofday_vars *temp_varp; - u64 temp_tb_to_xs, temp_stamp_xsec; - - /* - * These calculations are faster (gets rid of divides) - * if done in units of 1/2^20 rather than microseconds. - * The conversion to microseconds at the end is done - * without a divide (and in fact, without a multiply) - */ - temp_varp = do_gtod.varp; - - /* Sampling the time base must be done after loading - * do_gtod.varp in order to avoid racing with update_gtod. - */ - data_barrier(temp_varp); - tb_ticks = get_tb() - temp_varp->tb_orig_stamp; - temp_tb_to_xs = temp_varp->tb_to_xs; - temp_stamp_xsec = temp_varp->stamp_xsec; - xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs); - sec = xsec / XSEC_PER_SEC; - usec = (unsigned long)xsec & (XSEC_PER_SEC - 1); - usec = SCALE_XSEC(usec, 1000000); - - tv->tv_sec = sec; - tv->tv_usec = usec; -} - -void do_gettimeofday(struct timeval *tv) -{ - if (__USE_RTC()) { - /* do this the old way */ - unsigned long flags, seq; - unsigned int sec, nsec, usec; - - do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); - sec = xtime.tv_sec; - nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy); - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - usec = nsec / 1000; - while (usec >= 1000000) { - usec -= 1000000; - ++sec; - } - tv->tv_sec = sec; - tv->tv_usec = usec; - return; - } - __do_gettimeofday(tv); -} - -EXPORT_SYMBOL(do_gettimeofday); /* * There are two copies of tb_to_xs and stamp_xsec so that no @@ -447,56 +415,6 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, ++(vdso_data->tb_update_count); } -/* - * When the timebase - tb_orig_stamp gets too big, we do a manipulation - * between tb_orig_stamp and stamp_xsec. The goal here is to keep the - * difference tb - tb_orig_stamp small enough to always fit inside a - * 32 bits number. This is a requirement of our fast 32 bits userland - * implementation in the vdso. If we "miss" a call to this function - * (interrupt latency, CPU locked in a spinlock, ...) and we end up - * with a too big difference, then the vdso will fallback to calling - * the syscall - */ -static __inline__ void timer_recalc_offset(u64 cur_tb) -{ - unsigned long offset; - u64 new_stamp_xsec; - u64 tlen, t2x; - u64 tb, xsec_old, xsec_new; - struct gettimeofday_vars *varp; - - if (__USE_RTC()) - return; - tlen = current_tick_length(); - offset = cur_tb - do_gtod.varp->tb_orig_stamp; - if (tlen == last_tick_len && offset < 0x80000000u) - return; - if (tlen != last_tick_len) { - t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs); - last_tick_len = tlen; - } else - t2x = do_gtod.varp->tb_to_xs; - new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; - do_div(new_stamp_xsec, 1000000000); - new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; - - ++vdso_data->tb_update_count; - smp_mb(); - - /* - * Make sure time doesn't go backwards for userspace gettimeofday. 
- */ - tb = get_tb(); - varp = do_gtod.varp; - xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs) - + varp->stamp_xsec; - xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec; - if (xsec_new < xsec_old) - new_stamp_xsec += xsec_old - xsec_new; - - update_gtod(cur_tb, new_stamp_xsec, t2x); -} - #ifdef CONFIG_SMP unsigned long profile_pc(struct pt_regs *regs) { @@ -568,6 +486,8 @@ static int __init iSeries_tb_recal(void) iSeries_recal_titan = titan; iSeries_recal_tb = tb; + /* Called here as now we know accurate values for the timebase */ + clocksource_init(); return 0; } late_initcall(iSeries_tb_recal); @@ -650,7 +570,6 @@ void timer_interrupt(struct pt_regs * regs) if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) { tb_last_jiffy = tb_next_jiffy; do_timer(1); - timer_recalc_offset(tb_last_jiffy); } write_sequnlock(&xtime_lock); } @@ -722,66 +641,6 @@ unsigned long long sched_clock(void) return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift; } -int do_settimeofday(struct timespec *tv) -{ - time_t wtm_sec, new_sec = tv->tv_sec; - long wtm_nsec, new_nsec = tv->tv_nsec; - unsigned long flags; - u64 new_xsec; - unsigned long tb_delta; - - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - - write_seqlock_irqsave(&xtime_lock, flags); - - /* - * Updating the RTC is not the job of this code. If the time is - * stepped under NTP, the RTC will be updated after STA_UNSYNC - * is cleared. Tools like clock/hwclock either copy the RTC - * to the system time, in which case there is no point in writing - * to the RTC again, or write to the RTC but then they don't call - * settimeofday to perform this operation. - */ - - /* Make userspace gettimeofday spin until we're done. */ - ++vdso_data->tb_update_count; - smp_mb(); - - /* - * Subtract off the number of nanoseconds since the - * beginning of the last tick. - */ - tb_delta = tb_ticks_since(tb_last_jiffy); - tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */ - new_nsec -= SCALE_XSEC(tb_delta, 1000000000); - - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); - - set_normalized_timespec(&xtime, new_sec, new_nsec); - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); - - ntp_clear(); - - new_xsec = xtime.tv_nsec; - if (new_xsec != 0) { - new_xsec *= XSEC_PER_SEC; - do_div(new_xsec, NSEC_PER_SEC); - } - new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC; - update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); - - vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; - vdso_data->tz_dsttime = sys_tz.tz_dsttime; - - write_sequnlock_irqrestore(&xtime_lock, flags); - clock_was_set(); - return 0; -} - -EXPORT_SYMBOL(do_settimeofday); - static int __init get_freq(char *name, int cells, unsigned long *val) { struct device_node *cpu; @@ -873,6 +732,69 @@ unsigned long read_persistent_clock(void) tm.tm_hour, tm.tm_min, tm.tm_sec); } +/* clocksource code */ +static cycle_t rtc_read(void) +{ + return (cycle_t)get_rtc(); +} + +static cycle_t timebase_read(void) +{ + return (cycle_t)get_tb(); +} + +void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) +{ + u64 t2x, stamp_xsec; + + if (clock != &clocksource_timebase) + return; + + /* Make userspace gettimeofday spin until we're done. 
*/ + ++vdso_data->tb_update_count; + smp_mb(); + + /* XXX this assumes clock->shift == 22 */ + /* 4611686018 ~= 2^(20+64-22) / 1e9 */ + t2x = (u64) clock->mult * 4611686018ULL; + stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; + do_div(stamp_xsec, 1000000000); + stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; + update_gtod(clock->cycle_last, stamp_xsec, t2x); +} + +void update_vsyscall_tz(void) +{ + /* Make userspace gettimeofday spin until we're done. */ + ++vdso_data->tb_update_count; + smp_mb(); + vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; + vdso_data->tz_dsttime = sys_tz.tz_dsttime; + smp_mb(); + ++vdso_data->tb_update_count; +} + +void __init clocksource_init(void) +{ + struct clocksource *clock; + + if (__USE_RTC()) + clock = &clocksource_rtc; + else + clock = &clocksource_timebase; + + clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift); + + if (clocksource_register(clock)) { + printk(KERN_ERR "clocksource: %s is already registered\n", + clock->name); + return; + } + + printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n", + clock->name, clock->mult, clock->shift); +} + /* This function is only called on the boot processor */ void __init time_init(void) { @@ -982,6 +904,10 @@ void __init time_init(void) write_sequnlock_irqrestore(&xtime_lock, flags); + /* Register the clocksource, if we're not running on iSeries */ + if (!firmware_has_feature(FW_FEATURE_ISERIES)) + clocksource_init(); + /* Not exact, but the timer interrupt takes care of this */ set_dec(tb_ticks_per_jiffy); } -- cgit v1.1 From c546726293912c932f3c14d06b0f68e175d70781 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Fri, 21 Sep 2007 14:29:28 +1000 Subject: [POWERPC] Remove debug printk from vio_bus_init As it just adds noise to the boot messages. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/vio.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 62c1bc1..54645ba 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -317,11 +317,8 @@ static int __init vio_bus_init(void) * the device tree. Drivers will associate with them later. */ for (of_node = node_vroot->child; of_node != NULL; - of_node = of_node->sibling) { - printk(KERN_DEBUG "%s: processing %p\n", - __FUNCTION__, of_node); + of_node = of_node->sibling) vio_register_device_node(of_node); - } of_node_put(node_vroot); } -- cgit v1.1 From c868078ed82e3651b16f68a420ae7568de2102db Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Fri, 21 Sep 2007 14:31:02 +1000 Subject: [POWERPC] Simplify vio_bus_init a little for legacy iSeries iSeries_vio_dev was already statically initialised and we can remove one set of #ifdef CONFIG_PPC_ISERIES guards. 
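The trick that lets the #ifdef disappear at the call site is the usual stub pattern: the iSeries-only iommu_vio_init() gets an empty twin for !CONFIG_PPC_ISERIES builds, and the caller keeps only the runtime firmware-feature test. A condensed sketch of that shape, following the vio.c hunk below (error handling trimmed):

#ifdef CONFIG_PPC_ISERIES
static void __init iommu_vio_init(void)
{
        /* Real body also sets up the VETH/VIO TCE tables; the dma wiring
         * moves in here from vio_bus_init(), as in the hunk below. */
        vio_bus_device.dev.archdata.dma_ops  = &dma_iommu_ops;
        vio_bus_device.dev.archdata.dma_data = &vio_iommu_table;
}
#else
static void __init iommu_vio_init(void)
{
}
#endif

static int __init vio_bus_init(void)
{
        /* No #ifdef needed at the call site: the stub compiles away elsewhere. */
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                iommu_vio_init();

        return bus_register(&vio_bus_type);
}
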
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/vio.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 54645ba..ee15c22 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -64,6 +64,12 @@ static void __init iommu_vio_init(void) printk("Virtual Bus VETH TCE table failed.\n"); if (!iommu_init_table(&vio_iommu_table, -1)) printk("Virtual Bus VIO TCE table failed.\n"); + vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops; + vio_bus_device.dev.archdata.dma_data = &vio_iommu_table; +} +#else +static void __init iommu_vio_init(void) +{ } #endif @@ -282,14 +288,8 @@ static int __init vio_bus_init(void) int err; struct device_node *node_vroot; -#ifdef CONFIG_PPC_ISERIES - if (firmware_has_feature(FW_FEATURE_ISERIES)) { + if (firmware_has_feature(FW_FEATURE_ISERIES)) iommu_vio_init(); - vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops; - vio_bus_device.dev.archdata.dma_data = &vio_iommu_table; - iSeries_vio_dev = &vio_bus_device.dev; - } -#endif /* CONFIG_PPC_ISERIES */ err = bus_register(&vio_bus_type); if (err) { -- cgit v1.1 From 6fccab26df4f59815d7ec912e4111a92807780de Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Fri, 21 Sep 2007 14:32:05 +1000 Subject: [POWERPC] Make vio_bus_type static Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/vio.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index ee15c22..1d7b272 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -39,6 +39,8 @@ extern struct kset devices_subsys; /* needed for vio_find_name() */ +static struct bus_type vio_bus_type; + static struct vio_dev vio_bus_device = { /* fake "parent" device */ .name = vio_bus_device.dev.bus_id, .type = "", @@ -388,7 +390,7 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp, return 0; } -struct bus_type vio_bus_type = { +static struct bus_type vio_bus_type = { .name = "vio", .dev_attrs = vio_dev_attrs, .uevent = vio_hotplug, -- cgit v1.1 From fabca2c0a461bd82a35194e3a4bb1e98f3ffa789 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Tue, 25 Sep 2007 09:50:52 +1000 Subject: [POWERPC] Add CHECK_FULL_REGS in several places in ptrace code This restores the CHECK_FULL_REGS sanity check to every place that can access the nonvolatile GPRs for ptrace. This is already done for native-bitwidth PTRACE_PEEKUSR, but was omitted for many other cases (32-bit ptrace, PTRACE_GETREGS, etc.); I think there may have been more uniform checks before that were lost in the recent cleanup of GETREGS et al. 
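Concretely, every path that dumps or pokes the nonvolatile GPRs gains the same one-line guard before it touches child->thread.regs. The patched PPC_PTRACE_GETREGS case, for example, now reads as follows (taken from the hunk below, with a comment added):

case PPC_PTRACE_GETREGS: {      /* Get GPRs 0 - 31. */
        int i;
        unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
        unsigned long __user *tmp = (unsigned long __user *)addr;

        /* Don't hand out nonvolatile GPRs that were never saved. */
        CHECK_FULL_REGS(child->thread.regs);
        for (i = 0; i < 32; i++) {
                ret = put_user(*reg, tmp);
                if (ret)
                        break;
                reg++;
                tmp++;
        }
        break;
}
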
Signed-off-by: Roland McGrath Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/ptrace.c | 4 ++++ arch/powerpc/kernel/ptrace32.c | 8 ++++++++ 2 files changed, 12 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index fb8866e..cf7732c 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -331,6 +331,7 @@ static long arch_ptrace_old(struct task_struct *child, long request, long addr, unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; unsigned long __user *tmp = (unsigned long __user *)addr; + CHECK_FULL_REGS(child->thread.regs); for (i = 0; i < 32; i++) { ret = put_user(*reg, tmp); if (ret) @@ -346,6 +347,7 @@ static long arch_ptrace_old(struct task_struct *child, long request, long addr, unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; unsigned long __user *tmp = (unsigned long __user *)addr; + CHECK_FULL_REGS(child->thread.regs); for (i = 0; i < 32; i++) { ret = get_user(*reg, tmp); if (ret) @@ -517,6 +519,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) ret = -EIO; break; } + CHECK_FULL_REGS(child->thread.regs); ret = 0; for (ui = 0; ui < PT_REGS_COUNT; ui ++) { ret |= __put_user(ptrace_get_reg(child, ui), @@ -537,6 +540,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) ret = -EIO; break; } + CHECK_FULL_REGS(child->thread.regs); ret = 0; for (ui = 0; ui < PT_REGS_COUNT; ui ++) { ret = __get_user(tmp, (unsigned long __user *) data); diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c index 9e6baea..fea6206 100644 --- a/arch/powerpc/kernel/ptrace32.c +++ b/arch/powerpc/kernel/ptrace32.c @@ -53,6 +53,7 @@ static long compat_ptrace_old(struct task_struct *child, long request, unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; unsigned int __user *tmp = (unsigned int __user *)addr; + CHECK_FULL_REGS(child->thread.regs); for (i = 0; i < 32; i++) { ret = put_user(*reg, tmp); if (ret) @@ -68,6 +69,7 @@ static long compat_ptrace_old(struct task_struct *child, long request, unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; unsigned int __user *tmp = (unsigned int __user *)addr; + CHECK_FULL_REGS(child->thread.regs); for (i = 0; i < 32; i++) { ret = get_user(*reg, tmp); if (ret) @@ -164,6 +166,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr, if ((addr & 3) || (index > PT_FPSCR32)) break; + CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { tmp = ptrace_get_reg(child, index); } else { @@ -210,6 +213,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr, if ((addr & 3) || numReg > PT_FPSCR) break; + CHECK_FULL_REGS(child->thread.regs); if (numReg >= PT_FPR0) { flush_fp_to_thread(child); tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0]; @@ -270,6 +274,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr, if ((addr & 3) || (index > PT_FPSCR32)) break; + CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { ret = ptrace_put_reg(child, index, data); } else { @@ -307,6 +312,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr, */ if ((addr & 3) || (numReg > PT_FPSCR)) break; + CHECK_FULL_REGS(child->thread.regs); if (numReg < PT_FPR0) { unsigned long freg = ptrace_get_reg(child, numReg); if (index % 2) @@ -342,6 +348,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr, ret = -EIO; break; } + CHECK_FULL_REGS(child->thread.regs); ret = 0; for 
(ui = 0; ui < PT_REGS_COUNT; ui ++) { ret |= __put_user(ptrace_get_reg(child, ui), @@ -359,6 +366,7 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr, ret = -EIO; break; } + CHECK_FULL_REGS(child->thread.regs); ret = 0; for (ui = 0; ui < PT_REGS_COUNT; ui ++) { ret = __get_user(tmp, (unsigned int __user *) data); -- cgit v1.1 From 3eb523b939d59fd90518188750c26df5d357478f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 27 Sep 2007 00:02:05 +1000 Subject: [POWERPC] Fix pci domain detection The /proc/bus/pci/* files list PCI domain numbers only for devices that claim to be on a multi-domain system. The check for this is broken on powerpc, because the buid value is truncated to 32 bits. There is at least one machine (IBM QS21) that only uses the high-order bits of the buid, so the return value of pci_proc_domain() ends up being always zero, which makes /proc/bus/pci useless. Change the logic to always return '1' for a nonzero buid value. Signed-off-by: Arnd Bergmann Acked-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/pci_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 291ffbc..9f63bdc 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -588,7 +588,7 @@ int pci_proc_domain(struct pci_bus *bus) return 0; else { struct pci_controller *hose = pci_bus_to_host(bus); - return hose->buid; + return hose->buid != 0; } } -- cgit v1.1 From 0de2d820067e03ca93f6bf5320d362d5262fb7a3 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 28 Sep 2007 04:38:55 +1000 Subject: [POWERPC] Make instruction dumping work in real mode On non-book-E, exceptions execute in real mode. If a fault happens that leads to a register dump, the kernel currently prints XXXXXXXX because it doesn't realize that PC is a physical address. This patch checks whether instruction address translation is turned on, and if not converts PC into a virtual address. Signed-off-by: Scott Wood Acked-by: Kumar Gala Acked-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/process.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 57c589c..588c0cb 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -354,6 +354,14 @@ static void show_instructions(struct pt_regs *regs) if (!(i % 8)) printk("\n"); +#if !defined(CONFIG_BOOKE) + /* If executing with the IMMU off, adjust pc rather + * than print XXXXXXXX. + */ + if (!(regs->msr & MSR_IR)) + pc = (unsigned long)phys_to_virt(pc); +#endif + /* We use __get_user here *only* to avoid an OOPS on a * bad address because the pc *should* only be a * kernel address. -- cgit v1.1 From 5669c3cf19fbadaa9120b59914beec8431277efe Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 2 Oct 2007 13:37:53 +1000 Subject: [POWERPC] Limit range of __init_ref_ok somewhat This patch introduces zalloc_maybe_bootmem and uses it so that we don't have to mark a whole (largish) routine as __init_ref_ok. 
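For reference, the helper being introduced simply centralises the before/after-mem_init() allocation choice that irq_alloc_host() used to open-code. Its body lives outside arch/powerpc/kernel, so the following is an assumed sketch reconstructed from the code it replaces in the hunk below (mem_init_done is the existing arch flag for "page allocator is up"):

/* Assumed shape of the new helper: a zeroed allocation that works both
 * before and after mem_init(). */
void *zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
        void *p;

        if (mem_init_done)
                p = kzalloc(size, mask);
        else {
                p = alloc_bootmem(size);
                if (p)
                        memset(p, 0, size);
        }
        return p;
}
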
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/irq.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 0e47c8c..151b131 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -424,7 +424,7 @@ static int default_irq_host_match(struct irq_host *h, struct device_node *np) return h->of_node != NULL && h->of_node == np; } -__init_refok struct irq_host *irq_alloc_host(struct device_node *of_node, +struct irq_host *irq_alloc_host(struct device_node *of_node, unsigned int revmap_type, unsigned int revmap_arg, struct irq_host_ops *ops, @@ -439,13 +439,7 @@ __init_refok struct irq_host *irq_alloc_host(struct device_node *of_node, /* Allocate structure and revmap table if using linear mapping */ if (revmap_type == IRQ_HOST_MAP_LINEAR) size += revmap_arg * sizeof(unsigned int); - if (mem_init_done) - host = kzalloc(size, GFP_KERNEL); - else { - host = alloc_bootmem(size); - if (host) - memset(host, 0, size); - } + host = zalloc_maybe_bootmem(size, GFP_KERNEL); if (host == NULL) return NULL; -- cgit v1.1 From 048c8bc90e53bf1f5feec020a7d482da94894e93 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 1 Nov 2006 05:44:54 +1100 Subject: [POWERPC] ppc64: support CONFIG_DEBUG_PREEMPT Add CONFIG_DEBUG_PREEMPT support to ppc64: it was useful for testing get_paca() preemption. Cheat a little, just use debug_smp_processor_id() in the debug version of get_paca(): it contains all the right checks and reporting, though get_paca() doesn't really use smp_processor_id(). Use local_paca for what might have been called __raw_get_paca(). Silence harmless warnings from io.h and lparcfg.c with local_paca - it is okay for iseries_lparcfg_data to be referencing shared_proc with preemption enabled: all cpus should show the same value for shared_proc. Why do other architectures need TRACE_IRQFLAGS_SUPPORT for DEBUG_PREEMPT? I don't know, ppc64 appears to get along fine without it. Signed-off-by: Hugh Dickins Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/lparcfg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 6444eaa..ff781b2 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -77,7 +77,7 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v) int processors, max_processors; unsigned long purr = get_purr(); - shared = (int)(get_lppaca()->shared_proc); + shared = (int)(local_paca->lppaca_ptr->shared_proc); seq_printf(m, "system_active_processors=%d\n", (int)HvLpConfig_getSystemPhysicalProcessors()); -- cgit v1.1 From 8150caad02266623b5b9f58088d589f130fccd97 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Tue, 2 Oct 2007 13:30:04 -0700 Subject: [POWERPC] powerpc vDSO: install unstripped copies on disk This keeps an unstripped copy of the vDSO images built before they are stripped and embedded in the kernel. The unstripped copies get installed in $(MODLIB)/vdso/ by "make install". These files can be useful when they contain source-level debugging information. 
Signed-off-by: Roland McGrath Cc: Sam Ravnborg Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/vdso32/Makefile | 20 +++++++++++++++++--- arch/powerpc/kernel/vdso64/Makefile | 19 ++++++++++++++++--- 2 files changed, 33 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index 3726358..c3d57bd 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile @@ -9,11 +9,11 @@ ifeq ($(CONFIG_PPC32),y) CROSS32CC := $(CC) endif -targets := $(obj-vdso32) vdso32.so +targets := $(obj-vdso32) vdso32.so vdso32.so.dbg obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) -EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin +EXTRA_CFLAGS := -shared -fno-common -fno-builtin EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ $(call ld-option, -Wl$(comma)--hash-style=sysv) EXTRA_AFLAGS := -D__VDSO32__ -s @@ -26,9 +26,14 @@ CPPFLAGS_vdso32.lds += -P -C -Upowerpc $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so # link rule for the .so file, .lds has to be first -$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) +$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(call if_changed,vdso32ld) +# strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + # assembly rules for the .S files $(obj-vdso32): %.o: %.S $(call if_changed_dep,vdso32as) @@ -39,3 +44,12 @@ quiet_cmd_vdso32ld = VDSO32L $@ quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< +# install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso32.so: $(obj)/vdso32.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + +vdso_install: vdso32.so diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index 43af9b2..fa7f1b8 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile @@ -4,10 +4,10 @@ obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o # Build rules -targets := $(obj-vdso64) vdso64.so +targets := $(obj-vdso64) vdso64.so vdso64.so.dbg obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) -EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin +EXTRA_CFLAGS := -shared -fno-common -fno-builtin EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ $(call ld-option, -Wl$(comma)--hash-style=sysv) EXTRA_AFLAGS := -D__VDSO64__ -s @@ -20,9 +20,14 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) +$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(call if_changed,vdso64ld) +# strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + # assembly rules for the .S files $(obj-vdso64): %.o: %.S $(call if_changed_dep,vdso64as) @@ -33,4 +38,12 @@ quiet_cmd_vdso64ld = VDSO64L $@ quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< +# install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso64.so: $(obj)/vdso64.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) +vdso_install: vdso64.so -- cgit v1.1 From 74c9b99d4dcadd144fab7326c99d0ffb1de19245 Mon Sep 17 00:00:00 2001 From: Joachim Fenkes 
Date: Wed, 26 Sep 2007 19:46:34 +1000 Subject: [POWERPC] ibmebus: More descriptive error return code in ibmebus_store_probe() Signed-off-by: Joachim Fenkes Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/ibmebus.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 7697d5b..53bf646 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -397,10 +397,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, return -ENOMEM; if (bus_find_device(&ibmebus_bus_type, NULL, path, - ibmebus_match_path)) { + ibmebus_match_path)) { printk(KERN_WARNING "%s: %s has already been probed\n", __FUNCTION__, path); - rc = -EINVAL; + rc = -EEXIST; goto out; } -- cgit v1.1 From 2d5f5659649924b86d527a2af2552bab741d1d6f Mon Sep 17 00:00:00 2001 From: Linas Vepstas Date: Wed, 3 Oct 2007 07:40:12 +1000 Subject: [POWERPC] Use alloc_maybe_bootmem() in pcibios_alloc_controller Use alloc_maybe_bootmem() which wraps the if (mem_init_done) malloc clause. Signed-off-by: Linas Vepstas Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/pci-common.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 083cfbd..2ae3b6f 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -65,14 +65,11 @@ static void __devinit pci_setup_pci_controller(struct pci_controller *hose) spin_unlock(&hose_spinlock); } -__init_refok struct pci_controller * pcibios_alloc_controller(struct device_node *dev) +struct pci_controller * pcibios_alloc_controller(struct device_node *dev) { struct pci_controller *phb; - if (mem_init_done) - phb = kmalloc(sizeof(struct pci_controller), GFP_KERNEL); - else - phb = alloc_bootmem(sizeof (struct pci_controller)); + phb = alloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL); if (phb == NULL) return NULL; pci_setup_pci_controller(phb); -- cgit v1.1 From d831d0b83f205888f4be4dee0a074ad67ef809b3 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Fri, 21 Sep 2007 13:26:03 +1000 Subject: [POWERPC] Implement clockevents driver for powerpc This registers a clock event structure for the decrementer and turns on CONFIG_GENERIC_CLOCKEVENTS, which means that we now don't need most of timer_interrupt(), since the work is done in generic code. For secondary CPUs, their decrementer clockevent is registered when the CPU comes up (the generic code automatically removes the clockevent when the CPU goes down). 
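The core of the change is a template clock_event_device for the decrementer that is copied into a per-CPU instance and registered once per online CPU; the boot CPU does this from time_init(), secondaries from start_secondary(). A condensed sketch following the time.c hunks below (mult and the min/max delta fields are filled in at init time and omitted here):

static struct clock_event_device decrementer_clockevent = {
        .name           = "decrementer",
        .rating         = 200,
        .shift          = 32,
        .features       = CLOCK_EVT_FEAT_ONESHOT,
        .set_next_event = decrementer_set_next_event,
        .set_mode       = decrementer_set_mode,
};

static DEFINE_PER_CPU(struct clock_event_device, decrementers);

static void register_decrementer_clockevent(int cpu)
{
        struct clock_event_device *dec = &per_cpu(decrementers, cpu);

        *dec = decrementer_clockevent;          /* per-CPU copy of the template */
        dec->cpumask = cpumask_of_cpu(cpu);
        clockevents_register_device(dec);
}

void secondary_cpu_time_init(void)
{
        /* Called from start_secondary() when a secondary CPU comes up. */
        register_decrementer_clockevent(smp_processor_id());
}

With this in place, timer_interrupt() mostly just rearms the decrementer and hands off to the registered event_handler, as the hunk below shows.
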
Signed-off-by: Tony Breeds Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/smp.c | 2 + arch/powerpc/kernel/time.c | 131 ++++++++++++++++++++++++++++++--------------- 2 files changed, 90 insertions(+), 43 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index b24dcba..d30f08f 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -569,6 +569,8 @@ int __devinit start_secondary(void *unused) if (system_state > SYSTEM_BOOTING) snapshot_timebase(); + secondary_cpu_time_init(); + spin_lock(&call_lock); cpu_set(cpu, cpu_online_map); spin_unlock(&call_lock); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index e71a0d8..d20947c 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -73,6 +73,7 @@ /* powerpc clocksource/clockevent code */ +#include #include static cycle_t rtc_read(void); @@ -97,6 +98,27 @@ static struct clocksource clocksource_timebase = { .read = timebase_read, }; +#define DECREMENTER_MAX 0x7fffffff + +static int decrementer_set_next_event(unsigned long evt, + struct clock_event_device *dev); +static void decrementer_set_mode(enum clock_event_mode mode, + struct clock_event_device *dev); + +static struct clock_event_device decrementer_clockevent = { + .name = "decrementer", + .rating = 200, + .shift = 32, + .mult = 0, /* To be filled in */ + .irq = 0, + .set_next_event = decrementer_set_next_event, + .set_mode = decrementer_set_mode, + .features = CLOCK_EVT_FEAT_ONESHOT, +}; + +static DEFINE_PER_CPU(struct clock_event_device, decrementers); +void init_decrementer_clockevent(void); + #ifdef CONFIG_PPC_ISERIES static unsigned long __initdata iSeries_recal_titan; static signed long __initdata iSeries_recal_tb; @@ -517,10 +539,12 @@ void __init iSeries_time_init_early(void) void timer_interrupt(struct pt_regs * regs) { struct pt_regs *old_regs; - int next_dec; int cpu = smp_processor_id(); - unsigned long ticks; - u64 tb_next_jiffy; + struct clock_event_device *evt = &per_cpu(decrementers, cpu); + + /* Ensure a positive value is written to the decrementer, or else + * some CPUs will continuue to take decrementer exceptions */ + set_dec(DECREMENTER_MAX); #ifdef CONFIG_PPC32 if (atomic_read(&ppc_n_lost_interrupts) != 0) @@ -530,7 +554,6 @@ void timer_interrupt(struct pt_regs * regs) old_regs = set_irq_regs(regs); irq_enter(); - profile_tick(CPU_PROFILING); calculate_steal_time(); #ifdef CONFIG_PPC_ISERIES @@ -538,44 +561,20 @@ void timer_interrupt(struct pt_regs * regs) get_lppaca()->int_dword.fields.decr_int = 0; #endif - while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu))) - >= tb_ticks_per_jiffy) { - /* Update last_jiffy */ - per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy; - /* Handle RTCL overflow on 601 */ - if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000) - per_cpu(last_jiffy, cpu) -= 1000000000; - - /* - * We cannot disable the decrementer, so in the period - * between this cpu's being marked offline in cpu_online_map - * and calling stop-self, it is taking timer interrupts. - * Avoid calling into the scheduler rebalancing code if this - * is the case. - */ - if (!cpu_is_offline(cpu)) - account_process_time(regs); - - /* - * No need to check whether cpu is offline here; boot_cpuid - * should have been fixed up by now. 
- */ - if (cpu != boot_cpuid) - continue; + /* + * We cannot disable the decrementer, so in the period + * between this cpu's being marked offline in cpu_online_map + * and calling stop-self, it is taking timer interrupts. + * Avoid calling into the scheduler rebalancing code if this + * is the case. + */ + if (!cpu_is_offline(cpu)) + account_process_time(regs); - write_seqlock(&xtime_lock); - tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy; - if (__USE_RTC() && tb_next_jiffy >= 1000000000) - tb_next_jiffy -= 1000000000; - if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) { - tb_last_jiffy = tb_next_jiffy; - do_timer(1); - } - write_sequnlock(&xtime_lock); - } - - next_dec = tb_ticks_per_jiffy - ticks; - set_dec(next_dec); + if (evt->event_handler) + evt->event_handler(evt); + else + evt->set_next_event(DECREMENTER_MAX, evt); #ifdef CONFIG_PPC_ISERIES if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) @@ -795,6 +794,53 @@ void __init clocksource_init(void) clock->name, clock->mult, clock->shift); } +static int decrementer_set_next_event(unsigned long evt, + struct clock_event_device *dev) +{ + set_dec(evt); + return 0; +} + +static void decrementer_set_mode(enum clock_event_mode mode, + struct clock_event_device *dev) +{ + if (mode != CLOCK_EVT_MODE_ONESHOT) + decrementer_set_next_event(DECREMENTER_MAX, dev); +} + +static void register_decrementer_clockevent(int cpu) +{ + struct clock_event_device *dec = &per_cpu(decrementers, cpu); + + *dec = decrementer_clockevent; + dec->cpumask = cpumask_of_cpu(cpu); + + printk(KERN_ERR "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", + dec->name, dec->mult, dec->shift, cpu); + + clockevents_register_device(dec); +} + +void init_decrementer_clockevent(void) +{ + int cpu = smp_processor_id(); + + decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC, + decrementer_clockevent.shift); + decrementer_clockevent.max_delta_ns = + clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent); + decrementer_clockevent.min_delta_ns = 1000; + + register_decrementer_clockevent(cpu); +} + +void secondary_cpu_time_init(void) +{ + /* FIME: Should make unrelatred change to move snapshot_timebase + * call here ! 
*/ + register_decrementer_clockevent(smp_processor_id()); +} + /* This function is only called on the boot processor */ void __init time_init(void) { @@ -908,8 +954,7 @@ void __init time_init(void) if (!firmware_has_feature(FW_FEATURE_ISERIES)) clocksource_init(); - /* Not exact, but the timer interrupt takes care of this */ - set_dec(tb_ticks_per_jiffy); + init_decrementer_clockevent(); } -- cgit v1.1 From 1ad749980a5fda46f7ec920d8409ddcc89b38714 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Fri, 21 Sep 2007 13:26:03 +1000 Subject: [POWERPC] Enable tickless idle and high res timers for powerpc Signed-off-by: Tony Breeds Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/idle.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index a9e9cbd..abd2957 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -59,6 +60,7 @@ void cpu_idle(void) set_thread_flag(TIF_POLLING_NRFLAG); while (1) { + tick_nohz_stop_sched_tick(); while (!need_resched() && !cpu_should_die()) { ppc64_runlatch_off(); @@ -90,6 +92,7 @@ void cpu_idle(void) HMT_medium(); ppc64_runlatch_on(); + tick_nohz_restart_sched_tick(); if (cpu_should_die()) cpu_die(); preempt_enable_no_resched(); -- cgit v1.1 From 84e3ad5b91ed51db7513a54ad7ed652ab0ca4ba1 Mon Sep 17 00:00:00 2001 From: Valentine Barshak Date: Sat, 22 Sep 2007 00:44:38 +1000 Subject: [POWERPC] 4xx: Introduce cpu_setup functionality to 44x platform This adds cpu_setup functionality for ppc44x platform. Low level cpu-spefic initialization routines should be placed in cpu_setup_44x.S and a callback should be added to cputable. The cpu_setup is invoked by identify_cpu() function at early init. Signed-off-by: Valentine Barshak Signed-off-by: Josh Boyer --- arch/powerpc/kernel/Makefile | 1 + arch/powerpc/kernel/cpu_setup_44x.S | 19 +++++++++++++++++++ arch/powerpc/kernel/cputable.c | 13 +++++++------ 3 files changed, 27 insertions(+), 6 deletions(-) create mode 100644 arch/powerpc/kernel/cpu_setup_44x.S (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index fb33a7e..f1dd904 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -58,6 +58,7 @@ obj-y += time.o prom.o traps.o setup-common.o \ misc_$(CONFIG_WORD_SIZE).o obj-$(CONFIG_PPC32) += entry_32.o setup_32.o obj-$(CONFIG_PPC64) += dma_64.o iommu.o +obj-$(CONFIG_44x) += cpu_setup_44x.o obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o obj-$(CONFIG_MODULES) += ppc_ksyms.o obj-$(CONFIG_BOOTX_TEXT) += btext.o diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S new file mode 100644 index 0000000..6a6e6c7 --- /dev/null +++ b/arch/powerpc/kernel/cpu_setup_44x.S @@ -0,0 +1,19 @@ +/* + * This file contains low level CPU setup functions. + * Valentine Barshak + * MontaVista Software, Inc (c) 2007 + * + * Based on cpu_setup_6xx code by + * Benjamin Herrenschmidt + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#include +#include +#include + diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 8eb8087..8711499 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1319,17 +1319,18 @@ struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr) for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) if ((pvr & s->pvr_mask) == s->pvr_value) { *cur = cpu_specs + i; -#ifdef CONFIG_PPC64 - /* ppc64 expects identify_cpu to also call setup_cpu - * for that processor. I will consolidate that at a - * later time, for now, just use our friend #ifdef. +#if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE) + /* ppc64 and booke expect identify_cpu to also call + * setup_cpu for that processor. I will consolidate + * that at a later time, for now, just use #ifdef. * we also don't need to PTRRELOC the function pointer - * on ppc64 as we are running at 0 in real mode. + * on ppc64 and booke as we are running at 0 in real + * mode on ppc64 and reloc_offset is always 0 on booke. */ if (s->cpu_setup) { s->cpu_setup(offset, s); } -#endif /* CONFIG_PPC64 */ +#endif /* CONFIG_PPC64 || CONFIG_BOOKE */ return s; } BUG(); -- cgit v1.1 From 8112753bb2c0045398c89d0647792b39805f6d40 Mon Sep 17 00:00:00 2001 From: Valentine Barshak Date: Sat, 22 Sep 2007 00:46:57 +1000 Subject: [POWERPC] 4xx: Move 440EP(x) FPU setup from head_44x to cpu_setup_4xx The PowerPC 440EP(x) FPU init is currently done in head_44x under ifdefs. Since we should support more then one board in the same kernel, we move FPU initialization code from head_44x to cpu_setup_44x and add cpu_setup callbacks for 440EP(x). Signed-off-by: Valentine Barshak Signed-off-by: Josh Boyer --- arch/powerpc/kernel/cpu_setup_44x.S | 14 ++++++++++++++ arch/powerpc/kernel/cputable.c | 6 ++++++ arch/powerpc/kernel/head_44x.S | 10 ---------- 3 files changed, 20 insertions(+), 10 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S index 6a6e6c7..c790634 100644 --- a/arch/powerpc/kernel/cpu_setup_44x.S +++ b/arch/powerpc/kernel/cpu_setup_44x.S @@ -17,3 +17,17 @@ #include #include +_GLOBAL(__setup_cpu_440ep) + b __init_fpu_44x +_GLOBAL(__setup_cpu_440epx) + b __init_fpu_44x + +/* enable APU between CPU and FPU */ +_GLOBAL(__init_fpu_44x) + mfspr r3,SPRN_CCR0 + /* Clear DAPUIB flag in CCR0 */ + rlwinm r3,r3,0,12,10 + mtspr SPRN_CCR0,r3 + isync + blr + diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 8711499..94d9819 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -31,6 +31,8 @@ EXPORT_SYMBOL(cur_cpu_spec); * and ppc64 */ #ifdef CONFIG_PPC32 +extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec); +extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); @@ -1111,6 +1113,7 @@ static struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .icache_bsize = 32, .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440ep, .platform = "ppc440", }, { @@ -1121,6 +1124,7 @@ static struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .icache_bsize = 32, .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440ep, .platform = "ppc440", }, { /* 440EPX */ 
@@ -1131,6 +1135,8 @@ static struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .icache_bsize = 32, .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440epx, + .platform = "ppc440", }, { /* 440GRX */ .pvr_mask = 0xf0000ffb, diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index c6a510b..864d63f 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -217,16 +217,6 @@ skpinv: addi r4,r4,1 /* Increment */ lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ mtspr SPRN_IVPR,r4 -#if defined(CONFIG_440EP) || defined(CONFIG_440EPX) - /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */ - mfspr r2,SPRN_CCR0 - lis r3,0xffef - ori r3,r3,0xffff - and r2,r2,r3 - mtspr SPRN_CCR0,r2 - isync -#endif - /* * This is where the main kernel code starts. */ -- cgit v1.1 From 340ffd267c85fc28da7cfd681b177c816af800cf Mon Sep 17 00:00:00 2001 From: Valentine Barshak Date: Sat, 22 Sep 2007 00:50:09 +1000 Subject: [POWERPC] 4xx: 440EPx/GRx incorrect write to DDR SDRAM errata workaround Add a workaround for PowerPC 440EPx/GRx incorrect write to DDR SDRAM errata. Data can be written to wrong address in SDRAM when write pipelining enabled on plb0. We disable it in the cpu_setup for these processors at early init. Signed-off-by: Valentine Barshak Signed-off-by: Josh Boyer --- arch/powerpc/kernel/cpu_setup_44x.S | 25 ++++++++++++++++++++++++- arch/powerpc/kernel/cputable.c | 3 +++ 2 files changed, 27 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S index c790634..8e1812e 100644 --- a/arch/powerpc/kernel/cpu_setup_44x.S +++ b/arch/powerpc/kernel/cpu_setup_44x.S @@ -20,7 +20,14 @@ _GLOBAL(__setup_cpu_440ep) b __init_fpu_44x _GLOBAL(__setup_cpu_440epx) - b __init_fpu_44x + mflr r4 + bl __init_fpu_44x + bl __plb_disable_wrp + mtlr r4 + blr +_GLOBAL(__setup_cpu_440grx) + b __plb_disable_wrp + /* enable APU between CPU and FPU */ _GLOBAL(__init_fpu_44x) @@ -31,3 +38,19 @@ _GLOBAL(__init_fpu_44x) isync blr +/* + * Workaround for the incorrect write to DDR SDRAM errata. + * The write address can be corrupted during writes to + * DDR SDRAM when write pipelining is enabled on PLB0. + * Disable write pipelining here. + */ +#define DCRN_PLB4A0_ACR 0x81 + +_GLOBAL(__plb_disable_wrp) + mfdcr r3,DCRN_PLB4A0_ACR + /* clear WRP bit in PLB4A0_ACR */ + rlwinm r3,r3,0,8,6 + mtdcr DCRN_PLB4A0_ACR,r3 + isync + blr + diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 94d9819..b03a442 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -33,6 +33,7 @@ EXPORT_SYMBOL(cur_cpu_spec); #ifdef CONFIG_PPC32 extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec); +extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); @@ -1146,6 +1147,8 @@ static struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE, .icache_bsize = 32, .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440grx, + .platform = "ppc440", }, { /* 440GP Rev. 
B */ .pvr_mask = 0xf0000fff, -- cgit v1.1 From 26f571d7c968dbd30656fc1421eeb0d9088aaad9 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 4 Oct 2007 11:02:09 +1000 Subject: [PPC] Use cpu setup routines from cpu_setup_44x.S for ARCH=ppc Commit 8112753bb2c0045398c89d0647792b39805f6d40 made 44x in ARCH=powerpc builds use cpu setup routines in cpu_setup_44x.S, but didn't make a similar change for ARCH=ppc, and consequently the ARCH=ppc builds fail with undefined symbols (since both use the same cputable.c). This fixes it by including cpu_setup_44x.S in the ARCH=ppc builds, and by taking out the now-redundant FPU initialization in arch/ppc/kernel/head_44x.S. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index f1dd904..8327d92 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o \ swsusp_$(CONFIG_WORD_SIZE).o obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o obj-$(CONFIG_MODULES) += module_$(CONFIG_WORD_SIZE).o +obj-$(CONFIG_44x) += cpu_setup_44x.o ifeq ($(CONFIG_PPC_MERGE),y) @@ -58,7 +59,6 @@ obj-y += time.o prom.o traps.o setup-common.o \ misc_$(CONFIG_WORD_SIZE).o obj-$(CONFIG_PPC32) += entry_32.o setup_32.o obj-$(CONFIG_PPC64) += dma_64.o iommu.o -obj-$(CONFIG_44x) += cpu_setup_44x.o obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o obj-$(CONFIG_MODULES) += ppc_ksyms.o obj-$(CONFIG_BOOTX_TEXT) += btext.o -- cgit v1.1 From c374e00e17f1c10768d5af922a1ff33e43df2eb0 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Mon, 16 Jul 2007 11:43:43 -0500 Subject: [POWERPC] Add early debug console for CPM serial ports. This code assumes that the ports have been previously set up, with buffers in DPRAM. Signed-off-by: Scott Wood Acked-by: David Gibson Signed-off-by: Kumar Gala --- arch/powerpc/kernel/head_32.S | 16 ++++++++++++++++ arch/powerpc/kernel/udbg.c | 2 ++ 2 files changed, 18 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index c86c626..d83f04e 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -149,6 +149,9 @@ __after_mmu_off: #if defined(CONFIG_BOOTX_TEXT) bl setup_disp_bat #endif +#ifdef CONFIG_PPC_EARLY_DEBUG_CPM + bl setup_cpm_bat +#endif /* * Call setup_cpu for CPU 0 and initialize 6xx Idle @@ -1245,6 +1248,19 @@ setup_disp_bat: blr #endif /* CONFIG_BOOTX_TEXT */ +#ifdef CONFIG_PPC_EARLY_DEBUG_CPM +setup_cpm_bat: + lis r8, 0xf000 + ori r8, r8, 0x002a + mtspr SPRN_DBAT1L, r8 + + lis r11, 0xf000 + ori r11, r11, (BL_1M << 2) | 2 + mtspr SPRN_DBAT1U, r11 + + blr +#endif + #ifdef CONFIG_8260 /* Jump into the system reset for the rom. * We first disable the MMU, and then jump to the ROM reset address. diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index 0f9b4ea..d723070 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -54,6 +54,8 @@ void __init udbg_early_init(void) #elif defined(CONFIG_PPC_EARLY_DEBUG_44x) /* PPC44x debug */ udbg_init_44x_as1(); +#elif defined(CONFIG_PPC_EARLY_DEBUG_CPM) + udbg_init_cpm(); #endif } -- cgit v1.1 From ccf0d68e835003f19d5a9463d5a8c1e092d3a31a Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Mon, 16 Jul 2007 11:28:18 -0500 Subject: [POWERPC] 8xx: Fix CONFIG_PIN_TLB. 1. Move CONSISTENT_START on 8xx so that it doesn't overlap the IMMR mapping. 2. 
The wrong register was being loaded into SPRN_MD_RPN. Signed-off-by: Scott Wood Signed-off-by: Kumar Gala --- arch/powerpc/kernel/head_8xx.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 96cea8e..9c30938 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -727,13 +727,13 @@ initial_mmu: mtspr SPRN_MD_TWC, r9 li r11, MI_BOOTINIT /* Create RPN for address 0 */ addis r11, r11, 0x0080 /* Add 8M */ - mtspr SPRN_MD_RPN, r8 + mtspr SPRN_MD_RPN, r11 addis r8, r8, 0x0080 /* Add 8M */ mtspr SPRN_MD_EPN, r8 mtspr SPRN_MD_TWC, r9 addis r11, r11, 0x0080 /* Add 8M */ - mtspr SPRN_MD_RPN, r8 + mtspr SPRN_MD_RPN, r11 #endif /* Since the cache is enabled according to the information we -- cgit v1.1 From 7401685242fbcbf4b0660726372c77a88c4af17d Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Mon, 25 Jun 2007 14:50:41 -0500 Subject: [POWERPC] 8xx: Work around CPU15 erratum. The CPU15 erratum on MPC8xx chips can cause incorrect code execution under certain circumstances, where there is a conditional or indirect branch in the last word of a page, with a target in the last cache line of the next page. This patch implements one of the suggested workarounds, by forcing a TLB miss whenever execution crosses a page boundary. This is done by invalidating the pages before and after the one being loaded into the TLB in the ITLB miss handler. Signed-off-by: Scott Wood Signed-off-by: Kumar Gala --- arch/powerpc/kernel/head_8xx.S | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 9c30938..f745839 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -298,6 +298,12 @@ InstructionTLBMiss: stw r10, 0(r0) stw r11, 4(r0) mfspr r10, SPRN_SRR0 /* Get effective address of fault */ +#ifdef CONFIG_8xx_CPU15 + addi r11, r10, 0x1000 + tlbie r11 + addi r11, r10, -0x1000 + tlbie r11 +#endif DO_8xx_CPU6(0x3780, r3) mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */ mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ -- cgit v1.1 From 5dd57a1308a7e40e04fb6ecbff170df7a0b92cd8 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Tue, 18 Sep 2007 15:29:35 -0500 Subject: [POWERPC] 8xx: Move softemu8xx.c from arch/ppc Previously, Soft_emulate_8xx was called with no implementation, resulting in build failures whenever building 8xx without math emulation. The implementation is copied from arch/ppc to resolve this issue. However, this sort of minimal emulation is not a very good idea other than for compatibility with existing userspaces, as it's less efficient than soft-float and can mislead users into believing they have soft-float. Thus, it is made a configurable option, off by default. 
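For concreteness, the decode that Soft_emulate_8xx (added below) performs can be traced on a single D-form load. The numbers that follow are a hand-worked illustration, not something taken from the patch:

/* Worked example: lfd f0, 8(r3) encodes as 0xc8030008, so in
 * Soft_emulate_8xx the fields come out as
 *   instword >> 26          = 50 -> LFD
 *   (instword >> 21) & 0x1f = 0  -> destination register f0
 *   (instword >> 16) & 0x1f = 3  -> base register r3
 *   instword & 0xffff       = 8  -> displacement (sign-extended)
 * so the handler copy_from_user()s sizeof(double) bytes from
 * regs->gpr[3] + 8 into current->thread.fpr[0] and, on success,
 * advances regs->nip by 4.
 */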
Signed-off-by: Scott Wood Signed-off-by: Kumar Gala --- arch/powerpc/kernel/Makefile | 2 + arch/powerpc/kernel/softemu8xx.c | 202 +++++++++++++++++++++++++++++++++++++++ arch/powerpc/kernel/traps.c | 6 +- 3 files changed, 209 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/kernel/softemu8xx.c (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 8327d92..ca51f0c 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -75,6 +75,8 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \ obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o +obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o + ifneq ($(CONFIG_PPC_INDIRECT_IO),y) obj-y += iomap.o endif diff --git a/arch/powerpc/kernel/softemu8xx.c b/arch/powerpc/kernel/softemu8xx.c new file mode 100644 index 0000000..67d6f68 --- /dev/null +++ b/arch/powerpc/kernel/softemu8xx.c @@ -0,0 +1,202 @@ +/* + * Software emulation of some PPC instructions for the 8xx core. + * + * Copyright (C) 1998 Dan Malek (dmalek@jlc.net) + * + * Software floating emuation for the MPC8xx processor. I did this mostly + * because it was easier than trying to get the libraries compiled for + * software floating point. The goal is still to get the libraries done, + * but I lost patience and needed some hacks to at least get init and + * shells running. The first problem is the setjmp/longjmp that save + * and restore the floating point registers. + * + * For this emulation, our working registers are found on the register + * save area. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* Eventually we may need a look-up table, but this works for now. +*/ +#define LFS 48 +#define LFD 50 +#define LFDU 51 +#define STFD 54 +#define STFDU 55 +#define FMR 63 + +void print_8xx_pte(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + + printk(" pte @ 0x%8lx: ", addr); + pgd = pgd_offset(mm, addr & PAGE_MASK); + if (pgd) { + pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK), + addr & PAGE_MASK); + if (pmd && pmd_present(*pmd)) { + pte = pte_offset_kernel(pmd, addr & PAGE_MASK); + if (pte) { + printk(" (0x%08lx)->(0x%08lx)->0x%08lx\n", + (long)pgd, (long)pte, (long)pte_val(*pte)); +#define pp ((long)pte_val(*pte)) + printk(" RPN: %05lx PP: %lx SPS: %lx SH: %lx " + "CI: %lx v: %lx\n", + pp>>12, /* rpn */ + (pp>>10)&3, /* pp */ + (pp>>3)&1, /* small */ + (pp>>2)&1, /* shared */ + (pp>>1)&1, /* cache inhibit */ + pp&1 /* valid */ + ); +#undef pp + } + else { + printk("no pte\n"); + } + } + else { + printk("no pmd\n"); + } + } + else { + printk("no pgd\n"); + } +} + +int get_8xx_pte(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int retval = 0; + + pgd = pgd_offset(mm, addr & PAGE_MASK); + if (pgd) { + pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK), + addr & PAGE_MASK); + if (pmd && pmd_present(*pmd)) { + pte = pte_offset_kernel(pmd, addr & PAGE_MASK); + if (pte) { + retval = (int)pte_val(*pte); + } + } + } + return retval; +} + +/* + * We return 0 on success, 1 on unimplemented instruction, and EFAULT + * if a load/store faulted. 
+ */ +int Soft_emulate_8xx(struct pt_regs *regs) +{ + u32 inst, instword; + u32 flreg, idxreg, disp; + int retval; + s16 sdisp; + u32 *ea, *ip; + + retval = 0; + + instword = *((u32 *)regs->nip); + inst = instword >> 26; + + flreg = (instword >> 21) & 0x1f; + idxreg = (instword >> 16) & 0x1f; + disp = instword & 0xffff; + + ea = (u32 *)(regs->gpr[idxreg] + disp); + ip = (u32 *)¤t->thread.fpr[flreg]; + + switch ( inst ) + { + case LFD: + /* this is a 16 bit quantity that is sign extended + * so use a signed short here -- Cort + */ + sdisp = (instword & 0xffff); + ea = (u32 *)(regs->gpr[idxreg] + sdisp); + if (copy_from_user(ip, ea, sizeof(double))) + retval = -EFAULT; + break; + + case LFDU: + if (copy_from_user(ip, ea, sizeof(double))) + retval = -EFAULT; + else + regs->gpr[idxreg] = (u32)ea; + break; + case LFS: + sdisp = (instword & 0xffff); + ea = (u32 *)(regs->gpr[idxreg] + sdisp); + if (copy_from_user(ip, ea, sizeof(float))) + retval = -EFAULT; + break; + case STFD: + /* this is a 16 bit quantity that is sign extended + * so use a signed short here -- Cort + */ + sdisp = (instword & 0xffff); + ea = (u32 *)(regs->gpr[idxreg] + sdisp); + if (copy_to_user(ea, ip, sizeof(double))) + retval = -EFAULT; + break; + + case STFDU: + if (copy_to_user(ea, ip, sizeof(double))) + retval = -EFAULT; + else + regs->gpr[idxreg] = (u32)ea; + break; + case FMR: + /* assume this is a fp move -- Cort */ + memcpy(ip, ¤t->thread.fpr[(instword>>11)&0x1f], + sizeof(double)); + break; + default: + retval = 1; + printk("Bad emulation %s/%d\n" + " NIP: %08lx instruction: %08x opcode: %x " + "A: %x B: %x C: %x code: %x rc: %x\n", + current->comm,current->pid, + regs->nip, + instword,inst, + (instword>>16)&0x1f, + (instword>>11)&0x1f, + (instword>>6)&0x1f, + (instword>>1)&0x3ff, + instword&1); + { + int pa; + print_8xx_pte(current->mm,regs->nip); + pa = get_8xx_pte(current->mm,regs->nip) & PAGE_MASK; + pa |= (regs->nip & ~PAGE_MASK); + pa = (unsigned long)__va(pa); + printk("Kernel VA for NIP %x ", pa); + print_8xx_pte(current->mm,pa); + } + } + + if (retval == 0) + regs->nip += 4; + + return retval; +} diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 5a49eab..2f1857c 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -906,7 +906,9 @@ void SoftwareEmulation(struct pt_regs *regs) { extern int do_mathemu(struct pt_regs *); extern int Soft_emulate_8xx(struct pt_regs *); +#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU) int errcode; +#endif CHECK_FULL_REGS(regs); @@ -936,7 +938,7 @@ void SoftwareEmulation(struct pt_regs *regs) return; } -#else +#elif defined(CONFIG_8XX_MINIMAL_FPEMU) errcode = Soft_emulate_8xx(regs); switch (errcode) { case 0: @@ -949,6 +951,8 @@ void SoftwareEmulation(struct pt_regs *regs) _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); return; } +#else + _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); #endif } #endif /* CONFIG_8xx */ -- cgit v1.1 From 3c5df5c26ed17828760945d59653a2e22e3fb63f Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Thu, 27 Sep 2007 08:43:35 -0500 Subject: [POWERPC] Cleaned up whitespace in head_fsl_booke.S Signed-off-by: Kumar Gala --- arch/powerpc/kernel/head_fsl_booke.S | 76 ++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 38 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index bfc3870..ee33ddd9 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ 
b/arch/powerpc/kernel/head_fsl_booke.S @@ -2,27 +2,27 @@ * Kernel execution entry point code. * * Copyright (c) 1995-1996 Gary Thomas - * Initial PowerPC version. + * Initial PowerPC version. * Copyright (c) 1996 Cort Dougan - * Rewritten for PReP + * Rewritten for PReP * Copyright (c) 1996 Paul Mackerras - * Low-level exception handers, MMU support, and rewrite. + * Low-level exception handers, MMU support, and rewrite. * Copyright (c) 1997 Dan Malek - * PowerPC 8xx modifications. + * PowerPC 8xx modifications. * Copyright (c) 1998-1999 TiVo, Inc. - * PowerPC 403GCX modifications. + * PowerPC 403GCX modifications. * Copyright (c) 1999 Grant Erickson - * PowerPC 403GCX/405GP modifications. + * PowerPC 403GCX/405GP modifications. * Copyright 2000 MontaVista Software Inc. * PPC405 modifications - * PowerPC 403GCX/405GP modifications. - * Author: MontaVista Software, Inc. - * frank_rowand@mvista.com or source@mvista.com - * debbie_chu@mvista.com + * PowerPC 403GCX/405GP modifications. + * Author: MontaVista Software, Inc. + * frank_rowand@mvista.com or source@mvista.com + * debbie_chu@mvista.com * Copyright 2002-2004 MontaVista Software, Inc. - * PowerPC 44x support, Matt Porter + * PowerPC 44x support, Matt Porter * Copyright 2004 Freescale Semiconductor, Inc - * PowerPC e500 modifications, Kumar Gala + * PowerPC e500 modifications, Kumar Gala * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -146,13 +146,13 @@ skpinv: addi r6,r6,1 /* Increment */ bne 1b /* If not, repeat */ /* Invalidate TLB0 */ - li r6,0x04 + li r6,0x04 tlbivax 0,r6 #ifdef CONFIG_SMP tlbsync #endif /* Invalidate TLB1 */ - li r6,0x0c + li r6,0x0c tlbivax 0,r6 #ifdef CONFIG_SMP tlbsync @@ -211,7 +211,7 @@ skpinv: addi r6,r6,1 /* Increment */ mtspr SPRN_MAS1,r6 tlbwe /* Invalidate TLB1 */ - li r9,0x0c + li r9,0x0c tlbivax 0,r9 #ifdef CONFIG_SMP tlbsync @@ -254,7 +254,7 @@ skpinv: addi r6,r6,1 /* Increment */ mtspr SPRN_MAS1,r8 tlbwe /* Invalidate TLB1 */ - li r9,0x0c + li r9,0x0c tlbivax 0,r9 #ifdef CONFIG_SMP tlbsync @@ -294,7 +294,7 @@ skpinv: addi r6,r6,1 /* Increment */ #ifdef CONFIG_E200 oris r2,r2,MAS4_TLBSELD(1)@h #endif - mtspr SPRN_MAS4, r2 + mtspr SPRN_MAS4, r2 #if 0 /* Enable DOZE */ @@ -305,7 +305,7 @@ skpinv: addi r6,r6,1 /* Increment */ #ifdef CONFIG_E200 /* enable dedicated debug exception handling resources (Debug APU) */ mfspr r2,SPRN_HID0 - ori r2,r2,HID0_DAPUEN@l + ori r2,r2,HID0_DAPUEN@l mtspr SPRN_HID0,r2 #endif @@ -391,7 +391,7 @@ skpinv: addi r6,r6,1 /* Increment */ #ifdef CONFIG_PTE_64BIT #define PTE_FLAGS_OFFSET 4 #define FIND_PTE \ - rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ + rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ beq 2f; /* Bail if no table */ \ @@ -487,7 +487,7 @@ interrupt_base: */ andi. 
r11, r11, _PAGE_HWEXEC rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ - ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ + ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ /* update search PID in MAS6, AS = 0 */ mfspr r12, SPRN_PID0 @@ -694,7 +694,7 @@ interrupt_base: START_EXCEPTION(SPEUnavailable) NORMAL_EXCEPTION_PROLOG bne load_up_spe - addi r3,r1,STACK_FRAME_OVERHEAD + addi r3,r1,STACK_FRAME_OVERHEAD EXC_XFER_EE_LITE(0x2010, KernelSPE) #else EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE) @@ -741,10 +741,10 @@ data_access: * Both the instruction and data TLB miss get to this * point to load the TLB. - * r10 - EA of fault - * r11 - TLB (info from Linux PTE) - * r12, r13 - available to use - * CR5 - results of addr < TASK_SIZE + * r10 - EA of fault + * r11 - TLB (info from Linux PTE) + * r12, r13 - available to use + * CR5 - results of addr < TASK_SIZE * MAS0, MAS1 - loaded with proper value when we get here * MAS2, MAS3 - will need additional info from Linux PTE * Upon exit, we reload everything and RFI. @@ -813,7 +813,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS) lwz r13, tlbcam_index@l(r13) rlwimi r12, r13, 0, 20, 31 7: - mtspr SPRN_MAS0,r12 + mtspr SPRN_MAS0,r12 #endif /* CONFIG_E200 */ tlbwe @@ -855,17 +855,17 @@ load_up_spe: beq 1f addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ SAVE_32EVRS(0,r10,r4) - evxor evr10, evr10, evr10 /* clear out evr10 */ + evxor evr10, evr10, evr10 /* clear out evr10 */ evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ li r5,THREAD_ACC - evstddx evr10, r4, r5 /* save off accumulator */ + evstddx evr10, r4, r5 /* save off accumulator */ lwz r5,PT_REGS(r4) lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) lis r10,MSR_SPE@h andc r4,r4,r10 /* disable SPE for previous task */ stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1: -#endif /* CONFIG_SMP */ +#endif /* !CONFIG_SMP */ /* enable use of SPE after return */ oris r9,r9,MSR_SPE@h mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ @@ -878,7 +878,7 @@ load_up_spe: #ifndef CONFIG_SMP subi r4,r5,THREAD stw r4,last_task_used_spe@l(r3) -#endif /* CONFIG_SMP */ +#endif /* !CONFIG_SMP */ /* restore registers and return */ 2: REST_4GPRS(3, r11) lwz r10,_CCR(r11) @@ -963,10 +963,10 @@ _GLOBAL(giveup_spe) lwz r5,PT_REGS(r3) cmpi 0,r5,0 SAVE_32EVRS(0, r4, r3) - evxor evr6, evr6, evr6 /* clear out evr6 */ + evxor evr6, evr6, evr6 /* clear out evr6 */ evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ li r4,THREAD_ACC - evstddx evr6, r4, r3 /* save off accumulator */ + evstddx evr6, r4, r3 /* save off accumulator */ mfspr r6,SPRN_SPEFSCR stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */ beq 1f @@ -979,7 +979,7 @@ _GLOBAL(giveup_spe) li r5,0 lis r4,last_task_used_spe@ha stw r5,last_task_used_spe@l(r4) -#endif /* CONFIG_SMP */ +#endif /* !CONFIG_SMP */ blr #endif /* CONFIG_SPE */ @@ -1000,15 +1000,15 @@ _GLOBAL(giveup_fpu) */ _GLOBAL(abort) li r13,0 - mtspr SPRN_DBCR0,r13 /* disable all debug events */ + mtspr SPRN_DBCR0,r13 /* disable all debug events */ isync mfmsr r13 ori r13,r13,MSR_DE@l /* Enable Debug Events */ mtmsr r13 isync - mfspr r13,SPRN_DBCR0 - lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h - mtspr SPRN_DBCR0,r13 + mfspr r13,SPRN_DBCR0 + lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h + mtspr SPRN_DBCR0,r13 isync _GLOBAL(set_context) @@ -1043,7 +1043,7 @@ swapper_pg_dir: /* Reserved 4k for the critical exception stack & 4k for the machine * check stack per CPU for kernel mode exceptions */ .section .bss - .align 12 + .align 12 
exception_stack_bottom: .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS .globl exception_stack_top -- cgit v1.1 From 6039680705906f270411435c05c869ac4f59ef10 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 3 Oct 2007 10:43:10 -0500 Subject: [POWERPC] Update .gitignore for new vdso generated files We now generate vdso[32,64].so.dbg as part of the build so add them to .gitignore Signed-off-by: Kumar Gala --- arch/powerpc/kernel/vdso32/.gitignore | 1 + arch/powerpc/kernel/vdso64/.gitignore | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vdso32/.gitignore b/arch/powerpc/kernel/vdso32/.gitignore index e45fba9..fea58098 100644 --- a/arch/powerpc/kernel/vdso32/.gitignore +++ b/arch/powerpc/kernel/vdso32/.gitignore @@ -1 +1,2 @@ vdso32.lds +vdso32.so.dbg diff --git a/arch/powerpc/kernel/vdso64/.gitignore b/arch/powerpc/kernel/vdso64/.gitignore index 3fd18cf..77a0b42 100644 --- a/arch/powerpc/kernel/vdso64/.gitignore +++ b/arch/powerpc/kernel/vdso64/.gitignore @@ -1 +1,2 @@ vdso64.lds +vdso64.so.dbg -- cgit v1.1 From cd6eed3718b1b32c60a6ee5658ae6341de3177d9 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Fri, 21 Sep 2007 18:08:17 +1000 Subject: [POWERPC] Prepare to remove of_platform_driver name The name field of of_platform_driver is just copied into the included device_driver. By not overriding an already initialised device_driver name, we can convert the drivers over time to stop using the of_platform_driver name. Also we were not copying the owner field from of_platform_driver, so do the same with it. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/of_platform.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index f70e787..eca8ccc 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -19,11 +19,11 @@ #include #include #include +#include +#include #include #include -#include -#include #include #include #include @@ -70,7 +70,10 @@ postcore_initcall(of_bus_driver_init); int of_register_platform_driver(struct of_platform_driver *drv) { /* initialize common driver fields */ - drv->driver.name = drv->name; + if (!drv->driver.name) + drv->driver.name = drv->name; + if (!drv->driver.owner) + drv->driver.owner = drv->owner; drv->driver.bus = &of_platform_bus_type; /* register with core */ @@ -385,9 +388,11 @@ static struct of_device_id of_pci_phb_ids[] = { }; static struct of_platform_driver of_pci_phb_driver = { - .name = "of-pci", - .match_table = of_pci_phb_ids, - .probe = of_pci_phb_probe, + .match_table = of_pci_phb_ids, + .probe = of_pci_phb_probe, + .driver = { + .name = "of-pci", + }, }; static __init int of_pci_phb_init(void) -- cgit v1.1 From 84fdde5af1eca5ff170d1dff7e2681b0a50a9ecb Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 3 Oct 2007 14:41:15 +1000 Subject: [POWERPC] Use cache-inhibited large page bit from firmware Discussions with firmware architects have confirmed that the bit in the ibm,pa-features property that indicates support for cache-inhibited large (>= 64kB) page mappings does in fact mean that the hypervisor allows 64kB mappings to I/O devices. Thus we can now enable the code that tests that bit and sets our CPU_FTR_CI_LARGE_PAGE feature bit. 
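Going by the ibm_pa_feature field order used in this table (feature bits, then byte index, bit index and an invert flag), the entry re-enabled in the hunk below, {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, asks for the feature when byte 1, bit 2 of the ibm,pa-features attribute bytes is set, with bits counted from the most significant end. A minimal sketch of that test follows; the helper name is hypothetical, and the real scan in prom.c additionally walks the property's descriptor header and honours the invert flag:

/* sketch only -- pa_feature_bit() is not a kernel function */
static int pa_feature_bit(const unsigned char *pa_ftrs, unsigned long len,
			  unsigned int byte, unsigned int bit)
{
	if (byte >= len)
		return 0;
	/* ibm,pa-features numbers bits from the most significant end */
	return (pa_ftrs[byte] >> (7 - bit)) & 1;
}

/* e.g. pa_feature_bit(ftrs, len, 1, 2) would correspond to the
 * CPU_FTR_CI_LARGE_PAGE entry above */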
Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 172dcc3..9f329a8 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -531,10 +531,7 @@ static struct ibm_pa_feature { {CPU_FTR_CTRL, 0, 0, 3, 0}, {CPU_FTR_NOEXECUTE, 0, 0, 6, 0}, {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1}, -#if 0 - /* put this back once we know how to test if firmware does 64k IO */ {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, -#endif {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, }; -- cgit v1.1 From 46b45b10f1425d02819b525e3805ddacd9ad4f29 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 9 Oct 2007 17:03:57 +1000 Subject: [POWERPC] Align the sys_call_table Our _GLOBAL macro does a ".align 2" so the alignment is fine for 32 bit, but on 64 bit it is possible for it to end up only 4 byte aligned. I don't know if it matters, but it can't hurt to 8 byte align it. It also means that when we build with --emit_relocs, none of our 64 bit relocations are to misaligned places. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/systbl.S | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 579de70..93219c3 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -39,6 +39,8 @@ #ifdef CONFIG_PPC64 #define sys_sigpending sys_ni_syscall #define sys_old_getrlimit sys_ni_syscall + + .p2align 3 #endif _GLOBAL(sys_call_table) -- cgit v1.1 From dc9b43d0f706852fc4abce5bf28958db41524328 Mon Sep 17 00:00:00 2001 From: Wolfgang Denk Date: Wed, 10 Oct 2007 08:36:18 +1000 Subject: [POWERPC] Disable vDSO support for ARCH=ppc where it's not implemented Signed-off-by: Wolfgang Denk Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/vdso.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 213fa31..2322ba5 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -766,7 +766,9 @@ static int __init vdso_init(void) return 0; } +#ifdef CONFIG_PPC_MERGE arch_initcall(vdso_init); +#endif int in_gate_area_no_task(unsigned long addr) { -- cgit v1.1 From 38db7e740ade7f07f6315e3a3b1172d7e456b793 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Thu, 11 Oct 2007 04:48:18 +1000 Subject: [POWERPC] Only call ppc_md.setup_arch() if it is provided This allows platforms which don't have anything to do at setup_arch time (like a bunch of the 4xx platforms) to eliminate an empty setup_arch hook. 
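What the change enables on the platform side can be sketched with a hypothetical board definition; the names below are invented for illustration, and only the absence of a .setup_arch stub is the point:

static int __init myboard_probe(void)
{
	/* hypothetical compatible string */
	return of_flat_dt_is_compatible(of_get_flat_dt_root(), "acme,myboard");
}

define_machine(myboard) {
	.name		= "Acme 4xx board (illustrative)",
	.probe		= myboard_probe,
	/* no empty .setup_arch needed now that the caller checks for NULL */
	.calibrate_decr	= generic_calibrate_decr,
};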
Signed-off-by: Grant Likely Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/setup_32.c | 3 ++- arch/powerpc/kernel/setup_64.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 7474502..cd870a8 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -290,7 +290,8 @@ void __init setup_arch(char **cmdline_p) conswitchp = &dummy_con; #endif - ppc_md.setup_arch(); + if (ppc_md.setup_arch) + ppc_md.setup_arch(); if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); paging_init(); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 3089eae..008ab68 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -530,7 +530,8 @@ void __init setup_arch(char **cmdline_p) conswitchp = &dummy_con; #endif - ppc_md.setup_arch(); + if (ppc_md.setup_arch) + ppc_md.setup_arch(); paging_init(); ppc64_boot_msg(0x15, "Setup Done"); -- cgit v1.1 From b707f517d2c72c6b340ba762ed8a7de2b22935e9 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 11 Oct 2007 14:48:24 +1000 Subject: [POWERPC] Clean up vio.h Remove vio_dma_ops declaration (since it no longer exists) and some unused fields from struct vio_driver. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/vio.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 1d7b272..fd631d4 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -168,16 +168,6 @@ static int vio_bus_remove(struct device *dev) return 1; } -/* convert from struct device to struct vio_dev and pass to driver. */ -static void vio_bus_shutdown(struct device *dev) -{ - struct vio_dev *viodev = to_vio_dev(dev); - struct vio_driver *viodrv = to_vio_driver(dev->driver); - - if (dev->driver && viodrv->shutdown) - viodrv->shutdown(viodev); -} - /** * vio_register_driver: - Register a new vio driver * @drv: The vio_driver structure to be registered. @@ -397,7 +387,6 @@ static struct bus_type vio_bus_type = { .match = vio_bus_match, .probe = vio_bus_probe, .remove = vio_bus_remove, - .shutdown = vio_bus_shutdown, }; /** -- cgit v1.1 From 1670b2b2716b98541765da94be1332ad5c314b7a Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 11 Oct 2007 14:53:32 +1000 Subject: [POWERPC] Remove iSeries_vio_dev It was only being used to carry around dma_iommu_ops and vio_iommu_table which we can use directly instead. This also means that vio_bus_device doesn't need to refer to them either. 
Signed-off-by: Stephen Rothwell Acked-by: Jens Axboe Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/vio.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index fd631d4..eaf7f69 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -49,11 +49,8 @@ static struct vio_dev vio_bus_device = { /* fake "parent" device */ }; #ifdef CONFIG_PPC_ISERIES -struct device *iSeries_vio_dev = &vio_bus_device.dev; -EXPORT_SYMBOL(iSeries_vio_dev); - static struct iommu_table veth_iommu_table; -static struct iommu_table vio_iommu_table; +struct iommu_table vio_iommu_table; static void __init iommu_vio_init(void) { @@ -66,8 +63,6 @@ static void __init iommu_vio_init(void) printk("Virtual Bus VETH TCE table failed.\n"); if (!iommu_init_table(&vio_iommu_table, -1)) printk("Virtual Bus VIO TCE table failed.\n"); - vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops; - vio_bus_device.dev.archdata.dma_data = &vio_iommu_table; } #else static void __init iommu_vio_init(void) -- cgit v1.1 From dd9b67ab37d57da67840276d28957498512d4dd8 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 11 Oct 2007 14:55:02 +1000 Subject: [POWERPC] Remove more iSeries-specific stuff from vio.c Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/vio.c | 78 +++++++++++++++-------------------------------- 1 file changed, 25 insertions(+), 53 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index eaf7f69..b7c9e44 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -48,61 +48,33 @@ static struct vio_dev vio_bus_device = { /* fake "parent" device */ .dev.bus = &vio_bus_type, }; -#ifdef CONFIG_PPC_ISERIES -static struct iommu_table veth_iommu_table; -struct iommu_table vio_iommu_table; - -static void __init iommu_vio_init(void) -{ - iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table); - veth_iommu_table.it_size /= 2; - vio_iommu_table = veth_iommu_table; - vio_iommu_table.it_offset += veth_iommu_table.it_size; - - if (!iommu_init_table(&veth_iommu_table, -1)) - printk("Virtual Bus VETH TCE table failed.\n"); - if (!iommu_init_table(&vio_iommu_table, -1)) - printk("Virtual Bus VIO TCE table failed.\n"); -} -#else -static void __init iommu_vio_init(void) -{ -} -#endif - static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) { -#ifdef CONFIG_PPC_ISERIES - if (firmware_has_feature(FW_FEATURE_ISERIES)) { - if (strcmp(dev->type, "network") == 0) - return &veth_iommu_table; - return &vio_iommu_table; - } else -#endif - { - const unsigned char *dma_window; - struct iommu_table *tbl; - unsigned long offset, size; - - dma_window = of_get_property(dev->dev.archdata.of_node, - "ibm,my-dma-window", NULL); - if (!dma_window) - return NULL; - - tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); - - of_parse_dma_window(dev->dev.archdata.of_node, dma_window, - &tbl->it_index, &offset, &size); - - /* TCE table size - measured in tce entries */ - tbl->it_size = size >> IOMMU_PAGE_SHIFT; - /* offset for VIO should always be 0 */ - tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; - tbl->it_busno = 0; - tbl->it_type = TCE_VB; - - return iommu_init_table(tbl, -1); - } + const unsigned char *dma_window; + struct iommu_table *tbl; + unsigned long offset, size; + + if (firmware_has_feature(FW_FEATURE_ISERIES)) + return vio_build_iommu_table_iseries(dev); + + dma_window = 
of_get_property(dev->dev.archdata.of_node, + "ibm,my-dma-window", NULL); + if (!dma_window) + return NULL; + + tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); + + of_parse_dma_window(dev->dev.archdata.of_node, dma_window, + &tbl->it_index, &offset, &size); + + /* TCE table size - measured in tce entries */ + tbl->it_size = size >> IOMMU_PAGE_SHIFT; + /* offset for VIO should always be 0 */ + tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; + tbl->it_busno = 0; + tbl->it_type = TCE_VB; + + return iommu_init_table(tbl, -1); } /** -- cgit v1.1 From b833b481c10cf591b15cc674948cc514e55d3b94 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 11 Oct 2007 14:57:26 +1000 Subject: [POWERPC] iSeries: Move detection of virtual cdroms Now we will only have entries in the device tree for the actual existing devices (including their OS/400 properties). This way viocd.c gets all the information about the devices from the device tree. Signed-off-by: Stephen Rothwell Acked-by: Jens Axboe Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/vio.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index b7c9e44..cb22a35 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -247,9 +247,6 @@ static int __init vio_bus_init(void) int err; struct device_node *node_vroot; - if (firmware_has_feature(FW_FEATURE_ISERIES)) - iommu_vio_init(); - err = bus_register(&vio_bus_type); if (err) { printk(KERN_ERR "failed to register VIO bus\n"); -- cgit v1.1 From 87a72f9e171e558a0288aa83ef1dc6ae4af32224 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 4 Oct 2007 14:18:01 +1000 Subject: [POWERPC] Fix performance monitor on machines with logical PVR Some IBM machines supply a "logical" PVR (processor version register) value in the device tree in the cpu nodes rather than the real PVR. This is used for instance to indicate that the processors in a POWER6 partition have been configured by the hypervisor to run in POWER5+ mode rather than POWER6 mode. To cope with this, we call identify_cpu a second time with the logical PVR value (the first call is with the real PVR value in the very early setup code). However, POWER5+ machines can also supply a logical PVR value, and use the same value (the value that indicates a v2.04 architecture compliant processor). This causes problems for code that uses the performance monitor (such as oprofile), because the PMU registers are different in POWER6 (even in POWER5+ mode) from the real POWER5+. This change works around this problem by taking out the PMU information from the cputable entries for the logical PVR values, and changing identify_cpu so that the second call to it won't overwrite the PMU information that was established by the first call (the one with the real PVR), but does update the other fields. Specifically, if the cputable entry for the logical PVR value has num_pmcs == 0, none of the PMU-related fields get used. So that we can create a mixed cputable entry, we now make cur_cpu_spec point to a single static struct cpu_spec, and copy stuff from cpu_specs[i] into it. This has the side-effect that we can now make cpu_specs[] be initdata. Ultimately it would be good to move the PMU-related fields out to a separate structure, pointed to by the cputable entries, and change identify_cpu so that it saves the PMU info pointer, copies the whole structure, and restores the PMU info pointer, rather than identify_cpu having to list all the fields that are *not* PMU-related. 
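Paraphrasing the resulting two-pass identification (the call sites are simplified and illustrative rather than lifted from the tree; the copy logic itself is in the cputable.c hunk below):

/* early boot: the first pass uses the real PVR read from the hardware
 * and fills in the whole cpu_spec, including the PMU fields */
identify_cpu(offset, mfspr(SPRN_PVR));

/* later, while scanning the cpu nodes: a logical PVR supplied by
 * firmware (the 0x0f...... "architected mode" values) triggers a
 * second pass, which after this patch leaves the PMU fields alone */
if ((version & 0xff000000) == 0x0f000000)
	identify_cpu(0, version);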
Signed-off-by: Paul Mackerras Acked-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/cputable.c | 45 ++++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 21 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index b03a442..8662cf0 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -71,7 +71,7 @@ extern void __restore_cpu_ppc970(void); #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ PPC_FEATURE_BOOKE) -static struct cpu_spec cpu_specs[] = { +static struct cpu_spec __initdata cpu_specs[] = { #ifdef CONFIG_PPC64 { /* Power3 */ .pvr_mask = 0xffff0000, @@ -327,14 +327,6 @@ static struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER_POWER5_PLUS, .icache_bsize = 128, .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .oprofile_cpu_type = "ppc64/power6", - .oprofile_type = PPC_OPROFILE_POWER4, - .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV, - .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, - .oprofile_mmcra_clear = POWER6_MMCRA_THRM | - POWER6_MMCRA_OTHER, .platform = "power5+", }, { /* Power6 */ @@ -364,14 +356,6 @@ static struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER_POWER6, .icache_bsize = 128, .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .oprofile_cpu_type = "ppc64/power6", - .oprofile_type = PPC_OPROFILE_POWER4, - .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV, - .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, - .oprofile_mmcra_clear = POWER6_MMCRA_THRM | - POWER6_MMCRA_OTHER, .platform = "power6", }, { /* Cell Broadband Engine */ @@ -1316,18 +1300,37 @@ static struct cpu_spec cpu_specs[] = { #endif /* CONFIG_PPC32 */ }; -struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr) +static struct cpu_spec the_cpu_spec; + +struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) { struct cpu_spec *s = cpu_specs; - struct cpu_spec **cur = &cur_cpu_spec; + struct cpu_spec *t = &the_cpu_spec; int i; s = PTRRELOC(s); - cur = PTRRELOC(cur); + t = PTRRELOC(t); for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) if ((pvr & s->pvr_mask) == s->pvr_value) { - *cur = cpu_specs + i; + /* + * If we are overriding a previous value derived + * from the real PVR with a new value obtained + * using a logical PVR value, don't modify the + * performance monitor fields. + */ + if (t->num_pmcs && !s->num_pmcs) { + t->cpu_name = s->cpu_name; + t->cpu_features = s->cpu_features; + t->cpu_user_features = s->cpu_user_features; + t->icache_bsize = s->icache_bsize; + t->dcache_bsize = s->dcache_bsize; + t->cpu_setup = s->cpu_setup; + t->cpu_restore = s->cpu_restore; + t->platform = s->platform; + } else + *t = *s; + *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec; #if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE) /* ppc64 and booke expect identify_cpu to also call * setup_cpu for that processor. I will consolidate -- cgit v1.1 From d968014b7280e2c447b20363e576999040ac72ef Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 9 Oct 2007 09:59:17 +1000 Subject: [POWERPC] Prevent decrementer clockevents from firing early On old powermacs, we sometimes set the decrementer to 1 in order to trigger a decrementer interrupt, which we use to handle an interrupt that was pending at the time when it was re-enabled. This was causing the decrementer clock event device to call the event function for the next event early, which was causing problems when high-res timers were not enabled. 
This fixes the problem by recording the timebase value at which the next event should occur, and checking the current timebase against the recorded value in timer_interrupt. If it isn't time for the next event, it just reprograms the decrementer and returns. This also subtracts 1 from the value stored into the decrementer, which is appropriate because the decrementer interrupts on the transition from 0 to -1, not when the decrementer reaches 0. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/time.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index d20947c..64b503c 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -118,6 +118,7 @@ static struct clock_event_device decrementer_clockevent = { static DEFINE_PER_CPU(struct clock_event_device, decrementers); void init_decrementer_clockevent(void); +static DEFINE_PER_CPU(u64, decrementer_next_tb); #ifdef CONFIG_PPC_ISERIES static unsigned long __initdata iSeries_recal_titan; @@ -541,6 +542,7 @@ void timer_interrupt(struct pt_regs * regs) struct pt_regs *old_regs; int cpu = smp_processor_id(); struct clock_event_device *evt = &per_cpu(decrementers, cpu); + u64 now; /* Ensure a positive value is written to the decrementer, or else * some CPUs will continuue to take decrementer exceptions */ @@ -551,6 +553,14 @@ void timer_interrupt(struct pt_regs * regs) do_IRQ(regs); #endif + now = get_tb_or_rtc(); + if (now < per_cpu(decrementer_next_tb, cpu)) { + /* not time for this event yet */ + now = per_cpu(decrementer_next_tb, cpu) - now; + if (now <= DECREMENTER_MAX) + set_dec((unsigned int)now - 1); + return; + } old_regs = set_irq_regs(regs); irq_enter(); @@ -797,6 +807,10 @@ void __init clocksource_init(void) static int decrementer_set_next_event(unsigned long evt, struct clock_event_device *dev) { + __get_cpu_var(decrementer_next_tb) = get_tb_or_rtc() + evt; + /* The decrementer interrupts on the 0 -> -1 transition */ + if (evt) + --evt; set_dec(evt); return 0; } -- cgit v1.1 From cdec12aebe1b10aa58bebaa05bb697843154f7f9 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 11 Oct 2007 21:46:45 +1000 Subject: [POWERPC] Make clockevents work on PPC601 processors In testing the new clocksource and clockevent code on a PPC601 processor, I discovered that the clockevent multiplier value for the decrementer clockevent was overflowing. Because the RTCL register in the 601 effectively counts at 1GHz (it doesn't actually, but it increases by 128 every 128ns), and the shift value was 32, that meant the multiplier value had to be 2^32, which won't fit in an unsigned long on 32-bit. The same problem would arise on any platform where the timebase frequency was 1GHz or more (not that we actually have any such machines today). This fixes it by reducing the shift value to 16. Doing the calculations with a resolution of 2^-16 nanoseconds (15 femtoseconds) should be quite adequate. 
:) Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 64b503c..9368da3 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -108,7 +108,7 @@ static void decrementer_set_mode(enum clock_event_mode mode, static struct clock_event_device decrementer_clockevent = { .name = "decrementer", .rating = 200, - .shift = 32, + .shift = 16, .mult = 0, /* To be filled in */ .irq = 0, .set_next_event = decrementer_set_next_event, -- cgit v1.1 From 8a13c4f972e6c107d8cff54de647544c00e25b41 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Thu, 11 Oct 2007 13:36:52 -0500 Subject: [POWERPC] Use PAGE_OFFSET to tell if an address is user/kernel in SW TLB handlers Move to using PAGE_OFFSET instead of TASK_SIZE or KERNELBASE value on 6xx/40x/44x/fsl-booke to determine if the faulting address is a kernel or user space address. This mimics how the macro is_kernel_addr() works. Signed-off-by: Kumar Gala --- arch/powerpc/kernel/head_32.S | 18 +++++++++--------- arch/powerpc/kernel/head_40x.S | 6 +++--- arch/powerpc/kernel/head_44x.S | 6 +++--- arch/powerpc/kernel/head_fsl_booke.S | 11 ++++------- 4 files changed, 19 insertions(+), 22 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index d83f04e..a5b13ae 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -469,12 +469,12 @@ InstructionTLBMiss: mfctr r0 /* Get PTE (linux-style) and check access */ mfspr r3,SPRN_IMISS - lis r1,KERNELBASE@h /* check if kernel address */ - cmplw 0,r3,r1 + lis r1,PAGE_OFFSET@h /* check if kernel address */ + cmplw 0,r1,r3 mfspr r2,SPRN_SPRG3 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ lwz r2,PGDIR(r2) - blt+ 112f + bge- 112f mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ lis r2,swapper_pg_dir@ha /* if kernel address, use */ @@ -543,12 +543,12 @@ DataLoadTLBMiss: mfctr r0 /* Get PTE (linux-style) and check access */ mfspr r3,SPRN_DMISS - lis r1,KERNELBASE@h /* check if kernel address */ - cmplw 0,r3,r1 + lis r1,PAGE_OFFSET@h /* check if kernel address */ + cmplw 0,r1,r3 mfspr r2,SPRN_SPRG3 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ lwz r2,PGDIR(r2) - blt+ 112f + bge- 112f mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ lis r2,swapper_pg_dir@ha /* if kernel address, use */ @@ -615,12 +615,12 @@ DataStoreTLBMiss: mfctr r0 /* Get PTE (linux-style) and check access */ mfspr r3,SPRN_DMISS - lis r1,KERNELBASE@h /* check if kernel address */ - cmplw 0,r3,r1 + lis r1,PAGE_OFFSET@h /* check if kernel address */ + cmplw 0,r1,r3 mfspr r2,SPRN_SPRG3 li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ lwz r2,PGDIR(r2) - blt+ 112f + bge- 112f mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ lis r2,swapper_pg_dir@ha /* if kernel address, use */ diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index e312824..cfefc2d 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -289,7 +289,7 @@ label: /* If we are faulting a kernel address, we have to use the * kernel page tables. 
*/ - lis r11, TASK_SIZE@h + lis r11, PAGE_OFFSET@h cmplw r10, r11 blt+ 3f lis r11, swapper_pg_dir@h @@ -481,7 +481,7 @@ label: /* If we are faulting a kernel address, we have to use the * kernel page tables. */ - lis r11, TASK_SIZE@h + lis r11, PAGE_OFFSET@h cmplw r10, r11 blt+ 3f lis r11, swapper_pg_dir@h @@ -581,7 +581,7 @@ label: /* If we are faulting a kernel address, we have to use the * kernel page tables. */ - lis r11, TASK_SIZE@h + lis r11, PAGE_OFFSET@h cmplw r10, r11 blt+ 3f lis r11, swapper_pg_dir@h diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 864d63f..409db61 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -319,7 +319,7 @@ interrupt_base: /* If we are faulting a kernel address, we have to use the * kernel page tables. */ - lis r11, TASK_SIZE@h + lis r11, PAGE_OFFSET@h cmplw r10, r11 blt+ 3f lis r11, swapper_pg_dir@h @@ -458,7 +458,7 @@ interrupt_base: /* If we are faulting a kernel address, we have to use the * kernel page tables. */ - lis r11, TASK_SIZE@h + lis r11, PAGE_OFFSET@h cmplw r10, r11 blt+ 3f lis r11, swapper_pg_dir@h @@ -528,7 +528,7 @@ interrupt_base: /* If we are faulting a kernel address, we have to use the * kernel page tables. */ - lis r11, TASK_SIZE@h + lis r11, PAGE_OFFSET@h cmplw r10, r11 blt+ 3f lis r11, swapper_pg_dir@h diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index ee33ddd9..4b98227 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -461,8 +461,7 @@ interrupt_base: /* If we are faulting a kernel address, we have to use the * kernel page tables. */ - lis r11, TASK_SIZE@h - ori r11, r11, TASK_SIZE@l + lis r11, PAGE_OFFSET@h cmplw 0, r10, r11 bge 2f @@ -584,8 +583,7 @@ interrupt_base: /* If we are faulting a kernel address, we have to use the * kernel page tables. */ - lis r11, TASK_SIZE@h - ori r11, r11, TASK_SIZE@l + lis r11, PAGE_OFFSET@h cmplw 5, r10, r11 blt 5, 3f lis r11, swapper_pg_dir@h @@ -645,8 +643,7 @@ interrupt_base: /* If we are faulting a kernel address, we have to use the * kernel page tables. */ - lis r11, TASK_SIZE@h - ori r11, r11, TASK_SIZE@l + lis r11, PAGE_OFFSET@h cmplw 5, r10, r11 blt 5, 3f lis r11, swapper_pg_dir@h @@ -744,7 +741,7 @@ data_access: * r10 - EA of fault * r11 - TLB (info from Linux PTE) * r12, r13 - available to use - * CR5 - results of addr < TASK_SIZE + * CR5 - results of addr >= PAGE_OFFSET * MAS0, MAS1 - loaded with proper value when we get here * MAS2, MAS3 - will need additional info from Linux PTE * Upon exit, we reload everything and RFI. 
-- cgit v1.1 From 5d8476c8fab2a51475983af26e220ee84a3964f8 Mon Sep 17 00:00:00 2001 From: Stefan Roese Date: Thu, 11 Oct 2007 22:08:14 +1000 Subject: [POWERPC] 4xx: Add AMCC 405EX support to cputable.c Signed-off-by: Stefan Roese Signed-off-by: Josh Boyer --- arch/powerpc/kernel/cputable.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 8662cf0..d3fb7d0 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1087,6 +1087,17 @@ static struct cpu_spec __initdata cpu_specs[] = { .dcache_bsize = 32, .platform = "ppc405", }, + { /* 405EX */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x12910000, + .cpu_name = "405EX", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | + PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .icache_bsize = 32, + .dcache_bsize = 32, + .platform = "ppc405", + }, #endif /* CONFIG_40x */ #ifdef CONFIG_44x -- cgit v1.1 From 1189be6508d45183013ddb82b18f4934193de274 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 11 Oct 2007 20:37:10 +1000 Subject: [POWERPC] Use 1TB segments This makes the kernel use 1TB segments for all kernel mappings and for user addresses of 1TB and above, on machines which support them (currently POWER5+, POWER6 and PA6T). We detect that the machine supports 1TB segments by looking at the ibm,processor-segment-sizes property in the device tree. We don't currently use 1TB segments for user addresses < 1T, since that would effectively prevent 32-bit processes from using huge pages unless we also had a way to revert to using 256MB segments. That would be possible but would involve extra complications (such as keeping track of which segment size was used when HPTEs were inserted) and is not addressed here. Parts of this patch were originally written by Ben Herrenschmidt. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/entry_64.S | 14 +++++++++++++- arch/powerpc/kernel/head_64.S | 2 +- arch/powerpc/kernel/process.c | 9 +++++++-- 3 files changed, 21 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index fbbd3f6..0ec1340 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -373,8 +373,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) ld r8,KSP(r4) /* new stack pointer */ BEGIN_FTR_SECTION + b 2f +END_FTR_SECTION_IFCLR(CPU_FTR_SLB) +BEGIN_FTR_SECTION clrrdi r6,r8,28 /* get its ESID */ clrrdi r9,r1,28 /* get current sp ESID */ +END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT) +BEGIN_FTR_SECTION + clrrdi r6,r8,40 /* get its 1T ESID */ + clrrdi r9,r1,40 /* get current sp 1T ESID */ +END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) clrldi. r0,r6,2 /* is new ESID c00000000? */ cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ cror eq,4*cr1+eq,eq @@ -384,6 +392,11 @@ BEGIN_FTR_SECTION ld r7,KSP_VSID(r4) /* Get new stack's VSID */ oris r0,r6,(SLB_ESID_V)@h ori r0,r0,(SLB_NUM_BOLTED-1)@l +BEGIN_FTR_SECTION + li r9,MMU_SEGSIZE_1T /* insert B field */ + oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h + rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0 +END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) /* Update the last bolted SLB. 
No write barriers are needed * here, provided we only update the current CPU's SLB shadow @@ -401,7 +414,6 @@ BEGIN_FTR_SECTION isync 2: -END_FTR_SECTION_IFSET(CPU_FTR_SLB) clrrdi r7,r8,THREAD_SHIFT /* base of new stack */ /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE because we don't need to leave the 288-byte ABI gap at the diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 22ac245..97c5857 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -935,7 +935,7 @@ _GLOBAL(do_stab_bolted) /* Calculate VSID */ /* This is a kernel address, so protovsid = ESID */ - ASM_VSID_SCRAMBLE(r11, r9) + ASM_VSID_SCRAMBLE(r11, r9, 256M) rldic r9,r11,12,16 /* r9 = vsid << 12 */ /* Search the primary group for a free entry */ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 15998b5..7949c203 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -564,10 +564,15 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_SLB)) { - unsigned long sp_vsid = get_kernel_vsid(sp); + unsigned long sp_vsid; unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; - sp_vsid <<= SLB_VSID_SHIFT; + if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) + sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) + << SLB_VSID_SHIFT_1T; + else + sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M) + << SLB_VSID_SHIFT; sp_vsid |= SLB_VSID_KERNEL | llp; p->thread.ksp_vsid = sp_vsid; } -- cgit v1.1 From b63db45ca44a805ef21eb10a3750e88419156423 Mon Sep 17 00:00:00 2001 From: Valentine Barshak Date: Fri, 12 Oct 2007 05:09:25 +1000 Subject: [POWERPC] Add legacy serial support for OPB with flattened device tree Currently find_legacy_serial_ports() can find no serial ports on the OPB with flattened device tree. Thus no legacy boot console can be initialized. Just the early udbg console works, which is initialized with udbg_init_44x_as1 on the UART's physical address specified in kernel config. This happens because we look for ns16750 serial devices only and expect opb node to have a device type property. This patch makes it look for ns16550-compatible devices and use of_device_is_compatible() for opb in case device type is not specified. 
Signed-off-by: Valentine Barshak Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/legacy_serial.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 90fa11c..4ed5887 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -340,9 +340,10 @@ void __init find_legacy_serial_ports(void) } /* First fill our array with opb bus ports */ - for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16750")) != NULL;) { + for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) { struct device_node *opb = of_get_parent(np); - if (opb && !strcmp(opb->type, "opb")) { + if (opb && (!strcmp(opb->type, "opb") || + of_device_is_compatible(opb, "ibm,opb"))) { index = add_legacy_soc_port(np, np); if (index >= 0 && np == stdout) legacy_serial_console = index; -- cgit v1.1 From d0c3d534a4388a465101b634a95f2ec586415254 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Fri, 12 Oct 2007 10:20:07 +1000 Subject: [POWERPC] Implement logging of unhandled signals Implement show_unhandled_signals sysctl + support to print when a process is killed due to unhandled signals just as i386 and x86_64 does. Default to having it off, unlike x86 that defaults on. Signed-off-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/signal.c | 6 ++++++ arch/powerpc/kernel/signal_32.c | 38 ++++++++++++++++++++++++++++++++++++++ arch/powerpc/kernel/signal_64.c | 15 +++++++++++++++ arch/powerpc/kernel/traps.c | 12 +++++++++++- 4 files changed, 70 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index c434d6c..a65a44f 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -16,6 +16,12 @@ #include "signal.h" +/* Log an error when sending an unhandled signal to a process. Controlled + * through debug.exception-trace sysctl. 
+ */ + +int show_unhandled_signals = 0; + /* * Allocate space for the signal frame */ diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 590057e..6126bca 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -705,11 +705,13 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, { struct rt_sigframe __user *rt_sf; struct mcontext __user *frame; + void __user *addr; unsigned long newsp = 0; /* Set up Signal Frame */ /* Put a Real Time Context onto stack */ rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf)); + addr = rt_sf; if (unlikely(rt_sf == NULL)) goto badframe; @@ -728,6 +730,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, /* Save user registers on the stack */ frame = &rt_sf->uc.uc_mcontext; + addr = frame; if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { if (save_user_regs(regs, frame, 0)) goto badframe; @@ -742,6 +745,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, /* create a stack frame for the caller of the handler */ newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16); + addr = (void __user *)regs->gpr[1]; if (put_user(regs->gpr[1], (u32 __user *)newsp)) goto badframe; @@ -762,6 +766,12 @@ badframe: printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); #endif + if (show_unhandled_signals && printk_ratelimit()) + printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: " + "%p nip %08lx lr %08lx\n", + current->comm, current->pid, + addr, regs->nip, regs->link); + force_sigsegv(sig, current); return 0; } @@ -886,6 +896,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, return 0; bad: + if (show_unhandled_signals && printk_ratelimit()) + printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: " + "%p nip %08lx lr %08lx\n", + current->comm, current->pid, + rt_sf, regs->nip, regs->link); + force_sig(SIGSEGV, current); return 0; } @@ -967,6 +983,13 @@ int sys_debug_setcontext(struct ucontext __user *ctx, * We kill the task with a SIGSEGV in this situation. 
*/ if (do_setcontext(ctx, regs, 1)) { + if (show_unhandled_signals && printk_ratelimit()) + printk(KERN_INFO "%s[%d]: bad frame in " + "sys_debug_setcontext: %p nip %08lx " + "lr %08lx\n", + current->comm, current->pid, + ctx, regs->nip, regs->link); + force_sig(SIGSEGV, current); goto out; } @@ -1048,6 +1071,12 @@ badframe: printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); #endif + if (show_unhandled_signals && printk_ratelimit()) + printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: " + "%p nip %08lx lr %08lx\n", + current->comm, current->pid, + frame, regs->nip, regs->link); + force_sigsegv(sig, current); return 0; } @@ -1061,12 +1090,14 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, struct sigcontext __user *sc; struct sigcontext sigctx; struct mcontext __user *sr; + void __user *addr; sigset_t set; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); + addr = sc; if (copy_from_user(&sigctx, sc, sizeof(sigctx))) goto badframe; @@ -1083,6 +1114,7 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, restore_sigmask(&set); sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); + addr = sr; if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) || restore_user_regs(regs, sr, 1)) goto badframe; @@ -1091,6 +1123,12 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, return 0; badframe: + if (show_unhandled_signals && printk_ratelimit()) + printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: " + "%p nip %08lx lr %08lx\n", + current->comm, current->pid, + addr, regs->nip, regs->link); + force_sig(SIGSEGV, current); return 0; } diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index de895e6..faeb8f2 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -64,6 +64,11 @@ struct rt_sigframe { char abigap[288]; } __attribute__ ((aligned (16))); +static const char fmt32[] = KERN_INFO \ + "%s[%d]: bad frame in %s: %08lx nip %08lx lr %08lx\n"; +static const char fmt64[] = KERN_INFO \ + "%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n"; + /* * Set up the sigcontext for the signal frame. */ @@ -315,6 +320,11 @@ badframe: printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n", regs, uc, &uc->uc_mcontext); #endif + if (show_unhandled_signals && printk_ratelimit()) + printk(regs->msr & MSR_SF ? fmt64 : fmt32, + current->comm, current->pid, "rt_sigreturn", + (long)uc, regs->nip, regs->link); + force_sig(SIGSEGV, current); return 0; } @@ -398,6 +408,11 @@ badframe: printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); #endif + if (show_unhandled_signals && printk_ratelimit()) + printk(regs->msr & MSR_SF ? 
fmt64 : fmt32, + current->comm, current->pid, "setup_rt_frame", + (long)frame, regs->nip, regs->link); + force_sigsegv(signr, current); return 0; } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 2f1857c..bf9e39c 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -172,11 +172,21 @@ int die(const char *str, struct pt_regs *regs, long err) void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) { siginfo_t info; + const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ + "at %08lx nip %08lx lr %08lx code %x\n"; + const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ + "at %016lx nip %016lx lr %016lx code %x\n"; if (!user_mode(regs)) { if (die("Exception in kernel mode", regs, signr)) return; - } + } else if (show_unhandled_signals && + unhandled_signal(current, signr) && + printk_ratelimit()) { + printk(regs->msr & MSR_SF ? fmt64 : fmt32, + current->comm, current->pid, signr, + addr, regs->nip, regs->link, code); + } memset(&info, 0, sizeof(info)); info.si_signo = signr; -- cgit v1.1
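A short usage note on the new knob (not part of the patch text): reporting defaults to off (show_unhandled_signals is initialised to 0 above) and, per the comment added in signal.c, is controlled through the debug.exception-trace sysctl, so it can be switched on at runtime with something like sysctl -w debug.exception-trace=1 or, assuming the usual sysctl-to-procfs mapping, by writing 1 to /proc/sys/debug/exception-trace. Once enabled, unhandled signals and bad signal frames are reported with the KERN_INFO messages added above, rate-limited by printk_ratelimit().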