From 6d7c466292fc7c90bb160c1c7df74035c7c12595 Mon Sep 17 00:00:00 2001 From: Jimi Xenidis Date: Wed, 21 Jun 2006 19:15:55 -0400 Subject: [POWERPC] Don't access HID registers if running on a Hypervisor. The following patch avoids accessing Hypervisor privilege HID registers when running on a Hypervisor (MSR[HV]=0). Signed-off-by: Jimi Xenidis Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/cpu_setup_power4.S | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_power4.S index 2714183..1fc8632 100644 --- a/arch/powerpc/kernel/cpu_setup_power4.S +++ b/arch/powerpc/kernel/cpu_setup_power4.S @@ -125,7 +125,12 @@ _GLOBAL(__save_cpu_setup) cmpwi r0,0x44 bne 2f -1: /* Save HID0,1,4 and 5 */ +1: /* skip if not running in HV mode */ + mfmsr r0 + rldicl. r0,r0,4,63 + beq 2f + + /* Save HID0,1,4 and 5 */ mfspr r3,SPRN_HID0 std r3,CS_HID0(r5) mfspr r3,SPRN_HID1 @@ -159,7 +164,12 @@ _GLOBAL(__restore_cpu_setup) cmpwi r0,0x44 bnelr -1: /* Before accessing memory, we make sure rm_ci is clear */ +1: /* skip if not running in HV mode */ + mfmsr r0 + rldicl. r0,r0,4,63 + beqlr + + /* Before accessing memory, we make sure rm_ci is clear */ li r0,0 mfspr r3,SPRN_HID4 rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */ -- cgit v1.1 From 5f50867b4f1938ab80d249206efbec37bba48c39 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Thu, 22 Jun 2006 23:35:10 -0700 Subject: [POWERPC] kdump: Reserve the existing TCE mappings left by the first kernel During kdump boot, noticed some machines checkstop on dma protection fault for ongoing DMA left in the first kernel. Instead of initializing TCE entries in iommu_init() for the kdump boot, this patch fixes this issue by walking through the each TCE table and checks whether the entries are in use by the first kernel. If so, reserve those entries by setting the corresponding bit in tbl->it_map such that these entries will not be available for the kdump boot. However it could be possible that all TCE entries might be used up due to the driver bug that does continuous mapping. My observation is around 1700 TCE entries are used on some systems (Ex: P4) at some point of time during kdump boot and saving dump (either write into the disk or sending to remote machine). Hence, this patch will make sure that minimum of 2048 entries will be available such that kdump boot could be successful in some cases. Signed-off-by: Haren Myneni Acked-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/iommu.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 7cb77c2..3d677ac 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -38,6 +38,7 @@ #include #include #include +#include #define DBG(...) @@ -440,8 +441,37 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) tbl->it_largehint = tbl->it_halfpoint; spin_lock_init(&tbl->it_lock); +#ifdef CONFIG_CRASH_DUMP + if (ppc_md.tce_get) { + unsigned long index, tceval; + unsigned long tcecount = 0; + + /* + * Reserve the existing mappings left by the first kernel. 
+ */ + for (index = 0; index < tbl->it_size; index++) { + tceval = ppc_md.tce_get(tbl, index + tbl->it_offset); + /* + * Freed TCE entry contains 0x7fffffffffffffff on JS20 + */ + if (tceval && (tceval != 0x7fffffffffffffffUL)) { + __set_bit(index, tbl->it_map); + tcecount++; + } + } + if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) { + printk(KERN_WARNING "TCE table is full; "); + printk(KERN_WARNING "freeing %d entries for the kdump boot\n", + KDUMP_MIN_TCE_ENTRIES); + for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES; + index < tbl->it_size; index++) + __clear_bit(index, tbl->it_map); + } + } +#else /* Clear the hardware table in case firmware left allocations in it */ ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size); +#endif if (!welcomed) { printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n", -- cgit v1.1 From 1dce0e30471ac863fd141d137c98e3644817975e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 23 Jun 2006 18:15:37 +1000 Subject: [POWERPC] Remove remaining iSeries debugger cruft None of this seems to be necessary, so let's see if can remove it and not break anything. Booted on iSeries & pSeries here. NB. we don't remove the hvReleaseData, we just move it down so that the file reads more clearly. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_64.S | 37 +++++++++---------------------------- 1 file changed, 9 insertions(+), 28 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 831acbd..be5560a 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -85,34 +85,6 @@ END_FTR_SECTION(0, 1) /* Catch branch to 0 in real mode */ trap -#ifdef CONFIG_PPC_ISERIES - /* - * At offset 0x20, there is a pointer to iSeries LPAR data. - * This is required by the hypervisor - */ - . = 0x20 - .llong hvReleaseData-KERNELBASE - - /* - * At offset 0x28 and 0x30 are offsets to the mschunks_map - * array (used by the iSeries LPAR debugger to do translation - * between physical addresses and absolute addresses) and - * to the pidhash table (also used by the debugger) - */ - .llong mschunks_map-KERNELBASE - .llong 0 /* pidhash-KERNELBASE SFRXXX */ - - /* Offset 0x38 - Pointer to start of embedded System.map */ - .globl embedded_sysmap_start -embedded_sysmap_start: - .llong 0 - /* Offset 0x40 - Pointer to end of embedded System.map */ - .globl embedded_sysmap_end -embedded_sysmap_end: - .llong 0 - -#endif /* CONFIG_PPC_ISERIES */ - /* Secondary processors spin on this value until it goes to 1. */ .globl __secondary_hold_spinloop __secondary_hold_spinloop: @@ -124,6 +96,15 @@ __secondary_hold_spinloop: __secondary_hold_acknowledge: .llong 0x0 +#ifdef CONFIG_PPC_ISERIES + /* + * At offset 0x20, there is a pointer to iSeries LPAR data. + * This is required by the hypervisor + */ + . = 0x20 + .llong hvReleaseData-KERNELBASE +#endif /* CONFIG_PPC_ISERIES */ + . = 0x60 /* * The following code is used on pSeries to hold secondary processors -- cgit v1.1 From 7a4571ae553e2972b7958306fd796a2fd24fd7d1 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 23 Jun 2006 18:16:03 +1000 Subject: [POWERPC] Export flat device tree via debugfs for debugging If DEBUG is turned on in prom.c, export the flat device tree via debugfs. This has been handy on several occasions. 
To look at it: # mount -t debugfs none /sys/kernel/debug # od -a /sys/kernel/debug/powerpc/flat-device-tree and/or # dtc -fI dtb /sys/kernel/debug/powerpc/flat-device-tree -O dts Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 483455c..efed4bc 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -2148,3 +2149,27 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) } return NULL; } + +#ifdef DEBUG +static struct debugfs_blob_wrapper flat_dt_blob; + +static int __init export_flat_device_tree(void) +{ + struct dentry *d; + + d = debugfs_create_dir("powerpc", NULL); + if (!d) + return 1; + + flat_dt_blob.data = initial_boot_params; + flat_dt_blob.size = initial_boot_params->totalsize; + + d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR, + d, &flat_dt_blob); + if (!d) + return 1; + + return 0; +} +__initcall(export_flat_device_tree); +#endif -- cgit v1.1 From 7d0daae4ae1a3e80d78b83cddf414a3b98a962f4 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 23 Jun 2006 18:16:38 +1000 Subject: [POWERPC] powerpc: Initialise ppc_md htab pointers earlier Initialise the ppc_md htab callbacks earlier, in the probe routines. This allows us to call htab_finish_init() from htab_initialize(), and makes it private to hash_utils_64.c. Move htab_finish_init() and make_bl() above htab_initialize() to avoid forward declarations. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/setup_64.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 78f3a5f..6f213a9 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -358,11 +358,7 @@ void __init setup_system(void) * Fill the ppc64_caches & systemcfg structures with informations * retrieved from the device-tree. Need to be called before * finish_device_tree() since the later requires some of the - * informations filled up here to properly parse the interrupt - * tree. - * It also sets up the cache line sizes which allows to call - * routines like flush_icache_range (used by the hash init - * later on). + * informations filled up here to properly parse the interrupt tree. */ initialize_cache_info(); -- cgit v1.1 From aa98c50dcb5d5b85d2a4c26d54fa1e3c31c11e4b Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 23 Jun 2006 18:17:32 +1000 Subject: [POWERPC] Make kexec_setup() a regular initcall There's no reason kexec_setup() needs to be called explicitly from setup_system(), it can just be a regular initcall. 
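The ordering requirement noted at the old call site ("requires unflattened
device tree") is still met, since initcalls only run well after
setup_system() has called unflatten_device_tree().  As a rough sketch
(hypothetical function name, not part of this patch), a regular initcall
just needs:

	static int __init foo_setup(void)
	{
		/* runs once during boot, after the device tree is unflattened */
		return 0;
	}
	__initcall(foo_setup);
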
Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/machine_kexec_64.c | 4 +++- arch/powerpc/kernel/setup_64.c | 4 ---- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index a8fa04e..b438d45 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -378,11 +378,13 @@ static void __init export_crashk_values(void) of_node_put(node); } -void __init kexec_setup(void) +static int __init kexec_setup(void) { export_htab_values(); export_crashk_values(); + return 0; } +__initcall(kexec_setup); static int __init early_parse_crashk(char *p) { diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 6f213a9..cc26530 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -350,10 +350,6 @@ void __init setup_system(void) */ unflatten_device_tree(); -#ifdef CONFIG_KEXEC - kexec_setup(); /* requires unflattened device tree. */ -#endif - /* * Fill the ppc64_caches & systemcfg structures with informations * retrieved from the device-tree. Need to be called before -- cgit v1.1 From 4ba99b97dadd35b9ce1438b2bc7c992a4a14a8b1 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 23 Jun 2006 18:20:09 +1000 Subject: [POWERPC] Setup the boot cpu's paca pointer in C rather than asm There's no need to set the boot cpu paca in asm, so do it in C so us mere mortals can understand it. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_64.S | 11 ----------- arch/powerpc/kernel/setup_64.c | 9 ++++++++- 2 files changed, 8 insertions(+), 12 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index be5560a..da09242 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -1976,17 +1976,6 @@ _STATIC(start_here_common) /* Not reached */ BUG_OPCODE -/* Put the paca pointer into r13 and SPRG3 */ -_GLOBAL(setup_boot_paca) - LOAD_REG_IMMEDIATE(r3, boot_cpuid) - lwz r3,0(r3) - LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */ - mulli r3,r3,PACA_SIZE /* Calculate vaddr of right paca */ - add r13,r3,r4 /* for this processor. */ - mtspr SPRN_SPRG3,r13 - - blr - /* * We put a few things here that have to be page-aligned. * This stuff goes at the beginning of the bss, which is page-aligned. diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index cc26530..a2fb2e6 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -149,6 +149,13 @@ early_param("smt-enabled", early_smt_enabled); #define check_smt_enabled() #endif /* CONFIG_SMP */ +/* Put the paca pointer into r13 and SPRG3 */ +void __init setup_paca(int cpu) +{ + local_paca = &paca[cpu]; + mtspr(SPRN_SPRG3, local_paca); +} + /* * Early initialization entry point. This is called by head.S * with MMU translation disabled. We rely on the "feature" of @@ -183,7 +190,7 @@ void __init early_setup(unsigned long dt_ptr) early_init_devtree(__va(dt_ptr)); /* Now we know the logical id of our boot cpu, setup the paca. 
*/ - setup_boot_paca(); + setup_paca(boot_cpuid); /* Fix up paca fields required for the boot cpu */ get_paca()->cpu_start = 1; -- cgit v1.1 From 24da3dd53430ec07ce9110b1eaaad080c3c7ecf6 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 23 Jun 2006 18:20:10 +1000 Subject: [POWERPC] Make rtas_call() safe if RTAS hasn't been initialised Currently it's unsafe to call rtas_call() prior to rtas_initialize(). This is because the rtas.entry value hasn't been setup and so we don't know where to enter, but we just try anyway. We can't do anything intelligent without rtas.entry, so if it's not set, just return. Code that calls rtas_call() early needs to be aware that the call might fail. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/rtas.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 17dc791..0f14c2e 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -328,7 +328,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...) char *buff_copy = NULL; int ret; - if (token == RTAS_UNKNOWN_SERVICE) + if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) return -1; /* Gotta do something different here, use global lock for now... */ -- cgit v1.1 From ab3ab74d9b6b3920be70f502b40cb3f7f08d23fa Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 23 Jun 2006 18:20:11 +1000 Subject: [POWERPC] Move RTAS exports next to their declarations Move RTAS exports next to their declarations. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/rtas.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 0f14c2e..b6aed76 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -38,16 +38,19 @@ struct rtas_t rtas = { .lock = SPIN_LOCK_UNLOCKED }; +EXPORT_SYMBOL(rtas); struct rtas_suspend_me_data { long waiting; struct rtas_args *args; }; -EXPORT_SYMBOL(rtas); - DEFINE_SPINLOCK(rtas_data_buf_lock); +EXPORT_SYMBOL(rtas_data_buf_lock); + char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; +EXPORT_SYMBOL(rtas_data_buf); + unsigned long rtas_rmo_buf; /* @@ -236,6 +239,7 @@ int rtas_token(const char *service) tokp = (int *) get_property(rtas.dev, service, NULL); return tokp ? *tokp : RTAS_UNKNOWN_SERVICE; } +EXPORT_SYMBOL(rtas_token); #ifdef CONFIG_RTAS_ERROR_LOGGING /* @@ -369,6 +373,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...) } return ret; } +EXPORT_SYMBOL(rtas_call); /* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds. @@ -388,6 +393,7 @@ unsigned int rtas_busy_delay_time(int status) return ms; } +EXPORT_SYMBOL(rtas_busy_delay_time); /* For an RTAS busy status code, perform the hinted delay. 
*/ unsigned int rtas_busy_delay(int status) @@ -401,6 +407,7 @@ unsigned int rtas_busy_delay(int status) return ms; } +EXPORT_SYMBOL(rtas_busy_delay); int rtas_error_rc(int rtas_rc) { @@ -446,6 +453,7 @@ int rtas_get_power_level(int powerdomain, int *level) return rtas_error_rc(rc); return rc; } +EXPORT_SYMBOL(rtas_get_power_level); int rtas_set_power_level(int powerdomain, int level, int *setlevel) { @@ -463,6 +471,7 @@ int rtas_set_power_level(int powerdomain, int level, int *setlevel) return rtas_error_rc(rc); return rc; } +EXPORT_SYMBOL(rtas_set_power_level); int rtas_get_sensor(int sensor, int index, int *state) { @@ -480,6 +489,7 @@ int rtas_get_sensor(int sensor, int index, int *state) return rtas_error_rc(rc); return rc; } +EXPORT_SYMBOL(rtas_get_sensor); int rtas_set_indicator(int indicator, int index, int new_value) { @@ -497,6 +507,7 @@ int rtas_set_indicator(int indicator, int index, int new_value) return rtas_error_rc(rc); return rc; } +EXPORT_SYMBOL(rtas_set_indicator); void rtas_restart(char *cmd) { @@ -790,15 +801,3 @@ void __init rtas_initialize(void) rtas_last_error_token = rtas_token("rtas-last-error"); #endif } - - -EXPORT_SYMBOL(rtas_token); -EXPORT_SYMBOL(rtas_call); -EXPORT_SYMBOL(rtas_data_buf); -EXPORT_SYMBOL(rtas_data_buf_lock); -EXPORT_SYMBOL(rtas_busy_delay_time); -EXPORT_SYMBOL(rtas_busy_delay); -EXPORT_SYMBOL(rtas_get_sensor); -EXPORT_SYMBOL(rtas_get_power_level); -EXPORT_SYMBOL(rtas_set_power_level); -EXPORT_SYMBOL(rtas_set_indicator); -- cgit v1.1 From 458148c00b97864a27ecf528a1d45a8e5ebd9bbc Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 23 Jun 2006 18:20:13 +1000 Subject: [POWERPC] Setup RTAS values earlier, to enable rtas_call() earlier Althought RTAS is instantiated when we enter the kernel, we can't actually call into it until we know its entry point address. Currently we grab that in rtas_initialize(), however that's quite late in the boot sequence. To enable rtas_call() earlier, we can grab the RTAS entry etc. values while we're scanning the flattened device tree. There's existing code to retrieve the values from /chosen, however we don't store them there anymore, so remove that code. 
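For reference, the new hook follows the usual of_scan_flat_dt() callback
contract, roughly as below (generic sketch only, with a made-up node name;
the real hook in this patch matches the "rtas" node and returns 1 to stop
the walk once it has what it needs):

	static int __init early_init_dt_scan_foo(unsigned long node,
						 const char *uname,
						 int depth, void *data)
	{
		if (depth != 1 || strcmp(uname, "foo") != 0)
			return 0;	/* not our node, keep scanning */

		/* pull out what we need with of_get_flat_dt_prop() ... */

		return 1;	/* found it, stop the walk */
	}
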
Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 23 +++++------------------ arch/powerpc/kernel/rtas.c | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+), 18 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index efed4bc..ce02c05 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -1125,24 +1125,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node, tce_alloc_end = *lprop; #endif -#ifdef CONFIG_PPC_RTAS - /* To help early debugging via the front panel, we retrieve a minimal - * set of RTAS infos now if available - */ - { - u64 *basep, *entryp, *sizep; - - basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL); - entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL); - sizep = of_get_flat_dt_prop(node, "linux,rtas-size", NULL); - if (basep && entryp && sizep) { - rtas.base = *basep; - rtas.entry = *entryp; - rtas.size = *sizep; - } - } -#endif /* CONFIG_PPC_RTAS */ - #ifdef CONFIG_KEXEC lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); if (lprop) @@ -1327,6 +1309,11 @@ void __init early_init_devtree(void *params) /* Setup flat device-tree pointer */ initial_boot_params = params; +#ifdef CONFIG_PPC_RTAS + /* Some machines might need RTAS info for debugging, grab it now. */ + of_scan_flat_dt(early_init_dt_scan_rtas, NULL); +#endif + /* Retrieve various informations from the /chosen node of the * device-tree, including the platform type, initrd location and * size, TCE reserve, and more ... diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index b6aed76..061d8af 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -801,3 +801,25 @@ void __init rtas_initialize(void) rtas_last_error_token = rtas_token("rtas-last-error"); #endif } + +int __init early_init_dt_scan_rtas(unsigned long node, + const char *uname, int depth, void *data) +{ + u32 *basep, *entryp, *sizep; + + if (depth != 1 || strcmp(uname, "rtas") != 0) + return 0; + + basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL); + entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL); + sizep = of_get_flat_dt_prop(node, "rtas-size", NULL); + + if (basep && entryp && sizep) { + rtas.base = *basep; + rtas.entry = *entryp; + rtas.size = *sizep; + } + + /* break now */ + return 1; +} -- cgit v1.1 From cc46bb98c0d52695f26c239de701050660f5c79f Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 23 Jun 2006 18:20:16 +1000 Subject: [POWERPC] Add udbg support for RTAS console Add udbg hooks for the RTAS console, based on the RTAS put-term-char and get-term-char calls. Along with my previous patches, this should enable debugging as soon as early_init_dt_scan_rtas() is called. 
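As a comment in the patch points out, if output is needed even before
early_init_dt_scan_rtas() has run, the token values and rtas.base/entry
can be hard coded for a particular machine.  Purely as an illustration
(the numbers below are made up; the real ones must come from your own
firmware's device tree):

	static unsigned int rtas_putchar_token = 0x0a;	/* "put-term-char" */
	static unsigned int rtas_getchar_token = 0x0b;	/* "get-term-char" */

	/* ...and, very early in boot, something like: */
	rtas.base  = 0x2e000000;	/* /rtas "linux,rtas-base"  */
	rtas.entry = 0x2e000000;	/* /rtas "linux,rtas-entry" */
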
Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/rtas.c | 72 +++++++++++++++++++++++++++++++++++++++++++++- arch/powerpc/kernel/udbg.c | 7 +++-- 2 files changed, 76 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 061d8af..4a4cb55 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -109,11 +109,71 @@ static void call_rtas_display_status_delay(char c) } } -void __init udbg_init_rtas(void) +void __init udbg_init_rtas_panel(void) { udbg_putc = call_rtas_display_status_delay; } +#ifdef CONFIG_UDBG_RTAS_CONSOLE + +/* If you think you're dying before early_init_dt_scan_rtas() does its + * work, you can hard code the token values for your firmware here and + * hardcode rtas.base/entry etc. + */ +static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE; +static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE; + +static void udbg_rtascon_putc(char c) +{ + int tries; + + if (!rtas.base) + return; + + /* Add CRs before LFs */ + if (c == '\n') + udbg_rtascon_putc('\r'); + + /* if there is more than one character to be displayed, wait a bit */ + for (tries = 0; tries < 16; tries++) { + if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0) + break; + udelay(1000); + } +} + +static int udbg_rtascon_getc_poll(void) +{ + int c; + + if (!rtas.base) + return -1; + + if (rtas_call(rtas_getchar_token, 0, 2, &c)) + return -1; + + return c; +} + +static int udbg_rtascon_getc(void) +{ + int c; + + while ((c = udbg_rtascon_getc_poll()) == -1) + ; + + return c; +} + + +void __init udbg_init_rtas_console(void) +{ + udbg_putc = udbg_rtascon_putc; + udbg_getc = udbg_rtascon_getc; + udbg_getc_poll = udbg_rtascon_getc_poll; +} +#endif /* CONFIG_UDBG_RTAS_CONSOLE */ + void rtas_progress(char *s, unsigned short hex) { struct device_node *root; @@ -820,6 +880,16 @@ int __init early_init_dt_scan_rtas(unsigned long node, rtas.size = *sizep; } +#ifdef CONFIG_UDBG_RTAS_CONSOLE + basep = of_get_flat_dt_prop(node, "put-term-char", NULL); + if (basep) + rtas_putchar_token = *basep; + + basep = of_get_flat_dt_prop(node, "get-term-char", NULL); + if (basep) + rtas_getchar_token = *basep; +#endif + /* break now */ return 1; } diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index 67d9fd9..759afd5 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -34,9 +34,12 @@ void __init udbg_early_init(void) #elif defined(CONFIG_PPC_EARLY_DEBUG_G5) /* For use on Apple G5 machines */ udbg_init_pmac_realmode(); -#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS) +#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL) /* RTAS panel debug */ - udbg_init_rtas(); + udbg_init_rtas_panel(); +#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE) + /* RTAS console debug */ + udbg_init_rtas_console(); #elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE) /* Maple real mode debug */ udbg_init_maple_realmode(); -- cgit v1.1 From c0ce7d0886cf0c2579c604eac41a7e125bc0e96d Mon Sep 17 00:00:00 2001 From: David Wilder Date: Fri, 23 Jun 2006 15:29:34 -0700 Subject: [POWERPC] Add the use of the firmware soft-reset-nmi to kdump. With this patch, kdump uses the firmware soft-reset NMI for two purposes: 1) Initiate the kdump (take a crash dump) by issuing a soft-reset. 2) Break a CPU out of a deadlock condition that is detected during kdump processing. 
When a soft-reset is initiated each CPU will enter system_reset_exception() and set its corresponding bit in the global bit-array cpus_in_sr then call die(). When die() finds the CPU's bit set in cpu_in_sr crash_kexec() is called to initiate a crash dump. The first CPU to enter crash_kexec() is called the "crashing CPU". All other CPUs are "secondary CPUs". The secondary CPU's pass through to crash_kexec_secondary() and sleep. The crashing CPU waits for all CPUs to enter via soft-reset then boots the kdump kernel (see crash_soft_reset_check()) When the system crashes due to a panic or exception, crash_kexec() is called by panic() or die(). The crashing CPU sends an IPI to all other CPUs to notify them of the pending shutdown. If a CPU is in a deadlock or hung state with interrupts disabled, the IPI will not be delivered. The result being, that the kdump kernel is not booted. This problem is solved with the use of a firmware generated soft-reset. After the crashing_cpu has issued the IPI, it waits for 10 sec for all CPUs to enter crash_ipi_callback(). A CPU signifies its entry to crash_ipi_callback() by setting its corresponding bit in the cpus_in_crash bit array. After 10 sec, if one or more CPUs have not set their bit in cpus_in_crash we assume that the CPU(s) is deadlocked. The operator is then prompted to generate a soft-reset to break the deadlock. Each CPU enters the soft reset handler as described above. Two conditions must be handled at this point: 1) The system crashed because the operator generated a soft-reset. See 2) The system had crashed before the soft-reset was generated ( in the case of a Panic or oops). The first CPU to enter crash_kexec() uses the state of the kexec_lock to determine this state. If kexec_lock is already held then condition 2 is true and crash_kexec_secondary() is called, else; this CPU is flagged as the crashing CPU, the kexec_lock is acquired and crash_kexec() proceeds as described above. Each additional CPUs responding to the soft-reset will pass through crash_kexec() to kexec_secondary(). All secondary CPUs call crash_ipi_callback() readying them self's for the shutdown. When ready they clear their bit in cpus_in_sr. The crashing CPU waits in kexec_secondary() until all other CPUs have cleared their bits in cpus_in_sr. The kexec kernel boot is then started. Signed-off-by: Haren Myneni Signed-off-by: David Wilder Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/crash.c | 147 +++++++++++++++++++++++++++++++++++--------- arch/powerpc/kernel/traps.c | 27 ++++---- 2 files changed, 132 insertions(+), 42 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index e253a45..b537cfa 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -24,9 +24,11 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -41,6 +43,7 @@ /* This keeps a track of which one is crashing cpu. 
*/ int crashing_cpu = -1; +static cpumask_t cpus_in_crash = CPU_MASK_NONE; static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, size_t data_len) @@ -98,34 +101,66 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu) } #ifdef CONFIG_SMP -static atomic_t waiting_for_crash_ipi; +static atomic_t enter_on_soft_reset = ATOMIC_INIT(0); void crash_ipi_callback(struct pt_regs *regs) { int cpu = smp_processor_id(); - if (cpu == crashing_cpu) - return; - if (!cpu_online(cpu)) return; - if (ppc_md.kexec_cpu_down) - ppc_md.kexec_cpu_down(1, 1); - local_irq_disable(); + if (!cpu_isset(cpu, cpus_in_crash)) + crash_save_this_cpu(regs, cpu); + cpu_set(cpu, cpus_in_crash); - crash_save_this_cpu(regs, cpu); - atomic_dec(&waiting_for_crash_ipi); + /* + * Entered via soft-reset - could be the kdump + * process is invoked using soft-reset or user activated + * it if some CPU did not respond to an IPI. + * For soft-reset, the secondary CPU can enter this func + * twice. 1 - using IPI, and 2. soft-reset. + * Tell the kexec CPU that entered via soft-reset and ready + * to go down. + */ + if (cpu_isset(cpu, cpus_in_sr)) { + cpu_clear(cpu, cpus_in_sr); + atomic_inc(&enter_on_soft_reset); + } + + /* + * Starting the kdump boot. + * This barrier is needed to make sure that all CPUs are stopped. + * If not, soft-reset will be invoked to bring other CPUs. + */ + while (!cpu_isset(crashing_cpu, cpus_in_crash)) + cpu_relax(); + + if (ppc_md.kexec_cpu_down) + ppc_md.kexec_cpu_down(1, 1); kexec_smp_wait(); /* NOTREACHED */ } -static void crash_kexec_prepare_cpus(void) +/* + * Wait until all CPUs are entered via soft-reset. + */ +static void crash_soft_reset_check(int cpu) +{ + unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ + + cpu_clear(cpu, cpus_in_sr); + while (atomic_read(&enter_on_soft_reset) != ncpus) + cpu_relax(); +} + + +static void crash_kexec_prepare_cpus(int cpu) { unsigned int msecs; - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ crash_send_ipi(crash_ipi_callback); smp_wmb(); @@ -133,14 +168,13 @@ static void crash_kexec_prepare_cpus(void) /* * FIXME: Until we will have the way to stop other CPUSs reliabally, * the crash CPU will send an IPI and wait for other CPUs to - * respond. If not, proceed the kexec boot even though we failed to - * capture other CPU states. + * respond. * Delay of at least 10 seconds. */ - printk(KERN_ALERT "Sending IPI to other cpus...\n"); + printk(KERN_EMERG "Sending IPI to other cpus...\n"); msecs = 10000; - while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) { - barrier(); + while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { + cpu_relax(); mdelay(1); } @@ -149,18 +183,71 @@ static void crash_kexec_prepare_cpus(void) /* * FIXME: In case if we do not get all CPUs, one possibility: ask the * user to do soft reset such that we get all. - * IPI handler is already set by the panic cpu initially. Therefore, - * all cpus could invoke this handler from die() and the panic CPU - * will call machine_kexec() directly from this handler to do - * kexec boot. + * Soft-reset will be used until better mechanism is implemented. 
+ */ + if (cpus_weight(cpus_in_crash) < ncpus) { + printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n", + ncpus - cpus_weight(cpus_in_crash)); + printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n"); + cpus_in_sr = CPU_MASK_NONE; + atomic_set(&enter_on_soft_reset, 0); + while (cpus_weight(cpus_in_crash) < ncpus) + cpu_relax(); + } + /* + * Make sure all CPUs are entered via soft-reset if the kdump is + * invoked using soft-reset. */ - if (atomic_read(&waiting_for_crash_ipi)) - printk(KERN_ALERT "done waiting: %d cpus not responding\n", - atomic_read(&waiting_for_crash_ipi)); + if (cpu_isset(cpu, cpus_in_sr)) + crash_soft_reset_check(cpu); /* Leave the IPI callback set */ } + +/* + * This function will be called by secondary cpus or by kexec cpu + * if soft-reset is activated to stop some CPUs. + */ +void crash_kexec_secondary(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + unsigned long flags; + int msecs = 5; + + local_irq_save(flags); + /* Wait 5ms if the kexec CPU is not entered yet. */ + while (crashing_cpu < 0) { + if (--msecs < 0) { + /* + * Either kdump image is not loaded or + * kdump process is not started - Probably xmon + * exited using 'x'(exit and recover) or + * kexec_should_crash() failed for all running tasks. + */ + cpu_clear(cpu, cpus_in_sr); + local_irq_restore(flags); + return; + } + mdelay(1); + cpu_relax(); + } + if (cpu == crashing_cpu) { + /* + * Panic CPU will enter this func only via soft-reset. + * Wait until all secondary CPUs entered and + * then start kexec boot. + */ + crash_soft_reset_check(cpu); + cpu_set(crashing_cpu, cpus_in_crash); + if (ppc_md.kexec_cpu_down) + ppc_md.kexec_cpu_down(1, 0); + machine_kexec(kexec_crash_image); + /* NOTREACHED */ + } + crash_ipi_callback(regs); +} + #else -static void crash_kexec_prepare_cpus(void) +static void crash_kexec_prepare_cpus(int cpu) { /* * move the secondarys to us so that we can copy @@ -171,6 +258,10 @@ static void crash_kexec_prepare_cpus(void) smp_release_cpus(); } +void crash_kexec_secondary(struct pt_regs *regs) +{ + cpus_in_sr = CPU_MASK_NONE; +} #endif void default_machine_crash_shutdown(struct pt_regs *regs) @@ -199,14 +290,14 @@ void default_machine_crash_shutdown(struct pt_regs *regs) desc->handler->disable(irq); } - if (ppc_md.kexec_cpu_down) - ppc_md.kexec_cpu_down(1, 0); - /* * Make a note of crashing cpu. Will be used in machine_kexec * such that another IPI will not be sent. 
*/ crashing_cpu = smp_processor_id(); - crash_kexec_prepare_cpus(); crash_save_this_cpu(regs, crashing_cpu); + crash_kexec_prepare_cpus(crashing_cpu); + cpu_set(crashing_cpu, cpus_in_crash); + if (ppc_md.kexec_cpu_down) + ppc_md.kexec_cpu_down(1, 0); } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 52f5659..fa6bd97 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -52,9 +52,13 @@ #include #include #endif +#include #ifdef CONFIG_PPC64 /* XXX */ #define _IO_BASE pci_io_base +#ifdef CONFIG_KEXEC +cpumask_t cpus_in_sr = CPU_MASK_NONE; +#endif #endif #ifdef CONFIG_DEBUGGER @@ -97,7 +101,7 @@ static DEFINE_SPINLOCK(die_lock); int die(const char *str, struct pt_regs *regs, long err) { - static int die_counter, crash_dump_start = 0; + static int die_counter; if (debugger(regs)) return 1; @@ -137,21 +141,12 @@ int die(const char *str, struct pt_regs *regs, long err) print_modules(); show_regs(regs); bust_spinlocks(0); + spin_unlock_irq(&die_lock); - if (!crash_dump_start && kexec_should_crash(current)) { - crash_dump_start = 1; - spin_unlock_irq(&die_lock); + if (kexec_should_crash(current) || + kexec_sr_activated(smp_processor_id())) crash_kexec(regs); - /* NOTREACHED */ - } - spin_unlock_irq(&die_lock); - if (crash_dump_start) - /* - * Only for soft-reset: Other CPUs will be responded to an IPI - * sent by first kexec CPU. - */ - for(;;) - ; + crash_kexec_secondary(regs); if (in_interrupt()) panic("Fatal exception in interrupt"); @@ -215,6 +210,10 @@ void system_reset_exception(struct pt_regs *regs) return; } +#ifdef CONFIG_KEXEC + cpu_set(smp_processor_id(), cpus_in_sr); +#endif + die("System Reset", regs, SIGABRT); /* Must die if the interrupt is not recoverable */ -- cgit v1.1 From d0b79c54fc14f1f2db64180a21b84146028c598b Mon Sep 17 00:00:00 2001 From: Jimi Xenidis Date: Mon, 26 Jun 2006 04:56:58 -0400 Subject: [POWERPC] Skip the "copy down" of the kernel if it is already at zero. This patch allows the kernel to recognized that it was loaded at zero and the copy down of the image is unnecessary. This is useful for Simulators and kexec models. On a typical 3.8 MiB vmlinux.strip this saves about 2.3 million instructions. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_64.S | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index da09242..f2fab68 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -1664,6 +1664,9 @@ _STATIC(__after_prom_start) /* i.e. where we are running */ /* the source addr */ + cmpdi r4,0 /* In some cases the loader may */ + beq .start_here_multiplatform /* have already put us at zero */ + /* so we can skip the copy. */ LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */ sub r5,r5,r27 -- cgit v1.1 From 612f02d6d6eadd9c5dde5a5b97b55a3a620d28e5 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 28 Jun 2006 11:49:10 +1000 Subject: [POWERPC] Clean up it_lp_queue.h No more StudlyCaps. Remove from a couple of places it is no longer needed. Use C style comments. 
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/paca.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index f505a88..a0bb354 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include -- cgit v1.1 From a240da35a1087804cbdc3faba949445e64d1eecb Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 28 Jun 2006 11:53:56 +1000 Subject: [POWERPC] Remove unused function call_with_mmu_off Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/misc_64.S | 42 ------------------------------------------ 1 file changed, 42 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index e8883d4..6bf4a46 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -101,48 +101,6 @@ _GLOBAL(call___do_IRQ) blr #endif /* CONFIG_IRQSTACKS */ - /* - * To be called by C code which needs to do some operations with MMU - * disabled. Note that interrupts have to be disabled by the caller - * prior to calling us. The code called _MUST_ be in the RMO of course - * and part of the linear mapping as we don't attempt to translate the - * stack pointer at all. The function is called with the stack switched - * to this CPU emergency stack - * - * prototype is void *call_with_mmu_off(void *func, void *data); - * - * the called function is expected to be of the form - * - * void *called(void *data); - */ -_GLOBAL(call_with_mmu_off) - mflr r0 /* get link, save it on stackframe */ - std r0,16(r1) - mr r1,r5 /* save old stack ptr */ - ld r1,PACAEMERGSP(r13) /* get emerg. stack */ - subi r1,r1,STACK_FRAME_OVERHEAD - std r0,16(r1) /* save link on emerg. stack */ - std r5,0(r1) /* save old stack ptr in backchain */ - ld r3,0(r3) /* get to real function ptr (assume same TOC) */ - bl 2f /* we need LR to return, continue at label 2 */ - - ld r0,16(r1) /* we return here from the call, get LR and */ - ld r1,0(r1) /* .. old stack ptr */ - mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */ - mfmsr r4 - ori r4,r4,MSR_IR|MSR_DR - mtspr SPRN_SRR1,r4 - rfid - -2: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */ - mr r3,r4 /* get parameter */ - mfmsr r0 - ori r0,r0,MSR_IR|MSR_DR - xori r0,r0,MSR_IR|MSR_DR - mtspr SPRN_SRR1,r0 - rfid - - .section ".toc","aw" PPC64_CACHES: .tc ppc64_caches[TC],ppc64_caches -- cgit v1.1 From 127efeb286bb772019236182a1b4fc14ff2ae00c Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 28 Jun 2006 11:55:49 +1000 Subject: [POWERPC] Consolidate some of kernel/misc*.S There were some common functions (mainly i/o). Also some small white space cleanups and remove a couple of small unused functions. 
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 3 +- arch/powerpc/kernel/misc.S | 203 ++++++++++++++++++++++++++++++++++++++++++ arch/powerpc/kernel/misc_32.S | 156 -------------------------------- arch/powerpc/kernel/misc_64.S | 181 +------------------------------------ 4 files changed, 209 insertions(+), 334 deletions(-) create mode 100644 arch/powerpc/kernel/misc.S (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 803858e..814f242 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -50,7 +50,8 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o extra-$(CONFIG_8xx) := head_8xx.o extra-y += vmlinux.lds -obj-y += time.o prom.o traps.o setup-common.o udbg.o +obj-y += time.o prom.o traps.o setup-common.o \ + udbg.o misc.o obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S new file mode 100644 index 0000000..fc23040 --- /dev/null +++ b/arch/powerpc/kernel/misc.S @@ -0,0 +1,203 @@ +/* + * This file contains miscellaneous low-level functions. + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) + * and Paul Mackerras. + * + * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) + * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include + + .text + +#ifdef CONFIG_PPC64 +#define IN_SYNC twi 0,r5,0; isync +#define EIEIO_32 +#define SYNC_64 sync +#else /* CONFIG_PPC32 */ +#define IN_SYNC +#define EIEIO_32 eieio +#define SYNC_64 +#endif +/* + * Returns (address we are running at) - (address we were linked at) + * for use before the text and data are mapped to KERNELBASE. + */ + +_GLOBAL(reloc_offset) + mflr r0 + bl 1f +1: mflr r3 + LOAD_REG_IMMEDIATE(r4,1b) + subf r3,r4,r3 + mtlr r0 + blr + +/* + * add_reloc_offset(x) returns x + reloc_offset(). + */ +_GLOBAL(add_reloc_offset) + mflr r0 + bl 1f +1: mflr r5 + LOAD_REG_IMMEDIATE(r4,1b) + subf r5,r4,r5 + add r3,r3,r5 + mtlr r0 + blr + +/* + * I/O string operations + * + * insb(port, buf, len) + * outsb(port, buf, len) + * insw(port, buf, len) + * outsw(port, buf, len) + * insl(port, buf, len) + * outsl(port, buf, len) + * insw_ns(port, buf, len) + * outsw_ns(port, buf, len) + * insl_ns(port, buf, len) + * outsl_ns(port, buf, len) + * + * The *_ns versions don't do byte-swapping. 
+ */ +_GLOBAL(_insb) + cmpwi 0,r5,0 + mtctr r5 + subi r4,r4,1 + blelr- +00: lbz r5,0(r3) + eieio + stbu r5,1(r4) + bdnz 00b + IN_SYNC + blr + +_GLOBAL(_outsb) + cmpwi 0,r5,0 + mtctr r5 + subi r4,r4,1 + blelr- +00: lbzu r5,1(r4) + stb r5,0(r3) + EIEIO_32 + bdnz 00b + SYNC_64 + blr + +_GLOBAL(_insw) + cmpwi 0,r5,0 + mtctr r5 + subi r4,r4,2 + blelr- +00: lhbrx r5,0,r3 + eieio + sthu r5,2(r4) + bdnz 00b + IN_SYNC + blr + +_GLOBAL(_outsw) + cmpwi 0,r5,0 + mtctr r5 + subi r4,r4,2 + blelr- +00: lhzu r5,2(r4) + EIEIO_32 + sthbrx r5,0,r3 + bdnz 00b + SYNC_64 + blr + +_GLOBAL(_insl) + cmpwi 0,r5,0 + mtctr r5 + subi r4,r4,4 + blelr- +00: lwbrx r5,0,r3 + eieio + stwu r5,4(r4) + bdnz 00b + IN_SYNC + blr + +_GLOBAL(_outsl) + cmpwi 0,r5,0 + mtctr r5 + subi r4,r4,4 + blelr- +00: lwzu r5,4(r4) + stwbrx r5,0,r3 + EIEIO_32 + bdnz 00b + SYNC_64 + blr + +#ifdef CONFIG_PPC32 +_GLOBAL(__ide_mm_insw) +#endif +_GLOBAL(_insw_ns) + cmpwi 0,r5,0 + mtctr r5 + subi r4,r4,2 + blelr- +00: lhz r5,0(r3) + eieio + sthu r5,2(r4) + bdnz 00b + IN_SYNC + blr + +#ifdef CONFIG_PPC32 +_GLOBAL(__ide_mm_outsw) +#endif +_GLOBAL(_outsw_ns) + cmpwi 0,r5,0 + mtctr r5 + subi r4,r4,2 + blelr- +00: lhzu r5,2(r4) + sth r5,0(r3) + EIEIO_32 + bdnz 00b + SYNC_64 + blr + +#ifdef CONFIG_PPC32 +_GLOBAL(__ide_mm_insl) +#endif +_GLOBAL(_insl_ns) + cmpwi 0,r5,0 + mtctr r5 + subi r4,r4,4 + blelr- +00: lwz r5,0(r3) + eieio + stwu r5,4(r4) + bdnz 00b + IN_SYNC + blr + +#ifdef CONFIG_PPC32 +_GLOBAL(__ide_mm_outsl) +#endif +_GLOBAL(_outsl_ns) + cmpwi 0,r5,0 + mtctr r5 + subi r4,r4,4 + blelr- +00: lwzu r5,4(r4) + stw r5,0(r3) + EIEIO_32 + bdnz 00b + SYNC_64 + blr + diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 01d3916..c74774e 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -61,32 +61,6 @@ _GLOBAL(mulhdu) blr /* - * Returns (address we're running at) - (address we were linked at) - * for use before the text and data are mapped to KERNELBASE. - */ -_GLOBAL(reloc_offset) - mflr r0 - bl 1f -1: mflr r3 - LOAD_REG_IMMEDIATE(r4,1b) - subf r3,r4,r3 - mtlr r0 - blr - -/* - * add_reloc_offset(x) returns x + reloc_offset(). - */ -_GLOBAL(add_reloc_offset) - mflr r0 - bl 1f -1: mflr r5 - LOAD_REG_IMMEDIATE(r4,1b) - subf r5,r4,r5 - add r3,r3,r5 - mtlr r0 - blr - -/* * sub_reloc_offset(x) returns x - reloc_offset(). */ _GLOBAL(sub_reloc_offset) @@ -781,136 +755,6 @@ _GLOBAL(atomic_set_mask) blr /* - * I/O string operations - * - * insb(port, buf, len) - * outsb(port, buf, len) - * insw(port, buf, len) - * outsw(port, buf, len) - * insl(port, buf, len) - * outsl(port, buf, len) - * insw_ns(port, buf, len) - * outsw_ns(port, buf, len) - * insl_ns(port, buf, len) - * outsl_ns(port, buf, len) - * - * The *_ns versions don't do byte-swapping. 
- */ -_GLOBAL(_insb) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,1 - blelr- -00: lbz r5,0(r3) - eieio - stbu r5,1(r4) - bdnz 00b - blr - -_GLOBAL(_outsb) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,1 - blelr- -00: lbzu r5,1(r4) - stb r5,0(r3) - eieio - bdnz 00b - blr - -_GLOBAL(_insw) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhbrx r5,0,r3 - eieio - sthu r5,2(r4) - bdnz 00b - blr - -_GLOBAL(_outsw) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhzu r5,2(r4) - eieio - sthbrx r5,0,r3 - bdnz 00b - blr - -_GLOBAL(_insl) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwbrx r5,0,r3 - eieio - stwu r5,4(r4) - bdnz 00b - blr - -_GLOBAL(_outsl) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwzu r5,4(r4) - stwbrx r5,0,r3 - eieio - bdnz 00b - blr - -_GLOBAL(__ide_mm_insw) -_GLOBAL(_insw_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhz r5,0(r3) - eieio - sthu r5,2(r4) - bdnz 00b - blr - -_GLOBAL(__ide_mm_outsw) -_GLOBAL(_outsw_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhzu r5,2(r4) - sth r5,0(r3) - eieio - bdnz 00b - blr - -_GLOBAL(__ide_mm_insl) -_GLOBAL(_insl_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwz r5,0(r3) - eieio - stwu r5,4(r4) - bdnz 00b - blr - -_GLOBAL(__ide_mm_outsl) -_GLOBAL(_outsl_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwzu r5,4(r4) - stw r5,0(r3) - eieio - bdnz 00b - blr - -/* * Extended precision shifts. * * Updated to be valid for shift counts from 0 to 63 inclusive. diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 6bf4a46..580891c 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -1,14 +1,12 @@ /* - * arch/powerpc/kernel/misc64.S - * * This file contains miscellaneous low-level functions. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) * and Paul Mackerras. * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) - * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) - * + * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) + * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version @@ -30,41 +28,10 @@ .text -/* - * Returns (address we are running at) - (address we were linked at) - * for use before the text and data are mapped to KERNELBASE. - */ - -_GLOBAL(reloc_offset) - mflr r0 - bl 1f -1: mflr r3 - LOAD_REG_IMMEDIATE(r4,1b) - subf r3,r4,r3 - mtlr r0 - blr - -/* - * add_reloc_offset(x) returns x + reloc_offset(). - */ -_GLOBAL(add_reloc_offset) - mflr r0 - bl 1f -1: mflr r5 - LOAD_REG_IMMEDIATE(r4,1b) - subf r5,r4,r5 - add r3,r3,r5 - mtlr r0 - blr - _GLOBAL(get_msr) mfmsr r3 blr -_GLOBAL(get_dar) - mfdar r3 - blr - _GLOBAL(get_srr0) mfsrr0 r3 blr @@ -72,10 +39,6 @@ _GLOBAL(get_srr0) _GLOBAL(get_srr1) mfsrr1 r3 blr - -_GLOBAL(get_sp) - mr r3,r1 - blr #ifdef CONFIG_IRQSTACKS _GLOBAL(call_do_softirq) @@ -281,144 +244,6 @@ _GLOBAL(__flush_dcache_icache) bdnz 1b isync blr - -/* - * I/O string operations - * - * insb(port, buf, len) - * outsb(port, buf, len) - * insw(port, buf, len) - * outsw(port, buf, len) - * insl(port, buf, len) - * outsl(port, buf, len) - * insw_ns(port, buf, len) - * outsw_ns(port, buf, len) - * insl_ns(port, buf, len) - * outsl_ns(port, buf, len) - * - * The *_ns versions don't do byte-swapping. 
- */ -_GLOBAL(_insb) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,1 - blelr- -00: lbz r5,0(r3) - eieio - stbu r5,1(r4) - bdnz 00b - twi 0,r5,0 - isync - blr - -_GLOBAL(_outsb) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,1 - blelr- -00: lbzu r5,1(r4) - stb r5,0(r3) - bdnz 00b - sync - blr - -_GLOBAL(_insw) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhbrx r5,0,r3 - eieio - sthu r5,2(r4) - bdnz 00b - twi 0,r5,0 - isync - blr - -_GLOBAL(_outsw) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhzu r5,2(r4) - sthbrx r5,0,r3 - bdnz 00b - sync - blr - -_GLOBAL(_insl) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwbrx r5,0,r3 - eieio - stwu r5,4(r4) - bdnz 00b - twi 0,r5,0 - isync - blr - -_GLOBAL(_outsl) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwzu r5,4(r4) - stwbrx r5,0,r3 - bdnz 00b - sync - blr - -/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */ -_GLOBAL(_insw_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhz r5,0(r3) - eieio - sthu r5,2(r4) - bdnz 00b - twi 0,r5,0 - isync - blr - -/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */ -_GLOBAL(_outsw_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhzu r5,2(r4) - sth r5,0(r3) - bdnz 00b - sync - blr - -_GLOBAL(_insl_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwz r5,0(r3) - eieio - stwu r5,4(r4) - bdnz 00b - twi 0,r5,0 - isync - blr - -_GLOBAL(_outsl_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwzu r5,4(r4) - stw r5,0(r3) - bdnz 00b - sync - blr /* * identify_cpu and calls setup_cpu @@ -563,6 +388,7 @@ _GLOBAL(real_writeb) blr #endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */ +#ifdef CONFIG_CPU_FREQ_PMAC64 /* * SCOM access functions for 970 (FX only for now) * @@ -631,6 +457,7 @@ _GLOBAL(scom970_write) /* restore interrupts */ mtmsrd r5,1 blr +#endif /* CONFIG_CPU_FREQ_PMAC64 */ /* -- cgit v1.1 From 9ad494f62444ee37209e85173377c67612e66ef1 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 28 Jun 2006 00:37:45 -0500 Subject: powerpc: minor cleanups for mpc86xx * Remove duplicated cputable entry for 8641 (matches w/7448) * Removed __init from function prototypes in mpc86xx.h * Moved pci fixups into board specific code * Moved mpc86xx_exclude_device to generic mpc86xx pci code * Fixed sparse warnings in mpc86xx_smp.c * Removed board specific header include from asm-powerpc/mpc86xx.h Signed-off-by: Kumar Gala --- arch/powerpc/kernel/cputable.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 1c11488..abf7d42 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -722,18 +722,6 @@ struct cpu_spec cpu_specs[] = { .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, - { /* 8641 */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x80040010, - .cpu_name = "8641", - .cpu_features = CPU_FTRS_7447A, - .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 6, - .cpu_setup = __setup_cpu_745x - }, - { /* 82xx (8240, 8245, 8260 are all 603e cores) */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00810000, -- cgit v1.1 From c5d56332fd6c2f0c7cf9d1f65416076f2711ea28 Mon Sep 17 00:00:00 2001 From: Zang Roy-r61911 Date: Tue, 13 Jun 2006 15:07:15 +0800 Subject: [POWERPC] Add general support for mpc7448hpc2 (Taiga) platform Add support for Freescale mpc7448 (Taiga) board support Signed-off-by: Roy Zang Signed-off-by: Paul Mackerras --- 
arch/powerpc/kernel/legacy_serial.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 6e67b5b..3a9b78d 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -302,6 +302,17 @@ void __init find_legacy_serial_ports(void) of_node_put(isa); } + /* First fill our array with tsi-bridge ports */ + for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) { + struct device_node *tsi = of_get_parent(np); + if (tsi && !strcmp(tsi->type, "tsi-bridge")) { + index = add_legacy_soc_port(np, np); + if (index >= 0 && np == stdout) + legacy_serial_console = index; + } + of_node_put(tsi); + } + #ifdef CONFIG_PCI /* Next, try to locate PCI ports */ for (np = NULL; (np = of_find_all_nodes(np));) { -- cgit v1.1 From 16e9f994442b599987ce5dd4a5ebe50865e6573d Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 29 Jun 2006 15:07:42 +1000 Subject: [POWERPC] Make lparcfg.c work when both iseries and pseries are selected This also consolidates the initial bits of lparcfg_data() and adds the partition number to the iSeries flattened device tree. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/lparcfg.c | 147 ++++++++++++++++++++++-------------------- 1 file changed, 76 insertions(+), 71 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index c02deaa..73edc3c 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -45,11 +45,9 @@ static struct proc_dir_entry *proc_ppc64_lparcfg; #define LPARCFG_BUFF_SIZE 4096 -#ifdef CONFIG_PPC_ISERIES - /* - * For iSeries legacy systems, the PPA purr function is available from the - * emulated_time_base field in the paca. + * Track sum of all purrs across all processors. This is used to further + * calculate usage values by different applications */ static unsigned long get_purr(void) { @@ -57,48 +55,31 @@ static unsigned long get_purr(void) int cpu; for_each_possible_cpu(cpu) { - sum_purr += lppaca[cpu].emulated_time_base; + if (firmware_has_feature(FW_FEATURE_ISERIES)) + sum_purr += lppaca[cpu].emulated_time_base; + else { + struct cpu_usage *cu; -#ifdef PURR_DEBUG - printk(KERN_INFO "get_purr for cpu (%d) has value (%ld) \n", - cpu, lppaca[cpu].emulated_time_base); -#endif + cu = &per_cpu(cpu_usage_array, cpu); + sum_purr += cu->current_tb; + } } return sum_purr; } -#define lparcfg_write NULL +#ifdef CONFIG_PPC_ISERIES /* * Methods used to fetch LPAR data when running on an iSeries platform. 
*/ -static int lparcfg_data(struct seq_file *m, void *v) +static int iseries_lparcfg_data(struct seq_file *m, void *v) { - unsigned long pool_id, lp_index; + unsigned long pool_id; int shared, entitled_capacity, max_entitled_capacity; int processors, max_processors; unsigned long purr = get_purr(); - seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS); - shared = (int)(get_lppaca()->shared_proc); - seq_printf(m, "serial_number=%c%c%c%c%c%c%c\n", - e2a(xItExtVpdPanel.mfgID[2]), - e2a(xItExtVpdPanel.mfgID[3]), - e2a(xItExtVpdPanel.systemSerial[1]), - e2a(xItExtVpdPanel.systemSerial[2]), - e2a(xItExtVpdPanel.systemSerial[3]), - e2a(xItExtVpdPanel.systemSerial[4]), - e2a(xItExtVpdPanel.systemSerial[5])); - - seq_printf(m, "system_type=%c%c%c%c\n", - e2a(xItExtVpdPanel.machineType[0]), - e2a(xItExtVpdPanel.machineType[1]), - e2a(xItExtVpdPanel.machineType[2]), - e2a(xItExtVpdPanel.machineType[3])); - - lp_index = HvLpConfig_getLpIndex(); - seq_printf(m, "partition_id=%d\n", (int)lp_index); seq_printf(m, "system_active_processors=%d\n", (int)HvLpConfig_getSystemPhysicalProcessors()); @@ -137,6 +118,14 @@ static int lparcfg_data(struct seq_file *m, void *v) return 0; } + +#else /* CONFIG_PPC_ISERIES */ + +static int iseries_lparcfg_data(struct seq_file *m, void *v) +{ + return 0; +} + #endif /* CONFIG_PPC_ISERIES */ #ifdef CONFIG_PPC_PSERIES @@ -213,22 +202,6 @@ static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs) log_plpar_hcall_return(rc, "H_PIC"); } -/* Track sum of all purrs across all processors. This is used to further */ -/* calculate usage values by different applications */ - -static unsigned long get_purr(void) -{ - unsigned long sum_purr = 0; - int cpu; - struct cpu_usage *cu; - - for_each_possible_cpu(cpu) { - cu = &per_cpu(cpu_usage_array, cpu); - sum_purr += cu->current_tb; - } - return sum_purr; -} - #define SPLPAR_CHARACTERISTICS_TOKEN 20 #define SPLPAR_MAXLENGTH 1026*(sizeof(char)) @@ -333,35 +306,13 @@ static int lparcfg_count_active_processors(void) return count; } -static int lparcfg_data(struct seq_file *m, void *v) +static int pseries_lparcfg_data(struct seq_file *m, void *v) { int partition_potential_processors; int partition_active_processors; - struct device_node *rootdn; - const char *model = ""; - const char *system_id = ""; - unsigned int *lp_index_ptr, lp_index = 0; struct device_node *rtas_node; int *lrdrp = NULL; - rootdn = find_path_device("/"); - if (rootdn) { - model = get_property(rootdn, "model", NULL); - system_id = get_property(rootdn, "system-id", NULL); - lp_index_ptr = (unsigned int *) - get_property(rootdn, "ibm,partition-no", NULL); - if (lp_index_ptr) - lp_index = *lp_index_ptr; - } - - seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS); - - seq_printf(m, "serial_number=%s\n", system_id); - - seq_printf(m, "system_type=%s\n", model); - - seq_printf(m, "partition_id=%d\n", (int)lp_index); - rtas_node = find_path_device("/rtas"); if (rtas_node) lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", @@ -549,8 +500,61 @@ out: return retval; } +#else /* CONFIG_PPC_PSERIES */ + +static int pseries_lparcfg_data(struct seq_file *m, void *v) +{ + return 0; +} + +static ssize_t lparcfg_write(struct file *file, const char __user * buf, + size_t count, loff_t * off) +{ + return count; +} + #endif /* CONFIG_PPC_PSERIES */ +static int lparcfg_data(struct seq_file *m, void *v) +{ + struct device_node *rootdn; + const char *model = ""; + const char *system_id = ""; + const char *tmp; + unsigned int *lp_index_ptr, lp_index = 0; + + 
seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS); + + rootdn = find_path_device("/"); + if (rootdn) { + tmp = get_property(rootdn, "model", NULL); + if (tmp) { + model = tmp; + /* Skip "IBM," - see platforms/iseries/dt.c */ + if (firmware_has_feature(FW_FEATURE_ISERIES)) + model += 4; + } + tmp = get_property(rootdn, "system-id", NULL); + if (tmp) { + system_id = tmp; + /* Skip "IBM," - see platforms/iseries/dt.c */ + if (firmware_has_feature(FW_FEATURE_ISERIES)) + system_id += 4; + } + lp_index_ptr = (unsigned int *) + get_property(rootdn, "ibm,partition-no", NULL); + if (lp_index_ptr) + lp_index = *lp_index_ptr; + } + seq_printf(m, "serial_number=%s\n", system_id); + seq_printf(m, "system_type=%s\n", model); + seq_printf(m, "partition_id=%d\n", (int)lp_index); + + if (firmware_has_feature(FW_FEATURE_ISERIES)) + return iseries_lparcfg_data(m, v); + return pseries_lparcfg_data(m, v); +} + static int lparcfg_open(struct inode *inode, struct file *file) { return single_open(file, lparcfg_data, NULL); @@ -569,7 +573,8 @@ int __init lparcfg_init(void) mode_t mode = S_IRUSR | S_IRGRP | S_IROTH; /* Allow writing if we have FW_FEATURE_SPLPAR */ - if (firmware_has_feature(FW_FEATURE_SPLPAR)) { + if (firmware_has_feature(FW_FEATURE_SPLPAR) && + !firmware_has_feature(FW_FEATURE_ISERIES)) { lparcfg_fops.write = lparcfg_write; mode |= S_IWUSR; } -- cgit v1.1 From 33dbcf72f607f5da791402e161feaf1ccf5d5be4 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 28 Jun 2006 13:18:53 +1000 Subject: [POWERPC] Make sure smp_processor_id works very early in boot There's a small period early in boot where we don't know which cpu we're running on. That's ok, except that it means we have no paca, or more correctly that our paca pointer points somewhere random. So that we can safely call things like smp_processor_id(), we need a paca, so just assume we're on cpu 0. No code should _write_ to the paca before we've set the correct one up. We setup the proper paca after we've scanned the flat device tree in early_setup(), so there's no need to do it again in start_here_common. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_64.S | 11 ----------- arch/powerpc/kernel/setup_64.c | 3 +++ 2 files changed, 3 insertions(+), 11 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index f2fab68..8cfd040 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -1583,9 +1583,6 @@ _GLOBAL(__start_initialization_multiplatform) /* Setup some critical 970 SPRs before switching MMU off */ bl .__970_cpu_preinit - /* cpu # */ - li r24,0 - /* Switch off MMU if not already */ LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE) add r4,r4,r30 @@ -1946,14 +1943,6 @@ _STATIC(start_here_common) li r3,0 bl .do_cpu_ftr_fixups - LOAD_REG_IMMEDIATE(r26, boot_cpuid) - lwz r26,0(r26) - - LOAD_REG_IMMEDIATE(r24, paca) /* Get base vaddr of paca array */ - mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */ - add r13,r13,r24 /* for this processor. */ - mtspr SPRN_SPRG3,r13 - /* ptr to current */ LOAD_REG_IMMEDIATE(r4, init_task) std r4,PACACURRENT(r13) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index a2fb2e6..175539c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -177,6 +177,9 @@ void __init setup_paca(int cpu) void __init early_setup(unsigned long dt_ptr) { + /* Assume we're on cpu 0 for now. Don't write to the paca yet! 
*/ + setup_paca(0); + /* Enable early debugging if any specified (see udbg.h) */ udbg_early_init(); -- cgit v1.1 From 339d76c54336443f5050b00172beb675f35e3be0 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 29 Jun 2006 17:12:30 +1000 Subject: [POWERPC] Use little-endian bit from firmware ibm,pa-features property Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index ce02c05..320c913 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -953,6 +953,7 @@ static struct ibm_pa_feature { /* put this back once we know how to test if firmware does 64k IO */ {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, #endif + {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, }; static void __init check_cpu_pa_features(unsigned long node) -- cgit v1.1