From e92310a930462c6e1611f35453f57357c42bde14 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 30 Jul 2005 04:18:00 -0400 Subject: [ACPI] fix IA64 build warning Signed-off-by: Andrew Morton Signed-off-by: Len Brown --- include/asm-ia64/acpi-ext.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/acpi-ext.h b/include/asm-ia64/acpi-ext.h index 9271d74..56d2ddc 100644 --- a/include/asm-ia64/acpi-ext.h +++ b/include/asm-ia64/acpi-ext.h @@ -11,6 +11,7 @@ #define _ASM_IA64_ACPI_EXT_H #include +#include extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length); -- cgit v1.1 From a1cddb88920b915eaba10712ecfd0fc698b00a22 Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Wed, 31 Aug 2005 08:05:00 -0700 Subject: [IA64-SGI] Add new vendor-specific SAL calls for: - notifying the PROM of specific features that are supported by the OS. This is used to enable PROM feature if and only if the corresponding feature is implemented in the OS - fetch feature sets that are supported by the current PROM. This allows the OS to selectively enable features when the PROM support is available. Signed-off-by: Jack Steiner Signed-off-by: Tony Luck --- include/asm-ia64/sn/sn_feature_sets.h | 57 +++++++++++++++++++++++++++++++++++ include/asm-ia64/sn/sn_sal.h | 36 ++++++++++++++++------ 2 files changed, 84 insertions(+), 9 deletions(-) create mode 100644 include/asm-ia64/sn/sn_feature_sets.h (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/sn/sn_feature_sets.h b/include/asm-ia64/sn/sn_feature_sets.h new file mode 100644 index 0000000..e68a808 --- /dev/null +++ b/include/asm-ia64/sn/sn_feature_sets.h @@ -0,0 +1,57 @@ +#ifndef _ASM_IA64_SN_FEATURE_SETS_H +#define _ASM_IA64_SN_FEATURE_SETS_H + +/* + * SN PROM Features + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved. + */ + + +#include +#include + +/* --------------------- PROM Features -----------------------------*/ +extern int sn_prom_feature_available(int id); + +#define MAX_PROM_FEATURE_SETS 2 + +/* + * The following defines features that may or may not be supported by the + * current PROM. The OS uses sn_prom_feature_available(feature) to test for + * the presence of a PROM feature. Down rev (old) PROMs will always test + * "false" for new features. + * + * Use: + * if (sn_prom_feature_available(PRF_FEATURE_XXX)) + * ... + */ + +/* + * Example: feature XXX + */ +#define PRF_FEATURE_XXX 0 + + + +/* --------------------- OS Features -------------------------------*/ + +/* + * The following defines OS features that are optionally present in + * the operating system. + * During boot, PROM is notified of these features via a series of calls: + * + * ia64_sn_set_os_feature(feature1); + * + * Once enabled, a feature cannot be disabled. + * + * By default, features are disabled unless explicitly enabled. 
+ */ +#define OSF_MCA_SLV_TO_OS_INIT_SLV 0 +#define OSF_FEAT_LOG_SBES 1 + +#endif /* _ASM_IA64_SN_FEATURE_SETS_H */ diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h index 27976d2..231d0d0 100644 --- a/include/asm-ia64/sn/sn_sal.h +++ b/include/asm-ia64/sn/sn_sal.h @@ -80,6 +80,9 @@ #define SN_SAL_BTE_RECOVER 0x02000061 #define SN_SAL_IOIF_GET_PCI_TOPOLOGY 0x02000062 +#define SN_SAL_GET_PROM_FEATURE_SET 0x02000065 +#define SN_SAL_SET_OS_FEATURE_SET 0x02000066 + /* * Service-specific constants */ @@ -118,8 +121,8 @@ /* * Error Handling Features */ -#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV 0x1 -#define SAL_ERR_FEAT_LOG_SBES 0x2 +#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV 0x1 // obsolete +#define SAL_ERR_FEAT_LOG_SBES 0x2 // obsolete #define SAL_ERR_FEAT_MFR_OVERRIDE 0x4 #define SAL_ERR_FEAT_SBE_THRESHOLD 0xffff0000 @@ -152,12 +155,6 @@ sn_sal_rev(void) } /* - * Specify the minimum PROM revsion required for this kernel. - * Note that they're stored in hex format... - */ -#define SN_SAL_MIN_VERSION 0x0404 - -/* * Returns the master console nasid, if the call fails, return an illegal * value. */ @@ -336,7 +333,7 @@ ia64_sn_plat_cpei_handler(void) } /* - * Set Error Handling Features + * Set Error Handling Features (Obsolete) */ static inline u64 ia64_sn_plat_set_error_handling_features(void) @@ -1100,4 +1097,25 @@ ia64_sn_is_fake_prom(void) return (rv.status == 0); } +static inline int +ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set) +{ + struct ia64_sal_retval rv; + + SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0); + if (rv.status != 0) + return rv.status; + *feature_set = rv.v0; + return 0; +} + +static inline int +ia64_sn_set_os_feature(int feature) +{ + struct ia64_sal_retval rv; + + SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0); + return rv.status; +} + #endif /* _ASM_IA64_SN_SN_SAL_H */ -- cgit v1.1 From a52ac87eb249f5e87f43e1a0adeb1a737f4a2b43 Mon Sep 17 00:00:00 2001 From: Kenji Kaneshige Date: Wed, 7 Sep 2005 14:04:14 +0900 Subject: [IA64] Minor cleanups - remove unnecessary function prototype in irq.h The function prototype for handl_IRQ_event() in include/asm-ia64/irq.h is no longer needed. This patch removes it. Signed-off-by: Kenji Kaneshige Signed-off-by: Tony Luck --- include/asm-ia64/irq.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h index bd07d11..585644c 100644 --- a/include/asm-ia64/irq.h +++ b/include/asm-ia64/irq.h @@ -36,8 +36,4 @@ extern void move_irq(int irq); #define move_irq(irq) #endif -struct irqaction; -struct pt_regs; -int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); - #endif /* _ASM_IA64_IRQ_H */ -- cgit v1.1 From 697eaad417f9f2e40f62282e8b396208b72990cf Mon Sep 17 00:00:00 2001 From: Kenji Kaneshige Date: Wed, 7 Sep 2005 14:06:25 +0900 Subject: [IA64] Minor cleanups - remove CONFIG_ACPI_DEALLOCATE_IRQ The config option 'CONFIG_ACPI_DEALLOCATE_IRQ' is no longer needed. This patch removes it. 
Signed-off-by: Kenji Kaneshige Signed-off-by: Tony Luck --- include/asm-ia64/iosapic.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h index a429fe2..9a18820 100644 --- a/include/asm-ia64/iosapic.h +++ b/include/asm-ia64/iosapic.h @@ -83,9 +83,7 @@ extern int gsi_to_irq (unsigned int gsi); extern void iosapic_enable_intr (unsigned int vector); extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity, unsigned long trigger); -#ifdef CONFIG_ACPI_DEALLOCATE_IRQ extern void iosapic_unregister_intr (unsigned int irq); -#endif extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, unsigned long polarity, unsigned long trigger); -- cgit v1.1 From 9799e4d39a7e2763a614084f6ae6cc936047de70 Mon Sep 17 00:00:00 2001 From: Kenji Kaneshige Date: Wed, 7 Sep 2005 14:08:18 +0900 Subject: [IA64] Minor cleanups - remove unnecessary function prototype in iosapic.h The function prototypes for iosapic_enable_intr() and iosapic_pci_fixup() in include/asm-ia64/iosapic.h are no longer needed. This patch removes them. The original patch has been posted by Satoru Takeuchi. Signed-off-by: Satoru Takeuchi Signed-off-by: Kenji Kaneshige Signed-off-by: Tony Luck --- include/asm-ia64/iosapic.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h index 9a18820..20f98f1 100644 --- a/include/asm-ia64/iosapic.h +++ b/include/asm-ia64/iosapic.h @@ -80,7 +80,6 @@ extern int iosapic_remove (unsigned int gsi_base); #endif /* CONFIG_HOTPLUG */ extern int gsi_to_vector (unsigned int gsi); extern int gsi_to_irq (unsigned int gsi); -extern void iosapic_enable_intr (unsigned int vector); extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity, unsigned long trigger); extern void iosapic_unregister_intr (unsigned int irq); @@ -95,7 +94,6 @@ extern int __init iosapic_register_platform_intr (u32 int_type, unsigned long trigger); extern unsigned int iosapic_version (char __iomem *addr); -extern void iosapic_pci_fixup (int); #ifdef CONFIG_NUMA extern void __devinit map_iosapic_to_node (unsigned int, int); #endif -- cgit v1.1 From 085ae41f66657a9655ce832b0a61832a06f0e1dc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 8 Aug 2005 13:19:08 -0700 Subject: [PATCH] Make sparc64 use setup-res.c There were three changes necessary in order to allow sparc64 to use setup-res.c: 1) Sparc64 roots the PCI I/O and MEM address space using parent resources contained in the PCI controller structure. I'm actually surprised no other platforms do this, especially ones like Alpha and PPC{,64}. These resources get linked into the iomem/ioport tree when PCI controllers are probed. So the hierarchy looks like this: iomem --| PCI controller 1 MEM space --| device 1 device 2 etc. PCI controller 2 MEM space --| ... ioport --| PCI controller 1 IO space --| ... PCI controller 2 IO space --| ... You get the idea. The drivers/pci/setup-res.c code allocates using plain iomem_space and ioport_space as the root, so that wouldn't work with the above setup. So I added a pcibios_select_root() that is used to handle this. It uses the PCI controller struct's io_space and mem_space on sparc64, and io{port,mem}_resource on every other platform to keep current behavior. 2) quirk_io_region() is buggy. It takes in raw BUS view addresses and tries to use them as a PCI resource. 
pci_claim_resource() expects the resource to be fully formed when it gets called. The sparc64 implementation would do the translation but that's absolutely wrong, because if the same resource gets released then re-claimed we'll adjust things twice. So I fixed up quirk_io_region() to do the proper pcibios_bus_to_resource() conversion before passing it on to pci_claim_resource(). 3) I was mistakedly __init'ing the function methods the PCI controller drivers provide on sparc64 to implement some parts of these routines. This was, of course, easy to fix. So we end up with the following, and that nasty SPARC64 makefile ifdef in drivers/pci/Makefile is finally zapped. Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/asm-ia64/pci.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h index dba9f22..ef616fd 100644 --- a/include/asm-ia64/pci.h +++ b/include/asm-ia64/pci.h @@ -156,6 +156,19 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev, extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); +static inline struct resource * +pcibios_select_root(struct pci_dev *pdev, struct resource *res) +{ + struct resource *root = NULL; + + if (res->flags & IORESOURCE_IO) + root = &ioport_resource; + if (res->flags & IORESOURCE_MEM) + root = &iomem_resource; + + return root; +} + #define pcibios_scan_all_fns(a, b) 0 #endif /* _ASM_IA64_PCI_H */ -- cgit v1.1 From 0013a85454c281faaf064ccb576e373a2881aac8 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Fri, 9 Sep 2005 20:57:26 +0200 Subject: kbuild: m68k,parisc,ppc,ppc64,s390,xtensa use generic asm-offsets.h support Delete obsoleted parts form arch makefiles and rename to asm-offsets.h Signed-off-by: Sam Ravnborg --- include/asm-ia64/ptrace.h | 2 +- include/asm-ia64/thread_info.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h index 0bef195..c8766de 100644 --- a/include/asm-ia64/ptrace.h +++ b/include/asm-ia64/ptrace.h @@ -57,7 +57,7 @@ #include #include -#include +#include /* * Base-2 logarithm of number of pages to allocate per task structure diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index 7dc8951..b2c79f0 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h @@ -5,7 +5,7 @@ #ifndef _ASM_IA64_THREAD_INFO_H #define _ASM_IA64_THREAD_INFO_H -#include +#include #include #include -- cgit v1.1 From 383f2835eb9afb723af71850037b2f074ac9db60 Mon Sep 17 00:00:00 2001 From: "Chen, Kenneth W" Date: Fri, 9 Sep 2005 13:02:02 -0700 Subject: [PATCH] Prefetch kernel stacks to speed up context switch For architecture like ia64, the switch stack structure is fairly large (currently 528 bytes). For context switch intensive application, we found that significant amount of cache misses occurs in switch_to() function. The following patch adds a hook in the schedule() function to prefetch switch stack structure as soon as 'next' task is determined. This allows maximum overlap in prefetch cache lines for that structure. 
Signed-off-by: Ken Chen Cc: Ingo Molnar Cc: "Luck, Tony" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-ia64/system.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 33256db..635235f 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -275,6 +275,7 @@ extern void ia64_load_extra (struct task_struct *task); */ #define __ARCH_WANT_UNLOCKED_CTXSW +#define ARCH_HAS_PREFETCH_SWITCH_STACK #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) void cpu_idle_wait(void); -- cgit v1.1 From fb1c8f93d869b34cacb8b8932e2b83d96a19d720 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 10 Sep 2005 00:25:56 -0700 Subject: [PATCH] spinlock consolidation This patch (written by me and also containing many suggestions of Arjan van de Ven) does a major cleanup of the spinlock code. It does the following things: - consolidates and enhances the spinlock/rwlock debugging code - simplifies the asm/spinlock.h files - encapsulates the raw spinlock type and moves generic spinlock features (such as ->break_lock) into the generic code. - cleans up the spinlock code hierarchy to get rid of the spaghetti. Most notably there's now only a single variant of the debugging code, located in lib/spinlock_debug.c. (previously we had one SMP debugging variant per architecture, plus a separate generic one for UP builds) Also, i've enhanced the rwlock debugging facility, it will now track write-owners. There is new spinlock-owner/CPU-tracking on SMP builds too. All locks have lockup detection now, which will work for both soft and hard spin/rwlock lockups. The arch-level include files now only contain the minimally necessary subset of the spinlock code - all the rest that can be generalized now lives in the generic headers: include/asm-i386/spinlock_types.h | 16 include/asm-x86_64/spinlock_types.h | 16 I have also split up the various spinlock variants into separate files, making it easier to see which does what. The new layout is: SMP | UP ----------------------------|----------------------------------- asm/spinlock_types_smp.h | linux/spinlock_types_up.h linux/spinlock_types.h | linux/spinlock_types.h asm/spinlock_smp.h | linux/spinlock_up.h linux/spinlock_api_smp.h | linux/spinlock_api_up.h linux/spinlock.h | linux/spinlock.h /* * here's the role of the various spinlock/rwlock related include files: * * on SMP builds: * * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the * initializers * * linux/spinlock_types.h: * defines the generic type and initializers * * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel * implementations, mostly inline assembly code * * (also included on UP-debug builds:) * * linux/spinlock_api_smp.h: * contains the prototypes for the _spin_*() APIs. * * linux/spinlock.h: builds the final spin_*() APIs. * * on UP builds: * * linux/spinlock_type_up.h: * contains the generic, simplified UP spinlock type. * (which is an empty structure on non-debug builds) * * linux/spinlock_types.h: * defines the generic type and initializers * * linux/spinlock_up.h: * contains the __raw_spin_*()/etc. version of UP * builds. (which are NOPs on non-debug, non-preempt * builds) * * (included on UP-non-debug builds:) * * linux/spinlock_api_up.h: * builds the _spin_*() APIs. * * linux/spinlock.h: builds the final spin_*() APIs. */ All SMP and UP architectures are converted by this patch. 
arm, i386, ia64, ppc, ppc64, s390/s390x, x64 was build-tested via crosscompilers. m32r, mips, sh, sparc, have not been tested yet, but should be mostly fine. From: Grant Grundler Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU). Builds 32-bit SMP kernel (not booted or tested). I did not try to build non-SMP kernels. That should be trivial to fix up later if necessary. I converted bit ops atomic_hash lock to raw_spinlock_t. Doing so avoids some ugly nesting of linux/*.h and asm/*.h files. Those particular locks are well tested and contained entirely inside arch specific code. I do NOT expect any new issues to arise with them. If someone does ever need to use debug/metrics with them, then they will need to unravel this hairball between spinlocks, atomic ops, and bit ops that exist only because parisc has exactly one atomic instruction: LDCW (load and clear word). From: "Luck, Tony" ia64 fix Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Grant Grundler Cc: Matthew Wilcox Signed-off-by: Hirokazu Takata Signed-off-by: Mikael Pettersson Signed-off-by: Benoit Boissinot Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-ia64/spinlock.h | 69 +++++++++++++++------------------------ include/asm-ia64/spinlock_types.h | 21 ++++++++++++ 2 files changed, 47 insertions(+), 43 deletions(-) create mode 100644 include/asm-ia64/spinlock_types.h (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h index d2430aa..5b78611 100644 --- a/include/asm-ia64/spinlock.h +++ b/include/asm-ia64/spinlock.h @@ -17,28 +17,20 @@ #include #include -typedef struct { - volatile unsigned int lock; -#ifdef CONFIG_PREEMPT - unsigned int break_lock; -#endif -} spinlock_t; - -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } -#define spin_lock_init(x) ((x)->lock = 0) +#define __raw_spin_lock_init(x) ((x)->lock = 0) #ifdef ASM_SUPPORTED /* * Try to get the lock. If we fail to get the lock, make a non-standard call to * ia64_spinlock_contention(). We do not use a normal call because that would force all - * callers of spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is - * carefully coded to touch only those registers that spin_lock() marks "clobbered". + * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is + * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered". 
*/ #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" static inline void -_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) +__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags) { register volatile unsigned int *ptr asm ("r31") = &lock->lock; @@ -94,17 +86,17 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) #endif } -#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0) +#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) /* Unlock by doing an ordered store and releasing the cacheline with nta */ -static inline void _raw_spin_unlock(spinlock_t *x) { +static inline void __raw_spin_unlock(raw_spinlock_t *x) { barrier(); asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); } #else /* !ASM_SUPPORTED */ -#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) -# define _raw_spin_lock(x) \ +#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +# define __raw_spin_lock(x) \ do { \ __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ __u64 ia64_spinlock_val; \ @@ -117,29 +109,20 @@ do { \ } while (ia64_spinlock_val); \ } \ } while (0) -#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0) +#define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0) #endif /* !ASM_SUPPORTED */ -#define spin_is_locked(x) ((x)->lock != 0) -#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) -#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) - -typedef struct { - volatile unsigned int read_counter : 24; - volatile unsigned int write_lock : 8; -#ifdef CONFIG_PREEMPT - unsigned int break_lock; -#endif -} rwlock_t; -#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } +#define __raw_spin_is_locked(x) ((x)->lock != 0) +#define __raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) +#define __raw_spin_unlock_wait(lock) \ + do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) -#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) -#define read_can_lock(rw) (*(volatile int *)(rw) >= 0) -#define write_can_lock(rw) (*(volatile int *)(rw) == 0) +#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) +#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) -#define _raw_read_lock(rw) \ +#define __raw_read_lock(rw) \ do { \ - rwlock_t *__read_lock_ptr = (rw); \ + raw_rwlock_t *__read_lock_ptr = (rw); \ \ while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ @@ -148,14 +131,14 @@ do { \ } \ } while (0) -#define _raw_read_unlock(rw) \ +#define __raw_read_unlock(rw) \ do { \ - rwlock_t *__read_lock_ptr = (rw); \ + raw_rwlock_t *__read_lock_ptr = (rw); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ } while (0) #ifdef ASM_SUPPORTED -#define _raw_write_lock(rw) \ +#define __raw_write_lock(rw) \ do { \ __asm__ __volatile__ ( \ "mov ar.ccv = r0\n" \ @@ -170,7 +153,7 @@ do { \ :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ } while(0) -#define _raw_write_trylock(rw) \ +#define __raw_write_trylock(rw) \ ({ \ register long result; \ \ @@ -182,7 +165,7 @@ do { \ (result == 0); \ }) -static inline void _raw_write_unlock(rwlock_t *x) +static inline void __raw_write_unlock(raw_rwlock_t *x) { u8 *y = (u8 *)x; barrier(); @@ -191,7 +174,7 @@ static inline void _raw_write_unlock(rwlock_t *x) #else /* !ASM_SUPPORTED */ -#define _raw_write_lock(l) \ +#define __raw_write_lock(l) \ ({ \ __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 
0, 31, 1); \ __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ @@ -202,7 +185,7 @@ static inline void _raw_write_unlock(rwlock_t *x) } while (ia64_val); \ }) -#define _raw_write_trylock(rw) \ +#define __raw_write_trylock(rw) \ ({ \ __u64 ia64_val; \ __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ @@ -210,7 +193,7 @@ static inline void _raw_write_unlock(rwlock_t *x) (ia64_val == 0); \ }) -static inline void _raw_write_unlock(rwlock_t *x) +static inline void __raw_write_unlock(raw_rwlock_t *x) { barrier(); x->write_lock = 0; @@ -218,6 +201,6 @@ static inline void _raw_write_unlock(rwlock_t *x) #endif /* !ASM_SUPPORTED */ -#define _raw_read_trylock(lock) generic_raw_read_trylock(lock) +#define __raw_read_trylock(lock) generic__raw_read_trylock(lock) #endif /* _ASM_IA64_SPINLOCK_H */ diff --git a/include/asm-ia64/spinlock_types.h b/include/asm-ia64/spinlock_types.h new file mode 100644 index 0000000..474e46f --- /dev/null +++ b/include/asm-ia64/spinlock_types.h @@ -0,0 +1,21 @@ +#ifndef _ASM_IA64_SPINLOCK_TYPES_H +#define _ASM_IA64_SPINLOCK_TYPES_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +typedef struct { + volatile unsigned int lock; +} raw_spinlock_t; + +#define __RAW_SPIN_LOCK_UNLOCKED { 0 } + +typedef struct { + volatile unsigned int read_counter : 31; + volatile unsigned int write_lock : 1; +} raw_rwlock_t; + +#define __RAW_RW_LOCK_UNLOCKED { 0, 0 } + +#endif -- cgit v1.1 From e619ae0b96b6ace6629a6a0c6a5db23861ddaa78 Mon Sep 17 00:00:00 2001 From: Keith Owens Date: Sun, 11 Sep 2005 17:20:14 +1000 Subject: [IA64] MCA/INIT: add an extra thread_info flag Add an extra thread_info flag to indicate the special MCA/INIT stacks. Mainly for debuggers. Signed-off-by: Keith Owens Signed-off-by: Tony Luck --- include/asm-ia64/ptrace.h | 2 +- include/asm-ia64/thread_info.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h index c8766de..fc54492 100644 --- a/include/asm-ia64/ptrace.h +++ b/include/asm-ia64/ptrace.h @@ -119,7 +119,7 @@ struct pt_regs { unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function state */ unsigned long ar_rsc; /* RSE configuration */ - /* The following two are valid only if cr_ipsr.cpl > 0: */ + /* The following two are valid only if cr_ipsr.cpl > 0 || ti->flags & _TIF_MCA_INIT */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index b2c79f0..cf4a950 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h @@ -76,6 +76,7 @@ struct thread_info { #define TIF_SIGDELAYED 5 /* signal delayed from MCA/INIT/NMI/PMI context */ #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 17 +#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) @@ -85,6 +86,7 @@ struct thread_info { #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_SIGDELAYED (1 << TIF_SIGDELAYED) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_MCA_INIT (1 << TIF_MCA_INIT) /* "work to do on user-return" bits */ #define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED) -- cgit v1.1 From 
7f613c7d2203ae137d98fc1c38abc30fd7048637 Mon Sep 17 00:00:00 2001 From: Keith Owens Date: Sun, 11 Sep 2005 17:22:53 +1000 Subject: [PATCH] MCA/INIT: use per cpu stacks The bulk of the change. Use per cpu MCA/INIT stacks. Change the SAL to OS state (sos) to be per process. Do all the assembler work on the MCA/INIT stacks, leaving the original stack alone. Pass per cpu state data to the C handlers for MCA and INIT, which also means changing the mca_drv interfaces slightly. Lots of verification on whether the original stack is usable before converting it to a sleeping process. Signed-off-by: Keith Owens Signed-off-by: Tony Luck --- include/asm-ia64/mca.h | 102 ++++++++++++++++++++++-------------- include/asm-ia64/mca_asm.h | 125 ++++++++++----------------------------------- 2 files changed, 91 insertions(+), 136 deletions(-) (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h index 149ad01..97a28b8 100644 --- a/include/asm-ia64/mca.h +++ b/include/asm-ia64/mca.h @@ -11,8 +11,6 @@ #ifndef _ASM_IA64_MCA_H #define _ASM_IA64_MCA_H -#define IA64_MCA_STACK_SIZE 8192 - #if !defined(__ASSEMBLY__) #include @@ -48,7 +46,8 @@ typedef union cmcv_reg_u { enum { IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0, - IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1 + IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1, + IA64_MCA_RENDEZ_CHECKIN_INIT = 0x2, }; /* Information maintained by the MC infrastructure */ @@ -63,18 +62,42 @@ typedef struct ia64_mc_info_s { } ia64_mc_info_t; -typedef struct ia64_mca_sal_to_os_state_s { - u64 imsto_os_gp; /* GP of the os registered with the SAL */ - u64 imsto_pal_proc; /* PAL_PROC entry point - physical addr */ - u64 imsto_sal_proc; /* SAL_PROC entry point - physical addr */ - u64 imsto_sal_gp; /* GP of the SAL - physical */ - u64 imsto_rendez_state; /* Rendez state information */ - u64 imsto_sal_check_ra; /* Return address in SAL_CHECK while going - * back to SAL from OS after MCA handling. - */ - u64 pal_min_state; /* from PAL in r17 */ - u64 proc_state_param; /* from PAL in r18. See SDV 2:268 11.3.2.1 */ -} ia64_mca_sal_to_os_state_t; +/* Handover state from SAL to OS and vice versa, for both MCA and INIT events. + * Besides the handover state, it also contains some saved registers from the + * time of the event. + * Note: mca_asm.S depends on the precise layout of this structure. + */ + +struct ia64_sal_os_state { + /* SAL to OS, must be at offset 0 */ + u64 os_gp; /* GP of the os registered with the SAL, physical */ + u64 pal_proc; /* PAL_PROC entry point, physical */ + u64 sal_proc; /* SAL_PROC entry point, physical */ + u64 rv_rc; /* MCA - Rendezvous state, INIT - reason code */ + u64 proc_state_param; /* from R18 */ + u64 monarch; /* 1 for a monarch event, 0 for a slave */ + /* common, must follow SAL to OS */ + u64 sal_ra; /* Return address in SAL, physical */ + u64 sal_gp; /* GP of the SAL - physical */ + pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */ + u64 prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */ + struct task_struct *prev_task; /* previous task, NULL if it is not useful */ + /* Some interrupt registers are not saved in minstate, pt_regs or + * switch_stack. Because MCA/INIT can occur when interrupts are + * disabled, we need to save the additional interrupt registers over + * MCA/INIT and resume. 
+ */ + u64 isr; + u64 ifa; + u64 itir; + u64 iipa; + u64 iim; + u64 iha; + /* OS to SAL, must follow common */ + u64 os_status; /* OS status to SAL, enum below */ + u64 context; /* 0 if return to same context + 1 if return to new context */ +}; enum { IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */ @@ -84,35 +107,21 @@ enum { }; enum { + IA64_INIT_RESUME = 0x0, /* Resume after return from INIT */ + IA64_INIT_WARM_BOOT = -1, /* Warm boot of the system need from SAL */ +}; + +enum { IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */ IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */ }; -typedef struct ia64_mca_os_to_sal_state_s { - u64 imots_os_status; /* OS status to SAL as to what happened - * with the MCA handling. - */ - u64 imots_sal_gp; /* GP of the SAL - physical */ - u64 imots_context; /* 0 if return to same context - 1 if return to new context */ - u64 *imots_new_min_state; /* Pointer to structure containing - * new values of registers in the min state - * save area. - */ - u64 imots_sal_check_ra; /* Return address in SAL_CHECK while going - * back to SAL from OS after MCA handling. - */ -} ia64_mca_os_to_sal_state_t; - /* Per-CPU MCA state that is too big for normal per-CPU variables. */ struct ia64_mca_cpu { - u64 stack[IA64_MCA_STACK_SIZE/8]; /* MCA memory-stack */ - u64 proc_state_dump[512]; - u64 stackframe[32]; - u64 rbstore[IA64_MCA_STACK_SIZE/8]; /* MCA reg.-backing store */ + u64 mca_stack[KERNEL_STACK_SIZE/8]; u64 init_stack[KERNEL_STACK_SIZE/8]; -} __attribute__ ((aligned(16))); +}; /* Array of physical addresses of each CPU's MCA area. */ extern unsigned long __per_cpu_mca[NR_CPUS]; @@ -121,12 +130,29 @@ extern void ia64_mca_init(void); extern void ia64_mca_cpu_init(void *); extern void ia64_os_mca_dispatch(void); extern void ia64_os_mca_dispatch_end(void); -extern void ia64_mca_ucmc_handler(void); +extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *); +extern void ia64_init_handler(struct pt_regs *, + struct switch_stack *, + struct ia64_sal_os_state *); extern void ia64_monarch_init_handler(void); extern void ia64_slave_init_handler(void); extern void ia64_mca_cmc_vector_setup(void); -extern int ia64_reg_MCA_extension(void*); +extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)); extern void ia64_unreg_MCA_extension(void); +extern u64 ia64_get_rnat(u64 *); + +#else /* __ASSEMBLY__ */ + +#define IA64_MCA_CORRECTED 0x0 /* Error has been corrected by OS_MCA */ +#define IA64_MCA_WARM_BOOT -1 /* Warm boot of the system need from SAL */ +#define IA64_MCA_COLD_BOOT -2 /* Cold boot of the system need from SAL */ +#define IA64_MCA_HALT -3 /* System to be halted by SAL */ + +#define IA64_INIT_RESUME 0x0 /* Resume after return from INIT */ +#define IA64_INIT_WARM_BOOT -1 /* Warm boot of the system need from SAL */ + +#define IA64_MCA_SAME_CONTEXT 0x0 /* SAL to return to same context */ +#define IA64_MCA_NEW_CONTEXT -1 /* SAL to return to new context */ #endif /* !__ASSEMBLY__ */ #endif /* _ASM_IA64_MCA_H */ diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h index 836953e..27c9203 100644 --- a/include/asm-ia64/mca_asm.h +++ b/include/asm-ia64/mca_asm.h @@ -8,6 +8,8 @@ * Copyright (C) 2000 David Mosberger-Tang * Copyright (C) 2002 Intel Corp. 
* Copyright (C) 2002 Jenna Hall + * Copyright (C) 2005 Silicon Graphics, Inc + * Copyright (C) 2005 Keith Owens */ #ifndef _ASM_IA64_MCA_ASM_H #define _ASM_IA64_MCA_ASM_H @@ -207,106 +209,33 @@ ;; /* - * The following offsets capture the order in which the - * RSE related registers from the old context are - * saved onto the new stack frame. + * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel + * stacks, except that the SAL/OS state and a switch_stack are stored near the + * top of the MCA/INIT stack. To support concurrent entry to MCA or INIT, as + * well as MCA over INIT, each event needs its own SAL/OS state. All entries + * are 16 byte aligned. * - * +-----------------------+ - * |NDIRTY [BSP - BSPSTORE]| - * +-----------------------+ - * | RNAT | - * +-----------------------+ - * | BSPSTORE | - * +-----------------------+ - * | IFS | - * +-----------------------+ - * | PFS | - * +-----------------------+ - * | RSC | - * +-----------------------+ <-------- Bottom of new stack frame + * +---------------------------+ + * | pt_regs | + * +---------------------------+ + * | switch_stack | + * +---------------------------+ + * | SAL/OS state | + * +---------------------------+ + * | 16 byte scratch area | + * +---------------------------+ <-------- SP at start of C MCA handler + * | ..... | + * +---------------------------+ + * | RBS for MCA/INIT handler | + * +---------------------------+ + * | struct task for MCA/INIT | + * +---------------------------+ <-------- Bottom of MCA/INIT stack */ -#define rse_rsc_offset 0 -#define rse_pfs_offset (rse_rsc_offset+0x08) -#define rse_ifs_offset (rse_pfs_offset+0x08) -#define rse_bspstore_offset (rse_ifs_offset+0x08) -#define rse_rnat_offset (rse_bspstore_offset+0x08) -#define rse_ndirty_offset (rse_rnat_offset+0x08) -/* - * rse_switch_context - * - * 1. Save old RSC onto the new stack frame - * 2. Save PFS onto new stack frame - * 3. Cover the old frame and start a new frame. - * 4. Save IFS onto new stack frame - * 5. Save the old BSPSTORE on the new stack frame - * 6. Save the old RNAT on the new stack frame - * 7. Write BSPSTORE with the new backing store pointer - * 8. Read and save the new BSP to calculate the #dirty registers - * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2 - */ -#define rse_switch_context(temp,p_stackframe,p_bspstore) \ - ;; \ - mov temp=ar.rsc;; \ - st8 [p_stackframe]=temp,8;; \ - mov temp=ar.pfs;; \ - st8 [p_stackframe]=temp,8; \ - cover ;; \ - mov temp=cr.ifs;; \ - st8 [p_stackframe]=temp,8;; \ - mov temp=ar.bspstore;; \ - st8 [p_stackframe]=temp,8;; \ - mov temp=ar.rnat;; \ - st8 [p_stackframe]=temp,8; \ - mov ar.bspstore=p_bspstore;; \ - mov temp=ar.bsp;; \ - sub temp=temp,p_bspstore;; \ - st8 [p_stackframe]=temp,8;; - -/* - * rse_return_context - * 1. Allocate a zero-sized frame - * 2. Store the number of dirty registers RSC.loadrs field - * 3. Issue a loadrs to insure that any registers from the interrupted - * context which were saved on the new stack frame have been loaded - * back into the stacked registers - * 4. Restore BSPSTORE - * 5. Restore RNAT - * 6. Restore PFS - * 7. Restore IFS - * 8. Restore RSC - * 9. 
Issue an RFI - */ -#define rse_return_context(psr_mask_reg,temp,p_stackframe) \ - ;; \ - alloc temp=ar.pfs,0,0,0,0; \ - add p_stackframe=rse_ndirty_offset,p_stackframe;; \ - ld8 temp=[p_stackframe];; \ - shl temp=temp,16;; \ - mov ar.rsc=temp;; \ - loadrs;; \ - add p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\ - ld8 temp=[p_stackframe];; \ - mov ar.bspstore=temp;; \ - add p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\ - ld8 temp=[p_stackframe];; \ - mov ar.rnat=temp;; \ - add p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;; \ - ld8 temp=[p_stackframe];; \ - mov ar.pfs=temp;; \ - add p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;; \ - ld8 temp=[p_stackframe];; \ - mov cr.ifs=temp;; \ - add p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;; \ - ld8 temp=[p_stackframe];; \ - mov ar.rsc=temp ; \ - mov temp=psr;; \ - or temp=temp,psr_mask_reg;; \ - mov cr.ipsr=temp;; \ - mov temp=ip;; \ - add temp=0x30,temp;; \ - mov cr.iip=temp;; \ - srlz.i;; \ - rfi;; +#define ALIGN16(x) ((x)&~15) +#define MCA_PT_REGS_OFFSET ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE) +#define MCA_SWITCH_STACK_OFFSET ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE) +#define MCA_SOS_OFFSET ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE) +#define MCA_SP_OFFSET ALIGN16(MCA_SOS_OFFSET-16) #endif /* _ASM_IA64_MCA_ASM_H */ -- cgit v1.1 From 49a28cc8fd26f5317c47a9aeb2bdd1c33e21738e Mon Sep 17 00:00:00 2001 From: Keith Owens Date: Sun, 11 Sep 2005 17:24:42 +1000 Subject: [IA64] MCA/INIT: remove obsolete unwind code Delete the special case unwind code that was only used by the old MCA/INIT handler. Signed-off-by: Keith Owens Signed-off-by: Tony Luck --- include/asm-ia64/unwind.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include/asm-ia64') diff --git a/include/asm-ia64/unwind.h b/include/asm-ia64/unwind.h index 61426ad..5df0276 100644 --- a/include/asm-ia64/unwind.h +++ b/include/asm-ia64/unwind.h @@ -114,13 +114,6 @@ extern void unw_remove_unwind_table (void *handle); */ extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t); -/* - * Prepare to unwind from interruption. The pt-regs and switch-stack structures must have - * be "adjacent" (no state modifications between pt-regs and switch-stack). - */ -extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t, - struct pt_regs *pt, struct switch_stack *sw); - extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw); -- cgit v1.1