Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--  arch/s390/include/asm/atomic.h    |   2
-rw-r--r--  arch/s390/include/asm/bitops.h    |  65
-rw-r--r--  arch/s390/include/asm/ccwdev.h    |   4
-rw-r--r--  arch/s390/include/asm/ccwgroup.h  |   4
-rw-r--r--  arch/s390/include/asm/cio.h       |   2
-rw-r--r--  arch/s390/include/asm/cmpxchg.h   | 225
-rw-r--r--  arch/s390/include/asm/system.h    | 196
-rw-r--r--  arch/s390/include/asm/types.h     |   8
-rw-r--r--  arch/s390/include/asm/unistd.h    |   6
9 files changed, 279 insertions, 233 deletions
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 5c5ba10..d9db138 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -9,7 +9,7 @@ * * Atomic operations that C can't guarantee us. * Useful for resource counting etc. - * s390 uses 'Compare And Swap' for atomicity in SMP enviroment. + * s390 uses 'Compare And Swap' for atomicity in SMP environment. * */ diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 2e05972..e1c8f3a 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -742,18 +742,42 @@ static inline int sched_find_first_bit(unsigned long *b) * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24 */ -#define ext2_set_bit(nr, addr) \ - __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) -#define ext2_set_bit_atomic(lock, nr, addr) \ - test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) -#define ext2_clear_bit(nr, addr) \ - __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) -#define ext2_clear_bit_atomic(lock, nr, addr) \ - test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) -#define ext2_test_bit(nr, addr) \ - test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) - -static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size) +static inline void __set_bit_le(unsigned long nr, void *addr) +{ + __set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); +} + +static inline void __clear_bit_le(unsigned long nr, void *addr) +{ + __clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); +} + +static inline int __test_and_set_bit_le(unsigned long nr, void *addr) +{ + return __test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); +} + +static inline int test_and_set_bit_le(unsigned long nr, void *addr) +{ + return test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); +} + +static inline int __test_and_clear_bit_le(unsigned long nr, void *addr) +{ + return __test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); +} + +static inline int test_and_clear_bit_le(unsigned long nr, void *addr) +{ + return test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); +} + +static inline int test_bit_le(unsigned long nr, const void *addr) +{ + return test_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); +} + +static inline int find_first_zero_bit_le(void *vaddr, unsigned int size) { unsigned long bytes, bits; @@ -764,7 +788,7 @@ static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size) return (bits < size) ? bits : size; } -static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size, +static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, unsigned long offset) { unsigned long *addr = vaddr, *p; @@ -790,11 +814,10 @@ static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size, size -= __BITOPS_WORDSIZE; p++; } - return offset + ext2_find_first_zero_bit(p, size); + return offset + find_first_zero_bit_le(p, size); } -static inline unsigned long ext2_find_first_bit(void *vaddr, - unsigned long size) +static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size) { unsigned long bytes, bits; @@ -805,7 +828,7 @@ static inline unsigned long ext2_find_first_bit(void *vaddr, return (bits < size) ? 
bits : size; } -static inline int ext2_find_next_bit(void *vaddr, unsigned long size, +static inline int find_next_bit_le(void *vaddr, unsigned long size, unsigned long offset) { unsigned long *addr = vaddr, *p; @@ -831,10 +854,14 @@ static inline int ext2_find_next_bit(void *vaddr, unsigned long size, size -= __BITOPS_WORDSIZE; p++; } - return offset + ext2_find_first_bit(p, size); + return offset + find_first_bit_le(p, size); } -#include <asm-generic/bitops/minix.h> +#define ext2_set_bit_atomic(lock, nr, addr) \ + test_and_set_bit_le(nr, addr) +#define ext2_clear_bit_atomic(lock, nr, addr) \ + test_and_clear_bit_le(nr, addr) + #endif /* __KERNEL__ */ diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index ff6f62e..623f2fb 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -112,7 +112,6 @@ enum uc_todo { /** * struct ccw driver - device driver for channel attached devices - * @owner: owning module * @ids: ids supported by this driver * @probe: function called on probe * @remove: function called on remove @@ -128,10 +127,8 @@ enum uc_todo { * @restore: callback for restoring after hibernation * @uc_handler: callback for unit check handler * @driver: embedded device driver structure - * @name: device driver name */ struct ccw_driver { - struct module *owner; struct ccw_device_id *ids; int (*probe) (struct ccw_device *); void (*remove) (struct ccw_device *); @@ -147,7 +144,6 @@ struct ccw_driver { int (*restore)(struct ccw_device *); enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *); struct device_driver driver; - char *name; }; extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h index c79c1e7..f2ea2c5 100644 --- a/arch/s390/include/asm/ccwgroup.h +++ b/arch/s390/include/asm/ccwgroup.h @@ -29,8 +29,6 @@ struct ccwgroup_device { /** * struct ccwgroup_driver - driver for ccw group devices - * @owner: driver owner - * @name: driver name * @max_slaves: maximum number of slave devices * @driver_id: unique id * @probe: function called on probe @@ -46,8 +44,6 @@ struct ccwgroup_device { * @driver: embedded driver structure */ struct ccwgroup_driver { - struct module *owner; - char *name; int max_slaves; unsigned long driver_id; diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index e34347d..fc50a334 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -183,7 +183,7 @@ struct esw3 { * The irb that is handed to the device driver when an interrupt occurs. For * solicited interrupts, the common I/O layer already performs checks whether * a field is valid; a field not being valid is always passed as %0. - * If a unit check occured, @ecw may contain sense data; this is retrieved + * If a unit check occurred, @ecw may contain sense data; this is retrieved * by the common I/O layer itself if the device doesn't support concurrent * sense (so that the device driver never needs to perform basic sene itself). * For unsolicited interrupts, the irb is passed as-is (expect for sense data, diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h new file mode 100644 index 0000000..7488e52 --- /dev/null +++ b/arch/s390/include/asm/cmpxchg.h @@ -0,0 +1,225 @@ +/* + * Copyright IBM Corp. 
1999, 2011 + * + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, + */ + +#ifndef __ASM_CMPXCHG_H +#define __ASM_CMPXCHG_H + +#include <linux/types.h> + +extern void __xchg_called_with_bad_pointer(void); + +static inline unsigned long __xchg(unsigned long x, void *ptr, int size) +{ + unsigned long addr, old; + int shift; + + switch (size) { + case 1: + addr = (unsigned long) ptr; + shift = (3 ^ (addr & 3)) << 3; + addr ^= addr & 3; + asm volatile( + " l %0,%4\n" + "0: lr 0,%0\n" + " nr 0,%3\n" + " or 0,%2\n" + " cs %0,0,%4\n" + " jl 0b\n" + : "=&d" (old), "=Q" (*(int *) addr) + : "d" (x << shift), "d" (~(255 << shift)), + "Q" (*(int *) addr) : "memory", "cc", "0"); + return old >> shift; + case 2: + addr = (unsigned long) ptr; + shift = (2 ^ (addr & 2)) << 3; + addr ^= addr & 2; + asm volatile( + " l %0,%4\n" + "0: lr 0,%0\n" + " nr 0,%3\n" + " or 0,%2\n" + " cs %0,0,%4\n" + " jl 0b\n" + : "=&d" (old), "=Q" (*(int *) addr) + : "d" (x << shift), "d" (~(65535 << shift)), + "Q" (*(int *) addr) : "memory", "cc", "0"); + return old >> shift; + case 4: + asm volatile( + " l %0,%3\n" + "0: cs %0,%2,%3\n" + " jl 0b\n" + : "=&d" (old), "=Q" (*(int *) ptr) + : "d" (x), "Q" (*(int *) ptr) + : "memory", "cc"); + return old; +#ifdef CONFIG_64BIT + case 8: + asm volatile( + " lg %0,%3\n" + "0: csg %0,%2,%3\n" + " jl 0b\n" + : "=&d" (old), "=m" (*(long *) ptr) + : "d" (x), "Q" (*(long *) ptr) + : "memory", "cc"); + return old; +#endif /* CONFIG_64BIT */ + } + __xchg_called_with_bad_pointer(); + return x; +} + +#define xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\ + __ret; \ +}) + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. 
+ */ + +#define __HAVE_ARCH_CMPXCHG + +extern void __cmpxchg_called_with_bad_pointer(void); + +static inline unsigned long __cmpxchg(void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long addr, prev, tmp; + int shift; + + switch (size) { + case 1: + addr = (unsigned long) ptr; + shift = (3 ^ (addr & 3)) << 3; + addr ^= addr & 3; + asm volatile( + " l %0,%2\n" + "0: nr %0,%5\n" + " lr %1,%0\n" + " or %0,%3\n" + " or %1,%4\n" + " cs %0,%1,%2\n" + " jnl 1f\n" + " xr %1,%0\n" + " nr %1,%5\n" + " jnz 0b\n" + "1:" + : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) + : "d" (old << shift), "d" (new << shift), + "d" (~(255 << shift)), "Q" (*(int *) ptr) + : "memory", "cc"); + return prev >> shift; + case 2: + addr = (unsigned long) ptr; + shift = (2 ^ (addr & 2)) << 3; + addr ^= addr & 2; + asm volatile( + " l %0,%2\n" + "0: nr %0,%5\n" + " lr %1,%0\n" + " or %0,%3\n" + " or %1,%4\n" + " cs %0,%1,%2\n" + " jnl 1f\n" + " xr %1,%0\n" + " nr %1,%5\n" + " jnz 0b\n" + "1:" + : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) + : "d" (old << shift), "d" (new << shift), + "d" (~(65535 << shift)), "Q" (*(int *) ptr) + : "memory", "cc"); + return prev >> shift; + case 4: + asm volatile( + " cs %0,%3,%1\n" + : "=&d" (prev), "=Q" (*(int *) ptr) + : "0" (old), "d" (new), "Q" (*(int *) ptr) + : "memory", "cc"); + return prev; +#ifdef CONFIG_64BIT + case 8: + asm volatile( + " csg %0,%3,%1\n" + : "=&d" (prev), "=Q" (*(long *) ptr) + : "0" (old), "d" (new), "Q" (*(long *) ptr) + : "memory", "cc"); + return prev; +#endif /* CONFIG_64BIT */ + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr)))) + +#ifdef CONFIG_64BIT +#define cmpxchg64(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg((ptr), (o), (n)); \ +}) +#else /* CONFIG_64BIT */ +static inline unsigned long long __cmpxchg64(void *ptr, + unsigned long long old, + unsigned long long new) +{ + register_pair rp_old = {.pair = old}; + register_pair rp_new = {.pair = new}; + + asm volatile( + " cds %0,%2,%1" + : "+&d" (rp_old), "=Q" (ptr) + : "d" (rp_new), "Q" (ptr) + : "cc"); + return rp_old.pair; +} +#define cmpxchg64(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg64((ptr), \ + (unsigned long long)(o), \ + (unsigned long long)(n))) +#endif /* CONFIG_64BIT */ + +#include <asm-generic/cmpxchg-local.h> + +static inline unsigned long __cmpxchg_local(void *ptr, + unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 1: + case 2: + case 4: +#ifdef CONFIG_64BIT + case 8: +#endif + return __cmpxchg(ptr, old, new, size); + default: + return __cmpxchg_local_generic(ptr, old, new, size); + } + + return old; +} + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. 
+ */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr)))) + +#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n)) + +#endif /* __ASM_CMPXCHG_H */ diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 8f8d759..d382629 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -14,6 +14,7 @@ #include <asm/setup.h> #include <asm/processor.h> #include <asm/lowcore.h> +#include <asm/cmpxchg.h> #ifdef __KERNEL__ @@ -120,161 +121,6 @@ extern int memcpy_real(void *, void *, size_t); #define nop() asm volatile("nop") -#define xchg(ptr,x) \ -({ \ - __typeof__(*(ptr)) __ret; \ - __ret = (__typeof__(*(ptr))) \ - __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \ - __ret; \ -}) - -extern void __xchg_called_with_bad_pointer(void); - -static inline unsigned long __xchg(unsigned long x, void * ptr, int size) -{ - unsigned long addr, old; - int shift; - - switch (size) { - case 1: - addr = (unsigned long) ptr; - shift = (3 ^ (addr & 3)) << 3; - addr ^= addr & 3; - asm volatile( - " l %0,%4\n" - "0: lr 0,%0\n" - " nr 0,%3\n" - " or 0,%2\n" - " cs %0,0,%4\n" - " jl 0b\n" - : "=&d" (old), "=Q" (*(int *) addr) - : "d" (x << shift), "d" (~(255 << shift)), - "Q" (*(int *) addr) : "memory", "cc", "0"); - return old >> shift; - case 2: - addr = (unsigned long) ptr; - shift = (2 ^ (addr & 2)) << 3; - addr ^= addr & 2; - asm volatile( - " l %0,%4\n" - "0: lr 0,%0\n" - " nr 0,%3\n" - " or 0,%2\n" - " cs %0,0,%4\n" - " jl 0b\n" - : "=&d" (old), "=Q" (*(int *) addr) - : "d" (x << shift), "d" (~(65535 << shift)), - "Q" (*(int *) addr) : "memory", "cc", "0"); - return old >> shift; - case 4: - asm volatile( - " l %0,%3\n" - "0: cs %0,%2,%3\n" - " jl 0b\n" - : "=&d" (old), "=Q" (*(int *) ptr) - : "d" (x), "Q" (*(int *) ptr) - : "memory", "cc"); - return old; -#ifdef __s390x__ - case 8: - asm volatile( - " lg %0,%3\n" - "0: csg %0,%2,%3\n" - " jl 0b\n" - : "=&d" (old), "=m" (*(long *) ptr) - : "d" (x), "Q" (*(long *) ptr) - : "memory", "cc"); - return old; -#endif /* __s390x__ */ - } - __xchg_called_with_bad_pointer(); - return x; -} - -/* - * Atomic compare and exchange. Compare OLD with MEM, if identical, - * store NEW in MEM. Return the initial value in MEM. Success is - * indicated by comparing RETURN with OLD. 
- */ - -#define __HAVE_ARCH_CMPXCHG 1 - -#define cmpxchg(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) - -extern void __cmpxchg_called_with_bad_pointer(void); - -static inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) -{ - unsigned long addr, prev, tmp; - int shift; - - switch (size) { - case 1: - addr = (unsigned long) ptr; - shift = (3 ^ (addr & 3)) << 3; - addr ^= addr & 3; - asm volatile( - " l %0,%2\n" - "0: nr %0,%5\n" - " lr %1,%0\n" - " or %0,%3\n" - " or %1,%4\n" - " cs %0,%1,%2\n" - " jnl 1f\n" - " xr %1,%0\n" - " nr %1,%5\n" - " jnz 0b\n" - "1:" - : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) - : "d" (old << shift), "d" (new << shift), - "d" (~(255 << shift)), "Q" (*(int *) ptr) - : "memory", "cc"); - return prev >> shift; - case 2: - addr = (unsigned long) ptr; - shift = (2 ^ (addr & 2)) << 3; - addr ^= addr & 2; - asm volatile( - " l %0,%2\n" - "0: nr %0,%5\n" - " lr %1,%0\n" - " or %0,%3\n" - " or %1,%4\n" - " cs %0,%1,%2\n" - " jnl 1f\n" - " xr %1,%0\n" - " nr %1,%5\n" - " jnz 0b\n" - "1:" - : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) - : "d" (old << shift), "d" (new << shift), - "d" (~(65535 << shift)), "Q" (*(int *) ptr) - : "memory", "cc"); - return prev >> shift; - case 4: - asm volatile( - " cs %0,%3,%1\n" - : "=&d" (prev), "=Q" (*(int *) ptr) - : "0" (old), "d" (new), "Q" (*(int *) ptr) - : "memory", "cc"); - return prev; -#ifdef __s390x__ - case 8: - asm volatile( - " csg %0,%3,%1\n" - : "=&d" (prev), "=Q" (*(long *) ptr) - : "0" (old), "d" (new), "Q" (*(long *) ptr) - : "memory", "cc"); - return prev; -#endif /* __s390x__ */ - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - /* * Force strict CPU ordering. * And yes, this is required on UP too when we're talking @@ -353,46 +199,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) __ctl_load(__dummy, cr, cr); \ }) -#include <linux/irqflags.h> - -#include <asm-generic/cmpxchg-local.h> - -static inline unsigned long __cmpxchg_local(volatile void *ptr, - unsigned long old, - unsigned long new, int size) -{ - switch (size) { - case 1: - case 2: - case 4: -#ifdef __s390x__ - case 8: -#endif - return __cmpxchg(ptr, old, new, size); - default: - return __cmpxchg_local_generic(ptr, old, new, size); - } - - return old; -} - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. - */ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) -#ifdef __s390x__ -#define cmpxchg64_local(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg_local((ptr), (o), (n)); \ - }) -#else -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#endif - /* * Use to set psw mask except for the first byte which * won't be changed by this function. diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h index 04d6b95..eeb52cc 100644 --- a/arch/s390/include/asm/types.h +++ b/arch/s390/include/asm/types.h @@ -30,14 +30,6 @@ typedef __signed__ long saddr_t; #ifndef __ASSEMBLY__ -typedef u64 dma64_addr_t; -#ifdef __s390x__ -/* DMA addresses come in 32-bit and 64-bit flavours. 
*/ -typedef u64 dma_addr_t; -#else -typedef u32 dma_addr_t; -#endif - #ifndef __s390x__ typedef union { unsigned long long pair; diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 1049ef2..e821525 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -272,7 +272,11 @@ #define __NR_fanotify_init 332 #define __NR_fanotify_mark 333 #define __NR_prlimit64 334 -#define NR_syscalls 335 +#define __NR_name_to_handle_at 335 +#define __NR_open_by_handle_at 336 +#define __NR_clock_adjtime 337 +#define __NR_syncfs 338 +#define NR_syscalls 339 /* * There are some system calls that are not present on 64 bit, some |
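A note on the bitops.h hunk above: the renamed *_le helpers (formerly the ext2_* macros) all funnel through the same index transform, nr ^ (__BITOPS_WORDSIZE - 8). The following standalone sketch — not kernel code, and assuming the 64-bit case where __BITOPS_WORDSIZE is 64 — prints how a little-endian bit number is remapped onto the big-endian word.

/*
 * Standalone sketch (not kernel code) of the index transform behind the
 * renamed *_le bitops: nr ^ (__BITOPS_WORDSIZE - 8).  Assuming the 64-bit
 * case, the XOR is with 56: the low three bits (bit position inside a
 * byte) are preserved while the byte-selector bits are inverted, i.e.
 * byte b of the word becomes byte 7 - b.
 */
#include <stdio.h>

#define BITOPS_WORDSIZE 64	/* 64-bit kernel; 32 on 31-bit s390 */

int main(void)
{
	unsigned long nr;

	for (nr = 0; nr < 64; nr += 9) {
		unsigned long mapped = nr ^ (BITOPS_WORDSIZE - 8);

		printf("little-endian bit %2lu -> word bit %2lu\n", nr, mapped);
	}
	return 0;
}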
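The new asm/cmpxchg.h documents the cmpxchg() contract as: compare OLD with MEM, store NEW in MEM if they are identical, return the initial value of MEM, and judge success by comparing the return value with OLD. The sketch below illustrates that calling pattern in plain userspace C; the GCC __sync_val_compare_and_swap() builtin merely stands in for the kernel macro, and atomic_add_ulong() is a hypothetical caller, not an API from the patch.

/*
 * Userspace sketch of the cmpxchg() calling pattern.  Like cmpxchg(),
 * __sync_val_compare_and_swap() returns the value that was initially in
 * *ptr, and the caller detects success by comparing that return value
 * with the expected old value.
 */
#include <stdio.h>

static unsigned long counter;	/* hypothetical shared counter */

static void atomic_add_ulong(unsigned long *ptr, unsigned long inc)
{
	unsigned long old, prev;

	do {
		old = *ptr;
		/* kernel code would read: prev = cmpxchg(ptr, old, old + inc); */
		prev = __sync_val_compare_and_swap(ptr, old, old + inc);
	} while (prev != old);	/* lost a race: reload and retry */
}

int main(void)
{
	atomic_add_ulong(&counter, 5);
	atomic_add_ulong(&counter, 7);
	printf("counter = %lu\n", counter);	/* prints 12 */
	return 0;
}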
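The size-1 and size-2 cases of __xchg() and __cmpxchg() operate on the aligned 4-byte word containing the target, masking out the affected byte or halfword and retrying compare-and-swap until it succeeds, since CS only works on word-sized operands. A minimal userspace rendition of the byte case follows; __sync_val_compare_and_swap() again substitutes for the CS instruction, the patch's shift formula is applied only on a big-endian host (as s390 is), and glibc's <endian.h> is assumed.

/*
 * Userspace rendition of the size-1 case of __xchg(): exchange a byte by
 * compare-and-swap on the aligned 4-byte word that contains it, masking
 * out the target byte and OR-ing in the new value until the swap succeeds.
 */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

static unsigned char xchg_byte(unsigned char *ptr, unsigned char x)
{
	uintptr_t addr = (uintptr_t) ptr;
	uint32_t *wp = (uint32_t *) (addr & ~(uintptr_t) 3);
	uint32_t mask, old, new;
	int shift;

#if __BYTE_ORDER == __BIG_ENDIAN
	shift = (3 ^ (addr & 3)) << 3;	/* formula used in the patch */
#else
	shift = (addr & 3) << 3;	/* equivalent on little-endian hosts */
#endif
	mask = ~(0xffU << shift);

	do {
		old = *wp;
		new = (old & mask) | ((uint32_t) x << shift);
	} while (__sync_val_compare_and_swap(wp, old, new) != old);

	return old >> shift;	/* previous value of the byte */
}

int main(void)
{
	static uint32_t word = 0x11223344;
	unsigned char *bytes = (unsigned char *) &word;
	unsigned char prev = xchg_byte(&bytes[1], 0xaa);

	printf("previous byte 0x%02x, word now 0x%08x\n", prev, (unsigned int) word);
	return 0;
}

The same containing-word trick is what the interlocked-update loops in the patch implement with CS on s390; only the 4- and 8-byte cases map directly onto CS and CSG.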
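Finally, the unistd.h hunk wires up four new system call numbers (335-338) and raises NR_syscalls to 339. One way to exercise such a freshly added call from userspace, before the C library provides a wrapper, is the generic syscall() interface; the example below does this for syncfs and is only a sketch — the hard-coded 338 fallback applies to the s390 ABI shown in this diff, and the value differs on other architectures.

/*
 * Invoke the newly wired-up syncfs system call (number 338 on s390 in
 * this patch) through the generic syscall() interface.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_syncfs
#define __NR_syncfs 338		/* s390 value added by this patch */
#endif

int main(void)
{
	int fd = open(".", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* flush the filesystem containing the current working directory */
	if (syscall(__NR_syncfs, fd) < 0)
		perror("syncfs");
	close(fd);
	return 0;
}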