| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-14 20:51:44 -0700 |
|-----------|------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-14 20:51:44 -0700 |
| commit    | bdfa54dfd9eea001274dbcd622657a904fe43b81 (patch) | |
| tree      | ab251ab359e519656d7061bbe8db4c7ab355404b | |
| parent    | 2481bc75283ea10e75d5fb1a8b42af363fc4b45c (diff) | |
| parent    | a1307bba1adcc9b338511180fa94a54b4c3f534b (diff) | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
"The major change in this merge is the removal of the support for
31-bit kernels. Naturally 31-bit user space will continue to work via
the compat layer.
And then some cleanup, some improvements and bug fixes"
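With 31-bit kernels gone, the only remaining 31-bit awareness is the compat layer: "is this a 31-bit task?" reduces to testing the TIF_31BIT thread flag. The diff below reflects this by making is_32bit_task() unconditionally test_thread_flag(TIF_31BIT) and by simplifying syscall_get_arch() to return AUDIT_ARCH_S390X for everything that is not a compat task. The following is a minimal, self-contained sketch of that decision only; the thread_flags variable, the stubbed test_thread_flag(), and the constant values are illustrative placeholders, not kernel code.

```c
/*
 * Sketch: how a 31-bit compat task is still distinguished after this
 * merge.  Only is_32bit_task() and the if/return shape mirror the diff
 * below; everything else is stubbed for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define AUDIT_ARCH_S390   0x00000016u   /* placeholder values */
#define AUDIT_ARCH_S390X  0x80000016u
#define TIF_31BIT         16            /* placeholder bit number */

static unsigned long thread_flags;      /* stands in for the task's flags */

static bool test_thread_flag(int flag)
{
	return (thread_flags >> flag) & 1;
}

#define is_32bit_task() (test_thread_flag(TIF_31BIT))

static unsigned int syscall_get_arch_sketch(void)
{
	if (is_32bit_task())
		return AUDIT_ARCH_S390;   /* 31-bit compat task */
	return AUDIT_ARCH_S390X;          /* native 64-bit task */
}

int main(void)
{
	printf("native task arch: %#x\n", syscall_get_arch_sketch());
	thread_flags |= 1UL << TIF_31BIT;
	printf("compat task arch: %#x\n", syscall_get_arch_sketch());
	return 0;
}
```

Running the sketch reports the 64-bit arch for a plain task and the 31-bit arch once the flag is set, which is the only distinction the kernel still has to make now that 31-bit kernels themselves are gone.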
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (23 commits)
s390/smp: wait until secondaries are active & online
s390/hibernate: fix save and restore of kernel text section
s390/cacheinfo: add missing facility check
s390/syscalls: simplify syscall_get_arch()
s390/irq: enforce correct irqclass_sub_desc array size
s390: remove "64" suffix from mem64.S and swsusp_asm64.S
s390/ipl: cleanup macro usage
s390/ipl: cleanup shutdown_action attributes
s390/ipl: cleanup bin attr usage
s390/uprobes: fix address space annotation
s390: add missing arch_release_task_struct() declaration
s390: make couple of functions and variables static
s390/maccess: improve s390_kernel_write()
s390/maccess: remove potentially broken probe_kernel_write()
s390/watchdog: support for KVM hypervisors and delete pr_info messages
s390/watchdog: enable KEEPALIVE for /dev/watchdog
s390/dasd: remove setting of scheduler from driver
s390/traps: panic() instead of die() on translation exception
s390: remove test_facility(2) (== z/Architecture mode active) checks
s390/cmpxchg: simplify cmpxchg_double
...
119 files changed, 1337 insertions, 7255 deletions
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild index 647c3ec..2938934 100644 --- a/arch/s390/Kbuild +++ b/arch/s390/Kbuild @@ -4,6 +4,5 @@ obj-$(CONFIG_KVM) += kvm/ obj-$(CONFIG_CRYPTO_HW) += crypto/ obj-$(CONFIG_S390_HYPFS_FS) += hypfs/ obj-$(CONFIG_APPLDATA_BASE) += appldata/ -obj-$(CONFIG_MATHEMU) += math-emu/ obj-y += net/ obj-$(CONFIG_PCI) += pci/ diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 6321fd8..a5ced5c 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -35,7 +35,7 @@ config GENERIC_BUG_RELATIVE_POINTERS def_bool y config ARCH_DMA_ADDR_T_64BIT - def_bool 64BIT + def_bool y config GENERIC_LOCKBREAK def_bool y if SMP && PREEMPT @@ -59,7 +59,7 @@ config PCI_QUIRKS def_bool n config ARCH_SUPPORTS_UPROBES - def_bool 64BIT + def_bool y config S390 def_bool y @@ -111,19 +111,19 @@ config S390 select GENERIC_TIME_VSYSCALL select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ARCH_AUDITSYSCALL - select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 + select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK - select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT - select HAVE_BPF_JIT if 64BIT && PACK_STACK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_BPF_JIT if PACK_STACK select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_DEBUG_KMEMLEAK - select HAVE_DYNAMIC_FTRACE if 64BIT - select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT + select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_FTRACE_MCOUNT_RECORD - select HAVE_FUNCTION_GRAPH_TRACER if 64BIT - select HAVE_FUNCTION_TRACER if 64BIT + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_GZIP @@ -133,7 +133,7 @@ config S390 select HAVE_KERNEL_XZ select HAVE_KPROBES select HAVE_KRETPROBES - select HAVE_KVM if 64BIT + select HAVE_KVM select HAVE_LIVEPATCH select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP @@ -143,7 +143,6 @@ config S390 select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS - select HAVE_UID16 if 32BIT select HAVE_VIRT_CPU_ACCOUNTING select MODULES_USE_ELF_RELA select NO_BOOTMEM @@ -199,18 +198,11 @@ config HAVE_MARCH_Z13_FEATURES choice prompt "Processor type" - default MARCH_G5 - -config MARCH_G5 - bool "System/390 model G5 and G6" - depends on !64BIT - help - Select this to build a 31 bit kernel that works - on all ESA/390 and z/Architecture machines. + default MARCH_Z900 config MARCH_Z900 bool "IBM zSeries model z800 and z900" - select HAVE_MARCH_Z900_FEATURES if 64BIT + select HAVE_MARCH_Z900_FEATURES help Select this to enable optimizations for model z800/z900 (2064 and 2066 series). This will enable some optimizations that are not @@ -218,7 +210,7 @@ config MARCH_Z900 config MARCH_Z990 bool "IBM zSeries model z890 and z990" - select HAVE_MARCH_Z990_FEATURES if 64BIT + select HAVE_MARCH_Z990_FEATURES help Select this to enable optimizations for model z890/z990 (2084 and 2086 series). The kernel will be slightly faster but will not work @@ -226,7 +218,7 @@ config MARCH_Z990 config MARCH_Z9_109 bool "IBM System z9" - select HAVE_MARCH_Z9_109_FEATURES if 64BIT + select HAVE_MARCH_Z9_109_FEATURES help Select this to enable optimizations for IBM System z9 (2094 and 2096 series). 
The kernel will be slightly faster but will not work @@ -234,7 +226,7 @@ config MARCH_Z9_109 config MARCH_Z10 bool "IBM System z10" - select HAVE_MARCH_Z10_FEATURES if 64BIT + select HAVE_MARCH_Z10_FEATURES help Select this to enable optimizations for IBM System z10 (2097 and 2098 series). The kernel will be slightly faster but will not work @@ -242,7 +234,7 @@ config MARCH_Z10 config MARCH_Z196 bool "IBM zEnterprise 114 and 196" - select HAVE_MARCH_Z196_FEATURES if 64BIT + select HAVE_MARCH_Z196_FEATURES help Select this to enable optimizations for IBM zEnterprise 114 and 196 (2818 and 2817 series). The kernel will be slightly faster but will @@ -250,7 +242,7 @@ config MARCH_Z196 config MARCH_ZEC12 bool "IBM zBC12 and zEC12" - select HAVE_MARCH_ZEC12_FEATURES if 64BIT + select HAVE_MARCH_ZEC12_FEATURES help Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and 2827 series). The kernel will be slightly faster but will not work on @@ -258,7 +250,7 @@ config MARCH_ZEC12 config MARCH_Z13 bool "IBM z13" - select HAVE_MARCH_Z13_FEATURES if 64BIT + select HAVE_MARCH_Z13_FEATURES help Select this to enable optimizations for IBM z13 (2964 series). The kernel will be slightly faster but will not work on older @@ -266,9 +258,6 @@ config MARCH_Z13 endchoice -config MARCH_G5_TUNE - def_bool TUNE_G5 || MARCH_G5 && TUNE_DEFAULT - config MARCH_Z900_TUNE def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT @@ -307,9 +296,6 @@ config TUNE_DEFAULT Tune the generated code for the target processor for which the kernel will be compiled. -config TUNE_G5 - bool "System/390 model G5 and G6" - config TUNE_Z900 bool "IBM zSeries model z800 and z900" @@ -335,18 +321,10 @@ endchoice config 64BIT def_bool y - prompt "64 bit kernel" - help - Select this option if you have an IBM z/Architecture machine - and want to use the 64 bit addressing mode. - -config 32BIT - def_bool y if !64BIT config COMPAT def_bool y prompt "Kernel support for 31 bit emulation" - depends on 64BIT select COMPAT_BINFMT_ELF if BINFMT_ELF select ARCH_WANT_OLD_COMPAT_IPC select COMPAT_OLD_SIGACTION @@ -385,8 +363,7 @@ config NR_CPUS int "Maximum number of CPUs (2-512)" range 2 512 depends on SMP - default "32" if !64BIT - default "64" if 64BIT + default "64" help This allows you to specify the maximum number of CPUs which this kernel will support. The maximum supported value is 512 and the @@ -427,15 +404,6 @@ config SCHED_TOPOLOGY source kernel/Kconfig.preempt -config MATHEMU - def_bool y - prompt "IEEE FPU emulation" - depends on MARCH_G5 - help - This option is required for IEEE compliant floating point arithmetic - on older ESA/390 machines. Say Y unless you know your machine doesn't - need this. - source kernel/Kconfig.hz endmenu @@ -446,7 +414,6 @@ config ARCH_SPARSEMEM_ENABLE def_bool y select SPARSEMEM_VMEMMAP_ENABLE select SPARSEMEM_VMEMMAP - select SPARSEMEM_STATIC if !64BIT config ARCH_SPARSEMEM_DEFAULT def_bool y @@ -462,7 +429,6 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE config ARCH_ENABLE_SPLIT_PMD_PTLOCK def_bool y - depends on 64BIT config FORCE_MAX_ZONEORDER int @@ -537,7 +503,6 @@ config QDIO menuconfig PCI bool "PCI support" - depends on 64BIT select HAVE_DMA_ATTRS select PCI_MSI help @@ -607,7 +572,6 @@ config CHSC_SCH config SCM_BUS def_bool y - depends on 64BIT prompt "SCM bus driver" help Bus driver for Storage Class Memory. @@ -629,7 +593,7 @@ menu "Dump support" config CRASH_DUMP bool "kernel crash dumps" - depends on 64BIT && SMP + depends on SMP select KEXEC help Generate crash dump after being started by kexec. 
@@ -668,7 +632,7 @@ endmenu menu "Power Management" config ARCH_HIBERNATION_POSSIBLE - def_bool y if 64BIT + def_bool y source "kernel/power/Kconfig" @@ -819,7 +783,6 @@ source "arch/s390/kvm/Kconfig" config S390_GUEST def_bool y prompt "s390 support for virtio devices" - depends on 64BIT select TTY select VIRTUALIZATION select VIRTIO diff --git a/arch/s390/Makefile b/arch/s390/Makefile index acb6859..667b1bc 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -13,15 +13,6 @@ # Copyright (C) 1994 by Linus Torvalds # -ifndef CONFIG_64BIT -LD_BFD := elf32-s390 -LDFLAGS := -m elf_s390 -KBUILD_CFLAGS += -m31 -KBUILD_AFLAGS += -m31 -UTS_MACHINE := s390 -STACK_SIZE := 8192 -CHECKFLAGS += -D__s390__ -msize-long -else LD_BFD := elf64-s390 LDFLAGS := -m elf64_s390 KBUILD_AFLAGS_MODULE += -fPIC @@ -31,11 +22,9 @@ KBUILD_AFLAGS += -m64 UTS_MACHINE := s390x STACK_SIZE := 16384 CHECKFLAGS += -D__s390__ -D__s390x__ -endif export LD_BFD -mflags-$(CONFIG_MARCH_G5) := -march=g5 mflags-$(CONFIG_MARCH_Z900) := -march=z900 mflags-$(CONFIG_MARCH_Z990) := -march=z990 mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109 @@ -47,7 +36,6 @@ mflags-$(CONFIG_MARCH_Z13) := -march=z13 aflags-y += $(mflags-y) cflags-y += $(mflags-y) -cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5 cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900 cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990 cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109 @@ -104,7 +92,7 @@ KBUILD_AFLAGS += $(aflags-y) OBJCOPYFLAGS := -O binary head-y := arch/s390/kernel/head.o -head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o) +head-y += arch/s390/kernel/head64.o # See arch/s390/Kbuild for content of core part of the kernel core-y += arch/s390/ @@ -129,9 +117,7 @@ zfcpdump: $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ vdso_install: -ifeq ($(CONFIG_64BIT),y) $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@ -endif $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@ archclean: diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index f90d1fc..d478811 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile @@ -4,13 +4,11 @@ # create a compressed vmlinux image from the original vmlinux # -BITS := $(if $(CONFIG_64BIT),64,31) - targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 -targets += misc.o piggy.o sizes.h head$(BITS).o +targets += misc.o piggy.o sizes.h head.o -KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 +KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) @@ -19,7 +17,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding) GCOV_PROFILE := n OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o) -OBJECTS += $(obj)/head$(BITS).o $(obj)/misc.o $(obj)/piggy.o +OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) @@ -34,8 +32,8 @@ quiet_cmd_sizes = GEN $@ $(obj)/sizes.h: vmlinux $(call if_changed,sizes) -AFLAGS_head$(BITS).o += -I$(obj) -$(obj)/head$(BITS).o: $(obj)/sizes.h +AFLAGS_head.o += -I$(obj) +$(obj)/head.o: $(obj)/sizes.h CFLAGS_misc.o += -I$(obj) $(obj)/misc.o: $(obj)/sizes.h diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head.S index f86a4ee..f86a4ee 100644 --- 
a/arch/s390/boot/compressed/head64.S +++ b/arch/s390/boot/compressed/head.S diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S deleted file mode 100644 index e8c9e18..0000000 --- a/arch/s390/boot/compressed/head31.S +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Startup glue code to uncompress the kernel - * - * Copyright IBM Corp. 2010 - * - * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> - */ - -#include <linux/init.h> -#include <linux/linkage.h> -#include <asm/asm-offsets.h> -#include <asm/thread_info.h> -#include <asm/page.h> -#include "sizes.h" - -__HEAD -ENTRY(startup_continue) - basr %r13,0 # get base -.LPG1: - # setup stack - l %r15,.Lstack-.LPG1(%r13) - ahi %r15,-96 - l %r1,.Ldecompress-.LPG1(%r13) - basr %r14,%r1 - # setup registers for memory mover & branch to target - lr %r4,%r2 - l %r2,.Loffset-.LPG1(%r13) - la %r4,0(%r2,%r4) - l %r3,.Lmvsize-.LPG1(%r13) - lr %r5,%r3 - # move the memory mover someplace safe - la %r1,0x200 - mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) - # decompress image is started at 0x11000 - lr %r6,%r2 - br %r1 -mover: - mvcle %r2,%r4,0 - jo mover - br %r6 -mover_end: - - .align 8 -.Lstack: - .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) -.Ldecompress: - .long decompress_kernel -.Loffset: - .long 0x11000 -.Lmvsize: - .long SZ__bss_start diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S index 8e1fb82..747735f 100644 --- a/arch/s390/boot/compressed/vmlinux.lds.S +++ b/arch/s390/boot/compressed/vmlinux.lds.S @@ -1,12 +1,7 @@ #include <asm-generic/vmlinux.lds.h> -#ifdef CONFIG_64BIT OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") OUTPUT_ARCH(s390:64-bit) -#else -OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") -OUTPUT_ARCH(s390:31-bit) -#endif ENTRY(startup) diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h index 6c5cc6d..ba3b2ae 100644 --- a/arch/s390/crypto/crypt_s390.h +++ b/arch/s390/crypto/crypt_s390.h @@ -369,14 +369,10 @@ static inline int crypt_s390_func_available(int func, if (facility_mask & CRYPT_S390_MSA && !test_facility(17)) return 0; - - if (facility_mask & CRYPT_S390_MSA3 && - (!test_facility(2) || !test_facility(76))) + if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76)) return 0; - if (facility_mask & CRYPT_S390_MSA4 && - (!test_facility(2) || !test_facility(77))) + if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77)) return 0; - switch (func & CRYPT_S390_OP_MASK) { case CRYPT_S390_KM: ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c index d4c0d37..24c747a 100644 --- a/arch/s390/hypfs/hypfs_diag0c.c +++ b/arch/s390/hypfs/hypfs_diag0c.c @@ -19,13 +19,9 @@ static void diag0c(struct hypfs_diag0c_entry *entry) { asm volatile ( -#ifdef CONFIG_64BIT " sam31\n" " diag %0,%0,0x0c\n" " sam64\n" -#else - " diag %0,%0,0x0c\n" -#endif : /* no output register */ : "a" (entry) : "memory"); diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h index 32a7059..16887c5 100644 --- a/arch/s390/include/asm/appldata.h +++ b/arch/s390/include/asm/appldata.h @@ -9,28 +9,6 @@ #include <asm/io.h> -#ifndef CONFIG_64BIT - -#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */ -#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */ -#define APPLDATA_GEN_EVENT_REC 0x02 -#define APPLDATA_START_CONFIG_REC 0x03 - -/* - * Parameter list for DIAGNOSE X'DC' - */ -struct appldata_parameter_list { - u16 diag; /* The DIAGNOSE code X'00DC' 
*/ - u8 function; /* The function code for the DIAGNOSE */ - u8 parlist_length; /* Length of the parameter list */ - u32 product_id_addr; /* Address of the 16-byte product ID */ - u16 reserved; - u16 buffer_length; /* Length of the application data buffer */ - u32 buffer_addr; /* Address of the application data buffer */ -} __attribute__ ((packed)); - -#else /* CONFIG_64BIT */ - #define APPLDATA_START_INTERVAL_REC 0x80 #define APPLDATA_STOP_REC 0x81 #define APPLDATA_GEN_EVENT_REC 0x82 @@ -51,8 +29,6 @@ struct appldata_parameter_list { u64 buffer_addr; } __attribute__ ((packed)); -#endif /* CONFIG_64BIT */ - struct appldata_product_id { char prod_nr[7]; /* product number */ u16 prod_fn; /* product function */ diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index fa934fe..adbe380 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -160,8 +160,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define ATOMIC64_INIT(i) { (i) } -#ifdef CONFIG_64BIT - #define __ATOMIC64_NO_BARRIER "\n" #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES @@ -274,99 +272,6 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, #undef __ATOMIC64_LOOP -#else /* CONFIG_64BIT */ - -typedef struct { - long long counter; -} atomic64_t; - -static inline long long atomic64_read(const atomic64_t *v) -{ - register_pair rp; - - asm volatile( - " lm %0,%N0,%1" - : "=&d" (rp) : "Q" (v->counter) ); - return rp.pair; -} - -static inline void atomic64_set(atomic64_t *v, long long i) -{ - register_pair rp = {.pair = i}; - - asm volatile( - " stm %1,%N1,%0" - : "=Q" (v->counter) : "d" (rp) ); -} - -static inline long long atomic64_xchg(atomic64_t *v, long long new) -{ - register_pair rp_new = {.pair = new}; - register_pair rp_old; - - asm volatile( - " lm %0,%N0,%1\n" - "0: cds %0,%2,%1\n" - " jl 0b\n" - : "=&d" (rp_old), "+Q" (v->counter) - : "d" (rp_new) - : "cc"); - return rp_old.pair; -} - -static inline long long atomic64_cmpxchg(atomic64_t *v, - long long old, long long new) -{ - register_pair rp_old = {.pair = old}; - register_pair rp_new = {.pair = new}; - - asm volatile( - " cds %0,%2,%1" - : "+&d" (rp_old), "+Q" (v->counter) - : "d" (rp_new) - : "cc"); - return rp_old.pair; -} - - -static inline long long atomic64_add_return(long long i, atomic64_t *v) -{ - long long old, new; - - do { - old = atomic64_read(v); - new = old + i; - } while (atomic64_cmpxchg(v, old, new) != old); - return new; -} - -static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v) -{ - long long old, new; - - do { - old = atomic64_read(v); - new = old | mask; - } while (atomic64_cmpxchg(v, old, new) != old); -} - -static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v) -{ - long long old, new; - - do { - old = atomic64_read(v); - new = old & mask; - } while (atomic64_cmpxchg(v, old, new) != old); -} - -static inline void atomic64_add(long long i, atomic64_t *v) -{ - atomic64_add_return(i, v); -} - -#endif /* CONFIG_64BIT */ - static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) { long long c, old; diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 5205424..9b68e98 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -51,32 +51,6 @@ #define __BITOPS_NO_BARRIER "\n" -#ifndef CONFIG_64BIT - -#define __BITOPS_OR "or" -#define __BITOPS_AND "nr" -#define __BITOPS_XOR "xr" -#define __BITOPS_BARRIER "\n" - -#define __BITOPS_LOOP(__addr, __val, __op_string, 
__barrier) \ -({ \ - unsigned long __old, __new; \ - \ - typecheck(unsigned long *, (__addr)); \ - asm volatile( \ - " l %0,%2\n" \ - "0: lr %1,%0\n" \ - __op_string " %1,%3\n" \ - " cs %0,%1,%2\n" \ - " jl 0b" \ - : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\ - : "d" (__val) \ - : "cc", "memory"); \ - __old; \ -}) - -#else /* CONFIG_64BIT */ - #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES #define __BITOPS_OR "laog" @@ -125,8 +99,6 @@ #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ -#endif /* CONFIG_64BIT */ - #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) static inline unsigned long * diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h index 6259895..4eadec4 100644 --- a/arch/s390/include/asm/cmpxchg.h +++ b/arch/s390/include/asm/cmpxchg.h @@ -80,15 +80,10 @@ extern void __cmpxchg_double_called_with_bad_pointer(void); ({ \ __typeof__(p1) __p1 = (p1); \ __typeof__(p2) __p2 = (p2); \ - int __ret; \ BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \ BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \ VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\ - if (sizeof(long) == 4) \ - __ret = __cmpxchg_double_4(__p1, __p2, o1, o2, n1, n2); \ - else \ - __ret = __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \ - __ret; \ + __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \ }) #define system_has_cmpxchg_double() 1 diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index b91e960..221b454 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -22,15 +22,7 @@ typedef unsigned long long __nocast cputime64_t; static inline unsigned long __div(unsigned long long n, unsigned long base) { -#ifndef CONFIG_64BIT - register_pair rp; - - rp.pair = n >> 1; - asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1)); - return rp.subreg.odd; -#else /* CONFIG_64BIT */ return n / base; -#endif /* CONFIG_64BIT */ } #define cputime_one_jiffy jiffies_to_cputime(1) @@ -101,17 +93,8 @@ static inline void cputime_to_timespec(const cputime_t cputime, struct timespec *value) { unsigned long long __cputime = (__force unsigned long long) cputime; -#ifndef CONFIG_64BIT - register_pair rp; - - rp.pair = __cputime >> 1; - asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2)); - value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC; - value->tv_sec = rp.subreg.odd; -#else value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC; value->tv_sec = __cputime / CPUTIME_PER_SEC; -#endif } /* @@ -129,17 +112,8 @@ static inline void cputime_to_timeval(const cputime_t cputime, struct timeval *value) { unsigned long long __cputime = (__force unsigned long long) cputime; -#ifndef CONFIG_64BIT - register_pair rp; - - rp.pair = __cputime >> 1; - asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2)); - value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC; - value->tv_sec = rp.subreg.odd; -#else value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC; value->tv_sec = __cputime / CPUTIME_PER_SEC; -#endif } /* diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 31ab9f3..cfad7fca 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -9,20 +9,12 @@ #include <linux/bug.h> -#ifdef CONFIG_64BIT -# define __CTL_LOAD "lctlg" -# define __CTL_STORE "stctg" -#else -# define __CTL_LOAD "lctl" -# define __CTL_STORE "stctl" -#endif - #define __ctl_load(array, low, high) { \ typedef struct { char _[sizeof(array)]; } addrtype; \ \ 
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ asm volatile( \ - __CTL_LOAD " %1,%2,%0\n" \ + " lctlg %1,%2,%0\n" \ : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ } @@ -31,7 +23,7 @@ \ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ asm volatile( \ - __CTL_STORE " %1,%2,%0\n" \ + " stctg %1,%2,%0\n" \ : "=Q" (*(addrtype *)(&array)) \ : "i" (low), "i" (high)); \ } @@ -60,9 +52,7 @@ void smp_ctl_clear_bit(int cr, int bit); union ctlreg0 { unsigned long val; struct { -#ifdef CONFIG_64BIT unsigned long : 32; -#endif unsigned long : 3; unsigned long lap : 1; /* Low-address-protection control */ unsigned long : 4; diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index a5c4978..3ad48f2 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -107,11 +107,7 @@ /* * These are used to set parameters in the core dumps. */ -#ifndef CONFIG_64BIT -#define ELF_CLASS ELFCLASS32 -#else /* CONFIG_64BIT */ #define ELF_CLASS ELFCLASS64 -#endif /* CONFIG_64BIT */ #define ELF_DATA ELFDATA2MSB #define ELF_ARCH EM_S390 diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h index ea5a6e4..a7b2d75 100644 --- a/arch/s390/include/asm/idals.h +++ b/arch/s390/include/asm/idals.h @@ -19,11 +19,7 @@ #include <asm/cio.h> #include <asm/uaccess.h> -#ifdef CONFIG_64BIT #define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */ -#else -#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */ -#endif #define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG) /* @@ -32,11 +28,7 @@ static inline int idal_is_needed(void *vaddr, unsigned int length) { -#ifdef CONFIG_64BIT return ((__pa(vaddr) + length - 1) >> 31) != 0; -#else - return 0; -#endif } @@ -77,7 +69,6 @@ static inline unsigned long *idal_create_words(unsigned long *idaws, static inline int set_normalized_cda(struct ccw1 * ccw, void *vaddr) { -#ifdef CONFIG_64BIT unsigned int nridaws; unsigned long *idal; @@ -93,7 +84,6 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr) ccw->flags |= CCW_FLAG_IDA; vaddr = idal; } -#endif ccw->cda = (__u32)(unsigned long) vaddr; return 0; } @@ -104,12 +94,10 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr) static inline void clear_normalized_cda(struct ccw1 * ccw) { -#ifdef CONFIG_64BIT if (ccw->flags & CCW_FLAG_IDA) { kfree((void *)(unsigned long) ccw->cda); ccw->flags &= ~CCW_FLAG_IDA; } -#endif ccw->cda = 0; } @@ -181,12 +169,8 @@ idal_buffer_free(struct idal_buffer *ib) static inline int __idal_buffer_is_needed(struct idal_buffer *ib) { -#ifdef CONFIG_64BIT return ib->size > (4096ul << ib->page_order) || idal_is_needed(ib->data[0], ib->size); -#else - return ib->size > (4096ul << ib->page_order); -#endif } /* diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index 2b77e23..69972b7 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ -8,14 +8,6 @@ #define JUMP_LABEL_NOP_SIZE 6 #define JUMP_LABEL_NOP_OFFSET 2 -#ifdef CONFIG_64BIT -#define ASM_PTR ".quad" -#define ASM_ALIGN ".balign 8" -#else -#define ASM_PTR ".long" -#define ASM_ALIGN ".balign 4" -#endif - /* * We use a brcl 0,2 instruction for jump labels at compile time so it * can be easily distinguished from a hotpatch generated instruction. 
@@ -24,8 +16,8 @@ static __always_inline bool arch_static_branch(struct static_key *key) { asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" ".pushsection __jump_table, \"aw\"\n" - ASM_ALIGN "\n" - ASM_PTR " 0b, %l[label], %0\n" + ".balign 8\n" + ".quad 0b, %l[label], %0\n" ".popsection\n" : : "X" (key) : : label); return false; diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 34fbcac..663f23e 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -13,163 +13,6 @@ #include <asm/cpu.h> #include <asm/types.h> -#ifdef CONFIG_32BIT - -#define LC_ORDER 0 -#define LC_PAGES 1 - -struct save_area { - u32 ext_save; - u64 timer; - u64 clk_cmp; - u8 pad1[24]; - u8 psw[8]; - u32 pref_reg; - u8 pad2[20]; - u32 acc_regs[16]; - u64 fp_regs[4]; - u32 gp_regs[16]; - u32 ctrl_regs[16]; -} __packed; - -struct save_area_ext { - struct save_area sa; - __vector128 vx_regs[32]; -}; - -struct _lowcore { - psw_t restart_psw; /* 0x0000 */ - psw_t restart_old_psw; /* 0x0008 */ - __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */ - __u32 ipl_parmblock_ptr; /* 0x0014 */ - psw_t external_old_psw; /* 0x0018 */ - psw_t svc_old_psw; /* 0x0020 */ - psw_t program_old_psw; /* 0x0028 */ - psw_t mcck_old_psw; /* 0x0030 */ - psw_t io_old_psw; /* 0x0038 */ - __u8 pad_0x0040[0x0058-0x0040]; /* 0x0040 */ - psw_t external_new_psw; /* 0x0058 */ - psw_t svc_new_psw; /* 0x0060 */ - psw_t program_new_psw; /* 0x0068 */ - psw_t mcck_new_psw; /* 0x0070 */ - psw_t io_new_psw; /* 0x0078 */ - __u32 ext_params; /* 0x0080 */ - __u16 ext_cpu_addr; /* 0x0084 */ - __u16 ext_int_code; /* 0x0086 */ - __u16 svc_ilc; /* 0x0088 */ - __u16 svc_code; /* 0x008a */ - __u16 pgm_ilc; /* 0x008c */ - __u16 pgm_code; /* 0x008e */ - __u32 trans_exc_code; /* 0x0090 */ - __u16 mon_class_num; /* 0x0094 */ - __u8 per_code; /* 0x0096 */ - __u8 per_atmid; /* 0x0097 */ - __u32 per_address; /* 0x0098 */ - __u32 monitor_code; /* 0x009c */ - __u8 exc_access_id; /* 0x00a0 */ - __u8 per_access_id; /* 0x00a1 */ - __u8 op_access_id; /* 0x00a2 */ - __u8 ar_mode_id; /* 0x00a3 */ - __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */ - __u16 subchannel_id; /* 0x00b8 */ - __u16 subchannel_nr; /* 0x00ba */ - __u32 io_int_parm; /* 0x00bc */ - __u32 io_int_word; /* 0x00c0 */ - __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */ - __u32 stfl_fac_list; /* 0x00c8 */ - __u8 pad_0x00cc[0x00d4-0x00cc]; /* 0x00cc */ - __u32 extended_save_area_addr; /* 0x00d4 */ - __u32 cpu_timer_save_area[2]; /* 0x00d8 */ - __u32 clock_comp_save_area[2]; /* 0x00e0 */ - __u32 mcck_interruption_code[2]; /* 0x00e8 */ - __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ - __u32 external_damage_code; /* 0x00f4 */ - __u32 failing_storage_address; /* 0x00f8 */ - __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */ - psw_t psw_save_area; /* 0x0100 */ - __u32 prefixreg_save_area; /* 0x0108 */ - __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */ - - /* CPU register save area: defined by architecture */ - __u32 access_regs_save_area[16]; /* 0x0120 */ - __u32 floating_pt_save_area[8]; /* 0x0160 */ - __u32 gpregs_save_area[16]; /* 0x0180 */ - __u32 cregs_save_area[16]; /* 0x01c0 */ - - /* Save areas. */ - __u32 save_area_sync[8]; /* 0x0200 */ - __u32 save_area_async[8]; /* 0x0220 */ - __u32 save_area_restart[1]; /* 0x0240 */ - - /* CPU flags. */ - __u32 cpu_flags; /* 0x0244 */ - - /* Return psws. 
*/ - psw_t return_psw; /* 0x0248 */ - psw_t return_mcck_psw; /* 0x0250 */ - - /* CPU time accounting values */ - __u64 sync_enter_timer; /* 0x0258 */ - __u64 async_enter_timer; /* 0x0260 */ - __u64 mcck_enter_timer; /* 0x0268 */ - __u64 exit_timer; /* 0x0270 */ - __u64 user_timer; /* 0x0278 */ - __u64 system_timer; /* 0x0280 */ - __u64 steal_timer; /* 0x0288 */ - __u64 last_update_timer; /* 0x0290 */ - __u64 last_update_clock; /* 0x0298 */ - __u64 int_clock; /* 0x02a0 */ - __u64 mcck_clock; /* 0x02a8 */ - __u64 clock_comparator; /* 0x02b0 */ - - /* Current process. */ - __u32 current_task; /* 0x02b8 */ - __u32 thread_info; /* 0x02bc */ - __u32 kernel_stack; /* 0x02c0 */ - - /* Interrupt, panic and restart stack. */ - __u32 async_stack; /* 0x02c4 */ - __u32 panic_stack; /* 0x02c8 */ - __u32 restart_stack; /* 0x02cc */ - - /* Restart function and parameter. */ - __u32 restart_fn; /* 0x02d0 */ - __u32 restart_data; /* 0x02d4 */ - __u32 restart_source; /* 0x02d8 */ - - /* Address space pointer. */ - __u32 kernel_asce; /* 0x02dc */ - __u32 user_asce; /* 0x02e0 */ - __u32 current_pid; /* 0x02e4 */ - - /* SMP info area */ - __u32 cpu_nr; /* 0x02e8 */ - __u32 softirq_pending; /* 0x02ec */ - __u32 percpu_offset; /* 0x02f0 */ - __u32 machine_flags; /* 0x02f4 */ - __u8 pad_0x02f8[0x02fc-0x02f8]; /* 0x02f8 */ - __u32 spinlock_lockval; /* 0x02fc */ - - __u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */ - - /* - * 0xe00 contains the address of the IPL Parameter Information - * block. Dump tools need IPIB for IPL after dump. - * Note: do not change the position of any fields in 0x0e00-0x0f00 - */ - __u32 ipib; /* 0x0e00 */ - __u32 ipib_checksum; /* 0x0e04 */ - __u32 vmcore_info; /* 0x0e08 */ - __u8 pad_0x0e0c[0x0e18-0x0e0c]; /* 0x0e0c */ - __u32 os_info; /* 0x0e18 */ - __u8 pad_0x0e1c[0x0f00-0x0e1c]; /* 0x0e1c */ - - /* Extended facility list */ - __u64 stfle_fac_list[32]; /* 0x0f00 */ -} __packed; - -#else /* CONFIG_32BIT */ - #define LC_ORDER 1 #define LC_PAGES 2 @@ -354,8 +197,6 @@ struct _lowcore { __u8 vector_save_area[1024]; /* 0x1c00 */ } __packed; -#endif /* CONFIG_32BIT */ - #define S390_lowcore (*((struct _lowcore *) 0)) extern struct _lowcore *lowcore_ptr[]; diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h index 9977e08..b55a59e 100644 --- a/arch/s390/include/asm/mman.h +++ b/arch/s390/include/asm/mman.h @@ -8,7 +8,7 @@ #include <uapi/asm/mman.h> -#if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) +#ifndef __ASSEMBLY__ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); #define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags) #endif diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 8fb3802..d25d9ff 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -19,9 +19,7 @@ static inline int init_new_context(struct task_struct *tsk, atomic_set(&mm->context.attach_count, 0); mm->context.flush_mm = 0; mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; -#ifdef CONFIG_64BIT mm->context.asce_bits |= _ASCE_TYPE_REGION3; -#endif mm->context.has_pgste = 0; mm->context.use_skey = 0; mm->context.asce_limit = STACK_TOP_MAX; @@ -110,10 +108,8 @@ static inline void activate_mm(struct mm_struct *prev, static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { -#ifdef CONFIG_64BIT if (oldmm->context.asce_limit < mm->context.asce_limit) crst_table_downgrade(mm, oldmm->context.asce_limit); -#endif } static inline void 
arch_exit_mmap(struct mm_struct *mm) diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 933355e..6d6556c 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -10,8 +10,6 @@ */ #define __my_cpu_offset S390_lowcore.percpu_offset -#ifdef CONFIG_64BIT - /* * For 64 bit module code, the module may be more than 4G above the * per cpu area, use weak definitions to force the compiler to @@ -183,8 +181,6 @@ #define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double -#endif /* CONFIG_64BIT */ - #include <asm-generic/percpu.h> #endif /* __ARCH_S390_PERCPU__ */ diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 159a8ec..4cb19fe 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -9,8 +9,6 @@ #ifndef _ASM_S390_PERF_EVENT_H #define _ASM_S390_PERF_EVENT_H -#ifdef CONFIG_64BIT - #include <linux/perf_event.h> #include <linux/device.h> #include <asm/cpu_mf.h> @@ -92,5 +90,4 @@ struct sf_raw_sample { int perf_reserve_sampling(void); void perf_release_sampling(void); -#endif /* CONFIG_64BIT */ #endif /* _ASM_S390_PERF_EVENT_H */ diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 3009c2ba..51e7fb6 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -33,11 +33,7 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n) *s = val; n = (n / 256) - 1; asm volatile( -#ifdef CONFIG_64BIT " mvc 8(248,%0),0(%0)\n" -#else - " mvc 4(252,%0),0(%0)\n" -#endif "0: mvc 256(256,%0),0(%0)\n" " la %0,256(%0)\n" " brct %1,0b\n" @@ -50,24 +46,6 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry) clear_table(crst, entry, sizeof(unsigned long)*2048); } -#ifndef CONFIG_64BIT - -static inline unsigned long pgd_entry_type(struct mm_struct *mm) -{ - return _SEGMENT_ENTRY_EMPTY; -} - -#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); }) -#define pud_free(mm, x) do { } while (0) - -#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) -#define pmd_free(mm, x) do { } while (0) - -#define pgd_populate(mm, pgd, pud) BUG() -#define pud_populate(mm, pud, pmd) BUG() - -#else /* CONFIG_64BIT */ - static inline unsigned long pgd_entry_type(struct mm_struct *mm) { if (mm->context.asce_limit <= (1UL << 31)) @@ -119,8 +97,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pud_val(*pud) = _REGION3_ENTRY | __pa(pmd); } -#endif /* CONFIG_64BIT */ - static inline pgd_t *pgd_alloc(struct mm_struct *mm) { spin_lock_init(&mm->context.list_lock); diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index e08ec38..989cfae 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -66,15 +66,9 @@ extern unsigned long zero_page_mask; * table can map * PGDIR_SHIFT determines what a third-level page table entry can map */ -#ifndef CONFIG_64BIT -# define PMD_SHIFT 20 -# define PUD_SHIFT 20 -# define PGDIR_SHIFT 20 -#else /* CONFIG_64BIT */ -# define PMD_SHIFT 20 -# define PUD_SHIFT 31 -# define PGDIR_SHIFT 42 -#endif /* CONFIG_64BIT */ +#define PMD_SHIFT 20 +#define PUD_SHIFT 31 +#define PGDIR_SHIFT 42 #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) @@ -90,15 +84,8 @@ extern unsigned long zero_page_mask; * that leads to 1024 pte per pgd */ #define PTRS_PER_PTE 256 -#ifndef CONFIG_64BIT -#define __PAGETABLE_PUD_FOLDED 
-#define PTRS_PER_PMD 1 -#define __PAGETABLE_PMD_FOLDED -#define PTRS_PER_PUD 1 -#else /* CONFIG_64BIT */ #define PTRS_PER_PMD 2048 #define PTRS_PER_PUD 2048 -#endif /* CONFIG_64BIT */ #define PTRS_PER_PGD 2048 #define FIRST_USER_ADDRESS 0UL @@ -127,23 +114,19 @@ extern struct page *vmemmap; #define VMEM_MAX_PHYS ((unsigned long) vmemmap) -#ifdef CONFIG_64BIT extern unsigned long MODULES_VADDR; extern unsigned long MODULES_END; #define MODULES_VADDR MODULES_VADDR #define MODULES_END MODULES_END #define MODULES_LEN (1UL << 31) -#endif static inline int is_module_addr(void *addr) { -#ifdef CONFIG_64BIT BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); if (addr < (void *)MODULES_VADDR) return 0; if (addr > (void *)MODULES_END) return 0; -#endif return 1; } @@ -284,56 +267,6 @@ static inline int is_module_addr(void *addr) * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 */ -#ifndef CONFIG_64BIT - -/* Bits in the segment table address-space-control-element */ -#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */ -#define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */ -#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ -#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ -#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */ - -/* Bits in the segment table entry */ -#define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */ -#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ -#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */ -#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ -#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ -#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ - -#define _SEGMENT_ENTRY_DIRTY 0 /* No sw dirty bit for 31-bit */ -#define _SEGMENT_ENTRY_YOUNG 0 /* No sw young bit for 31-bit */ -#define _SEGMENT_ENTRY_READ 0 /* No sw read bit for 31-bit */ -#define _SEGMENT_ENTRY_WRITE 0 /* No sw write bit for 31-bit */ -#define _SEGMENT_ENTRY_LARGE 0 /* No large pages for 31-bit */ -#define _SEGMENT_ENTRY_BITS_LARGE 0 -#define _SEGMENT_ENTRY_ORIGIN_LARGE 0 - -#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) -#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) - -/* - * Segment table entry encoding (I = invalid, R = read-only bit): - * ..R...I..... - * prot-none ..1...1..... - * read-only ..1...0..... - * read-write ..0...0..... - * empty ..0...1..... 
- */ - -/* Page status table bits for virtualization */ -#define PGSTE_ACC_BITS 0xf0000000UL -#define PGSTE_FP_BIT 0x08000000UL -#define PGSTE_PCL_BIT 0x00800000UL -#define PGSTE_HR_BIT 0x00400000UL -#define PGSTE_HC_BIT 0x00200000UL -#define PGSTE_GR_BIT 0x00040000UL -#define PGSTE_GC_BIT 0x00020000UL -#define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */ -#define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */ - -#else /* CONFIG_64BIT */ - /* Bits in the segment/region table address-space-control-element */ #define _ASCE_ORIGIN ~0xfffUL/* segment table origin */ #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ @@ -417,8 +350,6 @@ static inline int is_module_addr(void *addr) #define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ #define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ -#endif /* CONFIG_64BIT */ - /* Guest Page State used for virtualization */ #define _PGSTE_GPS_ZERO 0x0000000080000000UL #define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL @@ -509,19 +440,6 @@ static inline int mm_use_skey(struct mm_struct *mm) /* * pgd/pmd/pte query functions */ -#ifndef CONFIG_64BIT - -static inline int pgd_present(pgd_t pgd) { return 1; } -static inline int pgd_none(pgd_t pgd) { return 0; } -static inline int pgd_bad(pgd_t pgd) { return 0; } - -static inline int pud_present(pud_t pud) { return 1; } -static inline int pud_none(pud_t pud) { return 0; } -static inline int pud_large(pud_t pud) { return 0; } -static inline int pud_bad(pud_t pud) { return 0; } - -#else /* CONFIG_64BIT */ - static inline int pgd_present(pgd_t pgd) { if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) @@ -583,8 +501,6 @@ static inline int pud_bad(pud_t pud) return (pud_val(pud) & mask) != 0; } -#endif /* CONFIG_64BIT */ - static inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID; @@ -916,18 +832,14 @@ static inline int pte_unused(pte_t pte) static inline void pgd_clear(pgd_t *pgd) { -#ifdef CONFIG_64BIT if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; -#endif } static inline void pud_clear(pud_t *pud) { -#ifdef CONFIG_64BIT if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) pud_val(*pud) = _REGION3_ENTRY_EMPTY; -#endif } static inline void pmd_clear(pmd_t *pmdp) @@ -1026,10 +938,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep) { unsigned long pto = (unsigned long) ptep; -#ifndef CONFIG_64BIT - /* pto in ESA mode must point to the start of the segment table */ - pto &= 0x7ffffc00; -#endif /* Invalidation + global TLB flush for the pte */ asm volatile( " ipte %2,%3" @@ -1040,10 +948,6 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep) { unsigned long pto = (unsigned long) ptep; -#ifndef CONFIG_64BIT - /* pto in ESA mode must point to the start of the segment table */ - pto &= 0x7ffffc00; -#endif /* Invalidation + local TLB flush for the pte */ asm volatile( " .insn rrf,0xb2210000,%2,%3,0,1" @@ -1054,10 +958,6 @@ static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep) { unsigned long pto = (unsigned long) ptep; -#ifndef CONFIG_64BIT - /* pto in ESA mode must point to the start of the segment table */ - pto &= 0x7ffffc00; -#endif /* Invalidate a range of ptes + global TLB flush of the ptes */ do { asm volatile( @@ -1376,17 +1276,6 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) #define 
pgd_offset_k(address) pgd_offset(&init_mm, address) -#ifndef CONFIG_64BIT - -#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) -#define pud_deref(pmd) ({ BUG(); 0UL; }) -#define pgd_deref(pmd) ({ BUG(); 0UL; }) - -#define pud_offset(pgd, address) ((pud_t *) pgd) -#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address)) - -#else /* CONFIG_64BIT */ - #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) @@ -1407,8 +1296,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) return pmd + pmd_index(address); } -#endif /* CONFIG_64BIT */ - #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) #define pte_page(x) pfn_to_page(pte_pfn(x)) @@ -1729,11 +1616,9 @@ static inline int has_transparent_hugepage(void) * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 */ -#ifndef CONFIG_64BIT -#define __SWP_OFFSET_MASK (~0UL >> 12) -#else + #define __SWP_OFFSET_MASK (~0UL >> 11) -#endif + static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) { pte_t pte; diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index e7cbbdc..dedb621 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -19,7 +19,6 @@ #define _CIF_ASCE (1<<CIF_ASCE) #define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY) - #ifndef __ASSEMBLY__ #include <linux/linkage.h> @@ -66,13 +65,6 @@ extern void execve_tail(void); /* * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. */ -#ifndef CONFIG_64BIT - -#define TASK_SIZE (1UL << 31) -#define TASK_MAX_SIZE (1UL << 31) -#define TASK_UNMAPPED_BASE (1UL << 30) - -#else /* CONFIG_64BIT */ #define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ @@ -80,15 +72,8 @@ extern void execve_tail(void); #define TASK_SIZE TASK_SIZE_OF(current) #define TASK_MAX_SIZE (1UL << 53) -#endif /* CONFIG_64BIT */ - -#ifndef CONFIG_64BIT -#define STACK_TOP (1UL << 31) -#define STACK_TOP_MAX (1UL << 31) -#else /* CONFIG_64BIT */ #define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42)) #define STACK_TOP_MAX (1UL << 42) -#endif /* CONFIG_64BIT */ #define HAVE_ARCH_PICK_MMAP_LAYOUT @@ -115,10 +100,8 @@ struct thread_struct { /* cpu runtime instrumentation */ struct runtime_instr_cb *ri_cb; int ri_signum; -#ifdef CONFIG_64BIT unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ __vector128 *vxrs; /* Vector register save area */ -#endif }; /* Flag to disable transactions. */ @@ -181,11 +164,7 @@ struct task_struct; struct mm_struct; struct seq_file; -#ifdef CONFIG_64BIT -extern void show_cacheinfo(struct seq_file *m); -#else -static inline void show_cacheinfo(struct seq_file *m) { } -#endif +void show_cacheinfo(struct seq_file *m); /* Free all resources held by a thread. 
*/ extern void release_thread(struct task_struct *); @@ -229,11 +208,7 @@ static inline void psw_set_key(unsigned int key) */ static inline void __load_psw(psw_t psw) { -#ifndef CONFIG_64BIT - asm volatile("lpsw %0" : : "Q" (psw) : "cc"); -#else asm volatile("lpswe %0" : : "Q" (psw) : "cc"); -#endif } /* @@ -247,22 +222,12 @@ static inline void __load_psw_mask (unsigned long mask) psw.mask = mask; -#ifndef CONFIG_64BIT - asm volatile( - " basr %0,0\n" - "0: ahi %0,1f-0b\n" - " st %0,%O1+4(%R1)\n" - " lpsw %1\n" - "1:" - : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); -#else /* CONFIG_64BIT */ asm volatile( " larl %0,1f\n" " stg %0,%O1+8(%R1)\n" " lpswe %1\n" "1:" : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); -#endif /* CONFIG_64BIT */ } /* @@ -270,20 +235,12 @@ static inline void __load_psw_mask (unsigned long mask) */ static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) { -#ifndef CONFIG_64BIT - if (psw.addr & PSW_ADDR_AMODE) - /* 31 bit mode */ - return (psw.addr - ilc) | PSW_ADDR_AMODE; - /* 24 bit mode */ - return (psw.addr - ilc) & ((1UL << 24) - 1); -#else unsigned long mask; mask = (psw.mask & PSW_MASK_EA) ? -1UL : (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 : (1UL << 24) - 1; return (psw.addr - ilc) & mask; -#endif } /* @@ -305,26 +262,6 @@ static inline void __noreturn disabled_wait(unsigned long code) * Store status and then load disabled wait psw, * the processor is dead afterwards */ -#ifndef CONFIG_64BIT - asm volatile( - " stctl 0,0,0(%2)\n" - " ni 0(%2),0xef\n" /* switch off protection */ - " lctl 0,0,0(%2)\n" - " stpt 0xd8\n" /* store timer */ - " stckc 0xe0\n" /* store clock comparator */ - " stpx 0x108\n" /* store prefix register */ - " stam 0,15,0x120\n" /* store access registers */ - " std 0,0x160\n" /* store f0 */ - " std 2,0x168\n" /* store f2 */ - " std 4,0x170\n" /* store f4 */ - " std 6,0x178\n" /* store f6 */ - " stm 0,15,0x180\n" /* store general registers */ - " stctl 0,15,0x1c0\n" /* store control registers */ - " oi 0x1c0,0x10\n" /* fake protection bit */ - " lpsw 0(%1)" - : "=m" (ctl_buf) - : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc"); -#else /* CONFIG_64BIT */ asm volatile( " stctg 0,0,0(%2)\n" " ni 4(%2),0xef\n" /* switch off protection */ @@ -357,7 +294,6 @@ static inline void __noreturn disabled_wait(unsigned long code) " lpswe 0(%1)" : "=m" (ctl_buf) : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); -#endif /* CONFIG_64BIT */ while (1); } diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index be317fe..6feda25 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -40,12 +40,8 @@ struct psw_bits { unsigned long long ri : 1; /* Runtime Instrumentation */ unsigned long long : 6; unsigned long long eaba : 2; /* Addressing Mode */ -#ifdef CONFIG_64BIT unsigned long long : 31; unsigned long long ia : 64;/* Instruction Address */ -#else - unsigned long long ia : 31;/* Instruction Address */ -#endif }; enum { diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 06f3034..998b61c 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -211,11 +211,6 @@ struct qdio_buffer_element { u8 scount; u8 sflags; u32 length; -#ifdef CONFIG_32BIT - /* private: */ - void *res2; - /* public: */ -#endif void *addr; } __attribute__ ((packed, aligned(16))); @@ -232,11 +227,6 @@ struct qdio_buffer { * @sbal: absolute SBAL address */ struct sl_element { -#ifdef CONFIG_32BIT - /* private: */ - unsigned long reserved; 
- /* public: */ -#endif unsigned long sbal; } __attribute__ ((packed)); diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h index 830da73..402ad6d 100644 --- a/arch/s390/include/asm/runtime_instr.h +++ b/arch/s390/include/asm/runtime_instr.h @@ -72,27 +72,19 @@ static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb) static inline void save_ri_cb(struct runtime_instr_cb *cb_prev) { -#ifdef CONFIG_64BIT if (cb_prev) store_runtime_instr_cb(cb_prev); -#endif } static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, struct runtime_instr_cb *cb_prev) { -#ifdef CONFIG_64BIT if (cb_next) load_runtime_instr_cb(cb_next); else if (cb_prev) load_runtime_instr_cb(&runtime_instr_empty_cb); -#endif } -#ifdef CONFIG_64BIT -extern void exit_thread_runtime_instr(void); -#else -static inline void exit_thread_runtime_instr(void) { } -#endif +void exit_thread_runtime_instr(void); #endif /* _RUNTIME_INSTR_H */ diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h index 487f9b6..4b43ee7 100644 --- a/arch/s390/include/asm/rwsem.h +++ b/arch/s390/include/asm/rwsem.h @@ -39,17 +39,10 @@ #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" #endif -#ifndef CONFIG_64BIT -#define RWSEM_UNLOCKED_VALUE 0x00000000 -#define RWSEM_ACTIVE_BIAS 0x00000001 -#define RWSEM_ACTIVE_MASK 0x0000ffff -#define RWSEM_WAITING_BIAS (-0x00010000) -#else /* CONFIG_64BIT */ #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L #define RWSEM_ACTIVE_BIAS 0x0000000000000001L #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL #define RWSEM_WAITING_BIAS (-0x0000000100000000L) -#endif /* CONFIG_64BIT */ #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) @@ -61,19 +54,11 @@ static inline void __down_read(struct rw_semaphore *sem) signed long old, new; asm volatile( -#ifndef CONFIG_64BIT - " l %0,%2\n" - "0: lr %1,%0\n" - " ahi %1,%4\n" - " cs %0,%1,%2\n" - " jl 0b" -#else /* CONFIG_64BIT */ " lg %0,%2\n" "0: lgr %1,%0\n" " aghi %1,%4\n" " csg %0,%1,%2\n" " jl 0b" -#endif /* CONFIG_64BIT */ : "=&d" (old), "=&d" (new), "=Q" (sem->count) : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); @@ -89,15 +74,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) signed long old, new; asm volatile( -#ifndef CONFIG_64BIT - " l %0,%2\n" - "0: ltr %1,%0\n" - " jm 1f\n" - " ahi %1,%4\n" - " cs %0,%1,%2\n" - " jl 0b\n" - "1:" -#else /* CONFIG_64BIT */ " lg %0,%2\n" "0: ltgr %1,%0\n" " jm 1f\n" @@ -105,7 +81,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) " csg %0,%1,%2\n" " jl 0b\n" "1:" -#endif /* CONFIG_64BIT */ : "=&d" (old), "=&d" (new), "=Q" (sem->count) : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); @@ -121,19 +96,11 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) tmp = RWSEM_ACTIVE_WRITE_BIAS; asm volatile( -#ifndef CONFIG_64BIT - " l %0,%2\n" - "0: lr %1,%0\n" - " a %1,%4\n" - " cs %0,%1,%2\n" - " jl 0b" -#else /* CONFIG_64BIT */ " lg %0,%2\n" "0: lgr %1,%0\n" " ag %1,%4\n" " csg %0,%1,%2\n" " jl 0b" -#endif /* CONFIG_64BIT */ : "=&d" (old), "=&d" (new), "=Q" (sem->count) : "Q" (sem->count), "m" (tmp) : "cc", "memory"); @@ -154,19 +121,11 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) signed long old; asm volatile( -#ifndef CONFIG_64BIT - " l %0,%1\n" - "0: ltr %0,%0\n" - " jnz 1f\n" - " cs %0,%3,%1\n" - " jl 0b\n" -#else /* CONFIG_64BIT */ " lg %0,%1\n" "0: 
ltgr %0,%0\n" " jnz 1f\n" " csg %0,%3,%1\n" " jl 0b\n" -#endif /* CONFIG_64BIT */ "1:" : "=&d" (old), "=Q" (sem->count) : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS) @@ -182,19 +141,11 @@ static inline void __up_read(struct rw_semaphore *sem) signed long old, new; asm volatile( -#ifndef CONFIG_64BIT - " l %0,%2\n" - "0: lr %1,%0\n" - " ahi %1,%4\n" - " cs %0,%1,%2\n" - " jl 0b" -#else /* CONFIG_64BIT */ " lg %0,%2\n" "0: lgr %1,%0\n" " aghi %1,%4\n" " csg %0,%1,%2\n" " jl 0b" -#endif /* CONFIG_64BIT */ : "=&d" (old), "=&d" (new), "=Q" (sem->count) : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); @@ -212,19 +163,11 @@ static inline void __up_write(struct rw_semaphore *sem) tmp = -RWSEM_ACTIVE_WRITE_BIAS; asm volatile( -#ifndef CONFIG_64BIT - " l %0,%2\n" - "0: lr %1,%0\n" - " a %1,%4\n" - " cs %0,%1,%2\n" - " jl 0b" -#else /* CONFIG_64BIT */ " lg %0,%2\n" "0: lgr %1,%0\n" " ag %1,%4\n" " csg %0,%1,%2\n" " jl 0b" -#endif /* CONFIG_64BIT */ : "=&d" (old), "=&d" (new), "=Q" (sem->count) : "Q" (sem->count), "m" (tmp) : "cc", "memory"); @@ -242,19 +185,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem) tmp = -RWSEM_WAITING_BIAS; asm volatile( -#ifndef CONFIG_64BIT - " l %0,%2\n" - "0: lr %1,%0\n" - " a %1,%4\n" - " cs %0,%1,%2\n" - " jl 0b" -#else /* CONFIG_64BIT */ " lg %0,%2\n" "0: lgr %1,%0\n" " ag %1,%4\n" " csg %0,%1,%2\n" " jl 0b" -#endif /* CONFIG_64BIT */ : "=&d" (old), "=&d" (new), "=Q" (sem->count) : "Q" (sem->count), "m" (tmp) : "cc", "memory"); @@ -270,19 +205,11 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) signed long old, new; asm volatile( -#ifndef CONFIG_64BIT - " l %0,%2\n" - "0: lr %1,%0\n" - " ar %1,%4\n" - " cs %0,%1,%2\n" - " jl 0b" -#else /* CONFIG_64BIT */ " lg %0,%2\n" "0: lgr %1,%0\n" " agr %1,%4\n" " csg %0,%1,%2\n" " jl 0b" -#endif /* CONFIG_64BIT */ : "=&d" (old), "=&d" (new), "=Q" (sem->count) : "Q" (sem->count), "d" (delta) : "cc", "memory"); @@ -296,19 +223,11 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) signed long old, new; asm volatile( -#ifndef CONFIG_64BIT - " l %0,%2\n" - "0: lr %1,%0\n" - " ar %1,%4\n" - " cs %0,%1,%2\n" - " jl 0b" -#else /* CONFIG_64BIT */ " lg %0,%2\n" "0: lgr %1,%0\n" " agr %1,%4\n" " csg %0,%1,%2\n" " jl 0b" -#endif /* CONFIG_64BIT */ : "=&d" (old), "=&d" (new), "=Q" (sem->count) : "Q" (sem->count), "d" (delta) : "cc", "memory"); diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index b8d1e54..b8ffc1b 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -15,19 +15,11 @@ #include <asm/lowcore.h> #include <asm/types.h> -#ifndef CONFIG_64BIT -#define IPL_DEVICE (*(unsigned long *) (0x10404)) -#define INITRD_START (*(unsigned long *) (0x1040C)) -#define INITRD_SIZE (*(unsigned long *) (0x10414)) -#define OLDMEM_BASE (*(unsigned long *) (0x1041C)) -#define OLDMEM_SIZE (*(unsigned long *) (0x10424)) -#else /* CONFIG_64BIT */ #define IPL_DEVICE (*(unsigned long *) (0x10400)) #define INITRD_START (*(unsigned long *) (0x10408)) #define INITRD_SIZE (*(unsigned long *) (0x10410)) #define OLDMEM_BASE (*(unsigned long *) (0x10418)) #define OLDMEM_SIZE (*(unsigned long *) (0x10420)) -#endif /* CONFIG_64BIT */ #define COMMAND_LINE ((char *) (0x10480)) extern int memory_end_set; @@ -68,26 +60,8 @@ extern void detect_memory_memblock(void); #define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1 #define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1 -#ifndef CONFIG_64BIT -#define MACHINE_HAS_IEEE 
(S390_lowcore.machine_flags & MACHINE_FLAG_IEEE) -#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP) -#define MACHINE_HAS_IDTE (0) -#define MACHINE_HAS_DIAG44 (1) -#define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG) -#define MACHINE_HAS_EDAT1 (0) -#define MACHINE_HAS_EDAT2 (0) -#define MACHINE_HAS_LPP (0) -#define MACHINE_HAS_TOPOLOGY (0) -#define MACHINE_HAS_TE (0) -#define MACHINE_HAS_TLB_LC (0) -#define MACHINE_HAS_VX (0) -#define MACHINE_HAS_CAD (0) -#else /* CONFIG_64BIT */ -#define MACHINE_HAS_IEEE (1) -#define MACHINE_HAS_CSP (1) #define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) #define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44) -#define MACHINE_HAS_MVPG (1) #define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) #define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) #define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) @@ -96,7 +70,6 @@ extern void detect_memory_memblock(void); #define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) #define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX) #define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD) -#endif /* CONFIG_64BIT */ /* * Console mode. Override with conmode= @@ -135,19 +108,11 @@ extern void (*_machine_power_off)(void); #else /* __ASSEMBLY__ */ -#ifndef CONFIG_64BIT -#define IPL_DEVICE 0x10404 -#define INITRD_START 0x1040C -#define INITRD_SIZE 0x10414 -#define OLDMEM_BASE 0x1041C -#define OLDMEM_SIZE 0x10424 -#else /* CONFIG_64BIT */ #define IPL_DEVICE 0x10400 #define INITRD_START 0x10408 #define INITRD_SIZE 0x10410 #define OLDMEM_BASE 0x10418 #define OLDMEM_SIZE 0x10420 -#endif /* CONFIG_64BIT */ #define COMMAND_LINE 0x10480 #endif /* __ASSEMBLY__ */ diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h index 5959bfb..c8b7cf9 100644 --- a/arch/s390/include/asm/sfp-util.h +++ b/arch/s390/include/asm/sfp-util.h @@ -51,7 +51,6 @@ wl = __wl; \ }) -#ifdef CONFIG_64BIT #define udiv_qrnnd(q, r, n1, n0, d) \ do { unsigned long __n; \ unsigned int __r, __d; \ @@ -60,15 +59,6 @@ (q) = __n / __d; \ (r) = __n % __d; \ } while (0) -#else -#define udiv_qrnnd(q, r, n1, n0, d) \ - do { unsigned int __r; \ - (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ - (r) = __r; \ - } while (0) -extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int, - unsigned int , unsigned int); -#endif #define UDIV_NEEDS_NORMALIZATION 0 diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h index a60d085..487428b 100644 --- a/arch/s390/include/asm/sparsemem.h +++ b/arch/s390/include/asm/sparsemem.h @@ -1,16 +1,7 @@ #ifndef _ASM_S390_SPARSEMEM_H #define _ASM_S390_SPARSEMEM_H -#ifdef CONFIG_64BIT - #define SECTION_SIZE_BITS 28 #define MAX_PHYSMEM_BITS 46 -#else - -#define SECTION_SIZE_BITS 25 -#define MAX_PHYSMEM_BITS 31 - -#endif /* CONFIG_64BIT */ - #endif /* _ASM_S390_SPARSEMEM_H */ diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index 2542a7e..d62e7a6 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -18,9 +18,6 @@ static inline int test_fp_ctl(u32 fpc) u32 orig_fpc; int rc; - if (!MACHINE_HAS_IEEE) - return 0; - asm volatile( " efpc %1\n" " sfpc %2\n" @@ -35,9 +32,6 @@ static inline int test_fp_ctl(u32 fpc) static inline void save_fp_ctl(u32 *fpc) { - if (!MACHINE_HAS_IEEE) - return; - asm volatile( " stfpc %0\n" : "+Q" (*fpc)); 
@@ -47,9 +41,6 @@ static inline int restore_fp_ctl(u32 *fpc) { int rc; - if (!MACHINE_HAS_IEEE) - return 0; - asm volatile( " lfpc %1\n" "0: la %0,0\n" @@ -65,8 +56,6 @@ static inline void save_fp_regs(freg_t *fprs) asm volatile("std 2,%0" : "=Q" (fprs[2])); asm volatile("std 4,%0" : "=Q" (fprs[4])); asm volatile("std 6,%0" : "=Q" (fprs[6])); - if (!MACHINE_HAS_IEEE) - return; asm volatile("std 1,%0" : "=Q" (fprs[1])); asm volatile("std 3,%0" : "=Q" (fprs[3])); asm volatile("std 5,%0" : "=Q" (fprs[5])); @@ -87,8 +76,6 @@ static inline void restore_fp_regs(freg_t *fprs) asm volatile("ld 2,%0" : : "Q" (fprs[2])); asm volatile("ld 4,%0" : : "Q" (fprs[4])); asm volatile("ld 6,%0" : : "Q" (fprs[6])); - if (!MACHINE_HAS_IEEE) - return; asm volatile("ld 1,%0" : : "Q" (fprs[1])); asm volatile("ld 3,%0" : : "Q" (fprs[3])); asm volatile("ld 5,%0" : : "Q" (fprs[5])); @@ -140,22 +127,18 @@ static inline void restore_vx_regs(__vector128 *vxrs) static inline void save_fp_vx_regs(struct task_struct *task) { -#ifdef CONFIG_64BIT if (task->thread.vxrs) save_vx_regs(task->thread.vxrs); else -#endif - save_fp_regs(task->thread.fp_regs.fprs); + save_fp_regs(task->thread.fp_regs.fprs); } static inline void restore_fp_vx_regs(struct task_struct *task) { -#ifdef CONFIG_64BIT if (task->thread.vxrs) restore_vx_regs(task->thread.vxrs); else -#endif - restore_fp_regs(task->thread.fp_regs.fprs); + restore_fp_regs(task->thread.fp_regs.fprs); } static inline void save_access_regs(unsigned int *acrs) diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 5bc1259..6ba0bf9 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -95,6 +95,6 @@ static inline int syscall_get_arch(void) if (test_tsk_thread_flag(current, TIF_31BIT)) return AUDIT_ARCH_S390; #endif - return sizeof(long) == 8 ? 
AUDIT_ARCH_S390X : AUDIT_ARCH_S390; + return AUDIT_ARCH_S390X; } #endif /* _ASM_SYSCALL_H */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index ef1df71..d532098 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -10,13 +10,8 @@ /* * Size of kernel stack for each process */ -#ifndef CONFIG_64BIT -#define THREAD_ORDER 1 -#define ASYNC_ORDER 1 -#else /* CONFIG_64BIT */ #define THREAD_ORDER 2 #define ASYNC_ORDER 2 -#endif /* CONFIG_64BIT */ #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) #define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER) @@ -66,6 +61,8 @@ static inline struct thread_info *current_thread_info(void) return (struct thread_info *) S390_lowcore.thread_info; } +void arch_release_task_struct(struct task_struct *tsk); + #define THREAD_SIZE_ORDER THREAD_ORDER #endif @@ -99,10 +96,6 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_31BIT (1<<TIF_31BIT) #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) -#ifdef CONFIG_64BIT #define is_32bit_task() (test_thread_flag(TIF_31BIT)) -#else -#define is_32bit_task() (1) -#endif #endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 06d8741..7a92e69 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -118,12 +118,10 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, unsigned long address) { -#ifdef CONFIG_64BIT if (tlb->mm->context.asce_limit <= (1UL << 31)) return; pgtable_pmd_page_dtor(virt_to_page(pmd)); tlb_remove_table(tlb, pmd); -#endif } /* @@ -136,11 +134,9 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, unsigned long address) { -#ifdef CONFIG_64BIT if (tlb->mm->context.asce_limit <= (1UL << 42)) return; tlb_remove_table(tlb, pud); -#endif } #define tlb_start_vma(tlb, vma) do { } while (0) diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 16c9c88..ca148f7 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -49,13 +49,6 @@ static inline void __tlb_flush_global(void) register unsigned long reg4 asm("4"); long dummy; -#ifndef CONFIG_64BIT - if (!MACHINE_HAS_CSP) { - smp_ptlb_all(); - return; - } -#endif /* CONFIG_64BIT */ - dummy = 0; reg2 = reg3 = 0; reg4 = ((unsigned long) &dummy) + 1; diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h index dccef3c..6740f4f 100644 --- a/arch/s390/include/asm/types.h +++ b/arch/s390/include/asm/types.h @@ -8,21 +8,4 @@ #include <uapi/asm/types.h> -/* - * These aren't exported outside the kernel to avoid name space clashes - */ - -#ifndef __ASSEMBLY__ - -#ifndef CONFIG_64BIT -typedef union { - unsigned long long pair; - struct { - unsigned long even; - unsigned long odd; - } subreg; -} register_pair; - -#endif /* ! 
CONFIG_64BIT */ -#endif /* __ASSEMBLY__ */ #endif /* _S390_TYPES_H */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index cd4c68e..d64a7a6 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -372,5 +372,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo } int copy_to_user_real(void __user *dest, void *src, unsigned long count); +void s390_kernel_write(void *dst, const void *src, size_t size); #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 6518863..91f56b1 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -9,11 +9,7 @@ #include <uapi/asm/unistd.h> -#ifndef CONFIG_64BIT -#define __IGNORE_select -#else #define __IGNORE_time -#endif /* Ignore NUMA system calls. Not wired up on s390. */ #define __IGNORE_mbind @@ -43,10 +39,6 @@ #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK -# ifndef CONFIG_64BIT -# define __ARCH_WANT_STAT64 -# define __ARCH_WANT_SYS_TIME -# endif # ifdef CONFIG_COMPAT # define __ARCH_WANT_COMPAT_SYS_TIME # endif diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index a62526d..787acd4 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -42,10 +42,8 @@ struct vdso_per_cpu_data { extern struct vdso_data *vdso_data; -#ifdef CONFIG_64BIT int vdso_alloc_per_cpu(struct _lowcore *lowcore); void vdso_free_per_cpu(struct _lowcore *lowcore); -#endif #endif /* __ASSEMBLY__ */ #endif /* __S390_VDSO_H__ */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 31fab26..ffb8761 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -26,25 +26,21 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls # CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' -CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w +CFLAGS_sysinfo.o += -w obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o -obj-y += dumpstack.o +obj-y += runtime_instr.o cache.o dumpstack.o +obj-y += entry.o reipl.o relocate_kernel.o -obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) -obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) -obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) - -extra-y += head.o vmlinux.lds -extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) +extra-y += head.o head64.o vmlinux.lds obj-$(CONFIG_MODULES) += s390_ksyms.o module.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SCHED_BOOK) += topology.o -obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o +obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o obj-$(CONFIG_AUDIT) += audit.o compat-obj-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o @@ -56,13 +52,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_UPROBES) += uprobes.o -ifdef CONFIG_64BIT -obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ - perf_cpum_cf_events.o -obj-y += runtime_instr.o cache.o -endif +obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o +obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o # vdso -obj-$(CONFIG_64BIT) += vdso64/ -obj-$(CONFIG_32BIT) += vdso32/ +obj-y += 
vdso64/ obj-$(CONFIG_COMPAT) += vdso32/ diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 8dc4db1..f35058d 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -166,9 +166,6 @@ int main(void) DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); -#ifdef CONFIG_32BIT - DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); -#else /* CONFIG_32BIT */ DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr)); @@ -184,6 +181,5 @@ int main(void) DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); -#endif /* CONFIG_32BIT */ return 0; } diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index f74a53d..daed3fd 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S @@ -11,8 +11,6 @@ #include <asm/ptrace.h> #include <asm/sigp.h> -#ifdef CONFIG_64BIT - ENTRY(s390_base_mcck_handler) basr %r13,0 0: lg %r15,__LC_PANIC_STACK # load panic stack @@ -131,77 +129,3 @@ ENTRY(diag308_reset) .Lfpctl: .long 0 .previous - -#else /* CONFIG_64BIT */ - -ENTRY(s390_base_mcck_handler) - basr %r13,0 -0: l %r15,__LC_PANIC_STACK # load panic stack - ahi %r15,-STACK_FRAME_OVERHEAD - l %r1,2f-0b(%r13) - l %r1,0(%r1) - ltr %r1,%r1 - jz 1f - basr %r14,%r1 -1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA - lpsw __LC_MCK_OLD_PSW - -2: .long s390_base_mcck_handler_fn - - .section .bss - .align 4 - .globl s390_base_mcck_handler_fn -s390_base_mcck_handler_fn: - .long 0 - .previous - -ENTRY(s390_base_ext_handler) - stm %r0,%r15,__LC_SAVE_AREA_ASYNC - basr %r13,0 -0: ahi %r15,-STACK_FRAME_OVERHEAD - l %r1,2f-0b(%r13) - l %r1,0(%r1) - ltr %r1,%r1 - jz 1f - basr %r14,%r1 -1: lm %r0,%r15,__LC_SAVE_AREA_ASYNC - ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit - lpsw __LC_EXT_OLD_PSW - -2: .long s390_base_ext_handler_fn - - .section .bss - .align 4 - .globl s390_base_ext_handler_fn -s390_base_ext_handler_fn: - .long 0 - .previous - -ENTRY(s390_base_pgm_handler) - stm %r0,%r15,__LC_SAVE_AREA_SYNC - basr %r13,0 -0: ahi %r15,-STACK_FRAME_OVERHEAD - l %r1,2f-0b(%r13) - l %r1,0(%r1) - ltr %r1,%r1 - jz 1f - basr %r14,%r1 - lm %r0,%r15,__LC_SAVE_AREA_SYNC - lpsw __LC_PGM_OLD_PSW - -1: lpsw disabled_wait_psw-0b(%r13) - -2: .long s390_base_pgm_handler_fn - -disabled_wait_psw: - .align 8 - .long 0x000a0000,0x00000000 + s390_base_pgm_handler - - .section .bss - .align 4 - .globl s390_base_pgm_handler_fn -s390_base_pgm_handler_fn: - .long 0 - .previous - -#endif /* CONFIG_64BIT */ diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c index 0969d113..bff5e3b 100644 --- a/arch/s390/kernel/cache.c +++ b/arch/s390/kernel/cache.c @@ -70,6 +70,8 @@ void show_cacheinfo(struct seq_file *m) struct cacheinfo *cache; int idx; + if (!test_facility(34)) + return; get_online_cpus(); this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask)); for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { @@ -159,6 +161,8 @@ int populate_cache_leaves(unsigned int cpu) union cache_topology ct; enum cache_type ctype; + if (!test_facility(34)) + return -EOPNOTSUPP; ct.raw = 
ecag(EXTRACT_TOPOLOGY, 0, 0); for (idx = 0, level = 0; level < this_cpu_ci->num_levels && idx < this_cpu_ci->num_leaves; idx++, level++) { diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index d7b0c4d..199ec92 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c @@ -27,13 +27,9 @@ static int diag8_noresponse(int cmdlen) register unsigned long reg3 asm ("3") = cmdlen; asm volatile( -#ifndef CONFIG_64BIT - " diag %1,%0,0x8\n" -#else /* CONFIG_64BIT */ " sam31\n" " diag %1,%0,0x8\n" " sam64\n" -#endif /* CONFIG_64BIT */ : "+d" (reg3) : "d" (reg2) : "cc"); return reg3; } @@ -46,17 +42,11 @@ static int diag8_response(int cmdlen, char *response, int *rlen) register unsigned long reg5 asm ("5") = *rlen; asm volatile( -#ifndef CONFIG_64BIT - " diag %2,%0,0x8\n" - " brc 8,1f\n" - " ar %1,%4\n" -#else /* CONFIG_64BIT */ " sam31\n" " diag %2,%0,0x8\n" " sam64\n" " brc 8,1f\n" " agr %1,%4\n" -#endif /* CONFIG_64BIT */ "1:\n" : "+d" (reg4), "+d" (reg5) : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc"); diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index 8237fc0..2f69243 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c @@ -18,13 +18,9 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) int rc = 0; asm volatile( -#ifdef CONFIG_64BIT " sam31\n" " diag %2,2,0x14\n" " sam64\n" -#else - " diag %2,2,0x14\n" -#endif " ipm %0\n" " srl %0,28\n" : "=d" (rc), "+d" (_ry2) @@ -52,7 +48,6 @@ int diag210(struct diag210 *addr) spin_lock_irqsave(&diag210_lock, flags); diag210_tmp = *addr; -#ifdef CONFIG_64BIT asm volatile( " lhi %0,-1\n" " sam31\n" @@ -62,16 +57,6 @@ int diag210(struct diag210 *addr) "1: sam64\n" EX_TABLE(0b, 1b) : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); -#else - asm volatile( - " lhi %0,-1\n" - " diag %1,0,0x210\n" - "0: ipm %0\n" - " srl %0,28\n" - "1:\n" - EX_TABLE(0b, 1b) - : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); -#endif *addr = diag210_tmp; spin_unlock_irqrestore(&diag210_lock, flags); diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 5334303..8140d10 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -32,12 +32,6 @@ #include <asm/debug.h> #include <asm/irq.h> -#ifndef CONFIG_64BIT -#define ONELONG "%08lx: " -#else /* CONFIG_64BIT */ -#define ONELONG "%016lx: " -#endif /* CONFIG_64BIT */ - enum { UNUSED, /* Indicates the end of the operand list */ R_8, /* GPR starting at position 8 */ @@ -536,12 +530,10 @@ static char *long_insn_name[] = { }; static struct s390_insn opcode[] = { -#ifdef CONFIG_64BIT { "bprp", 0xc5, INSTR_MII_UPI }, { "bpp", 0xc7, INSTR_SMI_U0RDP }, { "trtr", 0xd0, INSTR_SS_L0RDRD }, { "lmd", 0xef, INSTR_SS_RRRDRD3 }, -#endif { "spm", 0x04, INSTR_RR_R0 }, { "balr", 0x05, INSTR_RR_RR }, { "bctr", 0x06, INSTR_RR_RR }, @@ -725,11 +717,9 @@ static struct s390_insn opcode[] = { }; static struct s390_insn opcode_01[] = { -#ifdef CONFIG_64BIT { "ptff", 0x04, INSTR_E }, { "pfpo", 0x0a, INSTR_E }, { "sam64", 0x0e, INSTR_E }, -#endif { "pr", 0x01, INSTR_E }, { "upt", 0x02, INSTR_E }, { "sckpf", 0x07, INSTR_E }, @@ -741,7 +731,6 @@ static struct s390_insn opcode_01[] = { }; static struct s390_insn opcode_a5[] = { -#ifdef CONFIG_64BIT { "iihh", 0x00, INSTR_RI_RU }, { "iihl", 0x01, INSTR_RI_RU }, { "iilh", 0x02, INSTR_RI_RU }, @@ -758,12 +747,10 @@ static struct s390_insn opcode_a5[] = { { "llihl", 0x0d, INSTR_RI_RU }, { "llilh", 0x0e, INSTR_RI_RU }, { "llill", 0x0f, INSTR_RI_RU }, -#endif { "", 0, INSTR_INVALID } }; static struct s390_insn 
opcode_a7[] = { -#ifdef CONFIG_64BIT { "tmhh", 0x02, INSTR_RI_RU }, { "tmhl", 0x03, INSTR_RI_RU }, { "brctg", 0x07, INSTR_RI_RP }, @@ -771,7 +758,6 @@ static struct s390_insn opcode_a7[] = { { "aghi", 0x0b, INSTR_RI_RI }, { "mghi", 0x0d, INSTR_RI_RI }, { "cghi", 0x0f, INSTR_RI_RI }, -#endif { "tmlh", 0x00, INSTR_RI_RU }, { "tmll", 0x01, INSTR_RI_RU }, { "brc", 0x04, INSTR_RI_UP }, @@ -785,18 +771,15 @@ static struct s390_insn opcode_a7[] = { }; static struct s390_insn opcode_aa[] = { -#ifdef CONFIG_64BIT { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI }, { "rion", 0x01, INSTR_RI_RI }, { "tric", 0x02, INSTR_RI_RI }, { "rioff", 0x03, INSTR_RI_RI }, { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI }, -#endif { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_b2[] = { -#ifdef CONFIG_64BIT { "stckf", 0x7c, INSTR_S_RD }, { "lpp", 0x80, INSTR_S_RD }, { "lcctl", 0x84, INSTR_S_RD }, @@ -819,7 +802,6 @@ static struct s390_insn opcode_b2[] = { { "tend", 0xf8, INSTR_S_00 }, { "niai", 0xfa, INSTR_IE_UU }, { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD }, -#endif { "stidp", 0x02, INSTR_S_RD }, { "sck", 0x04, INSTR_S_RD }, { "stck", 0x05, INSTR_S_RD }, @@ -908,7 +890,6 @@ static struct s390_insn opcode_b2[] = { }; static struct s390_insn opcode_b3[] = { -#ifdef CONFIG_64BIT { "maylr", 0x38, INSTR_RRF_F0FF }, { "mylr", 0x39, INSTR_RRF_F0FF }, { "mayr", 0x3a, INSTR_RRF_F0FF }, @@ -996,7 +977,6 @@ static struct s390_insn opcode_b3[] = { { "qaxtr", 0xfd, INSTR_RRF_FUFF }, { "iextr", 0xfe, INSTR_RRF_F0FR }, { "rrxtr", 0xff, INSTR_RRF_FFRU }, -#endif { "lpebr", 0x00, INSTR_RRE_FF }, { "lnebr", 0x01, INSTR_RRE_FF }, { "ltebr", 0x02, INSTR_RRE_FF }, @@ -1091,7 +1071,6 @@ static struct s390_insn opcode_b3[] = { }; static struct s390_insn opcode_b9[] = { -#ifdef CONFIG_64BIT { "lpgr", 0x00, INSTR_RRE_RR }, { "lngr", 0x01, INSTR_RRE_RR }, { "ltgr", 0x02, INSTR_RRE_RR }, @@ -1204,7 +1183,6 @@ static struct s390_insn opcode_b9[] = { { "srk", 0xf9, INSTR_RRF_R0RR2 }, { "alrk", 0xfa, INSTR_RRF_R0RR2 }, { "slrk", 0xfb, INSTR_RRF_R0RR2 }, -#endif { "kmac", 0x1e, INSTR_RRE_RR }, { "lrvr", 0x1f, INSTR_RRE_RR }, { "km", 0x2e, INSTR_RRE_RR }, @@ -1224,7 +1202,6 @@ static struct s390_insn opcode_b9[] = { }; static struct s390_insn opcode_c0[] = { -#ifdef CONFIG_64BIT { "lgfi", 0x01, INSTR_RIL_RI }, { "xihf", 0x06, INSTR_RIL_RU }, { "xilf", 0x07, INSTR_RIL_RU }, @@ -1236,7 +1213,6 @@ static struct s390_insn opcode_c0[] = { { "oilf", 0x0d, INSTR_RIL_RU }, { "llihf", 0x0e, INSTR_RIL_RU }, { "llilf", 0x0f, INSTR_RIL_RU }, -#endif { "larl", 0x00, INSTR_RIL_RP }, { "brcl", 0x04, INSTR_RIL_UP }, { "brasl", 0x05, INSTR_RIL_RP }, @@ -1244,7 +1220,6 @@ static struct s390_insn opcode_c0[] = { }; static struct s390_insn opcode_c2[] = { -#ifdef CONFIG_64BIT { "msgfi", 0x00, INSTR_RIL_RI }, { "msfi", 0x01, INSTR_RIL_RI }, { "slgfi", 0x04, INSTR_RIL_RU }, @@ -1257,12 +1232,10 @@ static struct s390_insn opcode_c2[] = { { "cfi", 0x0d, INSTR_RIL_RI }, { "clgfi", 0x0e, INSTR_RIL_RU }, { "clfi", 0x0f, INSTR_RIL_RU }, -#endif { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_c4[] = { -#ifdef CONFIG_64BIT { "llhrl", 0x02, INSTR_RIL_RP }, { "lghrl", 0x04, INSTR_RIL_RP }, { "lhrl", 0x05, INSTR_RIL_RP }, @@ -1274,12 +1247,10 @@ static struct s390_insn opcode_c4[] = { { "lrl", 0x0d, INSTR_RIL_RP }, { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, { "strl", 0x0f, INSTR_RIL_RP }, -#endif { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_c6[] = { -#ifdef CONFIG_64BIT { "exrl", 0x00, INSTR_RIL_RP }, { "pfdrl", 0x02, INSTR_RIL_UP }, { 
"cghrl", 0x04, INSTR_RIL_RP }, @@ -1292,35 +1263,29 @@ static struct s390_insn opcode_c6[] = { { "crl", 0x0d, INSTR_RIL_RP }, { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, { "clrl", 0x0f, INSTR_RIL_RP }, -#endif { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_c8[] = { -#ifdef CONFIG_64BIT { "mvcos", 0x00, INSTR_SSF_RRDRD }, { "ectg", 0x01, INSTR_SSF_RRDRD }, { "csst", 0x02, INSTR_SSF_RRDRD }, { "lpd", 0x04, INSTR_SSF_RRDRD2 }, { "lpdg", 0x05, INSTR_SSF_RRDRD2 }, -#endif { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_cc[] = { -#ifdef CONFIG_64BIT { "brcth", 0x06, INSTR_RIL_RP }, { "aih", 0x08, INSTR_RIL_RI }, { "alsih", 0x0a, INSTR_RIL_RI }, { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI }, { "cih", 0x0d, INSTR_RIL_RI }, { "clih", 0x0f, INSTR_RIL_RI }, -#endif { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_e3[] = { -#ifdef CONFIG_64BIT { "ltg", 0x02, INSTR_RXY_RRRD }, { "lrag", 0x03, INSTR_RXY_RRRD }, { "lg", 0x04, INSTR_RXY_RRRD }, @@ -1414,7 +1379,6 @@ static struct s390_insn opcode_e3[] = { { "clhf", 0xcf, INSTR_RXY_RRRD }, { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD }, { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD }, -#endif { "lrv", 0x1e, INSTR_RXY_RRRD }, { "lrvh", 0x1f, INSTR_RXY_RRRD }, { "strv", 0x3e, INSTR_RXY_RRRD }, @@ -1426,7 +1390,6 @@ static struct s390_insn opcode_e3[] = { }; static struct s390_insn opcode_e5[] = { -#ifdef CONFIG_64BIT { "strag", 0x02, INSTR_SSE_RDRD }, { "mvhhi", 0x44, INSTR_SIL_RDI }, { "mvghi", 0x48, INSTR_SIL_RDI }, @@ -1439,7 +1402,6 @@ static struct s390_insn opcode_e5[] = { { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU }, { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU }, -#endif { "lasp", 0x00, INSTR_SSE_RDRD }, { "tprot", 0x01, INSTR_SSE_RDRD }, { "mvcsk", 0x0e, INSTR_SSE_RDRD }, @@ -1448,7 +1410,6 @@ static struct s390_insn opcode_e5[] = { }; static struct s390_insn opcode_e7[] = { -#ifdef CONFIG_64BIT { "lcbb", 0x27, INSTR_RXE_RRRDM }, { "vgef", 0x13, INSTR_VRV_VVRDM }, { "vgeg", 0x12, INSTR_VRV_VVRDM }, @@ -1588,11 +1549,9 @@ static struct s390_insn opcode_e7[] = { { "vfsq", 0xce, INSTR_VRR_VV000MM }, { "vfs", 0xe2, INSTR_VRR_VVV00MM }, { "vftci", 0x4a, INSTR_VRI_VVIMM }, -#endif }; static struct s390_insn opcode_eb[] = { -#ifdef CONFIG_64BIT { "lmg", 0x04, INSTR_RSY_RRRD }, { "srag", 0x0a, INSTR_RSY_RRRD }, { "slag", 0x0b, INSTR_RSY_RRRD }, @@ -1659,7 +1618,6 @@ static struct s390_insn opcode_eb[] = { { "stric", 0x61, INSTR_RSY_RDRM }, { "mric", 0x62, INSTR_RSY_RDRM }, { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD }, -#endif { "rll", 0x1d, INSTR_RSY_RRRD }, { "mvclu", 0x8e, INSTR_RSY_RRRD }, { "tp", 0xc0, INSTR_RSL_R0RD }, @@ -1667,7 +1625,6 @@ static struct s390_insn opcode_eb[] = { }; static struct s390_insn opcode_ec[] = { -#ifdef CONFIG_64BIT { "brxhg", 0x44, INSTR_RIE_RRP }, { "brxlg", 0x45, INSTR_RIE_RRP }, { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, @@ -1701,12 +1658,10 @@ static struct s390_insn opcode_ec[] = { { "clgib", 0xfd, INSTR_RIS_RURDU }, { "cib", 0xfe, INSTR_RIS_RURDI }, { "clib", 0xff, INSTR_RIS_RURDU }, -#endif { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_ed[] = { -#ifdef CONFIG_64BIT { "mayl", 0x38, INSTR_RXF_FRRDF }, { "myl", 0x39, INSTR_RXF_FRRDF }, { "may", 0x3a, INSTR_RXF_FRRDF }, @@ -1731,7 +1686,6 @@ static struct s390_insn opcode_ed[] = { { "czxt", 0xa9, INSTR_RSL_LRDFU }, { "cdzt", 0xaa, INSTR_RSL_LRDFU }, { "cxzt", 0xab, INSTR_RSL_LRDFU }, -#endif { "ldeb", 0x04, INSTR_RXE_FRRD }, { "lxdb", 0x05, 
INSTR_RXE_FRRD }, { "lxeb", 0x06, INSTR_RXE_FRRD }, @@ -2051,7 +2005,7 @@ void show_code(struct pt_regs *regs) else *ptr++ = ' '; addr = regs->psw.addr + start - 32; - ptr += sprintf(ptr, ONELONG, addr); + ptr += sprintf(ptr, "%016lx: ", addr); if (start + opsize >= end) break; for (i = 0; i < opsize; i++) diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index a99852e..dc8e204 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -18,16 +18,6 @@ #include <asm/dis.h> #include <asm/ipl.h> -#ifndef CONFIG_64BIT -#define LONG "%08lx " -#define FOURLONG "%08lx %08lx %08lx %08lx\n" -static int kstack_depth_to_print = 12; -#else /* CONFIG_64BIT */ -#define LONG "%016lx " -#define FOURLONG "%016lx %016lx %016lx %016lx\n" -static int kstack_depth_to_print = 20; -#endif /* CONFIG_64BIT */ - /* * For show_trace we have tree different stack to consider: * - the panic stack which is used if the kernel stack has overflown @@ -115,12 +105,12 @@ void show_stack(struct task_struct *task, unsigned long *sp) else stack = sp; - for (i = 0; i < kstack_depth_to_print; i++) { + for (i = 0; i < 20; i++) { if (((addr_t) stack & (THREAD_SIZE-1)) == 0) break; if ((i * sizeof(long) % 32) == 0) printk("%s ", i == 0 ? "" : "\n"); - printk(LONG, *stack++); + printk("%016lx ", *stack++); } printk("\n"); show_trace(task, sp); @@ -128,10 +118,8 @@ void show_stack(struct task_struct *task, unsigned long *sp) static void show_last_breaking_event(struct pt_regs *regs) { -#ifdef CONFIG_64BIT printk("Last Breaking-Event-Address:\n"); printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]); -#endif } static inline int mask_bits(struct pt_regs *regs, unsigned long bits) @@ -155,16 +143,14 @@ void show_registers(struct pt_regs *regs) mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); -#ifdef CONFIG_64BIT printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); -#endif - printk("\n%s GPRS: " FOURLONG, mode, + printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode, regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); - printk(" " FOURLONG, + printk(" %016lx %016lx %016lx %016lx\n", regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); - printk(" " FOURLONG, + printk(" %016lx %016lx %016lx %016lx\n", regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); - printk(" " FOURLONG, + printk(" %016lx %016lx %016lx %016lx\n", regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); show_code(regs); } diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 4427ab7..549a73a 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -64,7 +64,6 @@ asm( " .align 4\n" " .type savesys_ipl_nss, @function\n" "savesys_ipl_nss:\n" -#ifdef CONFIG_64BIT " stmg 6,15,48(15)\n" " lgr 14,3\n" " sam31\n" @@ -72,13 +71,6 @@ asm( " sam64\n" " lgr 2,14\n" " lmg 6,15,48(15)\n" -#else - " stm 6,15,24(15)\n" - " lr 14,3\n" - " diag 2,14,0x8\n" - " lr 2,14\n" - " lm 6,15,24(15)\n" -#endif " br 14\n" " .size savesys_ipl_nss, .-savesys_ipl_nss\n" " .previous\n"); @@ -240,7 +232,6 @@ static noinline __init void detect_machine_type(void) static __init void setup_topology(void) { -#ifdef CONFIG_64BIT int max_mnest; if (!test_facility(11)) @@ -251,7 +242,6 @@ static __init void setup_topology(void) break; } topology_max_mnest = max_mnest; -#endif } static void early_pgm_check_handler(void) @@ -290,58 +280,6 
@@ static noinline __init void setup_facility_list(void) ARRAY_SIZE(S390_lowcore.stfle_fac_list)); } -static __init void detect_mvpg(void) -{ -#ifndef CONFIG_64BIT - int rc; - - asm volatile( - " la 0,0\n" - " mvpg %2,%2\n" - "0: la %0,0\n" - "1:\n" - EX_TABLE(0b,1b) - : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); - if (!rc) - S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG; -#endif -} - -static __init void detect_ieee(void) -{ -#ifndef CONFIG_64BIT - int rc, tmp; - - asm volatile( - " efpc %1,0\n" - "0: la %0,0\n" - "1:\n" - EX_TABLE(0b,1b) - : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); - if (!rc) - S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE; -#endif -} - -static __init void detect_csp(void) -{ -#ifndef CONFIG_64BIT - int rc; - - asm volatile( - " la 0,0\n" - " la 1,0\n" - " la 2,4\n" - " csp 0,2\n" - "0: la %0,0\n" - "1:\n" - EX_TABLE(0b,1b) - : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); - if (!rc) - S390_lowcore.machine_flags |= MACHINE_FLAG_CSP; -#endif -} - static __init void detect_diag9c(void) { unsigned int cpu_address; @@ -360,7 +298,6 @@ static __init void detect_diag9c(void) static __init void detect_diag44(void) { -#ifdef CONFIG_64BIT int rc; asm volatile( @@ -371,12 +308,10 @@ static __init void detect_diag44(void) : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); if (!rc) S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; -#endif } static __init void detect_machine_facilities(void) { -#ifdef CONFIG_64BIT if (test_facility(8)) { S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1; __ctl_set_bit(0, 23); @@ -393,7 +328,6 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; if (test_facility(129)) S390_lowcore.machine_flags |= MACHINE_FLAG_VX; -#endif } static int __init cad_setup(char *str) @@ -501,9 +435,6 @@ void __init startup_init(void) ipl_update_parameters(); setup_boot_command_line(); create_kernel_nss(); - detect_mvpg(); - detect_ieee(); - detect_csp(); detect_diag9c(); detect_diag44(); detect_machine_facilities(); diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 398329b..99b44ac 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -22,27 +22,28 @@ #include <asm/irq.h> __PT_R0 = __PT_GPRS -__PT_R1 = __PT_GPRS + 4 -__PT_R2 = __PT_GPRS + 8 -__PT_R3 = __PT_GPRS + 12 -__PT_R4 = __PT_GPRS + 16 -__PT_R5 = __PT_GPRS + 20 -__PT_R6 = __PT_GPRS + 24 -__PT_R7 = __PT_GPRS + 28 -__PT_R8 = __PT_GPRS + 32 -__PT_R9 = __PT_GPRS + 36 -__PT_R10 = __PT_GPRS + 40 -__PT_R11 = __PT_GPRS + 44 -__PT_R12 = __PT_GPRS + 48 -__PT_R13 = __PT_GPRS + 524 -__PT_R14 = __PT_GPRS + 56 -__PT_R15 = __PT_GPRS + 60 +__PT_R1 = __PT_GPRS + 8 +__PT_R2 = __PT_GPRS + 16 +__PT_R3 = __PT_GPRS + 24 +__PT_R4 = __PT_GPRS + 32 +__PT_R5 = __PT_GPRS + 40 +__PT_R6 = __PT_GPRS + 48 +__PT_R7 = __PT_GPRS + 56 +__PT_R8 = __PT_GPRS + 64 +__PT_R9 = __PT_GPRS + 72 +__PT_R10 = __PT_GPRS + 80 +__PT_R11 = __PT_GPRS + 88 +__PT_R12 = __PT_GPRS + 96 +__PT_R13 = __PT_GPRS + 104 +__PT_R14 = __PT_GPRS + 112 +__PT_R15 = __PT_GPRS + 120 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT -STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE +STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE -_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) +_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ + _TIF_UPROBE) _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ _TIF_SYSCALL_TRACEPOINT) _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) 
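The __PT_Rn table rewritten at the top of entry.S above just switches the stride of the saved general registers from 4 to 8 bytes. A small self-contained sketch of the resulting layout, assuming (as the table does) that the GPR array starts at __PT_GPRS inside pt_regs:

	#include <stdio.h>

	int main(void)
	{
		/* With 64-bit-only entry code each saved GPR occupies
		 * 8 bytes, so register n lives at __PT_GPRS + n * 8. */
		for (unsigned int n = 0; n <= 15; n++)
			printf("__PT_R%-2u = __PT_GPRS + %u\n", n, n * 8);
		return 0;
	}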
@@ -53,16 +54,14 @@ _PIF_WORK = (_PIF_PER_TRAP) .macro TRACE_IRQS_ON #ifdef CONFIG_TRACE_IRQFLAGS basr %r2,%r0 - l %r1,BASED(.Lc_hardirqs_on) - basr %r14,%r1 # call trace_hardirqs_on_caller + brasl %r14,trace_hardirqs_on_caller #endif .endm .macro TRACE_IRQS_OFF #ifdef CONFIG_TRACE_IRQFLAGS basr %r2,%r0 - l %r1,BASED(.Lc_hardirqs_off) - basr %r14,%r1 # call trace_hardirqs_off_caller + brasl %r14,trace_hardirqs_off_caller #endif .endm @@ -70,73 +69,104 @@ _PIF_WORK = (_PIF_PER_TRAP) #ifdef CONFIG_LOCKDEP tm __PT_PSW+1(%r11),0x01 # returning to user ? jz .+10 - l %r1,BASED(.Lc_lockdep_sys_exit) - basr %r14,%r1 # call lockdep_sys_exit + brasl %r14,lockdep_sys_exit +#endif + .endm + + .macro LPP newpp +#if IS_ENABLED(CONFIG_KVM) + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP + jz .+8 + .insn s,0xb2800000,\newpp +#endif + .endm + + .macro HANDLE_SIE_INTERCEPT scratch,reason +#if IS_ENABLED(CONFIG_KVM) + tmhh %r8,0x0001 # interrupting from user ? + jnz .+62 + lgr \scratch,%r9 + slg \scratch,BASED(.Lsie_critical) + clg \scratch,BASED(.Lsie_critical_length) + .if \reason==1 + # Some program interrupts are suppressing (e.g. protection). + # We must also check the instruction after SIE in that case. + # do_protection_exception will rewind to .Lrewind_pad + jh .+42 + .else + jhe .+42 + .endif + lg %r14,__SF_EMPTY(%r15) # get control block pointer + LPP __SF_EMPTY+16(%r15) # set host id + ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce + larl %r9,sie_exit # skip forward to sie_exit + mvi __SF_EMPTY+31(%r15),\reason # set exit reason #endif .endm .macro CHECK_STACK stacksize,savearea #ifdef CONFIG_CHECK_STACK tml %r15,\stacksize - CONFIG_STACK_GUARD - la %r14,\savearea + lghi %r14,\savearea jz stack_overflow #endif .endm .macro SWITCH_ASYNC savearea,stack,shift - tmh %r8,0x0001 # interrupting from user ? + tmhh %r8,0x0001 # interrupting from user ? jnz 1f - lr %r14,%r9 - sl %r14,BASED(.Lc_critical_start) - cl %r14,BASED(.Lc_critical_length) + lgr %r14,%r9 + slg %r14,BASED(.Lcritical_start) + clg %r14,BASED(.Lcritical_length) jhe 0f - la %r11,\savearea # inside critical section, do cleanup - bras %r14,cleanup_critical - tmh %r8,0x0001 # retest problem state after cleanup + lghi %r11,\savearea # inside critical section, do cleanup + brasl %r14,cleanup_critical + tmhh %r8,0x0001 # retest problem state after cleanup jnz 1f -0: l %r14,\stack # are we already on the target stack? - slr %r14,%r15 - sra %r14,\shift +0: lg %r14,\stack # are we already on the target stack? 
+ slgr %r14,%r15 + srag %r14,%r14,\shift jnz 1f CHECK_STACK 1<<\shift,\savearea - ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f -1: l %r15,\stack # load target stack +1: lg %r15,\stack # load target stack 2: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm - .macro ADD64 high,low,timer - al \high,\timer - al \low,4+\timer - brc 12,.+8 - ahi \high,1 - .endm - - .macro SUB64 high,low,timer - sl \high,\timer - sl \low,4+\timer - brc 3,.+8 - ahi \high,-1 + .macro UPDATE_VTIME scratch,enter_timer + lg \scratch,__LC_EXIT_TIMER + slg \scratch,\enter_timer + alg \scratch,__LC_USER_TIMER + stg \scratch,__LC_USER_TIMER + lg \scratch,__LC_LAST_UPDATE_TIMER + slg \scratch,__LC_EXIT_TIMER + alg \scratch,__LC_SYSTEM_TIMER + stg \scratch,__LC_SYSTEM_TIMER + mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer .endm - .macro UPDATE_VTIME high,low,enter_timer - lm \high,\low,__LC_EXIT_TIMER - SUB64 \high,\low,\enter_timer - ADD64 \high,\low,__LC_USER_TIMER - stm \high,\low,__LC_USER_TIMER - lm \high,\low,__LC_LAST_UPDATE_TIMER - SUB64 \high,\low,__LC_EXIT_TIMER - ADD64 \high,\low,__LC_SYSTEM_TIMER - stm \high,\low,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer + .macro LAST_BREAK scratch + srag \scratch,%r10,23 + jz .+10 + stg %r10,__TI_last_break(%r12) .endm .macro REENABLE_IRQS - st %r8,__LC_RETURN_PSW + stg %r8,__LC_RETURN_PSW ni __LC_RETURN_PSW,0xbf ssm __LC_RETURN_PSW .endm + .macro STCK savearea +#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES + .insn s,0xb27c0000,\savearea # store clock fast +#else + .insn s,0xb2050000,\savearea # store clock +#endif + .endm + .section .kprobes.text, "ax" /* @@ -147,19 +177,19 @@ _PIF_WORK = (_PIF_PER_TRAP) * gpr2 = prev */ ENTRY(__switch_to) - stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task - st %r15,__THREAD_ksp(%r2) # store kernel stack of prev - l %r4,__THREAD_info(%r2) # get thread_info of prev - l %r5,__THREAD_info(%r3) # get thread_info of next - lr %r15,%r5 - ahi %r15,STACK_INIT # end of kernel stack of next - st %r3,__LC_CURRENT # store task struct of next - st %r5,__LC_THREAD_INFO # store thread info of next - st %r15,__LC_KERNEL_STACK # store end of kernel stack + stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task + stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev + lg %r4,__THREAD_info(%r2) # get thread_info of prev + lg %r5,__THREAD_info(%r3) # get thread_info of next + lgr %r15,%r5 + aghi %r15,STACK_INIT # end of kernel stack of next + stg %r3,__LC_CURRENT # store task struct of next + stg %r5,__LC_THREAD_INFO # store thread info of next + stg %r15,__LC_KERNEL_STACK # store end of kernel stack lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 
4 - mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next - l %r15,__THREAD_ksp(%r3) # load kernel stack of next - lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task + mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next + lg %r15,__THREAD_ksp(%r3) # load kernel stack of next + lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task br %r14 .L__critical_start: @@ -170,75 +200,83 @@ ENTRY(__switch_to) ENTRY(system_call) stpt __LC_SYNC_ENTER_TIMER -.Lsysc_stm: - stm %r8,%r15,__LC_SAVE_AREA_SYNC - l %r12,__LC_THREAD_INFO - l %r13,__LC_SVC_NEW_PSW+4 - lhi %r14,_PIF_SYSCALL +.Lsysc_stmg: + stmg %r8,%r15,__LC_SAVE_AREA_SYNC + lg %r10,__LC_LAST_BREAK + lg %r12,__LC_THREAD_INFO + lghi %r14,_PIF_SYSCALL .Lsysc_per: - l %r15,__LC_KERNEL_STACK + lg %r15,__LC_KERNEL_STACK la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs .Lsysc_vtime: - UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER - stm %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC - mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW + UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER + LAST_BREAK %r13 + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC + mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC - st %r14,__PT_FLAGS(%r11) + stg %r14,__PT_FLAGS(%r11) .Lsysc_do_svc: - l %r10,__TI_sysc_table(%r12) # 31 bit system call table - lh %r8,__PT_INT_CODE+2(%r11) - sla %r8,2 # shift and test for svc0 + lg %r10,__TI_sysc_table(%r12) # address of system call table + llgh %r8,__PT_INT_CODE+2(%r11) + slag %r8,%r8,2 # shift and test for svc 0 jnz .Lsysc_nr_ok # svc 0: system call number in %r1 - cl %r1,BASED(.Lnr_syscalls) + llgfr %r1,%r1 # clear high word in r1 + cghi %r1,NR_syscalls jnl .Lsysc_nr_ok sth %r1,__PT_INT_CODE+2(%r11) - lr %r8,%r1 - sla %r8,2 + slag %r8,%r1,2 .Lsysc_nr_ok: - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) - st %r2,__PT_ORIG_GPR2(%r11) - st %r7,STACK_FRAME_OVERHEAD(%r15) - l %r9,0(%r8,%r10) # get system call addr. - tm __TI_flags+3(%r12),_TIF_TRACE + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + stg %r2,__PT_ORIG_GPR2(%r11) + stg %r7,STACK_FRAME_OVERHEAD(%r15) + lgf %r9,0(%r8,%r10) # get system call add. + tm __TI_flags+7(%r12),_TIF_TRACE jnz .Lsysc_tracesys basr %r14,%r9 # call sys_xxxx - st %r2,__PT_R2(%r11) # store return value + stg %r2,__PT_R2(%r11) # store return value .Lsysc_return: LOCKDEP_SYS_EXIT .Lsysc_tif: tm __PT_PSW+1(%r11),0x01 # returning to user ? jno .Lsysc_restore - tm __PT_FLAGS+3(%r11),_PIF_WORK + tm __PT_FLAGS+7(%r11),_PIF_WORK jnz .Lsysc_work - tm __TI_flags+3(%r12),_TIF_WORK - jnz .Lsysc_work # check for thread work - tm __LC_CPU_FLAGS+3,_CIF_WORK + tm __TI_flags+7(%r12),_TIF_WORK + jnz .Lsysc_work # check for work + tm __LC_CPU_FLAGS+7,_CIF_WORK jnz .Lsysc_work .Lsysc_restore: - mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) + lg %r14,__LC_VDSO_PER_CPU + lmg %r0,%r10,__PT_R0(%r11) + mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) stpt __LC_EXIT_TIMER - lm %r0,%r15,__PT_R0(%r11) - lpsw __LC_RETURN_PSW + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER + lmg %r11,%r15,__PT_R11(%r11) + lpswe __LC_RETURN_PSW .Lsysc_done: # # One of the work bits is on. Find out which one. 
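The UPDATE_VTIME macro rewritten a little further up performs the same accounting as the old register-pair version, just with single 64-bit loads: the interval since the last exit to user space is charged to user time, and the remainder up to the last timer update to system time. A hedged C sketch of that arithmetic (struct and field names are illustrative stand-ins for the lowcore fields; the CPU timer counts down, so earlier snapshots are larger):

	struct lowcore_sketch {
		unsigned long exit_timer;		/* __LC_EXIT_TIMER */
		unsigned long last_update_timer;	/* __LC_LAST_UPDATE_TIMER */
		unsigned long user_timer;		/* __LC_USER_TIMER */
		unsigned long system_timer;		/* __LC_SYSTEM_TIMER */
	};

	static void update_vtime_sketch(struct lowcore_sketch *lc,
					unsigned long enter_timer)
	{
		lc->user_timer   += lc->exit_timer - enter_timer;
		lc->system_timer += lc->last_update_timer - lc->exit_timer;
		lc->last_update_timer = enter_timer;
	}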
# .Lsysc_work: - tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING + tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING jo .Lsysc_mcck_pending - tm __TI_flags+3(%r12),_TIF_NEED_RESCHED + tm __TI_flags+7(%r12),_TIF_NEED_RESCHED jo .Lsysc_reschedule - tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP +#ifdef CONFIG_UPROBES + tm __TI_flags+7(%r12),_TIF_UPROBE + jo .Lsysc_uprobe_notify +#endif + tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP jo .Lsysc_singlestep - tm __TI_flags+3(%r12),_TIF_SIGPENDING + tm __TI_flags+7(%r12),_TIF_SIGPENDING jo .Lsysc_sigpending - tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME + tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME jo .Lsysc_notify_resume - tm __LC_CPU_FLAGS+3,_CIF_ASCE + tm __LC_CPU_FLAGS+7,_CIF_ASCE jo .Lsysc_uaccess j .Lsysc_return # beware of critical section cleanup @@ -246,109 +284,109 @@ ENTRY(system_call) # _TIF_NEED_RESCHED is set, call schedule # .Lsysc_reschedule: - l %r1,BASED(.Lc_schedule) - la %r14,BASED(.Lsysc_return) - br %r1 # call schedule + larl %r14,.Lsysc_return + jg schedule # # _CIF_MCCK_PENDING is set, call handler # .Lsysc_mcck_pending: - l %r1,BASED(.Lc_handle_mcck) - la %r14,BASED(.Lsysc_return) - br %r1 # TIF bit will be cleared by handler + larl %r14,.Lsysc_return + jg s390_handle_mcck # TIF bit will be cleared by handler # # _CIF_ASCE is set, load user space asce # .Lsysc_uaccess: - ni __LC_CPU_FLAGS+3,255-_CIF_ASCE - lctl %c1,%c1,__LC_USER_ASCE # load primary asce + ni __LC_CPU_FLAGS+7,255-_CIF_ASCE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce j .Lsysc_return # # _TIF_SIGPENDING is set, call do_signal # .Lsysc_sigpending: - lr %r2,%r11 # pass pointer to pt_regs - l %r1,BASED(.Lc_do_signal) - basr %r14,%r1 # call do_signal - tm __PT_FLAGS+3(%r11),_PIF_SYSCALL + lgr %r2,%r11 # pass pointer to pt_regs + brasl %r14,do_signal + tm __PT_FLAGS+7(%r11),_PIF_SYSCALL jno .Lsysc_return - lm %r2,%r7,__PT_R2(%r11) # load svc arguments - l %r10,__TI_sysc_table(%r12) # 31 bit system call table - xr %r8,%r8 # svc 0 returns -ENOSYS - clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) + lmg %r2,%r7,__PT_R2(%r11) # load svc arguments + lg %r10,__TI_sysc_table(%r12) # address of system call table + lghi %r8,0 # svc 0 returns -ENOSYS + llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number + cghi %r1,NR_syscalls jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 - lh %r8,__PT_INT_CODE+2(%r11) # load new svc number - sla %r8,2 + slag %r8,%r1,2 j .Lsysc_nr_ok # restart svc # # _TIF_NOTIFY_RESUME is set, call do_notify_resume # .Lsysc_notify_resume: - lr %r2,%r11 # pass pointer to pt_regs - l %r1,BASED(.Lc_do_notify_resume) - la %r14,BASED(.Lsysc_return) - br %r1 # call do_notify_resume + lgr %r2,%r11 # pass pointer to pt_regs + larl %r14,.Lsysc_return + jg do_notify_resume + +# +# _TIF_UPROBE is set, call uprobe_notify_resume +# +#ifdef CONFIG_UPROBES +.Lsysc_uprobe_notify: + lgr %r2,%r11 # pass pointer to pt_regs + larl %r14,.Lsysc_return + jg uprobe_notify_resume +#endif # # _PIF_PER_TRAP is set, call do_per_trap # .Lsysc_singlestep: - ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP - lr %r2,%r11 # pass pointer to pt_regs - l %r1,BASED(.Lc_do_per_trap) - la %r14,BASED(.Lsysc_return) - br %r1 # call do_per_trap + ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP + lgr %r2,%r11 # pass pointer to pt_regs + larl %r14,.Lsysc_return + jg do_per_trap # # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before # and after the system call # .Lsysc_tracesys: - l %r1,BASED(.Lc_trace_enter) - lr %r2,%r11 # pass pointer to pt_regs + lgr %r2,%r11 # pass pointer to pt_regs la %r3,0 - xr %r0,%r0 - icm 
%r0,3,__PT_INT_CODE+2(%r11) - st %r0,__PT_R2(%r11) - basr %r14,%r1 # call do_syscall_trace_enter - cl %r2,BASED(.Lnr_syscalls) - jnl .Lsysc_tracenogo - lr %r8,%r2 - sll %r8,2 - l %r9,0(%r8,%r10) + llgh %r0,__PT_INT_CODE+2(%r11) + stg %r0,__PT_R2(%r11) + brasl %r14,do_syscall_trace_enter + lghi %r0,NR_syscalls + clgr %r0,%r2 + jnh .Lsysc_tracenogo + sllg %r8,%r2,2 + lgf %r9,0(%r8,%r10) .Lsysc_tracego: - lm %r3,%r7,__PT_R3(%r11) - st %r7,STACK_FRAME_OVERHEAD(%r15) - l %r2,__PT_ORIG_GPR2(%r11) + lmg %r3,%r7,__PT_R3(%r11) + stg %r7,STACK_FRAME_OVERHEAD(%r15) + lg %r2,__PT_ORIG_GPR2(%r11) basr %r14,%r9 # call sys_xxx - st %r2,__PT_R2(%r11) # store return value + stg %r2,__PT_R2(%r11) # store return value .Lsysc_tracenogo: - tm __TI_flags+3(%r12),_TIF_TRACE + tm __TI_flags+7(%r12),_TIF_TRACE jz .Lsysc_return - l %r1,BASED(.Lc_trace_exit) - lr %r2,%r11 # pass pointer to pt_regs - la %r14,BASED(.Lsysc_return) - br %r1 # call do_syscall_trace_exit + lgr %r2,%r11 # pass pointer to pt_regs + larl %r14,.Lsysc_return + jg do_syscall_trace_exit # # a new process exits the kernel with ret_from_fork # ENTRY(ret_from_fork) la %r11,STACK_FRAME_OVERHEAD(%r15) - l %r12,__LC_THREAD_INFO - l %r13,__LC_SVC_NEW_PSW+4 - l %r1,BASED(.Lc_schedule_tail) - basr %r14,%r1 # call schedule_tail + lg %r12,__LC_THREAD_INFO + brasl %r14,schedule_tail TRACE_IRQS_ON ssm __LC_SVC_NEW_PSW # reenable interrupts tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? jne .Lsysc_tracenogo # it's a kernel thread - lm %r9,%r10,__PT_R9(%r11) # load gprs + lmg %r9,%r10,__PT_R9(%r11) # load gprs ENTRY(kernel_thread_starter) la %r2,0(%r10) basr %r14,%r9 @@ -360,46 +398,54 @@ ENTRY(kernel_thread_starter) ENTRY(pgm_check_handler) stpt __LC_SYNC_ENTER_TIMER - stm %r8,%r15,__LC_SAVE_AREA_SYNC - l %r12,__LC_THREAD_INFO - l %r13,__LC_SVC_NEW_PSW+4 - lm %r8,%r9,__LC_PGM_OLD_PSW - tmh %r8,0x0001 # test problem state bit + stmg %r8,%r15,__LC_SAVE_AREA_SYNC + lg %r10,__LC_LAST_BREAK + lg %r12,__LC_THREAD_INFO + larl %r13,system_call + lmg %r8,%r9,__LC_PGM_OLD_PSW + HANDLE_SIE_INTERCEPT %r14,1 + tmhh %r8,0x0001 # test problem state bit jnz 1f # -> fault in user space - tmh %r8,0x4000 # PER bit set in old PSW ? + tmhh %r8,0x4000 # PER bit set in old PSW ? jnz 0f # -> enabled, can't be a double fault tm __LC_PGM_ILC+3,0x80 # check for per exception jnz .Lpgm_svcper # -> single stepped svc 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC - ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f -1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER - l %r15,__LC_KERNEL_STACK +1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER + LAST_BREAK %r14 + lg %r15,__LC_KERNEL_STACK + lg %r14,__TI_task(%r12) + lghi %r13,__LC_PGM_TDB + tm __LC_PGM_ILC+2,0x02 # check for transaction abort + jz 2f + mvc __THREAD_trap_tdb(256,%r14),0(%r13) 2: la %r11,STACK_FRAME_OVERHEAD(%r15) - stm %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC - stm %r8,%r9,__PT_PSW(%r11) + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC + stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC - mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE - xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) + mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) + stg %r10,__PT_ARGS(%r11) tm __LC_PGM_ILC+3,0x80 # check for per exception jz 0f - l %r1,__TI_task(%r12) - tmh %r8,0x0001 # kernel per event ? + tmhh %r8,0x0001 # kernel per event ? 
jz .Lpgm_kprobe - oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP - mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS - mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE - mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID + oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP + mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS + mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE + mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID 0: REENABLE_IRQS - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) - l %r1,BASED(.Lc_jump_table) - la %r10,0x7f - n %r10,__PT_INT_CODE(%r11) - je .Lsysc_return + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + larl %r1,pgm_check_table + llgh %r10,__PT_INT_CODE+2(%r11) + nill %r10,0x007f sll %r10,2 - l %r1,0(%r10,%r1) # load address of handler routine - lr %r2,%r11 # pass pointer to pt_regs + je .Lsysc_return + lgf %r1,0(%r10,%r1) # load address of handler routine + lgr %r2,%r11 # pass pointer to pt_regs basr %r14,%r1 # branch to interrupt-handler j .Lsysc_return @@ -408,54 +454,55 @@ ENTRY(pgm_check_handler) # .Lpgm_kprobe: REENABLE_IRQS - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) - l %r1,BASED(.Lc_do_per_trap) - lr %r2,%r11 # pass pointer to pt_regs - basr %r14,%r1 # call do_per_trap + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + lgr %r2,%r11 # pass pointer to pt_regs + brasl %r14,do_per_trap j .Lsysc_return # # single stepped system call # .Lpgm_svcper: - mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW - mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per) - lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP - lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs + mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW + larl %r14,.Lsysc_per + stg %r14,__LC_RETURN_PSW+8 + lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP + lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs /* * IO interrupt handler routine */ - ENTRY(io_int_handler) - stck __LC_INT_CLOCK + STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER - stm %r8,%r15,__LC_SAVE_AREA_ASYNC - l %r12,__LC_THREAD_INFO - l %r13,__LC_SVC_NEW_PSW+4 - lm %r8,%r9,__LC_IO_OLD_PSW - tmh %r8,0x0001 # interrupting from user ? + stmg %r8,%r15,__LC_SAVE_AREA_ASYNC + lg %r10,__LC_LAST_BREAK + lg %r12,__LC_THREAD_INFO + larl %r13,system_call + lmg %r8,%r9,__LC_IO_OLD_PSW + HANDLE_SIE_INTERCEPT %r14,2 + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT + tmhh %r8,0x0001 # interrupting from user? jz .Lio_skip - UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER + UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER + LAST_BREAK %r14 .Lio_skip: - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT - stm %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC - stm %r8,%r9,__PT_PSW(%r11) + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC + stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID - xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) TRACE_IRQS_OFF - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) .Lio_loop: - l %r1,BASED(.Lc_do_IRQ) - lr %r2,%r11 # pass pointer to pt_regs - lhi %r3,IO_INTERRUPT + lgr %r2,%r11 # pass pointer to pt_regs + lghi %r3,IO_INTERRUPT tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? 
jz .Lio_call - lhi %r3,THIN_INTERRUPT + lghi %r3,THIN_INTERRUPT .Lio_call: - basr %r14,%r1 # call do_IRQ - tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR + brasl %r14,do_IRQ + tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR jz .Lio_return tpi 0 jz .Lio_return @@ -465,21 +512,26 @@ ENTRY(io_int_handler) LOCKDEP_SYS_EXIT TRACE_IRQS_ON .Lio_tif: - tm __TI_flags+3(%r12),_TIF_WORK + tm __TI_flags+7(%r12),_TIF_WORK jnz .Lio_work # there is work to do (signals etc.) - tm __LC_CPU_FLAGS+3,_CIF_WORK + tm __LC_CPU_FLAGS+7,_CIF_WORK jnz .Lio_work .Lio_restore: - mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) + lg %r14,__LC_VDSO_PER_CPU + lmg %r0,%r10,__PT_R0(%r11) + mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) stpt __LC_EXIT_TIMER - lm %r0,%r15,__PT_R0(%r11) - lpsw __LC_RETURN_PSW + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER + lmg %r11,%r15,__PT_R11(%r11) + lpswe __LC_RETURN_PSW .Lio_done: # # There is work todo, find out in which context we have been interrupted: # 1) if we return to user space we can do all _TIF_WORK work -# 2) if we return to kernel code and preemptive scheduling is enabled check +# 2) if we return to kernel code and kvm is enabled check if we need to +# modify the psw to leave SIE +# 3) if we return to kernel code and preemptive scheduling is enabled check # the preemption counter and if it is zero call preempt_schedule_irq # Before any work can be done, a switch to the kernel stack is required. # @@ -489,21 +541,20 @@ ENTRY(io_int_handler) #ifdef CONFIG_PREEMPT # check for preemptive scheduling icm %r0,15,__TI_precount(%r12) - jnz .Lio_restore # preemption disabled - tm __TI_flags+3(%r12),_TIF_NEED_RESCHED + jnz .Lio_restore # preemption is disabled + tm __TI_flags+7(%r12),_TIF_NEED_RESCHED jno .Lio_restore # switch to kernel stack - l %r1,__PT_R15(%r11) - ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + lg %r1,__PT_R15(%r11) + aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) - xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) + xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) - lr %r15,%r1 + lgr %r15,%r1 # TRACE_IRQS_ON already done at .Lio_return, call # TRACE_IRQS_OFF to keep things symmetrical TRACE_IRQS_OFF - l %r1,BASED(.Lc_preempt_irq) - basr %r14,%r1 # call preempt_schedule_irq + brasl %r14,preempt_schedule_irq j .Lio_return #else j .Lio_restore @@ -513,25 +564,25 @@ ENTRY(io_int_handler) # Need to do work before returning to userspace, switch to kernel stack # .Lio_work_user: - l %r1,__LC_KERNEL_STACK + lg %r1,__LC_KERNEL_STACK mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) - xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) + xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) la %r11,STACK_FRAME_OVERHEAD(%r1) - lr %r15,%r1 + lgr %r15,%r1 # # One of the work bits is on. Find out which one. 
# .Lio_work_tif: - tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING + tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING jo .Lio_mcck_pending - tm __TI_flags+3(%r12),_TIF_NEED_RESCHED + tm __TI_flags+7(%r12),_TIF_NEED_RESCHED jo .Lio_reschedule - tm __TI_flags+3(%r12),_TIF_SIGPENDING + tm __TI_flags+7(%r12),_TIF_SIGPENDING jo .Lio_sigpending - tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME + tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME jo .Lio_notify_resume - tm __LC_CPU_FLAGS+3,_CIF_ASCE + tm __LC_CPU_FLAGS+7,_CIF_ASCE jo .Lio_uaccess j .Lio_return # beware of critical section cleanup @@ -540,8 +591,7 @@ ENTRY(io_int_handler) # .Lio_mcck_pending: # TRACE_IRQS_ON already done at .Lio_return - l %r1,BASED(.Lc_handle_mcck) - basr %r14,%r1 # TIF bit will be cleared by handler + brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler TRACE_IRQS_OFF j .Lio_return @@ -549,8 +599,8 @@ ENTRY(io_int_handler) # _CIF_ASCE is set, load user space asce # .Lio_uaccess: - ni __LC_CPU_FLAGS+3,255-_CIF_ASCE - lctl %c1,%c1,__LC_USER_ASCE # load primary asce + ni __LC_CPU_FLAGS+7,255-_CIF_ASCE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce j .Lio_return # @@ -558,35 +608,32 @@ ENTRY(io_int_handler) # .Lio_reschedule: # TRACE_IRQS_ON already done at .Lio_return - l %r1,BASED(.Lc_schedule) ssm __LC_SVC_NEW_PSW # reenable interrupts - basr %r14,%r1 # call scheduler + brasl %r14,schedule # call scheduler ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF j .Lio_return # -# _TIF_SIGPENDING is set, call do_signal +# _TIF_SIGPENDING or is set, call do_signal # .Lio_sigpending: # TRACE_IRQS_ON already done at .Lio_return - l %r1,BASED(.Lc_do_signal) ssm __LC_SVC_NEW_PSW # reenable interrupts - lr %r2,%r11 # pass pointer to pt_regs - basr %r14,%r1 # call do_signal + lgr %r2,%r11 # pass pointer to pt_regs + brasl %r14,do_signal ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF j .Lio_return # -# _TIF_SIGPENDING is set, call do_signal +# _TIF_NOTIFY_RESUME or is set, call do_notify_resume # .Lio_notify_resume: # TRACE_IRQS_ON already done at .Lio_return - l %r1,BASED(.Lc_do_notify_resume) ssm __LC_SVC_NEW_PSW # reenable interrupts - lr %r2,%r11 # pass pointer to pt_regs - basr %r14,%r1 # call do_notify_resume + lgr %r2,%r11 # pass pointer to pt_regs + brasl %r14,do_notify_resume ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF j .Lio_return @@ -594,45 +641,47 @@ ENTRY(io_int_handler) /* * External interrupt handler routine */ - ENTRY(ext_int_handler) - stck __LC_INT_CLOCK + STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER - stm %r8,%r15,__LC_SAVE_AREA_ASYNC - l %r12,__LC_THREAD_INFO - l %r13,__LC_SVC_NEW_PSW+4 - lm %r8,%r9,__LC_EXT_OLD_PSW - tmh %r8,0x0001 # interrupting from user ? + stmg %r8,%r15,__LC_SAVE_AREA_ASYNC + lg %r10,__LC_LAST_BREAK + lg %r12,__LC_THREAD_INFO + larl %r13,system_call + lmg %r8,%r9,__LC_EXT_OLD_PSW + HANDLE_SIE_INTERCEPT %r14,3 + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT + tmhh %r8,0x0001 # interrupting from user ? 
jz .Lext_skip - UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER + UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER + LAST_BREAK %r14 .Lext_skip: - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT - stm %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC - stm %r8,%r9,__PT_PSW(%r11) + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC + stmg %r8,%r9,__PT_PSW(%r11) + lghi %r1,__LC_EXT_PARAMS2 mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS - xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) + mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) TRACE_IRQS_OFF - l %r1,BASED(.Lc_do_IRQ) - lr %r2,%r11 # pass pointer to pt_regs - lhi %r3,EXT_INTERRUPT - basr %r14,%r1 # call do_IRQ + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + lgr %r2,%r11 # pass pointer to pt_regs + lghi %r3,EXT_INTERRUPT + brasl %r14,do_IRQ j .Lio_return /* * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. */ ENTRY(psw_idle) - st %r3,__SF_EMPTY(%r15) - basr %r1,0 - la %r1,.Lpsw_idle_lpsw+4-.(%r1) - st %r1,__SF_EMPTY+4(%r15) - oi __SF_EMPTY+4(%r15),0x80 - stck __CLOCK_IDLE_ENTER(%r2) + stg %r3,__SF_EMPTY(%r15) + larl %r1,.Lpsw_idle_lpsw+4 + stg %r1,__SF_EMPTY+8(%r15) + STCK __CLOCK_IDLE_ENTER(%r2) stpt __TIMER_IDLE_ENTER(%r2) .Lpsw_idle_lpsw: - lpsw __SF_EMPTY(%r15) + lpswe __SF_EMPTY(%r15) br %r14 .Lpsw_idle_end: @@ -641,17 +690,19 @@ ENTRY(psw_idle) /* * Machine check handler routines */ - ENTRY(mcck_int_handler) - stck __LC_MCCK_CLOCK - spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer - lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs - l %r12,__LC_THREAD_INFO - l %r13,__LC_SVC_NEW_PSW+4 - lm %r8,%r9,__LC_MCK_OLD_PSW + STCK __LC_MCCK_CLOCK + la %r1,4095 # revalidate r1 + spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer + lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs + lg %r10,__LC_LAST_BREAK + lg %r12,__LC_THREAD_INFO + larl %r13,system_call + lmg %r8,%r9,__LC_MCK_OLD_PSW + HANDLE_SIE_INTERCEPT %r14,4 tm __LC_MCCK_CODE,0x80 # system damage? jo .Lmcck_panic # yes -> rest of mcck code invalid - la %r14,__LC_CPU_TIMER_SAVE_AREA + lghi %r14,__LC_CPU_TIMER_SAVE_AREA mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? jo 3f @@ -669,76 +720,76 @@ ENTRY(mcck_int_handler) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? jno .Lmcck_panic # no -> skip cleanup critical + SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT tm %r8,0x0001 # interrupting from user ? jz .Lmcck_skip - UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER + UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER + LAST_BREAK %r14 .Lmcck_skip: - SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT - stm %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 - stm %r8,%r9,__PT_PSW(%r11) - xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) - l %r1,BASED(.Lc_do_machine_check) - lr %r2,%r11 # pass pointer to pt_regs - basr %r14,%r1 # call s390_do_machine_check + lghi %r14,__LC_GPREGS_SAVE_AREA+64 + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),0(%r14) + stmg %r8,%r9,__PT_PSW(%r11) + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + lgr %r2,%r11 # pass pointer to pt_regs + brasl %r14,s390_do_machine_check tm __PT_PSW+1(%r11),0x01 # returning to user ? 
jno .Lmcck_return - l %r1,__LC_KERNEL_STACK # switch to kernel stack + lg %r1,__LC_KERNEL_STACK # switch to kernel stack mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) - xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) - la %r11,STACK_FRAME_OVERHEAD(%r15) - lr %r15,%r1 + xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) + la %r11,STACK_FRAME_OVERHEAD(%r1) + lgr %r15,%r1 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off - tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING + tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING jno .Lmcck_return TRACE_IRQS_OFF - l %r1,BASED(.Lc_handle_mcck) - basr %r14,%r1 # call s390_handle_mcck + brasl %r14,s390_handle_mcck TRACE_IRQS_ON .Lmcck_return: - mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW + lg %r14,__LC_VDSO_PER_CPU + lmg %r0,%r10,__PT_R0(%r11) + mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? jno 0f - lm %r0,%r15,__PT_R0(%r11) stpt __LC_EXIT_TIMER - lpsw __LC_RETURN_MCCK_PSW -0: lm %r0,%r15,__PT_R0(%r11) - lpsw __LC_RETURN_MCCK_PSW + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER +0: lmg %r11,%r15,__PT_R11(%r11) + lpswe __LC_RETURN_MCCK_PSW .Lmcck_panic: - l %r14,__LC_PANIC_STACK - slr %r14,%r15 - sra %r14,PAGE_SHIFT + lg %r14,__LC_PANIC_STACK + slgr %r14,%r15 + srag %r14,%r14,PAGE_SHIFT jz 0f - l %r15,__LC_PANIC_STACK - j .Lmcck_skip -0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + lg %r15,__LC_PANIC_STACK +0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j .Lmcck_skip # # PSW restart interrupt handler # ENTRY(restart_int_handler) - st %r15,__LC_SAVE_AREA_RESTART - l %r15,__LC_RESTART_STACK - ahi %r15,-__PT_SIZE # create pt_regs on stack + stg %r15,__LC_SAVE_AREA_RESTART + lg %r15,__LC_RESTART_STACK + aghi %r15,-__PT_SIZE # create pt_regs on stack xc 0(__PT_SIZE,%r15),0(%r15) - stm %r0,%r14,__PT_R0(%r15) - mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART - mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw - ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack + stmg %r0,%r14,__PT_R0(%r15) + mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART + mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw + aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) - l %r1,__LC_RESTART_FN # load fn, parm & source cpu - l %r2,__LC_RESTART_DATA - l %r3,__LC_RESTART_SOURCE - ltr %r3,%r3 # test source cpu address + lg %r1,__LC_RESTART_FN # load fn, parm & source cpu + lg %r2,__LC_RESTART_DATA + lg %r3,__LC_RESTART_SOURCE + ltgr %r3,%r3 # test source cpu address jm 1f # negative -> skip source stop 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu brc 10,0b # wait for status stored 1: basr %r14,%r1 # call function stap __SF_EMPTY(%r15) # store cpu address - lh %r3,__SF_EMPTY(%r15) + llgh %r3,__SF_EMPTY(%r15) 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu brc 2,2b 3: j 3b @@ -752,215 +803,257 @@ ENTRY(restart_int_handler) * Setup a pt_regs so that show_trace can provide a good call trace. 
*/ stack_overflow: - l %r15,__LC_PANIC_STACK # change to panic stack + lg %r15,__LC_PANIC_STACK # change to panic stack la %r11,STACK_FRAME_OVERHEAD(%r15) - stm %r0,%r7,__PT_R0(%r11) - stm %r8,%r9,__PT_PSW(%r11) - mvc __PT_R8(32,%r11),0(%r14) - l %r1,BASED(1f) - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) - lr %r2,%r11 # pass pointer to pt_regs - br %r1 # branch to kernel_stack_overflow -1: .long kernel_stack_overflow + stmg %r0,%r7,__PT_R0(%r11) + stmg %r8,%r9,__PT_PSW(%r11) + mvc __PT_R8(64,%r11),0(%r14) + stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + lgr %r2,%r11 # pass pointer to pt_regs + jg kernel_stack_overflow #endif + .align 8 .Lcleanup_table: - .long system_call + 0x80000000 - .long .Lsysc_do_svc + 0x80000000 - .long .Lsysc_tif + 0x80000000 - .long .Lsysc_restore + 0x80000000 - .long .Lsysc_done + 0x80000000 - .long .Lio_tif + 0x80000000 - .long .Lio_restore + 0x80000000 - .long .Lio_done + 0x80000000 - .long psw_idle + 0x80000000 - .long .Lpsw_idle_end + 0x80000000 + .quad system_call + .quad .Lsysc_do_svc + .quad .Lsysc_tif + .quad .Lsysc_restore + .quad .Lsysc_done + .quad .Lio_tif + .quad .Lio_restore + .quad .Lio_done + .quad psw_idle + .quad .Lpsw_idle_end cleanup_critical: - cl %r9,BASED(.Lcleanup_table) # system_call + clg %r9,BASED(.Lcleanup_table) # system_call jl 0f - cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc jl .Lcleanup_system_call - cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif jl 0f - cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore + clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore jl .Lcleanup_sysc_tif - cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done + clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done jl .Lcleanup_sysc_restore - cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif + clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif jl 0f - cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore + clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore jl .Lcleanup_io_tif - cl %r9,BASED(.Lcleanup_table+28) # .Lio_done + clg %r9,BASED(.Lcleanup_table+56) # .Lio_done jl .Lcleanup_io_restore - cl %r9,BASED(.Lcleanup_table+32) # psw_idle + clg %r9,BASED(.Lcleanup_table+64) # psw_idle jl 0f - cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end + clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end jl .Lcleanup_idle 0: br %r14 + .Lcleanup_system_call: # check if stpt has been executed - cl %r9,BASED(.Lcleanup_system_call_insn) + clg %r9,BASED(.Lcleanup_system_call_insn) jh 0f mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER - chi %r11,__LC_SAVE_AREA_ASYNC + cghi %r11,__LC_SAVE_AREA_ASYNC je 0f mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER -0: # check if stm has been executed - cl %r9,BASED(.Lcleanup_system_call_insn+4) +0: # check if stmg has been executed + clg %r9,BASED(.Lcleanup_system_call_insn+8) jh 0f - mvc __LC_SAVE_AREA_SYNC(32),0(%r11) -0: # set up saved registers r12, and r13 - st %r12,16(%r11) # r12 thread-info pointer - st %r13,20(%r11) # r13 literal-pool pointer - # check if the user time calculation has been done - cl %r9,BASED(.Lcleanup_system_call_insn+8) + mvc __LC_SAVE_AREA_SYNC(64),0(%r11) +0: # check if base register setup + TIF bit load has been done + clg %r9,BASED(.Lcleanup_system_call_insn+16) + jhe 0f + # set up saved registers r10 and r12 + stg %r10,16(%r11) # r10 last break + stg %r12,32(%r11) # r12 thread-info pointer +0: # check if the user time update has been done + clg 
%r9,BASED(.Lcleanup_system_call_insn+24) jh 0f - l %r10,__LC_EXIT_TIMER - l %r15,__LC_EXIT_TIMER+4 - SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER - ADD64 %r10,%r15,__LC_USER_TIMER - st %r10,__LC_USER_TIMER - st %r15,__LC_USER_TIMER+4 -0: # check if the system time calculation has been done - cl %r9,BASED(.Lcleanup_system_call_insn+12) + lg %r15,__LC_EXIT_TIMER + slg %r15,__LC_SYNC_ENTER_TIMER + alg %r15,__LC_USER_TIMER + stg %r15,__LC_USER_TIMER +0: # check if the system time update has been done + clg %r9,BASED(.Lcleanup_system_call_insn+32) jh 0f - l %r10,__LC_LAST_UPDATE_TIMER - l %r15,__LC_LAST_UPDATE_TIMER+4 - SUB64 %r10,%r15,__LC_EXIT_TIMER - ADD64 %r10,%r15,__LC_SYSTEM_TIMER - st %r10,__LC_SYSTEM_TIMER - st %r15,__LC_SYSTEM_TIMER+4 + lg %r15,__LC_LAST_UPDATE_TIMER + slg %r15,__LC_EXIT_TIMER + alg %r15,__LC_SYSTEM_TIMER + stg %r15,__LC_SYSTEM_TIMER 0: # update accounting time stamp mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER - # set up saved register 11 - l %r15,__LC_KERNEL_STACK + # do LAST_BREAK + lg %r9,16(%r11) + srag %r9,%r9,23 + jz 0f + mvc __TI_last_break(8,%r12),16(%r11) +0: # set up saved register r11 + lg %r15,__LC_KERNEL_STACK la %r9,STACK_FRAME_OVERHEAD(%r15) - st %r9,12(%r11) # r11 pt_regs pointer + stg %r9,24(%r11) # r11 pt_regs pointer # fill pt_regs - mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC - stm %r0,%r7,__PT_R0(%r9) - mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW + mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC + stmg %r0,%r7,__PT_R0(%r9) + mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC - xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9) - mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL - # setup saved register 15 - st %r15,28(%r11) # r15 stack pointer + xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9) + mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL + # setup saved register r15 + stg %r15,56(%r11) # r15 stack pointer # set new psw address and exit - l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000 + larl %r9,.Lsysc_do_svc br %r14 .Lcleanup_system_call_insn: - .long system_call + 0x80000000 - .long .Lsysc_stm + 0x80000000 - .long .Lsysc_vtime + 0x80000000 + 36 - .long .Lsysc_vtime + 0x80000000 + 76 + .quad system_call + .quad .Lsysc_stmg + .quad .Lsysc_per + .quad .Lsysc_vtime+18 + .quad .Lsysc_vtime+42 .Lcleanup_sysc_tif: - l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000 + larl %r9,.Lsysc_tif br %r14 .Lcleanup_sysc_restore: - cl %r9,BASED(.Lcleanup_sysc_restore_insn) - jhe 0f - l %r9,12(%r11) # get saved pointer to pt_regs - mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) - mvc 0(32,%r11),__PT_R8(%r9) - lm %r0,%r7,__PT_R0(%r9) -0: lm %r8,%r9,__LC_RETURN_PSW + clg %r9,BASED(.Lcleanup_sysc_restore_insn) + je 0f + lg %r9,24(%r11) # get saved pointer to pt_regs + mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) + mvc 0(64,%r11),__PT_R8(%r9) + lmg %r0,%r7,__PT_R0(%r9) +0: lmg %r8,%r9,__LC_RETURN_PSW br %r14 .Lcleanup_sysc_restore_insn: - .long .Lsysc_done - 4 + 0x80000000 + .quad .Lsysc_done - 4 .Lcleanup_io_tif: - l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000 + larl %r9,.Lio_tif br %r14 .Lcleanup_io_restore: - cl %r9,BASED(.Lcleanup_io_restore_insn) - jhe 0f - l %r9,12(%r11) # get saved r11 pointer to pt_regs - mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) - mvc 0(32,%r11),__PT_R8(%r9) - lm %r0,%r7,__PT_R0(%r9) -0: lm %r8,%r9,__LC_RETURN_PSW + clg %r9,BASED(.Lcleanup_io_restore_insn) + je 0f + lg %r9,24(%r11) # get saved r11 pointer to pt_regs + mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) + mvc 0(64,%r11),__PT_R8(%r9) + lmg %r0,%r7,__PT_R0(%r9) +0: lmg %r8,%r9,__LC_RETURN_PSW br %r14 .Lcleanup_io_restore_insn: - 
.long .Lio_done - 4 + 0x80000000 + .quad .Lio_done - 4 .Lcleanup_idle: # copy interrupt clock & cpu timer mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER - chi %r11,__LC_SAVE_AREA_ASYNC + cghi %r11,__LC_SAVE_AREA_ASYNC je 0f mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER -0: # check if stck has been executed - cl %r9,BASED(.Lcleanup_idle_insn) +0: # check if stck & stpt have been executed + clg %r9,BASED(.Lcleanup_idle_insn) jhe 1f mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) - mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3) + mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) 1: # account system time going idle - lm %r9,%r10,__LC_STEAL_TIMER - ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2) - SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK - stm %r9,%r10,__LC_STEAL_TIMER + lg %r9,__LC_STEAL_TIMER + alg %r9,__CLOCK_IDLE_ENTER(%r2) + slg %r9,__LC_LAST_UPDATE_CLOCK + stg %r9,__LC_STEAL_TIMER mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) - lm %r9,%r10,__LC_SYSTEM_TIMER - ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER - SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2) - stm %r9,%r10,__LC_SYSTEM_TIMER + lg %r9,__LC_SYSTEM_TIMER + alg %r9,__LC_LAST_UPDATE_TIMER + slg %r9,__TIMER_IDLE_ENTER(%r2) + stg %r9,__LC_SYSTEM_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) # prepare return psw - n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits - l %r9,24(%r11) # return from psw_idle + nihh %r8,0xfcfd # clear irq & wait state bits + lg %r9,48(%r11) # return from psw_idle br %r14 .Lcleanup_idle_insn: - .long .Lpsw_idle_lpsw + 0x80000000 -.Lcleanup_idle_wait: - .long 0xfcfdffff + .quad .Lpsw_idle_lpsw /* * Integer constants */ - .align 4 -.Lnr_syscalls: - .long NR_syscalls -.Lvtimer_max: - .quad 0x7fffffffffffffff + .align 8 +.Lcritical_start: + .quad .L__critical_start +.Lcritical_length: + .quad .L__critical_end - .L__critical_start + +#if IS_ENABLED(CONFIG_KVM) /* - * Symbol constants + * sie64a calling convention: + * %r2 pointer to sie control block + * %r3 guest register save area */ -.Lc_do_machine_check: .long s390_do_machine_check -.Lc_handle_mcck: .long s390_handle_mcck -.Lc_do_IRQ: .long do_IRQ -.Lc_do_signal: .long do_signal -.Lc_do_notify_resume: .long do_notify_resume -.Lc_do_per_trap: .long do_per_trap -.Lc_jump_table: .long pgm_check_table -.Lc_schedule: .long schedule -#ifdef CONFIG_PREEMPT -.Lc_preempt_irq: .long preempt_schedule_irq -#endif -.Lc_trace_enter: .long do_syscall_trace_enter -.Lc_trace_exit: .long do_syscall_trace_exit -.Lc_schedule_tail: .long schedule_tail -.Lc_sysc_per: .long .Lsysc_per + 0x80000000 -#ifdef CONFIG_TRACE_IRQFLAGS -.Lc_hardirqs_on: .long trace_hardirqs_on_caller -.Lc_hardirqs_off: .long trace_hardirqs_off_caller -#endif -#ifdef CONFIG_LOCKDEP -.Lc_lockdep_sys_exit: .long lockdep_sys_exit +ENTRY(sie64a) + stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers + stg %r2,__SF_EMPTY(%r15) # save control block pointer + stg %r3,__SF_EMPTY+8(%r15) # save guest register save area + xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason + lmg %r0,%r13,0(%r3) # load guest gprs 0-13 + lg %r14,__LC_GMAP # get gmap pointer + ltgr %r14,%r14 + jz .Lsie_gmap + lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce +.Lsie_gmap: + lg %r14,__SF_EMPTY(%r15) # get control block pointer + oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now + tm __SIE_PROG20+3(%r14),1 # last exit... 
+ jnz .Lsie_done + LPP __SF_EMPTY(%r15) # set guest id + sie 0(%r14) +.Lsie_done: + LPP __SF_EMPTY+16(%r15) # set host id + ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce +# some program checks are suppressing. C code (e.g. do_protection_exception) +# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other +# instructions between sie64a and .Lsie_done should not cause program +# interrupts. So lets use a nop (47 00 00 00) as a landing pad. +# See also HANDLE_SIE_INTERCEPT +.Lrewind_pad: + nop 0 + .globl sie_exit +sie_exit: + lg %r14,__SF_EMPTY+8(%r15) # load guest register save area + stmg %r0,%r13,0(%r14) # save guest gprs 0-13 + lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers + lg %r2,__SF_EMPTY+24(%r15) # return exit reason code + br %r14 +.Lsie_fault: + lghi %r14,-EFAULT + stg %r14,__SF_EMPTY+24(%r15) # set exit reason code + j sie_exit + + .align 8 +.Lsie_critical: + .quad .Lsie_gmap +.Lsie_critical_length: + .quad .Lsie_done - .Lsie_gmap + + EX_TABLE(.Lrewind_pad,.Lsie_fault) + EX_TABLE(sie_exit,.Lsie_fault) #endif -.Lc_critical_start: .long .L__critical_start + 0x80000000 -.Lc_critical_length: .long .L__critical_end - .L__critical_start - .section .rodata, "a" -#define SYSCALL(esa,esame,emu) .long esa + .section .rodata, "a" +#define SYSCALL(esame,emu) .long esame .globl sys_call_table sys_call_table: #include "syscalls.S" #undef SYSCALL + +#ifdef CONFIG_COMPAT + +#define SYSCALL(esame,emu) .long emu + .globl sys_call_table_emu +sys_call_table_emu: +#include "syscalls.S" +#undef SYSCALL +#endif diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S deleted file mode 100644 index c329446..0000000 --- a/arch/s390/kernel/entry64.S +++ /dev/null @@ -1,1059 +0,0 @@ -/* - * S390 low-level entry points. - * - * Copyright IBM Corp. 
1999, 2012 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), - * Hartmut Penner (hp@de.ibm.com), - * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), - * Heiko Carstens <heiko.carstens@de.ibm.com> - */ - -#include <linux/init.h> -#include <linux/linkage.h> -#include <asm/processor.h> -#include <asm/cache.h> -#include <asm/errno.h> -#include <asm/ptrace.h> -#include <asm/thread_info.h> -#include <asm/asm-offsets.h> -#include <asm/unistd.h> -#include <asm/page.h> -#include <asm/sigp.h> -#include <asm/irq.h> - -__PT_R0 = __PT_GPRS -__PT_R1 = __PT_GPRS + 8 -__PT_R2 = __PT_GPRS + 16 -__PT_R3 = __PT_GPRS + 24 -__PT_R4 = __PT_GPRS + 32 -__PT_R5 = __PT_GPRS + 40 -__PT_R6 = __PT_GPRS + 48 -__PT_R7 = __PT_GPRS + 56 -__PT_R8 = __PT_GPRS + 64 -__PT_R9 = __PT_GPRS + 72 -__PT_R10 = __PT_GPRS + 80 -__PT_R11 = __PT_GPRS + 88 -__PT_R12 = __PT_GPRS + 96 -__PT_R13 = __PT_GPRS + 104 -__PT_R14 = __PT_GPRS + 112 -__PT_R15 = __PT_GPRS + 120 - -STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER -STACK_SIZE = 1 << STACK_SHIFT -STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE - -_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ - _TIF_UPROBE) -_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ - _TIF_SYSCALL_TRACEPOINT) -_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) -_PIF_WORK = (_PIF_PER_TRAP) - -#define BASED(name) name-system_call(%r13) - - .macro TRACE_IRQS_ON -#ifdef CONFIG_TRACE_IRQFLAGS - basr %r2,%r0 - brasl %r14,trace_hardirqs_on_caller -#endif - .endm - - .macro TRACE_IRQS_OFF -#ifdef CONFIG_TRACE_IRQFLAGS - basr %r2,%r0 - brasl %r14,trace_hardirqs_off_caller -#endif - .endm - - .macro LOCKDEP_SYS_EXIT -#ifdef CONFIG_LOCKDEP - tm __PT_PSW+1(%r11),0x01 # returning to user ? - jz .+10 - brasl %r14,lockdep_sys_exit -#endif - .endm - - .macro LPP newpp -#if IS_ENABLED(CONFIG_KVM) - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP - jz .+8 - .insn s,0xb2800000,\newpp -#endif - .endm - - .macro HANDLE_SIE_INTERCEPT scratch,reason -#if IS_ENABLED(CONFIG_KVM) - tmhh %r8,0x0001 # interrupting from user ? - jnz .+62 - lgr \scratch,%r9 - slg \scratch,BASED(.Lsie_critical) - clg \scratch,BASED(.Lsie_critical_length) - .if \reason==1 - # Some program interrupts are suppressing (e.g. protection). - # We must also check the instruction after SIE in that case. - # do_protection_exception will rewind to .Lrewind_pad - jh .+42 - .else - jhe .+42 - .endif - lg %r14,__SF_EMPTY(%r15) # get control block pointer - LPP __SF_EMPTY+16(%r15) # set host id - ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE - lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - larl %r9,sie_exit # skip forward to sie_exit - mvi __SF_EMPTY+31(%r15),\reason # set exit reason -#endif - .endm - - .macro CHECK_STACK stacksize,savearea -#ifdef CONFIG_CHECK_STACK - tml %r15,\stacksize - CONFIG_STACK_GUARD - lghi %r14,\savearea - jz stack_overflow -#endif - .endm - - .macro SWITCH_ASYNC savearea,stack,shift - tmhh %r8,0x0001 # interrupting from user ? - jnz 1f - lgr %r14,%r9 - slg %r14,BASED(.Lcritical_start) - clg %r14,BASED(.Lcritical_length) - jhe 0f - lghi %r11,\savearea # inside critical section, do cleanup - brasl %r14,cleanup_critical - tmhh %r8,0x0001 # retest problem state after cleanup - jnz 1f -0: lg %r14,\stack # are we already on the target stack? 
- slgr %r14,%r15 - srag %r14,%r14,\shift - jnz 1f - CHECK_STACK 1<<\shift,\savearea - aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j 2f -1: lg %r15,\stack # load target stack -2: la %r11,STACK_FRAME_OVERHEAD(%r15) - .endm - - .macro UPDATE_VTIME scratch,enter_timer - lg \scratch,__LC_EXIT_TIMER - slg \scratch,\enter_timer - alg \scratch,__LC_USER_TIMER - stg \scratch,__LC_USER_TIMER - lg \scratch,__LC_LAST_UPDATE_TIMER - slg \scratch,__LC_EXIT_TIMER - alg \scratch,__LC_SYSTEM_TIMER - stg \scratch,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer - .endm - - .macro LAST_BREAK scratch - srag \scratch,%r10,23 - jz .+10 - stg %r10,__TI_last_break(%r12) - .endm - - .macro REENABLE_IRQS - stg %r8,__LC_RETURN_PSW - ni __LC_RETURN_PSW,0xbf - ssm __LC_RETURN_PSW - .endm - - .macro STCK savearea -#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES - .insn s,0xb27c0000,\savearea # store clock fast -#else - .insn s,0xb2050000,\savearea # store clock -#endif - .endm - - .section .kprobes.text, "ax" - -/* - * Scheduler resume function, called by switch_to - * gpr2 = (task_struct *) prev - * gpr3 = (task_struct *) next - * Returns: - * gpr2 = prev - */ -ENTRY(__switch_to) - stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task - stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev - lg %r4,__THREAD_info(%r2) # get thread_info of prev - lg %r5,__THREAD_info(%r3) # get thread_info of next - lgr %r15,%r5 - aghi %r15,STACK_INIT # end of kernel stack of next - stg %r3,__LC_CURRENT # store task struct of next - stg %r5,__LC_THREAD_INFO # store thread info of next - stg %r15,__LC_KERNEL_STACK # store end of kernel stack - lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 - mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next - lg %r15,__THREAD_ksp(%r3) # load kernel stack of next - lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task - br %r14 - -.L__critical_start: -/* - * SVC interrupt handler routine. System calls are synchronous events and - * are executed with interrupts enabled. - */ - -ENTRY(system_call) - stpt __LC_SYNC_ENTER_TIMER -.Lsysc_stmg: - stmg %r8,%r15,__LC_SAVE_AREA_SYNC - lg %r10,__LC_LAST_BREAK - lg %r12,__LC_THREAD_INFO - lghi %r14,_PIF_SYSCALL -.Lsysc_per: - lg %r15,__LC_KERNEL_STACK - la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs -.Lsysc_vtime: - UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER - LAST_BREAK %r13 - stmg %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC - mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW - mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC - stg %r14,__PT_FLAGS(%r11) -.Lsysc_do_svc: - lg %r10,__TI_sysc_table(%r12) # address of system call table - llgh %r8,__PT_INT_CODE+2(%r11) - slag %r8,%r8,2 # shift and test for svc 0 - jnz .Lsysc_nr_ok - # svc 0: system call number in %r1 - llgfr %r1,%r1 # clear high word in r1 - cghi %r1,NR_syscalls - jnl .Lsysc_nr_ok - sth %r1,__PT_INT_CODE+2(%r11) - slag %r8,%r1,2 -.Lsysc_nr_ok: - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - stg %r2,__PT_ORIG_GPR2(%r11) - stg %r7,STACK_FRAME_OVERHEAD(%r15) - lgf %r9,0(%r8,%r10) # get system call add. - tm __TI_flags+7(%r12),_TIF_TRACE - jnz .Lsysc_tracesys - basr %r14,%r9 # call sys_xxxx - stg %r2,__PT_R2(%r11) # store return value - -.Lsysc_return: - LOCKDEP_SYS_EXIT -.Lsysc_tif: - tm __PT_PSW+1(%r11),0x01 # returning to user ? 
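The UPDATE_VTIME macro shown above is the core of the CPU time accounting performed on every kernel entry. The s390 CPU timer counts down, so the difference between the timer captured at the last exit to user space and the timer captured now is the elapsed user time, and the difference between the last accounting update and that exit is the kernel time accumulated before the return to user. A hedged C restatement of that arithmetic; the struct and function names are mine, not the kernel's:

    struct vtime_acct {
            unsigned long user_timer;       /* __LC_USER_TIMER        */
            unsigned long system_timer;     /* __LC_SYSTEM_TIMER      */
            unsigned long exit_timer;       /* __LC_EXIT_TIMER        */
            unsigned long last_update;      /* __LC_LAST_UPDATE_TIMER */
    };

    /* enter_timer is the CPU timer value stored on this kernel entry
     * (__LC_SYNC_ENTER_TIMER, __LC_ASYNC_ENTER_TIMER or __LC_MCCK_ENTER_TIMER). */
    static void update_vtime(struct vtime_acct *vt, unsigned long enter_timer)
    {
            vt->user_timer   += vt->exit_timer - enter_timer;      /* user time since last exit   */
            vt->system_timer += vt->last_update - vt->exit_timer;  /* kernel time up to that exit */
            vt->last_update   = enter_timer;                       /* new accounting time stamp   */
    }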
- jno .Lsysc_restore - tm __PT_FLAGS+7(%r11),_PIF_WORK - jnz .Lsysc_work - tm __TI_flags+7(%r12),_TIF_WORK - jnz .Lsysc_work # check for work - tm __LC_CPU_FLAGS+7,_CIF_WORK - jnz .Lsysc_work -.Lsysc_restore: - lg %r14,__LC_VDSO_PER_CPU - lmg %r0,%r10,__PT_R0(%r11) - mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) - stpt __LC_EXIT_TIMER - mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER - lmg %r11,%r15,__PT_R11(%r11) - lpswe __LC_RETURN_PSW -.Lsysc_done: - -# -# One of the work bits is on. Find out which one. -# -.Lsysc_work: - tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING - jo .Lsysc_mcck_pending - tm __TI_flags+7(%r12),_TIF_NEED_RESCHED - jo .Lsysc_reschedule -#ifdef CONFIG_UPROBES - tm __TI_flags+7(%r12),_TIF_UPROBE - jo .Lsysc_uprobe_notify -#endif - tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP - jo .Lsysc_singlestep - tm __TI_flags+7(%r12),_TIF_SIGPENDING - jo .Lsysc_sigpending - tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME - jo .Lsysc_notify_resume - tm __LC_CPU_FLAGS+7,_CIF_ASCE - jo .Lsysc_uaccess - j .Lsysc_return # beware of critical section cleanup - -# -# _TIF_NEED_RESCHED is set, call schedule -# -.Lsysc_reschedule: - larl %r14,.Lsysc_return - jg schedule - -# -# _CIF_MCCK_PENDING is set, call handler -# -.Lsysc_mcck_pending: - larl %r14,.Lsysc_return - jg s390_handle_mcck # TIF bit will be cleared by handler - -# -# _CIF_ASCE is set, load user space asce -# -.Lsysc_uaccess: - ni __LC_CPU_FLAGS+7,255-_CIF_ASCE - lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - j .Lsysc_return - -# -# _TIF_SIGPENDING is set, call do_signal -# -.Lsysc_sigpending: - lgr %r2,%r11 # pass pointer to pt_regs - brasl %r14,do_signal - tm __PT_FLAGS+7(%r11),_PIF_SYSCALL - jno .Lsysc_return - lmg %r2,%r7,__PT_R2(%r11) # load svc arguments - lg %r10,__TI_sysc_table(%r12) # address of system call table - lghi %r8,0 # svc 0 returns -ENOSYS - llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number - cghi %r1,NR_syscalls - jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 - slag %r8,%r1,2 - j .Lsysc_nr_ok # restart svc - -# -# _TIF_NOTIFY_RESUME is set, call do_notify_resume -# -.Lsysc_notify_resume: - lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,.Lsysc_return - jg do_notify_resume - -# -# _TIF_UPROBE is set, call uprobe_notify_resume -# -#ifdef CONFIG_UPROBES -.Lsysc_uprobe_notify: - lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,.Lsysc_return - jg uprobe_notify_resume -#endif - -# -# _PIF_PER_TRAP is set, call do_per_trap -# -.Lsysc_singlestep: - ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP - lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,.Lsysc_return - jg do_per_trap - -# -# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before -# and after the system call -# -.Lsysc_tracesys: - lgr %r2,%r11 # pass pointer to pt_regs - la %r3,0 - llgh %r0,__PT_INT_CODE+2(%r11) - stg %r0,__PT_R2(%r11) - brasl %r14,do_syscall_trace_enter - lghi %r0,NR_syscalls - clgr %r0,%r2 - jnh .Lsysc_tracenogo - sllg %r8,%r2,2 - lgf %r9,0(%r8,%r10) -.Lsysc_tracego: - lmg %r3,%r7,__PT_R3(%r11) - stg %r7,STACK_FRAME_OVERHEAD(%r15) - lg %r2,__PT_ORIG_GPR2(%r11) - basr %r14,%r9 # call sys_xxx - stg %r2,__PT_R2(%r11) # store return value -.Lsysc_tracenogo: - tm __TI_flags+7(%r12),_TIF_TRACE - jz .Lsysc_return - lgr %r2,%r11 # pass pointer to pt_regs - larl %r14,.Lsysc_return - jg do_syscall_trace_exit - -# -# a new process exits the kernel with ret_from_fork -# -ENTRY(ret_from_fork) - la %r11,STACK_FRAME_OVERHEAD(%r15) - lg %r12,__LC_THREAD_INFO - brasl %r14,schedule_tail - TRACE_IRQS_ON - ssm __LC_SVC_NEW_PSW # reenable interrupts - tm 
__PT_PSW+1(%r11),0x01 # forking a kernel thread ? - jne .Lsysc_tracenogo - # it's a kernel thread - lmg %r9,%r10,__PT_R9(%r11) # load gprs -ENTRY(kernel_thread_starter) - la %r2,0(%r10) - basr %r14,%r9 - j .Lsysc_tracenogo - -/* - * Program check handler routine - */ - -ENTRY(pgm_check_handler) - stpt __LC_SYNC_ENTER_TIMER - stmg %r8,%r15,__LC_SAVE_AREA_SYNC - lg %r10,__LC_LAST_BREAK - lg %r12,__LC_THREAD_INFO - larl %r13,system_call - lmg %r8,%r9,__LC_PGM_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,1 - tmhh %r8,0x0001 # test problem state bit - jnz 1f # -> fault in user space - tmhh %r8,0x4000 # PER bit set in old PSW ? - jnz 0f # -> enabled, can't be a double fault - tm __LC_PGM_ILC+3,0x80 # check for per exception - jnz .Lpgm_svcper # -> single stepped svc -0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC - aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j 2f -1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER - LAST_BREAK %r14 - lg %r15,__LC_KERNEL_STACK - lg %r14,__TI_task(%r12) - lghi %r13,__LC_PGM_TDB - tm __LC_PGM_ILC+2,0x02 # check for transaction abort - jz 2f - mvc __THREAD_trap_tdb(256,%r14),0(%r13) -2: la %r11,STACK_FRAME_OVERHEAD(%r15) - stmg %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC - stmg %r8,%r9,__PT_PSW(%r11) - mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC - mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE - xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) - stg %r10,__PT_ARGS(%r11) - tm __LC_PGM_ILC+3,0x80 # check for per exception - jz 0f - tmhh %r8,0x0001 # kernel per event ? - jz .Lpgm_kprobe - oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP - mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS - mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE - mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID -0: REENABLE_IRQS - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - larl %r1,pgm_check_table - llgh %r10,__PT_INT_CODE+2(%r11) - nill %r10,0x007f - sll %r10,2 - je .Lsysc_return - lgf %r1,0(%r10,%r1) # load address of handler routine - lgr %r2,%r11 # pass pointer to pt_regs - basr %r14,%r1 # branch to interrupt-handler - j .Lsysc_return - -# -# PER event in supervisor state, must be kprobes -# -.Lpgm_kprobe: - REENABLE_IRQS - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - lgr %r2,%r11 # pass pointer to pt_regs - brasl %r14,do_per_trap - j .Lsysc_return - -# -# single stepped system call -# -.Lpgm_svcper: - mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW - larl %r14,.Lsysc_per - stg %r14,__LC_RETURN_PSW+8 - lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP - lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs - -/* - * IO interrupt handler routine - */ -ENTRY(io_int_handler) - STCK __LC_INT_CLOCK - stpt __LC_ASYNC_ENTER_TIMER - stmg %r8,%r15,__LC_SAVE_AREA_ASYNC - lg %r10,__LC_LAST_BREAK - lg %r12,__LC_THREAD_INFO - larl %r13,system_call - lmg %r8,%r9,__LC_IO_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,2 - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT - tmhh %r8,0x0001 # interrupting from user? - jz .Lio_skip - UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER - LAST_BREAK %r14 -.Lio_skip: - stmg %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC - stmg %r8,%r9,__PT_PSW(%r11) - mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID - xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) - TRACE_IRQS_OFF - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) -.Lio_loop: - lgr %r2,%r11 # pass pointer to pt_regs - lghi %r3,IO_INTERRUPT - tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? 
- jz .Lio_call - lghi %r3,THIN_INTERRUPT -.Lio_call: - brasl %r14,do_IRQ - tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR - jz .Lio_return - tpi 0 - jz .Lio_return - mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID - j .Lio_loop -.Lio_return: - LOCKDEP_SYS_EXIT - TRACE_IRQS_ON -.Lio_tif: - tm __TI_flags+7(%r12),_TIF_WORK - jnz .Lio_work # there is work to do (signals etc.) - tm __LC_CPU_FLAGS+7,_CIF_WORK - jnz .Lio_work -.Lio_restore: - lg %r14,__LC_VDSO_PER_CPU - lmg %r0,%r10,__PT_R0(%r11) - mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) - stpt __LC_EXIT_TIMER - mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER - lmg %r11,%r15,__PT_R11(%r11) - lpswe __LC_RETURN_PSW -.Lio_done: - -# -# There is work todo, find out in which context we have been interrupted: -# 1) if we return to user space we can do all _TIF_WORK work -# 2) if we return to kernel code and kvm is enabled check if we need to -# modify the psw to leave SIE -# 3) if we return to kernel code and preemptive scheduling is enabled check -# the preemption counter and if it is zero call preempt_schedule_irq -# Before any work can be done, a switch to the kernel stack is required. -# -.Lio_work: - tm __PT_PSW+1(%r11),0x01 # returning to user ? - jo .Lio_work_user # yes -> do resched & signal -#ifdef CONFIG_PREEMPT - # check for preemptive scheduling - icm %r0,15,__TI_precount(%r12) - jnz .Lio_restore # preemption is disabled - tm __TI_flags+7(%r12),_TIF_NEED_RESCHED - jno .Lio_restore - # switch to kernel stack - lg %r1,__PT_R15(%r11) - aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) - xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) - la %r11,STACK_FRAME_OVERHEAD(%r1) - lgr %r15,%r1 - # TRACE_IRQS_ON already done at .Lio_return, call - # TRACE_IRQS_OFF to keep things symmetrical - TRACE_IRQS_OFF - brasl %r14,preempt_schedule_irq - j .Lio_return -#else - j .Lio_restore -#endif - -# -# Need to do work before returning to userspace, switch to kernel stack -# -.Lio_work_user: - lg %r1,__LC_KERNEL_STACK - mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) - xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) - la %r11,STACK_FRAME_OVERHEAD(%r1) - lgr %r15,%r1 - -# -# One of the work bits is on. Find out which one. -# -.Lio_work_tif: - tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING - jo .Lio_mcck_pending - tm __TI_flags+7(%r12),_TIF_NEED_RESCHED - jo .Lio_reschedule - tm __TI_flags+7(%r12),_TIF_SIGPENDING - jo .Lio_sigpending - tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME - jo .Lio_notify_resume - tm __LC_CPU_FLAGS+7,_CIF_ASCE - jo .Lio_uaccess - j .Lio_return # beware of critical section cleanup - -# -# _CIF_MCCK_PENDING is set, call handler -# -.Lio_mcck_pending: - # TRACE_IRQS_ON already done at .Lio_return - brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler - TRACE_IRQS_OFF - j .Lio_return - -# -# _CIF_ASCE is set, load user space asce -# -.Lio_uaccess: - ni __LC_CPU_FLAGS+7,255-_CIF_ASCE - lctlg %c1,%c1,__LC_USER_ASCE # load primary asce - j .Lio_return - -# -# _TIF_NEED_RESCHED is set, call schedule -# -.Lio_reschedule: - # TRACE_IRQS_ON already done at .Lio_return - ssm __LC_SVC_NEW_PSW # reenable interrupts - brasl %r14,schedule # call scheduler - ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts - TRACE_IRQS_OFF - j .Lio_return - -# -# _TIF_SIGPENDING or is set, call do_signal -# -.Lio_sigpending: - # TRACE_IRQS_ON already done at .Lio_return - ssm __LC_SVC_NEW_PSW # reenable interrupts - lgr %r2,%r11 # pass pointer to pt_regs - brasl %r14,do_signal - ssm __LC_PGM_NEW_PSW # disable I/O and ext. 
interrupts - TRACE_IRQS_OFF - j .Lio_return - -# -# _TIF_NOTIFY_RESUME or is set, call do_notify_resume -# -.Lio_notify_resume: - # TRACE_IRQS_ON already done at .Lio_return - ssm __LC_SVC_NEW_PSW # reenable interrupts - lgr %r2,%r11 # pass pointer to pt_regs - brasl %r14,do_notify_resume - ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts - TRACE_IRQS_OFF - j .Lio_return - -/* - * External interrupt handler routine - */ -ENTRY(ext_int_handler) - STCK __LC_INT_CLOCK - stpt __LC_ASYNC_ENTER_TIMER - stmg %r8,%r15,__LC_SAVE_AREA_ASYNC - lg %r10,__LC_LAST_BREAK - lg %r12,__LC_THREAD_INFO - larl %r13,system_call - lmg %r8,%r9,__LC_EXT_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,3 - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT - tmhh %r8,0x0001 # interrupting from user ? - jz .Lext_skip - UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER - LAST_BREAK %r14 -.Lext_skip: - stmg %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC - stmg %r8,%r9,__PT_PSW(%r11) - lghi %r1,__LC_EXT_PARAMS2 - mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR - mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS - mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) - xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) - TRACE_IRQS_OFF - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - lgr %r2,%r11 # pass pointer to pt_regs - lghi %r3,EXT_INTERRUPT - brasl %r14,do_IRQ - j .Lio_return - -/* - * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. - */ -ENTRY(psw_idle) - stg %r3,__SF_EMPTY(%r15) - larl %r1,.Lpsw_idle_lpsw+4 - stg %r1,__SF_EMPTY+8(%r15) - STCK __CLOCK_IDLE_ENTER(%r2) - stpt __TIMER_IDLE_ENTER(%r2) -.Lpsw_idle_lpsw: - lpswe __SF_EMPTY(%r15) - br %r14 -.Lpsw_idle_end: - -.L__critical_end: - -/* - * Machine check handler routines - */ -ENTRY(mcck_int_handler) - STCK __LC_MCCK_CLOCK - la %r1,4095 # revalidate r1 - spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer - lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs - lg %r10,__LC_LAST_BREAK - lg %r12,__LC_THREAD_INFO - larl %r13,system_call - lmg %r8,%r9,__LC_MCK_OLD_PSW - HANDLE_SIE_INTERCEPT %r14,4 - tm __LC_MCCK_CODE,0x80 # system damage? - jo .Lmcck_panic # yes -> rest of mcck code invalid - lghi %r14,__LC_CPU_TIMER_SAVE_AREA - mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) - tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? - jo 3f - la %r14,__LC_SYNC_ENTER_TIMER - clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER - jl 0f - la %r14,__LC_ASYNC_ENTER_TIMER -0: clc 0(8,%r14),__LC_EXIT_TIMER - jl 1f - la %r14,__LC_EXIT_TIMER -1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER - jl 2f - la %r14,__LC_LAST_UPDATE_TIMER -2: spt 0(%r14) - mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) -3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? - jno .Lmcck_panic # no -> skip cleanup critical - SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT - tm %r8,0x0001 # interrupting from user ? - jz .Lmcck_skip - UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER - LAST_BREAK %r14 -.Lmcck_skip: - lghi %r14,__LC_GPREGS_SAVE_AREA+64 - stmg %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(64,%r11),0(%r14) - stmg %r8,%r9,__PT_PSW(%r11) - xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - lgr %r2,%r11 # pass pointer to pt_regs - brasl %r14,s390_do_machine_check - tm __PT_PSW+1(%r11),0x01 # returning to user ? 
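When the machine check did not store a valid CPU timer (the tm __LC_MCCK_CODE+5,0x02 test above), the handler falls back to the freshest software snapshot. Because the CPU timer is a down-counter, the smallest of the saved values is the most recently taken one; it is written back with spt and reused as the mcck enter timer. A rough C equivalent of the clc/jl chain as I read it; the function name is invented and this is only a sketch:

    #include <stdint.h>

    static uint64_t freshest_timer_snapshot(uint64_t sync_enter, uint64_t async_enter,
                                            uint64_t exit, uint64_t last_update)
    {
            uint64_t t = sync_enter;

            if (async_enter <= t)
                    t = async_enter;
            if (exit <= t)
                    t = exit;
            if (last_update <= t)
                    t = last_update;
            return t;       /* reloaded into the CPU timer (spt) and used as
                             * __LC_MCCK_ENTER_TIMER */
    }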
- jno .Lmcck_return - lg %r1,__LC_KERNEL_STACK # switch to kernel stack - mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) - xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) - la %r11,STACK_FRAME_OVERHEAD(%r1) - lgr %r15,%r1 - ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off - tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING - jno .Lmcck_return - TRACE_IRQS_OFF - brasl %r14,s390_handle_mcck - TRACE_IRQS_ON -.Lmcck_return: - lg %r14,__LC_VDSO_PER_CPU - lmg %r0,%r10,__PT_R0(%r11) - mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW - tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? - jno 0f - stpt __LC_EXIT_TIMER - mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER -0: lmg %r11,%r15,__PT_R11(%r11) - lpswe __LC_RETURN_MCCK_PSW - -.Lmcck_panic: - lg %r14,__LC_PANIC_STACK - slgr %r14,%r15 - srag %r14,%r14,PAGE_SHIFT - jz 0f - lg %r15,__LC_PANIC_STACK -0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j .Lmcck_skip - -# -# PSW restart interrupt handler -# -ENTRY(restart_int_handler) - stg %r15,__LC_SAVE_AREA_RESTART - lg %r15,__LC_RESTART_STACK - aghi %r15,-__PT_SIZE # create pt_regs on stack - xc 0(__PT_SIZE,%r15),0(%r15) - stmg %r0,%r14,__PT_R0(%r15) - mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART - mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw - aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack - xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) - lg %r1,__LC_RESTART_FN # load fn, parm & source cpu - lg %r2,__LC_RESTART_DATA - lg %r3,__LC_RESTART_SOURCE - ltgr %r3,%r3 # test source cpu address - jm 1f # negative -> skip source stop -0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu - brc 10,0b # wait for status stored -1: basr %r14,%r1 # call function - stap __SF_EMPTY(%r15) # store cpu address - llgh %r3,__SF_EMPTY(%r15) -2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu - brc 2,2b -3: j 3b - - .section .kprobes.text, "ax" - -#ifdef CONFIG_CHECK_STACK -/* - * The synchronous or the asynchronous stack overflowed. We are dead. - * No need to properly save the registers, we are going to panic anyway. - * Setup a pt_regs so that show_trace can provide a good call trace. 
- */ -stack_overflow: - lg %r15,__LC_PANIC_STACK # change to panic stack - la %r11,STACK_FRAME_OVERHEAD(%r15) - stmg %r0,%r7,__PT_R0(%r11) - stmg %r8,%r9,__PT_PSW(%r11) - mvc __PT_R8(64,%r11),0(%r14) - stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - lgr %r2,%r11 # pass pointer to pt_regs - jg kernel_stack_overflow -#endif - - .align 8 -.Lcleanup_table: - .quad system_call - .quad .Lsysc_do_svc - .quad .Lsysc_tif - .quad .Lsysc_restore - .quad .Lsysc_done - .quad .Lio_tif - .quad .Lio_restore - .quad .Lio_done - .quad psw_idle - .quad .Lpsw_idle_end - -cleanup_critical: - clg %r9,BASED(.Lcleanup_table) # system_call - jl 0f - clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc - jl .Lcleanup_system_call - clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif - jl 0f - clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore - jl .Lcleanup_sysc_tif - clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done - jl .Lcleanup_sysc_restore - clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif - jl 0f - clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore - jl .Lcleanup_io_tif - clg %r9,BASED(.Lcleanup_table+56) # .Lio_done - jl .Lcleanup_io_restore - clg %r9,BASED(.Lcleanup_table+64) # psw_idle - jl 0f - clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end - jl .Lcleanup_idle -0: br %r14 - - -.Lcleanup_system_call: - # check if stpt has been executed - clg %r9,BASED(.Lcleanup_system_call_insn) - jh 0f - mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER - cghi %r11,__LC_SAVE_AREA_ASYNC - je 0f - mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER -0: # check if stmg has been executed - clg %r9,BASED(.Lcleanup_system_call_insn+8) - jh 0f - mvc __LC_SAVE_AREA_SYNC(64),0(%r11) -0: # check if base register setup + TIF bit load has been done - clg %r9,BASED(.Lcleanup_system_call_insn+16) - jhe 0f - # set up saved registers r10 and r12 - stg %r10,16(%r11) # r10 last break - stg %r12,32(%r11) # r12 thread-info pointer -0: # check if the user time update has been done - clg %r9,BASED(.Lcleanup_system_call_insn+24) - jh 0f - lg %r15,__LC_EXIT_TIMER - slg %r15,__LC_SYNC_ENTER_TIMER - alg %r15,__LC_USER_TIMER - stg %r15,__LC_USER_TIMER -0: # check if the system time update has been done - clg %r9,BASED(.Lcleanup_system_call_insn+32) - jh 0f - lg %r15,__LC_LAST_UPDATE_TIMER - slg %r15,__LC_EXIT_TIMER - alg %r15,__LC_SYSTEM_TIMER - stg %r15,__LC_SYSTEM_TIMER -0: # update accounting time stamp - mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER - # do LAST_BREAK - lg %r9,16(%r11) - srag %r9,%r9,23 - jz 0f - mvc __TI_last_break(8,%r12),16(%r11) -0: # set up saved register r11 - lg %r15,__LC_KERNEL_STACK - la %r9,STACK_FRAME_OVERHEAD(%r15) - stg %r9,24(%r11) # r11 pt_regs pointer - # fill pt_regs - mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC - stmg %r0,%r7,__PT_R0(%r9) - mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW - mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC - xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9) - mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL - # setup saved register r15 - stg %r15,56(%r11) # r15 stack pointer - # set new psw address and exit - larl %r9,.Lsysc_do_svc - br %r14 -.Lcleanup_system_call_insn: - .quad system_call - .quad .Lsysc_stmg - .quad .Lsysc_per - .quad .Lsysc_vtime+18 - .quad .Lsysc_vtime+42 - -.Lcleanup_sysc_tif: - larl %r9,.Lsysc_tif - br %r14 - -.Lcleanup_sysc_restore: - clg %r9,BASED(.Lcleanup_sysc_restore_insn) - je 0f - lg %r9,24(%r11) # get saved pointer to pt_regs - mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) - mvc 0(64,%r11),__PT_R8(%r9) - lmg %r0,%r7,__PT_R0(%r9) -0: lmg 
%r8,%r9,__LC_RETURN_PSW - br %r14 -.Lcleanup_sysc_restore_insn: - .quad .Lsysc_done - 4 - -.Lcleanup_io_tif: - larl %r9,.Lio_tif - br %r14 - -.Lcleanup_io_restore: - clg %r9,BASED(.Lcleanup_io_restore_insn) - je 0f - lg %r9,24(%r11) # get saved r11 pointer to pt_regs - mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) - mvc 0(64,%r11),__PT_R8(%r9) - lmg %r0,%r7,__PT_R0(%r9) -0: lmg %r8,%r9,__LC_RETURN_PSW - br %r14 -.Lcleanup_io_restore_insn: - .quad .Lio_done - 4 - -.Lcleanup_idle: - # copy interrupt clock & cpu timer - mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK - mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER - cghi %r11,__LC_SAVE_AREA_ASYNC - je 0f - mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK - mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER -0: # check if stck & stpt have been executed - clg %r9,BASED(.Lcleanup_idle_insn) - jhe 1f - mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) - mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) -1: # account system time going idle - lg %r9,__LC_STEAL_TIMER - alg %r9,__CLOCK_IDLE_ENTER(%r2) - slg %r9,__LC_LAST_UPDATE_CLOCK - stg %r9,__LC_STEAL_TIMER - mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) - lg %r9,__LC_SYSTEM_TIMER - alg %r9,__LC_LAST_UPDATE_TIMER - slg %r9,__TIMER_IDLE_ENTER(%r2) - stg %r9,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) - # prepare return psw - nihh %r8,0xfcfd # clear irq & wait state bits - lg %r9,48(%r11) # return from psw_idle - br %r14 -.Lcleanup_idle_insn: - .quad .Lpsw_idle_lpsw - -/* - * Integer constants - */ - .align 8 -.Lcritical_start: - .quad .L__critical_start -.Lcritical_length: - .quad .L__critical_end - .L__critical_start - - -#if IS_ENABLED(CONFIG_KVM) -/* - * sie64a calling convention: - * %r2 pointer to sie control block - * %r3 guest register save area - */ -ENTRY(sie64a) - stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers - stg %r2,__SF_EMPTY(%r15) # save control block pointer - stg %r3,__SF_EMPTY+8(%r15) # save guest register save area - xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason - lmg %r0,%r13,0(%r3) # load guest gprs 0-13 - lg %r14,__LC_GMAP # get gmap pointer - ltgr %r14,%r14 - jz .Lsie_gmap - lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce -.Lsie_gmap: - lg %r14,__SF_EMPTY(%r15) # get control block pointer - oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now - tm __SIE_PROG20+3(%r14),1 # last exit... - jnz .Lsie_done - LPP __SF_EMPTY(%r15) # set guest id - sie 0(%r14) -.Lsie_done: - LPP __SF_EMPTY+16(%r15) # set host id - ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE - lctlg %c1,%c1,__LC_USER_ASCE # load primary asce -# some program checks are suppressing. C code (e.g. do_protection_exception) -# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other -# instructions between sie64a and .Lsie_done should not cause program -# interrupts. So lets use a nop (47 00 00 00) as a landing pad. 
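The comment above explains why a four-byte nop directly follows the sie instruction: a suppressing program check rewinds the PSW by the instruction length, so the nop gives the fault handler a defined landing pad. The pairing of a potentially faulting address with its recovery code is expressed by the EX_TABLE entries a few lines further on, which map both .Lrewind_pad and sie_exit to .Lsie_fault, where -EFAULT becomes the exit reason. A much simplified, purely illustrative sketch of the exception-table idea, not the kernel's actual implementation:

    struct ex_entry {
            unsigned long insn;     /* address that may fault               */
            unsigned long fixup;    /* where execution resumes if it does   */
    };

    /* Return the fixup address for a faulting instruction, or 0 if the fault
     * has no recovery and must be treated as fatal. */
    static unsigned long find_fixup(const struct ex_entry *table, unsigned int n,
                                    unsigned long fault_addr)
    {
            for (unsigned int i = 0; i < n; i++)
                    if (table[i].insn == fault_addr)
                            return table[i].fixup;
            return 0;
    }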
-# See also HANDLE_SIE_INTERCEPT -.Lrewind_pad: - nop 0 - .globl sie_exit -sie_exit: - lg %r14,__SF_EMPTY+8(%r15) # load guest register save area - stmg %r0,%r13,0(%r14) # save guest gprs 0-13 - lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers - lg %r2,__SF_EMPTY+24(%r15) # return exit reason code - br %r14 -.Lsie_fault: - lghi %r14,-EFAULT - stg %r14,__SF_EMPTY+24(%r15) # set exit reason code - j sie_exit - - .align 8 -.Lsie_critical: - .quad .Lsie_gmap -.Lsie_critical_length: - .quad .Lsie_done - .Lsie_gmap - - EX_TABLE(.Lrewind_pad,.Lsie_fault) - EX_TABLE(sie_exit,.Lsie_fault) -#endif - - .section .rodata, "a" -#define SYSCALL(esa,esame,emu) .long esame - .globl sys_call_table -sys_call_table: -#include "syscalls.S" -#undef SYSCALL - -#ifdef CONFIG_COMPAT - -#define SYSCALL(esa,esame,emu) .long emu - .globl sys_call_table_emu -sys_call_table_emu: -#include "syscalls.S" -#undef SYSCALL -#endif diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 6c79f1b..e0eaf11 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -130,8 +130,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, /* Verify that the to be replaced code matches what we expect. */ if (memcmp(&orig, &old, sizeof(old))) return -EINVAL; - if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) - return -EPERM; + s390_kernel_write((void *) rec->ip, &new, sizeof(new)); return 0; } @@ -159,8 +158,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) /* Verify that the to be replaced code matches what we expect. */ if (memcmp(&orig, &old, sizeof(old))) return -EINVAL; - if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) - return -EPERM; + s390_kernel_write((void *) rec->ip, &new, sizeof(new)); return 0; } @@ -231,14 +229,16 @@ int ftrace_enable_ftrace_graph_caller(void) { u8 op = 0x04; /* set mask field to zero */ - return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); + s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); + return 0; } int ftrace_disable_ftrace_graph_caller(void) { u8 op = 0xf4; /* set mask field to all ones */ - return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); + s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op)); + return 0; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 132f4c9..59b7c64 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -27,11 +27,7 @@ #include <asm/thread_info.h> #include <asm/page.h> -#ifdef CONFIG_64BIT #define ARCH_OFFSET 4 -#else -#define ARCH_OFFSET 0 -#endif __HEAD @@ -67,7 +63,6 @@ __HEAD # subroutine to set architecture mode # .Lsetmode: -#ifdef CONFIG_64BIT mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero lhi %r1,2 # mode 2 = esame (dump) @@ -76,16 +71,12 @@ __HEAD .fill 16,4,0x0 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs sam31 # switch to 31 bit addressing mode -#else - mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) -#endif br %r14 # # subroutine to wait for end I/O # .Lirqwait: -#ifdef CONFIG_64BIT mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw lpsw .Lwaitpsw .Lioint: @@ -93,15 +84,6 @@ __HEAD .align 8 .Lnewpsw: .quad 0x0000000080000000,.Lioint -#else - mvc 0x78(8),.Lnewpsw # set up IO interrupt psw - lpsw .Lwaitpsw -.Lioint: - br %r14 - .align 8 -.Lnewpsw: - .long 0x00080000,0x80000000+.Lioint -#endif .Lwaitpsw: .long 0x020a0000,0x80000000+.Lioint @@ -375,7 +357,6 @@ ENTRY(startup) ENTRY(startup_kdump) j 
.Lep_startup_kdump .Lep_startup_normal: -#ifdef CONFIG_64BIT mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero lhi %r1,2 # mode 2 = esame (dump) @@ -384,9 +365,6 @@ ENTRY(startup_kdump) .fill 16,4,0x0 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs sam31 # switch to 31 bit addressing mode -#else - mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) -#endif basr %r13,0 # get base .LPG0: xc 0x200(256),0x200 # partially clear lowcore @@ -396,7 +374,6 @@ ENTRY(startup_kdump) spt 6f-.LPG0(%r13) mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST -#ifndef CONFIG_MARCH_G5 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST tm __LC_STFL_FAC_LIST,0x01 # stfle available ? @@ -435,7 +412,6 @@ ENTRY(startup_kdump) # the kernel will crash. Format is number of facility words with bits set, # followed by the facility words. -#if defined(CONFIG_64BIT) #if defined(CONFIG_MARCH_Z13) .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 #elif defined(CONFIG_MARCH_ZEC12) @@ -451,35 +427,10 @@ ENTRY(startup_kdump) #elif defined(CONFIG_MARCH_Z900) .long 1, 0xc0000000 #endif -#else -#if defined(CONFIG_MARCH_ZEC12) - .long 1, 0x8100c880 -#elif defined(CONFIG_MARCH_Z196) - .long 1, 0x8100c880 -#elif defined(CONFIG_MARCH_Z10) - .long 1, 0x8100c880 -#elif defined(CONFIG_MARCH_Z9_109) - .long 1, 0x8100c880 -#elif defined(CONFIG_MARCH_Z990) - .long 1, 0x80002000 -#elif defined(CONFIG_MARCH_Z900) - .long 1, 0x80000000 -#endif -#endif 4: -#endif - -#ifdef CONFIG_64BIT /* Continue with 64bit startup code in head64.S */ sam64 # switch to 64 bit mode jg startup_continue -#else - /* Continue with 31bit startup code in head31.S */ - l %r13,5f-.LPG0(%r13) - b 0(%r13) - .align 8 -5: .long startup_continue -#endif .align 8 6: .long 0x7fffffff,0xffffffff diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S deleted file mode 100644 index 6dbe809..0000000 --- a/arch/s390/kernel/head31.S +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright IBM Corp. 2005, 2010 - * - * Author(s): Hartmut Penner <hp@de.ibm.com> - * Martin Schwidefsky <schwidefsky@de.ibm.com> - * Rob van der Heij <rvdhei@iae.nl> - * Heiko Carstens <heiko.carstens@de.ibm.com> - * - */ - -#include <linux/init.h> -#include <linux/linkage.h> -#include <asm/asm-offsets.h> -#include <asm/thread_info.h> -#include <asm/page.h> - -__HEAD -ENTRY(startup_continue) - basr %r13,0 # get base -.LPG1: - - l %r1,.Lbase_cc-.LPG1(%r13) - mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK - lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers - l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area - # move IPL device to lowcore -# -# Setup stack -# - l %r15,.Linittu-.LPG1(%r13) - st %r15,__LC_THREAD_INFO # cache thread info in lowcore - mvc __LC_CURRENT(4),__TI_task(%r15) - ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE - st %r15,__LC_KERNEL_STACK # set end of kernel stack - ahi %r15,-96 -# -# Save ipl parameters, clear bss memory, initialize storage key for kernel pages, -# and create a kernel NSS if the SAVESYS= parm is defined -# - l %r14,.Lstartup_init-.LPG1(%r13) - basr %r14,%r14 - lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space, - # virtual and never return ... 
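The facility list embedded above follows the format spelled out in the comment: a count of words, then the words whose set bits name facilities the selected processor type requires. At startup they are checked against the list stored by stfl/stfle, and, as the comment notes, the kernel will crash if a required facility is missing. An illustrative check in C under that reading; the helper name and parameter layout are mine:

    /* required points at the { count, word0, word1, ... } block shown above;
     * stored points at the facility words saved at __LC_STFL_FAC_LIST. */
    static int required_facilities_present(const unsigned int *required,
                                           const unsigned int *stored)
    {
            unsigned int nwords = required[0];

            for (unsigned int i = 0; i < nwords; i++)
                    if ((required[i + 1] & stored[i]) != required[i + 1])
                            return 0;       /* a required facility bit is missing */
            return 1;
    }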
- .align 8 -.Lentry:.long 0x00080000,0x80000000 + _stext -.Lctl: .long 0x04b50000 # cr0: various things - .long 0 # cr1: primary space segment table - .long .Lduct # cr2: dispatchable unit control table - .long 0 # cr3: instruction authorization - .long 0 # cr4: instruction authorization - .long .Lduct # cr5: primary-aste origin - .long 0 # cr6: I/O interrupts - .long 0 # cr7: secondary space segment table - .long 0 # cr8: access registers translation - .long 0 # cr9: tracing off - .long 0 # cr10: tracing off - .long 0 # cr11: tracing off - .long 0 # cr12: tracing off - .long 0 # cr13: home space segment table - .long 0xc0000000 # cr14: machine check handling off - .long 0 # cr15: linkage stack operations -.Lbss_bgn: .long __bss_start -.Lbss_end: .long _end -.Lparmaddr: .long PARMAREA -.Linittu: .long init_thread_union -.Lstartup_init: - .long startup_init - .align 64 -.Lduct: .long 0,0,0,0,.Lduald,0,0,0 - .long 0,0,0,0,0,0,0,0 - .align 128 -.Lduald:.rept 8 - .long 0x80000000,0,0,0 # invalid access-list entries - .endr -.Lbase_cc: - .long sched_clock_base_cc - -ENTRY(_ehead) - - .org 0x100000 - 0x11000 # head.o ends at 0x11000 -# -# startup-code, running in absolute addressing mode -# -ENTRY(_stext) - basr %r13,0 # get base -.LPG3: -# check control registers - stctl %c0,%c15,0(%r15) - oi 2(%r15),0x60 # enable sigp emergency & external call - oi 0(%r15),0x10 # switch on low address protection - lctl %c0,%c15,0(%r15) - -# - lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess - l %r14,.Lstart-.LPG3(%r13) - basr %r14,%r14 # call start_kernel -# -# We returned from start_kernel ?!? PANIK -# - basr %r13,0 - lpsw .Ldw-.(%r13) # load disabled wait psw -# - .align 8 -.Ldw: .long 0x000a0000,0x00000000 -.Lstart:.long start_kernel -.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S index 085a95e..d05950f 100644 --- a/arch/s390/kernel/head_kdump.S +++ b/arch/s390/kernel/head_kdump.S @@ -92,17 +92,9 @@ startup_kdump_relocated: #else .align 2 .Lep_startup_kdump: -#ifdef CONFIG_64BIT larl %r13,startup_kdump_crash lpswe 0(%r13) .align 8 startup_kdump_crash: .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash -#else - basr %r13,0 -0: lpsw startup_kdump_crash-0b(%r13) -.align 8 -startup_kdump_crash: - .long 0x000a0000,0x00000000 + startup_kdump_crash -#endif /* CONFIG_64BIT */ #endif /* CONFIG_CRASH_DUMP */ diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 5c8651f..52fbef9 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -182,24 +182,21 @@ EXPORT_SYMBOL_GPL(diag308); /* SYSFS */ -#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ +#define IPL_ATTR_SHOW_FN(_prefix, _name, _format, args...) 
\ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, \ char *page) \ { \ - return sprintf(page, _format, _value); \ -} \ + return snprintf(page, PAGE_SIZE, _format, ##args); \ +} + +#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ +IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ - __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL); + __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL) #define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \ -static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ - char *page) \ -{ \ - return sprintf(page, _fmt_out, \ - (unsigned long long) _value); \ -} \ +IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value) \ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ struct kobj_attribute *attr, \ const char *buf, size_t len) \ @@ -213,15 +210,10 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ __ATTR(_name,(S_IRUGO | S_IWUSR), \ sys_##_prefix##_##_name##_show, \ - sys_##_prefix##_##_name##_store); + sys_##_prefix##_##_name##_store) #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\ -static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ - char *page) \ -{ \ - return sprintf(page, _fmt_out, _value); \ -} \ +IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, _value) \ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ struct kobj_attribute *attr, \ const char *buf, size_t len) \ @@ -233,7 +225,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ __ATTR(_name,(S_IRUGO | S_IWUSR), \ sys_##_prefix##_##_name##_show, \ - sys_##_prefix##_##_name##_store); + sys_##_prefix##_##_name##_store) static void make_attrs_ro(struct attribute **attrs) { @@ -415,15 +407,9 @@ static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj, return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START, IPL_PARMBLOCK_SIZE); } - -static struct bin_attribute ipl_parameter_attr = { - .attr = { - .name = "binary_parameter", - .mode = S_IRUGO, - }, - .size = PAGE_SIZE, - .read = &ipl_parameter_read, -}; +static struct bin_attribute ipl_parameter_attr = + __BIN_ATTR(binary_parameter, S_IRUGO, ipl_parameter_read, NULL, + PAGE_SIZE); static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, @@ -434,14 +420,13 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj, return memory_read_from_buffer(buf, count, &off, scp_data, size); } +static struct bin_attribute ipl_scp_data_attr = + __BIN_ATTR(scp_data, S_IRUGO, ipl_scp_data_read, NULL, PAGE_SIZE); -static struct bin_attribute ipl_scp_data_attr = { - .attr = { - .name = "scp_data", - .mode = S_IRUGO, - }, - .size = PAGE_SIZE, - .read = ipl_scp_data_read, +static struct bin_attribute *ipl_fcp_bin_attrs[] = { + &ipl_parameter_attr, + &ipl_scp_data_attr, + NULL, }; /* FCP ipl device attributes */ @@ -484,6 +469,7 @@ static struct attribute *ipl_fcp_attrs[] = { static struct attribute_group ipl_fcp_attr_group = { .attrs = ipl_fcp_attrs, + .bin_attrs = ipl_fcp_bin_attrs, }; /* CCW ipl device attributes */ @@ -540,28 +526,6 @@ static struct attribute_group 
ipl_unknown_attr_group = { static struct kset *ipl_kset; -static int __init ipl_register_fcp_files(void) -{ - int rc; - - rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group); - if (rc) - goto out; - rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr); - if (rc) - goto out_ipl_parm; - rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr); - if (!rc) - goto out; - - sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr); - -out_ipl_parm: - sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group); -out: - return rc; -} - static void __ipl_run(void *unused) { diag308(DIAG308_IPL, NULL); @@ -596,7 +560,7 @@ static int __init ipl_init(void) break; case IPL_TYPE_FCP: case IPL_TYPE_FCP_DUMP: - rc = ipl_register_fcp_files(); + rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group); break; case IPL_TYPE_NSS: rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group); @@ -744,15 +708,13 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj, return count; } +static struct bin_attribute sys_reipl_fcp_scp_data_attr = + __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read, + reipl_fcp_scpdata_write, PAGE_SIZE); -static struct bin_attribute sys_reipl_fcp_scp_data_attr = { - .attr = { - .name = "scp_data", - .mode = S_IRUGO | S_IWUSR, - }, - .size = PAGE_SIZE, - .read = reipl_fcp_scpdata_read, - .write = reipl_fcp_scpdata_write, +static struct bin_attribute *reipl_fcp_bin_attrs[] = { + &sys_reipl_fcp_scp_data_attr, + NULL, }; DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n", @@ -841,6 +803,7 @@ static struct attribute *reipl_fcp_attrs[] = { static struct attribute_group reipl_fcp_attr_group = { .attrs = reipl_fcp_attrs, + .bin_attrs = reipl_fcp_bin_attrs, }; /* CCW reipl device attributes */ @@ -1261,15 +1224,6 @@ static int __init reipl_fcp_init(void) return rc; } - rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj, - &sys_reipl_fcp_scp_data_attr); - if (rc) { - sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); - kset_unregister(reipl_fcp_kset); - free_page((unsigned long) reipl_block_fcp); - return rc; - } - if (ipl_info.type == IPL_TYPE_FCP) { memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); /* @@ -1713,9 +1667,7 @@ static ssize_t on_reboot_store(struct kobject *kobj, { return set_trigger(buf, &on_reboot_trigger, len); } - -static struct kobj_attribute on_reboot_attr = - __ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store); +static struct kobj_attribute on_reboot_attr = __ATTR_RW(on_reboot); static void do_machine_restart(char *__unused) { @@ -1741,9 +1693,7 @@ static ssize_t on_panic_store(struct kobject *kobj, { return set_trigger(buf, &on_panic_trigger, len); } - -static struct kobj_attribute on_panic_attr = - __ATTR(on_panic, 0644, on_panic_show, on_panic_store); +static struct kobj_attribute on_panic_attr = __ATTR_RW(on_panic); static void do_panic(void) { @@ -1769,9 +1719,7 @@ static ssize_t on_restart_store(struct kobject *kobj, { return set_trigger(buf, &on_restart_trigger, len); } - -static struct kobj_attribute on_restart_attr = - __ATTR(on_restart, 0644, on_restart_show, on_restart_store); +static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart); static void __do_restart(void *ignore) { @@ -1808,10 +1756,7 @@ static ssize_t on_halt_store(struct kobject *kobj, { return set_trigger(buf, &on_halt_trigger, len); } - -static struct kobj_attribute on_halt_attr = - __ATTR(on_halt, 0644, on_halt_show, on_halt_store); - +static struct kobj_attribute on_halt_attr = 
__ATTR_RW(on_halt); static void do_machine_halt(void) { @@ -1837,10 +1782,7 @@ static ssize_t on_poff_store(struct kobject *kobj, { return set_trigger(buf, &on_poff_trigger, len); } - -static struct kobj_attribute on_poff_attr = - __ATTR(on_poff, 0644, on_poff_show, on_poff_store); - +static struct kobj_attribute on_poff_attr = __ATTR_RW(on_poff); static void do_machine_power_off(void) { @@ -1850,26 +1792,27 @@ static void do_machine_power_off(void) } void (*_machine_power_off)(void) = do_machine_power_off; +static struct attribute *shutdown_action_attrs[] = { + &on_restart_attr.attr, + &on_reboot_attr.attr, + &on_panic_attr.attr, + &on_halt_attr.attr, + &on_poff_attr.attr, + NULL, +}; + +static struct attribute_group shutdown_action_attr_group = { + .attrs = shutdown_action_attrs, +}; + static void __init shutdown_triggers_init(void) { shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL, firmware_kobj); if (!shutdown_actions_kset) goto fail; - if (sysfs_create_file(&shutdown_actions_kset->kobj, - &on_reboot_attr.attr)) - goto fail; - if (sysfs_create_file(&shutdown_actions_kset->kobj, - &on_panic_attr.attr)) - goto fail; - if (sysfs_create_file(&shutdown_actions_kset->kobj, - &on_halt_attr.attr)) - goto fail; - if (sysfs_create_file(&shutdown_actions_kset->kobj, - &on_poff_attr.attr)) - goto fail; - if (sysfs_create_file(&shutdown_actions_kset->kobj, - &on_restart_attr.attr)) + if (sysfs_create_group(&shutdown_actions_kset->kobj, + &shutdown_action_attr_group)) goto fail; return; fail: @@ -2062,12 +2005,10 @@ static void do_reset_calls(void) { struct reset_call *reset; -#ifdef CONFIG_64BIT if (diag308_set_works) { diag308_reset(); return; } -#endif list_for_each_entry(reset, &rcall, list) reset->fn(); } diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index f238720..02ab9aa 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -56,7 +56,7 @@ static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = { * /proc/interrupts. * In addition this list contains non external / I/O events like NMIs. 
*/ -static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { +static const struct irq_class irqclass_sub_desc[] = { {.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"}, {.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"}, {.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"}, @@ -94,6 +94,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { void __init init_IRQ(void) { + BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS); init_cio_interrupts(); init_airq_interrupts(); init_ext_interrupts(); diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index 830066f..a902996 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -78,7 +78,7 @@ static void __jump_label_transform(struct jump_entry *entry, if (memcmp((void *)entry->code, &old, sizeof(old))) jump_label_bug(entry, &old, &new); } - probe_kernel_write((void *)entry->code, &new, sizeof(new)); + s390_kernel_write((void *)entry->code, &new, sizeof(new)); } static int __sm_arch_jump_label_transform(void *data) diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index f516edc..389db56 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -178,7 +178,7 @@ static int swap_instruction(void *data) } skip_ftrace: kcb->kprobe_status = KPROBE_SWAP_INST; - probe_kernel_write(p->addr, &new_insn, len); + s390_kernel_write(p->addr, &new_insn, len); kcb->kprobe_status = status; return 0; } diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 2ca9586..0c1a679 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -38,13 +38,8 @@ #define DEBUGP(fmt , ...) #endif -#ifndef CONFIG_64BIT -#define PLT_ENTRY_SIZE 12 -#else /* CONFIG_64BIT */ #define PLT_ENTRY_SIZE 20 -#endif /* CONFIG_64BIT */ -#ifdef CONFIG_64BIT void *module_alloc(unsigned long size) { if (PAGE_ALIGN(size) > MODULES_LEN) @@ -53,7 +48,6 @@ void *module_alloc(unsigned long size) GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); } -#endif void module_arch_freeing_init(struct module *mod) { @@ -323,17 +317,11 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, unsigned int *ip; ip = me->module_core + me->arch.plt_offset + info->plt_offset; -#ifndef CONFIG_64BIT - ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ - ip[1] = 0x100607f1; - ip[2] = val; -#else /* CONFIG_64BIT */ ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ ip[1] = 0x100a0004; ip[2] = 0x07f10000; ip[3] = (unsigned int) (val >> 32); ip[4] = (unsigned int) val; -#endif /* CONFIG_64BIT */ info->plt_initialized = 1; } if (r_type == R_390_PLTOFF16 || diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 3f51cf4..505c17c 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -117,55 +117,36 @@ static int notrace s390_revalidate_registers(struct mci *mci) */ kill_task = 1; } -#ifndef CONFIG_64BIT + fpt_save_area = &S390_lowcore.floating_pt_save_area; + fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; + if (!mci->fc) { + /* + * Floating point control register can't be restored. + * Task will be terminated. 
+ */ + asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); + kill_task = 1; + } else + asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); + asm volatile( " ld 0,0(%0)\n" - " ld 2,8(%0)\n" - " ld 4,16(%0)\n" - " ld 6,24(%0)" - : : "a" (&S390_lowcore.floating_pt_save_area)); -#endif - - if (MACHINE_HAS_IEEE) { -#ifdef CONFIG_64BIT - fpt_save_area = &S390_lowcore.floating_pt_save_area; - fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; -#else - fpt_save_area = (void *) S390_lowcore.extended_save_area_addr; - fpt_creg_save_area = fpt_save_area + 128; -#endif - if (!mci->fc) { - /* - * Floating point control register can't be restored. - * Task will be terminated. - */ - asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); - kill_task = 1; - - } else - asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); - - asm volatile( - " ld 0,0(%0)\n" - " ld 1,8(%0)\n" - " ld 2,16(%0)\n" - " ld 3,24(%0)\n" - " ld 4,32(%0)\n" - " ld 5,40(%0)\n" - " ld 6,48(%0)\n" - " ld 7,56(%0)\n" - " ld 8,64(%0)\n" - " ld 9,72(%0)\n" - " ld 10,80(%0)\n" - " ld 11,88(%0)\n" - " ld 12,96(%0)\n" - " ld 13,104(%0)\n" - " ld 14,112(%0)\n" - " ld 15,120(%0)\n" - : : "a" (fpt_save_area)); - } - -#ifdef CONFIG_64BIT + " ld 1,8(%0)\n" + " ld 2,16(%0)\n" + " ld 3,24(%0)\n" + " ld 4,32(%0)\n" + " ld 5,40(%0)\n" + " ld 6,48(%0)\n" + " ld 7,56(%0)\n" + " ld 8,64(%0)\n" + " ld 9,72(%0)\n" + " ld 10,80(%0)\n" + " ld 11,88(%0)\n" + " ld 12,96(%0)\n" + " ld 13,104(%0)\n" + " ld 14,112(%0)\n" + " ld 15,120(%0)\n" + : : "a" (fpt_save_area)); /* Revalidate vector registers */ if (MACHINE_HAS_VX && current->thread.vxrs) { if (!mci->vr) { @@ -178,7 +159,6 @@ static int notrace s390_revalidate_registers(struct mci *mci) restore_vx_regs((__vector128 *) S390_lowcore.vector_save_area_addr); } -#endif /* Revalidate access registers */ asm volatile( " lam 0,15,0(%0)" @@ -198,21 +178,14 @@ static int notrace s390_revalidate_registers(struct mci *mci) */ s390_handle_damage("invalid control registers."); } else { -#ifdef CONFIG_64BIT asm volatile( " lctlg 0,15,0(%0)" : : "a" (&S390_lowcore.cregs_save_area)); -#else - asm volatile( - " lctl 0,15,0(%0)" - : : "a" (&S390_lowcore.cregs_save_area)); -#endif } /* * We don't even try to revalidate the TOD register, since we simply * can't write something sensible into that register. */ -#ifdef CONFIG_64BIT /* * See if we can revalidate the TOD programmable register with its * old contents (should be zero) otherwise set it to zero. 
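
The nmi.c hunks above drop the 31-bit branches from s390_revalidate_registers(). The recurring pattern there is that each piece of CPU state has a validity bit in the machine-check interruption code: state is reloaded from the lowcore save area when the bit is set, and when it cannot be reconstructed the task is terminated (or, for essential state, the system is stopped). The following is a minimal C sketch of that pattern only; the structure layout and names are assumptions made for the example, not kernel definitions.

#include <stdbool.h>
#include <string.h>

/* Assumed, simplified stand-ins for the mci validity bits and save area. */
struct mci_sketch {
	bool fc;			/* floating point control valid? */
};

struct fp_save_area_sketch {
	unsigned long long fpc;
	unsigned long long fprs[16];
};

/* Returns nonzero if the interrupted task has to be terminated. */
static int revalidate_fp_sketch(const struct mci_sketch *mci,
				const struct fp_save_area_sketch *sa,
				unsigned long long *fpc,
				unsigned long long fprs[16])
{
	int kill_task = 0;

	if (!mci->fc) {
		/* The FP control register was lost: load a sane value and
		 * terminate the task, since its FP state is unreliable. */
		*fpc = 0;
		kill_task = 1;
	} else {
		*fpc = sa->fpc;
	}
	/* The FP registers themselves are always reloaded. */
	memcpy(fprs, sa->fprs, sizeof(sa->fprs));
	return kill_task;
}
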
@@ -228,7 +201,6 @@ static int notrace s390_revalidate_registers(struct mci *mci) " sckpf" : : "a" (&S390_lowcore.tod_progreg_save_area) : "0", "cc"); -#endif /* Revalidate clock comparator register */ set_clock_comparator(S390_lowcore.clock_comparator); /* Check if old PSW is valid */ @@ -280,19 +252,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs) if (mci->b) { /* Processing backup -> verify if we can survive this */ u64 z_mcic, o_mcic, t_mcic; -#ifdef CONFIG_64BIT z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16); -#else - z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 | - 1ULL<<29); - o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | - 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | - 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16); -#endif t_mcic = *(u64 *)mci; if (((t_mcic & z_mcic) != 0) || diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S index f6f8886..036aa01 100644 --- a/arch/s390/kernel/pgm_check.S +++ b/arch/s390/kernel/pgm_check.S @@ -6,19 +6,13 @@ #include <linux/linkage.h> -#ifdef CONFIG_32BIT -#define PGM_CHECK_64BIT(handler) .long default_trap_handler -#else -#define PGM_CHECK_64BIT(handler) .long handler -#endif - #define PGM_CHECK(handler) .long handler #define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler) /* * The program check table contains exactly 128 (0x00-0x7f) entries. Each - * line defines the 31 and/or 64 bit function to be called corresponding - * to the program check interruption code. + * line defines the function to be called corresponding to the program check + * interruption code. */ .section .rodata, "a" ENTRY(pgm_check_table) @@ -46,10 +40,10 @@ PGM_CHECK_DEFAULT /* 14 */ PGM_CHECK(operand_exception) /* 15 */ PGM_CHECK_DEFAULT /* 16 */ PGM_CHECK_DEFAULT /* 17 */ -PGM_CHECK_64BIT(transaction_exception) /* 18 */ +PGM_CHECK(transaction_exception) /* 18 */ PGM_CHECK_DEFAULT /* 19 */ PGM_CHECK_DEFAULT /* 1a */ -PGM_CHECK_64BIT(vector_exception) /* 1b */ +PGM_CHECK(vector_exception) /* 1b */ PGM_CHECK(space_switch_exception) /* 1c */ PGM_CHECK(hfp_sqrt_exception) /* 1d */ PGM_CHECK_DEFAULT /* 1e */ @@ -78,10 +72,10 @@ PGM_CHECK_DEFAULT /* 34 */ PGM_CHECK_DEFAULT /* 35 */ PGM_CHECK_DEFAULT /* 36 */ PGM_CHECK_DEFAULT /* 37 */ -PGM_CHECK_64BIT(do_dat_exception) /* 38 */ -PGM_CHECK_64BIT(do_dat_exception) /* 39 */ -PGM_CHECK_64BIT(do_dat_exception) /* 3a */ -PGM_CHECK_64BIT(do_dat_exception) /* 3b */ +PGM_CHECK(do_dat_exception) /* 38 */ +PGM_CHECK(do_dat_exception) /* 39 */ +PGM_CHECK(do_dat_exception) /* 3a */ +PGM_CHECK(do_dat_exception) /* 3b */ PGM_CHECK_DEFAULT /* 3c */ PGM_CHECK_DEFAULT /* 3d */ PGM_CHECK_DEFAULT /* 3e */ diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 13fc097..dc5edc2 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -79,13 +79,11 @@ void release_thread(struct task_struct *dead_task) { } -#ifdef CONFIG_64BIT void arch_release_task_struct(struct task_struct *tsk) { if (tsk->thread.vxrs) kfree(tsk->thread.vxrs); } -#endif int copy_thread(unsigned long clone_flags, unsigned long new_stackp, unsigned long arg, struct task_struct *p) @@ -144,19 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, p->thread.ri_signum = 0; frame->childregs.psw.mask &= ~PSW_MASK_RI; -#ifndef CONFIG_64BIT - /* - * save fprs to current->thread.fp_regs to merge them with - * the emulated registers and then 
copy the result to the child. - */ - save_fp_ctl(¤t->thread.fp_regs.fpc); - save_fp_regs(current->thread.fp_regs.fprs); - memcpy(&p->thread.fp_regs, ¤t->thread.fp_regs, - sizeof(s390_fp_regs)); - /* Set a new TLS ? */ - if (clone_flags & CLONE_SETTLS) - p->thread.acrs[0] = frame->childregs.gprs[6]; -#else /* CONFIG_64BIT */ /* Save the fpu registers to new thread structure. */ save_fp_ctl(&p->thread.fp_regs.fpc); save_fp_regs(p->thread.fp_regs.fprs); @@ -172,15 +157,13 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, p->thread.acrs[1] = (unsigned int)tls; } } -#endif /* CONFIG_64BIT */ return 0; } asmlinkage void execve_tail(void) { current->thread.fp_regs.fpc = 0; - if (MACHINE_HAS_IEEE) - asm volatile("sfpc %0,%0" : : "d" (0)); + asm volatile("sfpc %0,%0" : : "d" (0)); } /* @@ -188,18 +171,8 @@ asmlinkage void execve_tail(void) */ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) { -#ifndef CONFIG_64BIT - /* - * save fprs to current->thread.fp_regs to merge them with - * the emulated registers and then copy the result to the dump. - */ - save_fp_ctl(¤t->thread.fp_regs.fpc); - save_fp_regs(current->thread.fp_regs.fprs); - memcpy(fpregs, ¤t->thread.fp_regs, sizeof(s390_fp_regs)); -#else /* CONFIG_64BIT */ save_fp_ctl(&fpregs->fpc); save_fp_regs(fpregs->fprs); -#endif /* CONFIG_64BIT */ return 1; } EXPORT_SYMBOL(dump_fpu); diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index eabfb45..d363c9c 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -44,7 +44,6 @@ void update_cr_regs(struct task_struct *task) struct thread_struct *thread = &task->thread; struct per_regs old, new; -#ifdef CONFIG_64BIT /* Take care of the enable/disable of transactional execution. */ if (MACHINE_HAS_TE || MACHINE_HAS_VX) { unsigned long cr, cr_new; @@ -80,7 +79,6 @@ void update_cr_regs(struct task_struct *task) __ctl_load(cr_new, 2, 2); } } -#endif /* Copy user specified PER registers */ new.control = thread->per_user.control; new.start = thread->per_user.start; @@ -93,10 +91,8 @@ void update_cr_regs(struct task_struct *task) new.control |= PER_EVENT_BRANCH; else new.control |= PER_EVENT_IFETCH; -#ifdef CONFIG_64BIT new.control |= PER_CONTROL_SUSPENSION; new.control |= PER_EVENT_TRANSACTION_END; -#endif if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) new.control |= PER_EVENT_IFETCH; new.start = 0; @@ -146,11 +142,7 @@ void ptrace_disable(struct task_struct *task) task->thread.per_flags = 0; } -#ifndef CONFIG_64BIT -# define __ADDR_MASK 3 -#else -# define __ADDR_MASK 7 -#endif +#define __ADDR_MASK 7 static inline unsigned long __peek_user_per(struct task_struct *child, addr_t addr) @@ -223,7 +215,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; -#ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb reading * from acrs[15]. Result is a 64 bit value. 
Read the @@ -232,8 +223,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) if (addr == (addr_t) &dummy->regs.acrs[15]) tmp = ((unsigned long) child->thread.acrs[15]) << 32; else -#endif - tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); + tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { /* @@ -261,12 +251,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) * or the child->thread.vxrs array */ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; -#ifdef CONFIG_64BIT if (child->thread.vxrs) tmp = *(addr_t *) ((addr_t) child->thread.vxrs + 2*offset); else -#endif tmp = *(addr_t *) ((addr_t) &child->thread.fp_regs.fprs + offset); @@ -293,11 +281,9 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data) * an alignment of 4. Programmers from hell... */ mask = __ADDR_MASK; -#ifdef CONFIG_64BIT if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) mask = 3; -#endif if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) return -EIO; @@ -370,7 +356,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; -#ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb writing * to acrs[15] with a 64 bit value. Ignore the lower @@ -380,8 +365,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) if (addr == (addr_t) &dummy->regs.acrs[15]) child->thread.acrs[15] = (unsigned int) (data >> 32); else -#endif - *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; + *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { /* @@ -411,12 +395,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) * or the child->thread.vxrs array */ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; -#ifdef CONFIG_64BIT if (child->thread.vxrs) *(addr_t *)((addr_t) child->thread.vxrs + 2*offset) = data; else -#endif *(addr_t *)((addr_t) &child->thread.fp_regs.fprs + offset) = data; @@ -441,11 +423,9 @@ static int poke_user(struct task_struct *child, addr_t addr, addr_t data) * an alignment of 4. Programmers from hell indeed... 
*/ mask = __ADDR_MASK; -#ifdef CONFIG_64BIT if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) mask = 3; -#endif if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) return -EIO; @@ -649,12 +629,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) * or the child->thread.vxrs array */ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; -#ifdef CONFIG_64BIT if (child->thread.vxrs) tmp = *(__u32 *) ((addr_t) child->thread.vxrs + 2*offset); else -#endif tmp = *(__u32 *) ((addr_t) &child->thread.fp_regs.fprs + offset); @@ -776,12 +754,10 @@ static int __poke_user_compat(struct task_struct *child, * or the child->thread.vxrs array */ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; -#ifdef CONFIG_64BIT if (child->thread.vxrs) *(__u32 *)((addr_t) child->thread.vxrs + 2*offset) = tmp; else -#endif *(__u32 *)((addr_t) &child->thread.fp_regs.fprs + offset) = tmp; @@ -979,16 +955,13 @@ static int s390_fpregs_get(struct task_struct *target, if (target == current) { save_fp_ctl(&target->thread.fp_regs.fpc); save_fp_regs(target->thread.fp_regs.fprs); - } -#ifdef CONFIG_64BIT - else if (target->thread.vxrs) { + } else if (target->thread.vxrs) { int i; for (i = 0; i < __NUM_VXRS_LOW; i++) target->thread.fp_regs.fprs[i] = *(freg_t *)(target->thread.vxrs + i); } -#endif return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.fp_regs, 0, -1); } @@ -1026,23 +999,18 @@ static int s390_fpregs_set(struct task_struct *target, if (target == current) { restore_fp_ctl(&target->thread.fp_regs.fpc); restore_fp_regs(target->thread.fp_regs.fprs); - } -#ifdef CONFIG_64BIT - else if (target->thread.vxrs) { + } else if (target->thread.vxrs) { int i; for (i = 0; i < __NUM_VXRS_LOW; i++) *(freg_t *)(target->thread.vxrs + i) = target->thread.fp_regs.fprs[i]; } -#endif } return rc; } -#ifdef CONFIG_64BIT - static int s390_last_break_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, @@ -1182,8 +1150,6 @@ static int s390_vxrs_high_set(struct task_struct *target, return rc; } -#endif - static int s390_system_call_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, @@ -1229,7 +1195,6 @@ static const struct user_regset s390_regsets[] = { .get = s390_system_call_get, .set = s390_system_call_set, }, -#ifdef CONFIG_64BIT { .core_note_type = NT_S390_LAST_BREAK, .n = 1, @@ -1262,7 +1227,6 @@ static const struct user_regset s390_regsets[] = { .get = s390_vxrs_high_get, .set = s390_vxrs_high_set, }, -#endif }; static const struct user_regset_view user_s390_view = { diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index dd8016b..52aab0b 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S @@ -1,7 +1,7 @@ /* - * S390 version - * Copyright IBM Corp. 
2000 - * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) + * Copyright IBM Corp 2000, 2011 + * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, + * Denis Joseph Barrow, */ #include <linux/linkage.h> @@ -9,43 +9,90 @@ #include <asm/sigp.h> # -# store_status: Empty implementation until kdump is supported on 31 bit +# store_status +# +# Prerequisites to run this function: +# - Prefix register is set to zero +# - Original prefix register is stored in "dump_prefix_page" +# - Lowcore protection is off # ENTRY(store_status) - br %r14 + /* Save register one and load save area base */ + stg %r1,__LC_SAVE_AREA_RESTART + lghi %r1,SAVE_AREA_BASE + /* General purpose registers */ + stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + lg %r2,__LC_SAVE_AREA_RESTART + stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) + /* Control registers */ + stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Access registers */ + stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Floating point registers */ + std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Floating point control register */ + stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* CPU timer */ + stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1) + /* Saved prefix register */ + larl %r2,dump_prefix_page + mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2) + /* Clock comparator - seven bytes */ + larl %r2,.Lclkcmp + stckc 0(%r2) + mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2) + /* Program status word */ + epsw %r2,%r3 + st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1) + st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1) + larl %r2,store_status + stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) + br %r14 + + .section .bss + .align 8 +.Lclkcmp: .quad 0x0000000000000000 + .previous # # do_reipl_asm # Parameter: r2 = schid of reipl device # + ENTRY(do_reipl_asm) basr %r13,0 -.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) -.Lpg1: # do store status of all registers +.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) +.Lpg1: brasl %r14,store_status - stm %r0,%r15,__LC_GPREGS_SAVE_AREA - stctl %c0,%c15,__LC_CREGS_SAVE_AREA - stam %a0,%a15,__LC_AREGS_SAVE_AREA - l %r10,.Ldump_pfx-.Lpg0(%r13) - mvc __LC_PREFIX_SAVE_AREA(4),0(%r10) - stckc .Lclkcmp-.Lpg0(%r13) - mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13) - stpt __LC_CPU_TIMER_SAVE_AREA - st %r13, __LC_PSW_SAVE_AREA+4 - lctl %c6,%c6,.Lall-.Lpg0(%r13) - lr %r1,%r2 - mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13) + lctlg %c6,%c6,.Lall-.Lpg0(%r13) + lgr %r1,%r2 + mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) stsch .Lschib-.Lpg0(%r13) oi .Lschib+5-.Lpg0(%r13),0x84 -.Lecs: xi 
.Lschib+27-.Lpg0(%r13),0x01 +.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 msch .Lschib-.Lpg0(%r13) - lhi %r0,5 + lghi %r0,5 .Lssch: ssch .Liplorb-.Lpg0(%r13) jz .L001 brct %r0,.Lssch bas %r14,.Ldisab-.Lpg0(%r13) -.L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13) -.Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13) +.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13) +.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13) .Lcont: c %r1,__LC_SUBCHANNEL_ID jnz .Ltpi clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) @@ -58,20 +105,36 @@ ENTRY(do_reipl_asm) jz .L003 bas %r14,.Ldisab-.Lpg0(%r13) .L003: st %r1,__LC_SUBCHANNEL_ID + lhi %r1,0 # mode 0 = esa + slr %r0,%r0 # set cpuid to zero + sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode lpsw 0 - sigp 0,0,SIGP_RESTART -.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) - lpsw .Ldispsw-.Lpg0(%r13) +.Ldisab: sll %r14,1 + srl %r14,1 # need to kill hi bit to avoid specification exceptions. + st %r14,.Ldispsw+12-.Lpg0(%r13) + lpswe .Ldispsw-.Lpg0(%r13) .align 8 -.Lclkcmp: .quad 0x0000000000000000 -.Lall: .long 0xff000000 -.Ldump_pfx: .long dump_prefix_page - .align 8 -.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 -.Lpcnew: .long 0x00080000,0x80000000+.Lecs -.Lionew: .long 0x00080000,0x80000000+.Lcont -.Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi -.Ldispsw: .long 0x000a0000,0x00000000 +.Lall: .quad 0x00000000ff000000 + .align 16 +/* + * These addresses have to be 31 bit otherwise + * the sigp will throw a specifcation exception + * when switching to ESA mode as bit 31 be set + * in the ESA psw. + * Bit 31 of the addresses has to be 0 for the + * 31bit lpswe instruction a fact they appear to have + * omitted from the pop. + */ +.Lnewpsw: .quad 0x0000000080000000 + .quad .Lpg1 +.Lpcnew: .quad 0x0000000080000000 + .quad .Lecs +.Lionew: .quad 0x0000000080000000 + .quad .Lcont +.Lwaitpsw: .quad 0x0202000080000000 + .quad .Ltpi +.Ldispsw: .quad 0x0002000080000000 + .quad 0x0000000000000000 .Liplccws: .long 0x02000000,0x60000018 .long 0x08000008,0x20000001 .Liplorb: .long 0x0049504c,0x0040ff80 diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S deleted file mode 100644 index dc3b127..0000000 --- a/arch/s390/kernel/reipl64.S +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright IBM Corp 2000, 2011 - * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, - * Denis Joseph Barrow, - */ - -#include <linux/linkage.h> -#include <asm/asm-offsets.h> -#include <asm/sigp.h> - -# -# store_status -# -# Prerequisites to run this function: -# - Prefix register is set to zero -# - Original prefix register is stored in "dump_prefix_page" -# - Lowcore protection is off -# -ENTRY(store_status) - /* Save register one and load save area base */ - stg %r1,__LC_SAVE_AREA_RESTART - lghi %r1,SAVE_AREA_BASE - /* General purpose registers */ - stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - lg %r2,__LC_SAVE_AREA_RESTART - stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) - /* Control registers */ - stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - /* Access registers */ - stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - /* Floating point registers */ - std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f7, 0x38 + 
__LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - /* Floating point control register */ - stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1) - /* CPU timer */ - stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1) - /* Saved prefix register */ - larl %r2,dump_prefix_page - mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2) - /* Clock comparator - seven bytes */ - larl %r2,.Lclkcmp - stckc 0(%r2) - mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2) - /* Program status word */ - epsw %r2,%r3 - st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1) - st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1) - larl %r2,store_status - stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1) - br %r14 - - .section .bss - .align 8 -.Lclkcmp: .quad 0x0000000000000000 - .previous - -# -# do_reipl_asm -# Parameter: r2 = schid of reipl device -# - -ENTRY(do_reipl_asm) - basr %r13,0 -.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) -.Lpg1: brasl %r14,store_status - - lctlg %c6,%c6,.Lall-.Lpg0(%r13) - lgr %r1,%r2 - mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) - stsch .Lschib-.Lpg0(%r13) - oi .Lschib+5-.Lpg0(%r13),0x84 -.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 - msch .Lschib-.Lpg0(%r13) - lghi %r0,5 -.Lssch: ssch .Liplorb-.Lpg0(%r13) - jz .L001 - brct %r0,.Lssch - bas %r14,.Ldisab-.Lpg0(%r13) -.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13) -.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13) -.Lcont: c %r1,__LC_SUBCHANNEL_ID - jnz .Ltpi - clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) - jnz .Ltpi - tsch .Liplirb-.Lpg0(%r13) - tm .Liplirb+9-.Lpg0(%r13),0xbf - jz .L002 - bas %r14,.Ldisab-.Lpg0(%r13) -.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 - jz .L003 - bas %r14,.Ldisab-.Lpg0(%r13) -.L003: st %r1,__LC_SUBCHANNEL_ID - lhi %r1,0 # mode 0 = esa - slr %r0,%r0 # set cpuid to zero - sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode - lpsw 0 -.Ldisab: sll %r14,1 - srl %r14,1 # need to kill hi bit to avoid specification exceptions. - st %r14,.Ldispsw+12-.Lpg0(%r13) - lpswe .Ldispsw-.Lpg0(%r13) - .align 8 -.Lall: .quad 0x00000000ff000000 - .align 16 -/* - * These addresses have to be 31 bit otherwise - * the sigp will throw a specifcation exception - * when switching to ESA mode as bit 31 be set - * in the ESA psw. - * Bit 31 of the addresses has to be 0 for the - * 31bit lpswe instruction a fact they appear to have - * omitted from the pop. 
- */ -.Lnewpsw: .quad 0x0000000080000000 - .quad .Lpg1 -.Lpcnew: .quad 0x0000000080000000 - .quad .Lecs -.Lionew: .quad 0x0000000080000000 - .quad .Lcont -.Lwaitpsw: .quad 0x0202000080000000 - .quad .Ltpi -.Ldispsw: .quad 0x0002000080000000 - .quad 0x0000000000000000 -.Liplccws: .long 0x02000000,0x60000018 - .long 0x08000008,0x20000001 -.Liplorb: .long 0x0049504c,0x0040ff80 - .long 0x00000000+.Liplccws -.Lschib: .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 -.Liplirb: .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index f4e6f20..cfac283 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S @@ -19,7 +19,8 @@ * %r7 = PAGE_SIZE * %r8 holds the source address * %r9 = PAGE_SIZE - * %r10 is a page mask + * + * 0xf000 is a page_mask */ .text @@ -27,46 +28,47 @@ ENTRY(relocate_kernel) basr %r13,0 # base address .base: stnsm sys_msk-.base(%r13),0xfb # disable DAT - stctl %c0,%c15,ctlregs-.base(%r13) - stm %r0,%r15,gprregs-.base(%r13) + stctg %c0,%c15,ctlregs-.base(%r13) + stmg %r0,%r15,gprregs-.base(%r13) + lghi %r0,3 + sllg %r0,%r0,31 + stg %r0,0x1d0(%r0) + la %r0,.back_pgm-.base(%r13) + stg %r0,0x1d8(%r0) la %r1,load_psw-.base(%r13) mvc 0(8,%r0),0(%r1) la %r0,.back-.base(%r13) st %r0,4(%r0) oi 4(%r0),0x80 - mvc 0x68(8,%r0),0(%r1) - la %r0,.back_pgm-.base(%r13) - st %r0,0x6c(%r0) - oi 0x6c(%r0),0x80 - lhi %r0,0 + lghi %r0,0 diag %r0,%r0,0x308 .back: + lhi %r1,1 # mode 1 = esame + sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode + sam64 # switch to 64 bit addressing mode basr %r13,0 .back_base: oi have_diag308-.back_base(%r13),0x01 - lctl %c0,%c15,ctlregs-.back_base(%r13) - lm %r0,%r15,gprregs-.back_base(%r13) - j .start_reloc + lctlg %c0,%c15,ctlregs-.back_base(%r13) + lmg %r0,%r15,gprregs-.back_base(%r13) + j .top .back_pgm: - lm %r0,%r15,gprregs-.base(%r13) - .start_reloc: - lhi %r10,-1 # preparing the mask - sll %r10,12 # shift it such that it becomes 0xf000 + lmg %r0,%r15,gprregs-.base(%r13) .top: - lhi %r7,4096 # load PAGE_SIZE in r7 - lhi %r9,4096 # load PAGE_SIZE in r9 - l %r5,0(%r2) # read another word for indirection page - ahi %r2,4 # increment pointer + lghi %r7,4096 # load PAGE_SIZE in r7 + lghi %r9,4096 # load PAGE_SIZE in r9 + lg %r5,0(%r2) # read another word for indirection page + aghi %r2,8 # increment pointer tml %r5,0x1 # is it a destination page? je .indir_check # NO, goto "indir_check" - lr %r6,%r5 # r6 = r5 - nr %r6,%r10 # mask it out and... + lgr %r6,%r5 # r6 = r5 + nill %r6,0xf000 # mask it out and... j .top # ...next iteration .indir_check: tml %r5,0x2 # is it a indirection page? je .done_test # NO, goto "done_test" - nr %r5,%r10 # YES, mask out, - lr %r2,%r5 # move it into the right register, + nill %r5,0xf000 # YES, mask out, + lgr %r2,%r5 # move it into the right register, j .top # and read next... .done_test: tml %r5,0x4 # is it the done indicator? @@ -75,13 +77,13 @@ ENTRY(relocate_kernel) .source_test: tml %r5,0x8 # it should be a source indicator... je .top # NO, ignore it... 
- lr %r8,%r5 # r8 = r5 - nr %r8,%r10 # masking + lgr %r8,%r5 # r8 = r5 + nill %r8,0xf000 # masking 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 jo 0b j .top .done: - sr %r0,%r0 # clear register r0 + sgr %r0,%r0 # clear register r0 la %r4,load_psw-.base(%r13) # load psw-address into the register o %r3,4(%r4) # or load address into psw st %r3,4(%r4) @@ -90,8 +92,9 @@ ENTRY(relocate_kernel) jno .no_diag308 diag %r0,%r0,0x308 .no_diag308: - sr %r1,%r1 # clear %r1 - sr %r2,%r2 # clear %r2 + sam31 # 31 bit mode + sr %r1,%r1 # erase register r1 + sr %r2,%r2 # erase register r2 sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero lpsw 0 # hopefully start new kernel... @@ -102,11 +105,11 @@ ENTRY(relocate_kernel) .quad 0 ctlregs: .rept 16 - .long 0 + .quad 0 .endr gprregs: .rept 16 - .long 0 + .quad 0 .endr have_diag308: .byte 0 diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S deleted file mode 100644 index cfac283..0000000 --- a/arch/s390/kernel/relocate_kernel64.S +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright IBM Corp. 2005 - * - * Author(s): Rolf Adelsberger, - * Heiko Carstens <heiko.carstens@de.ibm.com> - * - */ - -#include <linux/linkage.h> -#include <asm/sigp.h> - -/* - * moves the new kernel to its destination... - * %r2 = pointer to first kimage_entry_t - * %r3 = start address - where to jump to after the job is done... - * - * %r5 will be used as temp. storage - * %r6 holds the destination address - * %r7 = PAGE_SIZE - * %r8 holds the source address - * %r9 = PAGE_SIZE - * - * 0xf000 is a page_mask - */ - - .text -ENTRY(relocate_kernel) - basr %r13,0 # base address - .base: - stnsm sys_msk-.base(%r13),0xfb # disable DAT - stctg %c0,%c15,ctlregs-.base(%r13) - stmg %r0,%r15,gprregs-.base(%r13) - lghi %r0,3 - sllg %r0,%r0,31 - stg %r0,0x1d0(%r0) - la %r0,.back_pgm-.base(%r13) - stg %r0,0x1d8(%r0) - la %r1,load_psw-.base(%r13) - mvc 0(8,%r0),0(%r1) - la %r0,.back-.base(%r13) - st %r0,4(%r0) - oi 4(%r0),0x80 - lghi %r0,0 - diag %r0,%r0,0x308 - .back: - lhi %r1,1 # mode 1 = esame - sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode - sam64 # switch to 64 bit addressing mode - basr %r13,0 - .back_base: - oi have_diag308-.back_base(%r13),0x01 - lctlg %c0,%c15,ctlregs-.back_base(%r13) - lmg %r0,%r15,gprregs-.back_base(%r13) - j .top - .back_pgm: - lmg %r0,%r15,gprregs-.base(%r13) - .top: - lghi %r7,4096 # load PAGE_SIZE in r7 - lghi %r9,4096 # load PAGE_SIZE in r9 - lg %r5,0(%r2) # read another word for indirection page - aghi %r2,8 # increment pointer - tml %r5,0x1 # is it a destination page? - je .indir_check # NO, goto "indir_check" - lgr %r6,%r5 # r6 = r5 - nill %r6,0xf000 # mask it out and... - j .top # ...next iteration - .indir_check: - tml %r5,0x2 # is it a indirection page? - je .done_test # NO, goto "done_test" - nill %r5,0xf000 # YES, mask out, - lgr %r2,%r5 # move it into the right register, - j .top # and read next... - .done_test: - tml %r5,0x4 # is it the done indicator? - je .source_test # NO! Well, then it should be the source indicator... - j .done # ok, lets finish it here... - .source_test: - tml %r5,0x8 # it should be a source indicator... - je .top # NO, ignore it... 
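
Both versions of relocate_kernel shown above implement the same walk: kexec hands over a list of page entries whose low bits classify each entry as a destination page (0x1), a further indirection page (0x2), the end marker (0x4) or a source page to copy (0x8), with the page address in the remaining bits. A rough C rendering of that loop, for illustration only; the flag values match kexec's IND_* constants, everything else is simplified.

#include <stddef.h>
#include <string.h>

#define IND_DESTINATION	0x1UL	/* following sources are copied to this page */
#define IND_INDIRECTION	0x2UL	/* continue reading entries from this page */
#define IND_DONE	0x4UL	/* end of the list */
#define IND_SOURCE	0x8UL	/* copy this page to the current destination */
#define KEXEC_PAGE_SIZE	4096UL
#define KEXEC_PAGE_MASK	(~(KEXEC_PAGE_SIZE - 1))

static void walk_kimage_entries(const unsigned long *entry)
{
	char *dest = NULL;	/* set by the first IND_DESTINATION entry */

	for (;;) {
		unsigned long e = *entry++;

		if (e & IND_DESTINATION) {
			dest = (char *)(e & KEXEC_PAGE_MASK);
		} else if (e & IND_INDIRECTION) {
			entry = (const unsigned long *)(e & KEXEC_PAGE_MASK);
		} else if (e & IND_DONE) {
			break;
		} else if (e & IND_SOURCE) {
			memcpy(dest, (void *)(e & KEXEC_PAGE_MASK),
			       KEXEC_PAGE_SIZE);
			dest += KEXEC_PAGE_SIZE;
		}
	}
}

In the assembly the copy is done with mvcle, which advances the destination register as a side effect, which is why no explicit increment of the destination appears there.
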
- lgr %r8,%r5 # r8 = r5 - nill %r8,0xf000 # masking - 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 - jo 0b - j .top - .done: - sgr %r0,%r0 # clear register r0 - la %r4,load_psw-.base(%r13) # load psw-address into the register - o %r3,4(%r4) # or load address into psw - st %r3,4(%r4) - mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0 - tm have_diag308-.base(%r13),0x01 - jno .no_diag308 - diag %r0,%r0,0x308 - .no_diag308: - sam31 # 31 bit mode - sr %r1,%r1 # erase register r1 - sr %r2,%r2 # erase register r2 - sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero - lpsw 0 # hopefully start new kernel... - - .align 8 - load_psw: - .long 0x00080000,0x80000000 - sys_msk: - .quad 0 - ctlregs: - .rept 16 - .quad 0 - .endr - gprregs: - .rept 16 - .quad 0 - .endr - have_diag308: - .byte 0 - .align 8 - relocate_kernel_end: - .align 8 - .globl relocate_kernel_len - relocate_kernel_len: - .quad relocate_kernel_end - relocate_kernel diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index 7e77e03..43c3169 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S @@ -36,21 +36,17 @@ _sclp_wait_int: ahi %r15,-96 # create stack frame la %r8,LC_EXT_NEW_PSW # register int handler la %r9,.LextpswS1-.LbaseS1(%r13) -#ifdef CONFIG_64BIT tm LC_AR_MODE_ID,1 jno .Lesa1 la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit la %r9,.LextpswS1_64-.LbaseS1(%r13) .Lesa1: -#endif mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) mvc 0(16,%r8),0(%r9) -#ifdef CONFIG_64BIT epsw %r6,%r7 # set current addressing mode nill %r6,0x1 # in new psw (31 or 64 bit mode) nilh %r7,0x8000 stm %r6,%r7,0(%r8) -#endif lhi %r6,0x0200 # cr mask for ext int (cr0.54) ltr %r2,%r2 jz .LsetctS1 @@ -92,10 +88,8 @@ _sclp_wait_int: .long 0, 0, 0, 0 # old ext int PSW .LextpswS1: .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int -#ifdef CONFIG_64BIT .LextpswS1_64: .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit -#endif .LwaitpswS1: .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int .LtimeS1: @@ -272,13 +266,11 @@ _sclp_print: ENTRY(_sclp_print_early) stm %r6,%r15,24(%r15) # save registers ahi %r15,-96 # create stack frame -#ifdef CONFIG_64BIT tm LC_AR_MODE_ID,1 jno .Lesa2 ahi %r15,-80 stmh %r6,%r15,96(%r15) # store upper register halves .Lesa2: -#endif lr %r10,%r2 # save string pointer lhi %r2,0 bras %r14,_sclp_setup # enable console @@ -291,14 +283,12 @@ ENTRY(_sclp_print_early) lhi %r2,1 bras %r14,_sclp_setup # disable console .LendS5: -#ifdef CONFIG_64BIT tm LC_AR_MODE_ID,1 jno .Lesa3 lgfr %r2,%r2 # sign extend return value lmh %r6,%r15,96(%r15) # restore upper register halves ahi %r15,80 .Lesa3: -#endif lm %r6,%r15,120(%r15) # restore registers br %r14 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index a5ea8bc..7262fe4 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -92,10 +92,8 @@ EXPORT_SYMBOL(VMALLOC_END); struct page *vmemmap; EXPORT_SYMBOL(vmemmap); -#ifdef CONFIG_64BIT unsigned long MODULES_VADDR; unsigned long MODULES_END; -#endif /* An array with a pointer to the lowcore of every CPU. 
*/ struct _lowcore *lowcore_ptr[NR_CPUS]; @@ -334,19 +332,10 @@ static void __init setup_lowcore(void) lc->stfl_fac_list = S390_lowcore.stfl_fac_list; memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, MAX_FACILITY_BIT/8); -#ifndef CONFIG_64BIT - if (MACHINE_HAS_IEEE) { - lc->extended_save_area_addr = (__u32) - __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0); - /* enable extended save area */ - __ctl_set_bit(14, 29); - } -#else if (MACHINE_HAS_VX) lc->vector_save_area_addr = (unsigned long) &lc->vector_save_area; lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; -#endif lc->sync_enter_timer = S390_lowcore.sync_enter_timer; lc->async_enter_timer = S390_lowcore.async_enter_timer; lc->exit_timer = S390_lowcore.exit_timer; @@ -450,7 +439,6 @@ static void __init setup_memory_end(void) unsigned long vmax, vmalloc_size, tmp; /* Choose kernel address space layout: 2, 3, or 4 levels. */ -#ifdef CONFIG_64BIT vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; tmp = tmp * (sizeof(struct page) + PAGE_SIZE); @@ -462,12 +450,6 @@ static void __init setup_memory_end(void) MODULES_END = vmax; MODULES_VADDR = MODULES_END - MODULES_LEN; VMALLOC_END = MODULES_VADDR; -#else - vmalloc_size = VMALLOC_END ?: 96UL << 20; - vmax = 1UL << 31; /* 2-level kernel page table */ - /* vmalloc area is at the end of the kernel address space. */ - VMALLOC_END = vmax; -#endif VMALLOC_START = vmax - vmalloc_size; /* Split remaining virtual space between 1:1 mapping & vmemmap array */ @@ -754,7 +736,6 @@ static void __init setup_hwcaps(void) if (MACHINE_HAS_HPAGE) elf_hwcap |= HWCAP_S390_HPAGE; -#if defined(CONFIG_64BIT) /* * 64-bit register support for 31-bit processes * HWCAP_S390_HIGH_GPRS is bit 9. @@ -772,22 +753,15 @@ static void __init setup_hwcaps(void) */ if (test_facility(129)) elf_hwcap |= HWCAP_S390_VXRS; -#endif - get_cpu_id(&cpu_id); add_device_randomness(&cpu_id, sizeof(cpu_id)); switch (cpu_id.machine) { case 0x9672: -#if !defined(CONFIG_64BIT) - default: /* Use "g5" as default for 31 bit kernels. */ -#endif strcpy(elf_platform, "g5"); break; case 0x2064: case 0x2066: -#if defined(CONFIG_64BIT) default: /* Use "z900" as default for 64 bit kernels. 
*/ -#endif strcpy(elf_platform, "z900"); break; case 0x2084: @@ -839,19 +813,6 @@ void __init setup_arch(char **cmdline_p) /* * print what head.S has found out about the machine */ -#ifndef CONFIG_64BIT - if (MACHINE_IS_VM) - pr_info("Linux is running as a z/VM " - "guest operating system in 31-bit mode\n"); - else if (MACHINE_IS_LPAR) - pr_info("Linux is running natively in 31-bit mode\n"); - if (MACHINE_HAS_IEEE) - pr_info("The hardware system has IEEE compatible " - "floating point units\n"); - else - pr_info("The hardware system has no IEEE compatible " - "floating point units\n"); -#else /* CONFIG_64BIT */ if (MACHINE_IS_VM) pr_info("Linux is running as a z/VM " "guest operating system in 64-bit mode\n"); @@ -859,7 +820,6 @@ void __init setup_arch(char **cmdline_p) pr_info("Linux is running under KVM in 64-bit mode\n"); else if (MACHINE_IS_LPAR) pr_info("Linux is running natively in 64-bit mode\n"); -#endif /* CONFIG_64BIT */ /* Have one command line that is parsed and saved in /proc/cmdline */ /* boot_command_line has been already set up in early.c */ @@ -930,35 +890,3 @@ void __init setup_arch(char **cmdline_p) /* Add system specific data to the random pool */ setup_randomness(); } - -#ifdef CONFIG_32BIT -static int no_removal_warning __initdata; - -static int __init parse_no_removal_warning(char *str) -{ - no_removal_warning = 1; - return 0; -} -__setup("no_removal_warning", parse_no_removal_warning); - -static int __init removal_warning(void) -{ - if (no_removal_warning) - return 0; - printk(KERN_ALERT "\n\n"); - printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n"); - printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n"); - printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n"); - printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n"); - printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n"); - printk(KERN_CONT "please let us know. 
Please write to:\n"); - printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n"); - printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n"); - printk(KERN_CONT "Thank you!\n\n"); - printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n"); - printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n"); - schedule_timeout_uninterruptible(300 * HZ); - return 0; -} -early_initcall(removal_warning); -#endif diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index b3ae6f7..7fec60c 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -106,7 +106,6 @@ static void store_sigregs(void) { save_access_regs(current->thread.acrs); save_fp_ctl(¤t->thread.fp_regs.fpc); -#ifdef CONFIG_64BIT if (current->thread.vxrs) { int i; @@ -115,7 +114,6 @@ static void store_sigregs(void) current->thread.fp_regs.fprs[i] = *(freg_t *)(current->thread.vxrs + i); } else -#endif save_fp_regs(current->thread.fp_regs.fprs); } @@ -124,7 +122,6 @@ static void load_sigregs(void) { restore_access_regs(current->thread.acrs); /* restore_fp_ctl is done in restore_sigregs */ -#ifdef CONFIG_64BIT if (current->thread.vxrs) { int i; @@ -133,7 +130,6 @@ static void load_sigregs(void) current->thread.fp_regs.fprs[i]; restore_vx_regs(current->thread.vxrs); } else -#endif restore_fp_regs(current->thread.fp_regs.fprs); } @@ -200,7 +196,6 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) static int save_sigregs_ext(struct pt_regs *regs, _sigregs_ext __user *sregs_ext) { -#ifdef CONFIG_64BIT __u64 vxrs[__NUM_VXRS_LOW]; int i; @@ -215,14 +210,12 @@ static int save_sigregs_ext(struct pt_regs *regs, sizeof(sregs_ext->vxrs_high))) return -EFAULT; } -#endif return 0; } static int restore_sigregs_ext(struct pt_regs *regs, _sigregs_ext __user *sregs_ext) { -#ifdef CONFIG_64BIT __u64 vxrs[__NUM_VXRS_LOW]; int i; @@ -237,7 +230,6 @@ static int restore_sigregs_ext(struct pt_regs *regs, for (i = 0; i < __NUM_VXRS_LOW; i++) *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; } -#endif return 0; } @@ -416,13 +408,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, * included in the signal frame on a 31-bit system. 
*/ uc_flags = 0; -#ifdef CONFIG_64BIT if (MACHINE_HAS_VX) { frame_size += sizeof(_sigregs_ext); if (current->thread.vxrs) uc_flags |= UC_VXRS; } -#endif frame = get_sigframe(&ksig->ka, regs, frame_size); if (frame == (void __user *) -1UL) return -EFAULT; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index db8f1115..efd2c19 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -198,19 +198,11 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; lc->cpu_nr = cpu; lc->spinlock_lockval = arch_spin_lockval(cpu); -#ifndef CONFIG_64BIT - if (MACHINE_HAS_IEEE) { - lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); - if (!lc->extended_save_area_addr) - goto out; - } -#else if (MACHINE_HAS_VX) lc->vector_save_area_addr = (unsigned long) &lc->vector_save_area; if (vdso_alloc_per_cpu(lc)) goto out; -#endif lowcore_ptr[cpu] = lc; pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); return 0; @@ -229,16 +221,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu) { pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); lowcore_ptr[pcpu - pcpu_devices] = NULL; -#ifndef CONFIG_64BIT - if (MACHINE_HAS_IEEE) { - struct _lowcore *lc = pcpu->lowcore; - - free_page((unsigned long) lc->extended_save_area_addr); - lc->extended_save_area_addr = 0; - } -#else vdso_free_per_cpu(pcpu->lowcore); -#endif if (pcpu == &pcpu_devices[0]) return; free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); @@ -492,22 +475,6 @@ void arch_send_call_function_single_ipi(int cpu) pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); } -#ifndef CONFIG_64BIT -/* - * this function sends a 'purge tlb' signal to another CPU. - */ -static void smp_ptlb_callback(void *info) -{ - __tlb_flush_local(); -} - -void smp_ptlb_all(void) -{ - on_each_cpu(smp_ptlb_callback, NULL, 1); -} -EXPORT_SYMBOL(smp_ptlb_all); -#endif /* ! CONFIG_64BIT */ - /* * this function sends a 'reschedule' IPI to another CPU. * it goes straight through and wastes no time serializing @@ -851,7 +818,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) pcpu_prepare_secondary(pcpu, cpu); pcpu_attach_task(pcpu, tidle); pcpu_start_fn(pcpu, smp_start_secondary, NULL); - while (!cpu_online(cpu)) + /* Wait until cpu puts itself in the online & active maps */ + while (!cpu_online(cpu) || !cpu_active(cpu)) cpu_relax(); return 0; } diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index 1c4c5ac..d3236c9 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c @@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end)); + unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; + unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); /* Always save lowcore pages (LC protection might be enabled). */ if (pfn <= LC_PAGES) @@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn) if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) return 1; /* Skip memory holes and read-only pages (NSS, DCSS, ...). */ + if (pfn >= stext_pfn && pfn <= eshared_pfn) + return ipl_info.type == IPL_TYPE_NSS ? 
1 : 0; if (tprot(PFN_PHYS(pfn))) return 1; return 0; diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp.S index ca62946..ca62946 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp.S diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 23eb222..f145490 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -76,7 +76,6 @@ SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second, return sys_ipc(call, first, second, third, ptr, third); } -#ifdef CONFIG_64BIT SYSCALL_DEFINE1(s390_personality, unsigned int, personality) { unsigned int ret; @@ -90,51 +89,3 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality) return ret; } -#endif /* CONFIG_64BIT */ - -/* - * Wrapper function for sys_fadvise64/fadvise64_64 - */ -#ifndef CONFIG_64BIT - -SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low, - size_t, len, int, advice) -{ - return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low, - len, advice); -} - -struct fadvise64_64_args { - int fd; - long long offset; - long long len; - int advice; -}; - -SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args) -{ - struct fadvise64_64_args a; - - if ( copy_from_user(&a, args, sizeof(a)) ) - return -EFAULT; - return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice); -} - -/* - * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last - * 64 bit argument "len" is split into the upper and lower 32 bits. The - * system call wrapper in the user space loads the value to %r6/%r7. - * The code in entry.S keeps the values in %r2 - %r6 where they are and - * stores %r7 to 96(%r15). But the standard C linkage requires that - * the whole 64 bit value for len is stored on the stack and doesn't - * use %r6 at all. 
So s390_fallocate has to convert the arguments from - * %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len - * to - * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len - */ -SYSCALL_DEFINE5(s390_fallocate, int, fd, int, mode, loff_t, offset, - u32, len_high, u32, len_low) -{ - return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low); -} -#endif diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 939ec47..1acad02 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -1,365 +1,365 @@ /* * definitions for sys_call_table, each line represents an - * entry in the table in the form - * SYSCALL(31 bit syscall, 64 bit syscall, 31 bit emulated syscall) + * entry in the table in the form + * SYSCALL(64 bit syscall, 31 bit emulated syscall) * - * this file is meant to be included from entry.S and entry64.S + * this file is meant to be included from entry.S */ -#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall) +#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall) -NI_SYSCALL /* 0 */ -SYSCALL(sys_exit,sys_exit,compat_sys_exit) -SYSCALL(sys_fork,sys_fork,sys_fork) -SYSCALL(sys_read,sys_read,compat_sys_s390_read) -SYSCALL(sys_write,sys_write,compat_sys_s390_write) -SYSCALL(sys_open,sys_open,compat_sys_open) /* 5 */ -SYSCALL(sys_close,sys_close,compat_sys_close) -SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall) -SYSCALL(sys_creat,sys_creat,compat_sys_creat) -SYSCALL(sys_link,sys_link,compat_sys_link) -SYSCALL(sys_unlink,sys_unlink,compat_sys_unlink) /* 10 */ -SYSCALL(sys_execve,sys_execve,compat_sys_execve) -SYSCALL(sys_chdir,sys_chdir,compat_sys_chdir) -SYSCALL(sys_time,sys_ni_syscall,compat_sys_time) /* old time syscall */ -SYSCALL(sys_mknod,sys_mknod,compat_sys_mknod) -SYSCALL(sys_chmod,sys_chmod,compat_sys_chmod) /* 15 */ -SYSCALL(sys_lchown16,sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/ -NI_SYSCALL /* old break syscall holder */ -NI_SYSCALL /* old stat syscall holder */ -SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek) -SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */ -SYSCALL(sys_mount,sys_mount,compat_sys_mount) -SYSCALL(sys_oldumount,sys_oldumount,compat_sys_oldumount) -SYSCALL(sys_setuid16,sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/ -SYSCALL(sys_getuid16,sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/ -SYSCALL(sys_stime,sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */ -SYSCALL(sys_ptrace,sys_ptrace,compat_sys_ptrace) -SYSCALL(sys_alarm,sys_alarm,compat_sys_alarm) -NI_SYSCALL /* old fstat syscall */ -SYSCALL(sys_pause,sys_pause,sys_pause) -SYSCALL(sys_utime,sys_utime,compat_sys_utime) /* 30 */ -NI_SYSCALL /* old stty syscall */ -NI_SYSCALL /* old gtty syscall */ -SYSCALL(sys_access,sys_access,compat_sys_access) -SYSCALL(sys_nice,sys_nice,compat_sys_nice) -NI_SYSCALL /* 35 old ftime syscall */ -SYSCALL(sys_sync,sys_sync,sys_sync) -SYSCALL(sys_kill,sys_kill,compat_sys_kill) -SYSCALL(sys_rename,sys_rename,compat_sys_rename) -SYSCALL(sys_mkdir,sys_mkdir,compat_sys_mkdir) -SYSCALL(sys_rmdir,sys_rmdir,compat_sys_rmdir) /* 40 */ -SYSCALL(sys_dup,sys_dup,compat_sys_dup) -SYSCALL(sys_pipe,sys_pipe,compat_sys_pipe) -SYSCALL(sys_times,sys_times,compat_sys_times) -NI_SYSCALL /* old prof syscall */ -SYSCALL(sys_brk,sys_brk,compat_sys_brk) /* 45 */ -SYSCALL(sys_setgid16,sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/ -SYSCALL(sys_getgid16,sys_ni_syscall,compat_sys_s390_getgid16) /* old 
getgid16 syscall*/ -SYSCALL(sys_signal,sys_signal,compat_sys_signal) -SYSCALL(sys_geteuid16,sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */ -SYSCALL(sys_getegid16,sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */ -SYSCALL(sys_acct,sys_acct,compat_sys_acct) -SYSCALL(sys_umount,sys_umount,compat_sys_umount) -NI_SYSCALL /* old lock syscall */ -SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl) -SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl) /* 55 */ -NI_SYSCALL /* intel mpx syscall */ -SYSCALL(sys_setpgid,sys_setpgid,compat_sys_setpgid) -NI_SYSCALL /* old ulimit syscall */ -NI_SYSCALL /* old uname syscall */ -SYSCALL(sys_umask,sys_umask,compat_sys_umask) /* 60 */ -SYSCALL(sys_chroot,sys_chroot,compat_sys_chroot) -SYSCALL(sys_ustat,sys_ustat,compat_sys_ustat) -SYSCALL(sys_dup2,sys_dup2,compat_sys_dup2) -SYSCALL(sys_getppid,sys_getppid,sys_getppid) -SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */ -SYSCALL(sys_setsid,sys_setsid,sys_setsid) -SYSCALL(sys_sigaction,sys_sigaction,compat_sys_sigaction) -NI_SYSCALL /* old sgetmask syscall*/ -NI_SYSCALL /* old ssetmask syscall*/ -SYSCALL(sys_setreuid16,sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */ -SYSCALL(sys_setregid16,sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */ -SYSCALL(sys_sigsuspend,sys_sigsuspend,compat_sys_sigsuspend) -SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending) -SYSCALL(sys_sethostname,sys_sethostname,compat_sys_sethostname) -SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit) /* 75 */ -SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit) -SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage) -SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday) -SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday) -SYSCALL(sys_getgroups16,sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */ -SYSCALL(sys_setgroups16,sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */ -NI_SYSCALL /* old select syscall */ -SYSCALL(sys_symlink,sys_symlink,compat_sys_symlink) -NI_SYSCALL /* old lstat syscall */ -SYSCALL(sys_readlink,sys_readlink,compat_sys_readlink) /* 85 */ -SYSCALL(sys_uselib,sys_uselib,compat_sys_uselib) -SYSCALL(sys_swapon,sys_swapon,compat_sys_swapon) -SYSCALL(sys_reboot,sys_reboot,compat_sys_reboot) -SYSCALL(sys_ni_syscall,sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */ -SYSCALL(sys_old_mmap,sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */ -SYSCALL(sys_munmap,sys_munmap,compat_sys_munmap) -SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate) -SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate) -SYSCALL(sys_fchmod,sys_fchmod,compat_sys_fchmod) -SYSCALL(sys_fchown16,sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/ -SYSCALL(sys_getpriority,sys_getpriority,compat_sys_getpriority) -SYSCALL(sys_setpriority,sys_setpriority,compat_sys_setpriority) -NI_SYSCALL /* old profil syscall */ -SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs) -SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs) /* 100 */ -NI_SYSCALL /* ioperm for i386 */ -SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall) -SYSCALL(sys_syslog,sys_syslog,compat_sys_syslog) -SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer) -SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer) /* 105 */ -SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat) -SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat) 
-SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat) -NI_SYSCALL /* old uname syscall */ -SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */ -SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup) -NI_SYSCALL /* old "idle" system call */ -NI_SYSCALL /* vm86old for i386 */ -SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4) -SYSCALL(sys_swapoff,sys_swapoff,compat_sys_swapoff) /* 115 */ -SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo) -SYSCALL(sys_s390_ipc,sys_s390_ipc,compat_sys_s390_ipc) -SYSCALL(sys_fsync,sys_fsync,compat_sys_fsync) -SYSCALL(sys_sigreturn,sys_sigreturn,compat_sys_sigreturn) -SYSCALL(sys_clone,sys_clone,compat_sys_clone) /* 120 */ -SYSCALL(sys_setdomainname,sys_setdomainname,compat_sys_setdomainname) -SYSCALL(sys_newuname,sys_newuname,compat_sys_newuname) -NI_SYSCALL /* modify_ldt for i386 */ -SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex) -SYSCALL(sys_mprotect,sys_mprotect,compat_sys_mprotect) /* 125 */ -SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask) -NI_SYSCALL /* old "create module" */ -SYSCALL(sys_init_module,sys_init_module,compat_sys_init_module) -SYSCALL(sys_delete_module,sys_delete_module,compat_sys_delete_module) -NI_SYSCALL /* 130: old get_kernel_syms */ -SYSCALL(sys_quotactl,sys_quotactl,compat_sys_quotactl) -SYSCALL(sys_getpgid,sys_getpgid,compat_sys_getpgid) -SYSCALL(sys_fchdir,sys_fchdir,compat_sys_fchdir) -SYSCALL(sys_bdflush,sys_bdflush,compat_sys_bdflush) -SYSCALL(sys_sysfs,sys_sysfs,compat_sys_sysfs) /* 135 */ -SYSCALL(sys_personality,sys_s390_personality,compat_sys_s390_personality) -NI_SYSCALL /* for afs_syscall */ -SYSCALL(sys_setfsuid16,sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */ -SYSCALL(sys_setfsgid16,sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */ -SYSCALL(sys_llseek,sys_llseek,compat_sys_llseek) /* 140 */ -SYSCALL(sys_getdents,sys_getdents,compat_sys_getdents) -SYSCALL(sys_select,sys_select,compat_sys_select) -SYSCALL(sys_flock,sys_flock,compat_sys_flock) -SYSCALL(sys_msync,sys_msync,compat_sys_msync) -SYSCALL(sys_readv,sys_readv,compat_sys_readv) /* 145 */ -SYSCALL(sys_writev,sys_writev,compat_sys_writev) -SYSCALL(sys_getsid,sys_getsid,compat_sys_getsid) -SYSCALL(sys_fdatasync,sys_fdatasync,compat_sys_fdatasync) -SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl) -SYSCALL(sys_mlock,sys_mlock,compat_sys_mlock) /* 150 */ -SYSCALL(sys_munlock,sys_munlock,compat_sys_munlock) -SYSCALL(sys_mlockall,sys_mlockall,compat_sys_mlockall) -SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall) -SYSCALL(sys_sched_setparam,sys_sched_setparam,compat_sys_sched_setparam) -SYSCALL(sys_sched_getparam,sys_sched_getparam,compat_sys_sched_getparam) /* 155 */ -SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,compat_sys_sched_setscheduler) -SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,compat_sys_sched_getscheduler) -SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield) -SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,compat_sys_sched_get_priority_max) -SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */ -SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval) -SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep) -SYSCALL(sys_mremap,sys_mremap,compat_sys_mremap) -SYSCALL(sys_setresuid16,sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */ -SYSCALL(sys_getresuid16,sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 
syscall */ -NI_SYSCALL /* for vm86 */ -NI_SYSCALL /* old sys_query_module */ -SYSCALL(sys_poll,sys_poll,compat_sys_poll) -NI_SYSCALL /* old nfsservctl */ -SYSCALL(sys_setresgid16,sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */ -SYSCALL(sys_getresgid16,sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */ -SYSCALL(sys_prctl,sys_prctl,compat_sys_prctl) -SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,compat_sys_rt_sigreturn) -SYSCALL(sys_rt_sigaction,sys_rt_sigaction,compat_sys_rt_sigaction) -SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */ -SYSCALL(sys_rt_sigpending,sys_rt_sigpending,compat_sys_rt_sigpending) -SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait) -SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo) -SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend) -SYSCALL(sys_pread64,sys_pread64,compat_sys_s390_pread64) /* 180 */ -SYSCALL(sys_pwrite64,sys_pwrite64,compat_sys_s390_pwrite64) -SYSCALL(sys_chown16,sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */ -SYSCALL(sys_getcwd,sys_getcwd,compat_sys_getcwd) -SYSCALL(sys_capget,sys_capget,compat_sys_capget) -SYSCALL(sys_capset,sys_capset,compat_sys_capset) /* 185 */ -SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack) -SYSCALL(sys_sendfile,sys_sendfile64,compat_sys_sendfile) -NI_SYSCALL /* streams1 */ -NI_SYSCALL /* streams2 */ -SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */ -SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit) -SYSCALL(sys_mmap2,sys_mmap2,compat_sys_s390_mmap2) -SYSCALL(sys_truncate64,sys_ni_syscall,compat_sys_s390_truncate64) -SYSCALL(sys_ftruncate64,sys_ni_syscall,compat_sys_s390_ftruncate64) -SYSCALL(sys_stat64,sys_ni_syscall,compat_sys_s390_stat64) /* 195 */ -SYSCALL(sys_lstat64,sys_ni_syscall,compat_sys_s390_lstat64) -SYSCALL(sys_fstat64,sys_ni_syscall,compat_sys_s390_fstat64) -SYSCALL(sys_lchown,sys_lchown,compat_sys_lchown) -SYSCALL(sys_getuid,sys_getuid,sys_getuid) -SYSCALL(sys_getgid,sys_getgid,sys_getgid) /* 200 */ -SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid) -SYSCALL(sys_getegid,sys_getegid,sys_getegid) -SYSCALL(sys_setreuid,sys_setreuid,compat_sys_setreuid) -SYSCALL(sys_setregid,sys_setregid,compat_sys_setregid) -SYSCALL(sys_getgroups,sys_getgroups,compat_sys_getgroups) /* 205 */ -SYSCALL(sys_setgroups,sys_setgroups,compat_sys_setgroups) -SYSCALL(sys_fchown,sys_fchown,compat_sys_fchown) -SYSCALL(sys_setresuid,sys_setresuid,compat_sys_setresuid) -SYSCALL(sys_getresuid,sys_getresuid,compat_sys_getresuid) -SYSCALL(sys_setresgid,sys_setresgid,compat_sys_setresgid) /* 210 */ -SYSCALL(sys_getresgid,sys_getresgid,compat_sys_getresgid) -SYSCALL(sys_chown,sys_chown,compat_sys_chown) -SYSCALL(sys_setuid,sys_setuid,compat_sys_setuid) -SYSCALL(sys_setgid,sys_setgid,compat_sys_setgid) -SYSCALL(sys_setfsuid,sys_setfsuid,compat_sys_setfsuid) /* 215 */ -SYSCALL(sys_setfsgid,sys_setfsgid,compat_sys_setfsgid) -SYSCALL(sys_pivot_root,sys_pivot_root,compat_sys_pivot_root) -SYSCALL(sys_mincore,sys_mincore,compat_sys_mincore) -SYSCALL(sys_madvise,sys_madvise,compat_sys_madvise) -SYSCALL(sys_getdents64,sys_getdents64,compat_sys_getdents64) /* 220 */ -SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64) -SYSCALL(sys_readahead,sys_readahead,compat_sys_s390_readahead) -SYSCALL(sys_sendfile64,sys_ni_syscall,compat_sys_sendfile64) -SYSCALL(sys_setxattr,sys_setxattr,compat_sys_setxattr) -SYSCALL(sys_lsetxattr,sys_lsetxattr,compat_sys_lsetxattr) /* 225 */ 
-SYSCALL(sys_fsetxattr,sys_fsetxattr,compat_sys_fsetxattr) -SYSCALL(sys_getxattr,sys_getxattr,compat_sys_getxattr) -SYSCALL(sys_lgetxattr,sys_lgetxattr,compat_sys_lgetxattr) -SYSCALL(sys_fgetxattr,sys_fgetxattr,compat_sys_fgetxattr) -SYSCALL(sys_listxattr,sys_listxattr,compat_sys_listxattr) /* 230 */ -SYSCALL(sys_llistxattr,sys_llistxattr,compat_sys_llistxattr) -SYSCALL(sys_flistxattr,sys_flistxattr,compat_sys_flistxattr) -SYSCALL(sys_removexattr,sys_removexattr,compat_sys_removexattr) -SYSCALL(sys_lremovexattr,sys_lremovexattr,compat_sys_lremovexattr) -SYSCALL(sys_fremovexattr,sys_fremovexattr,compat_sys_fremovexattr) /* 235 */ -SYSCALL(sys_gettid,sys_gettid,sys_gettid) -SYSCALL(sys_tkill,sys_tkill,compat_sys_tkill) -SYSCALL(sys_futex,sys_futex,compat_sys_futex) -SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,compat_sys_sched_setaffinity) -SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */ -SYSCALL(sys_tgkill,sys_tgkill,compat_sys_tgkill) -NI_SYSCALL /* reserved for TUX */ -SYSCALL(sys_io_setup,sys_io_setup,compat_sys_io_setup) -SYSCALL(sys_io_destroy,sys_io_destroy,compat_sys_io_destroy) -SYSCALL(sys_io_getevents,sys_io_getevents,compat_sys_io_getevents) /* 245 */ -SYSCALL(sys_io_submit,sys_io_submit,compat_sys_io_submit) -SYSCALL(sys_io_cancel,sys_io_cancel,compat_sys_io_cancel) -SYSCALL(sys_exit_group,sys_exit_group,compat_sys_exit_group) -SYSCALL(sys_epoll_create,sys_epoll_create,compat_sys_epoll_create) -SYSCALL(sys_epoll_ctl,sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */ -SYSCALL(sys_epoll_wait,sys_epoll_wait,compat_sys_epoll_wait) -SYSCALL(sys_set_tid_address,sys_set_tid_address,compat_sys_set_tid_address) -SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,compat_sys_s390_fadvise64) -SYSCALL(sys_timer_create,sys_timer_create,compat_sys_timer_create) -SYSCALL(sys_timer_settime,sys_timer_settime,compat_sys_timer_settime) /* 255 */ -SYSCALL(sys_timer_gettime,sys_timer_gettime,compat_sys_timer_gettime) -SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,compat_sys_timer_getoverrun) -SYSCALL(sys_timer_delete,sys_timer_delete,compat_sys_timer_delete) -SYSCALL(sys_clock_settime,sys_clock_settime,compat_sys_clock_settime) -SYSCALL(sys_clock_gettime,sys_clock_gettime,compat_sys_clock_gettime) /* 260 */ -SYSCALL(sys_clock_getres,sys_clock_getres,compat_sys_clock_getres) -SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,compat_sys_clock_nanosleep) -NI_SYSCALL /* reserved for vserver */ -SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,compat_sys_s390_fadvise64_64) -SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64) -SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64) -SYSCALL(sys_remap_file_pages,sys_remap_file_pages,compat_sys_remap_file_pages) -NI_SYSCALL /* 268 sys_mbind */ -NI_SYSCALL /* 269 sys_get_mempolicy */ -NI_SYSCALL /* 270 sys_set_mempolicy */ -SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open) -SYSCALL(sys_mq_unlink,sys_mq_unlink,compat_sys_mq_unlink) -SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend) -SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive) -SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify) /* 275 */ -SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr) -SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load) -SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key) -SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key) -SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */ -SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid) 
-SYSCALL(sys_ioprio_set,sys_ioprio_set,compat_sys_ioprio_set) -SYSCALL(sys_ioprio_get,sys_ioprio_get,compat_sys_ioprio_get) -SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init) -SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ -SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,compat_sys_inotify_rm_watch) -NI_SYSCALL /* 287 sys_migrate_pages */ -SYSCALL(sys_openat,sys_openat,compat_sys_openat) -SYSCALL(sys_mkdirat,sys_mkdirat,compat_sys_mkdirat) -SYSCALL(sys_mknodat,sys_mknodat,compat_sys_mknodat) /* 290 */ -SYSCALL(sys_fchownat,sys_fchownat,compat_sys_fchownat) -SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat) -SYSCALL(sys_fstatat64,sys_newfstatat,compat_sys_s390_fstatat64) -SYSCALL(sys_unlinkat,sys_unlinkat,compat_sys_unlinkat) -SYSCALL(sys_renameat,sys_renameat,compat_sys_renameat) /* 295 */ -SYSCALL(sys_linkat,sys_linkat,compat_sys_linkat) -SYSCALL(sys_symlinkat,sys_symlinkat,compat_sys_symlinkat) -SYSCALL(sys_readlinkat,sys_readlinkat,compat_sys_readlinkat) -SYSCALL(sys_fchmodat,sys_fchmodat,compat_sys_fchmodat) -SYSCALL(sys_faccessat,sys_faccessat,compat_sys_faccessat) /* 300 */ -SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6) -SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll) -SYSCALL(sys_unshare,sys_unshare,compat_sys_unshare) -SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list) -SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list) -SYSCALL(sys_splice,sys_splice,compat_sys_splice) -SYSCALL(sys_sync_file_range,sys_sync_file_range,compat_sys_s390_sync_file_range) -SYSCALL(sys_tee,sys_tee,compat_sys_tee) -SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice) -NI_SYSCALL /* 310 sys_move_pages */ -SYSCALL(sys_getcpu,sys_getcpu,compat_sys_getcpu) -SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait) -SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes) -SYSCALL(sys_s390_fallocate,sys_fallocate,compat_sys_s390_fallocate) -SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat) /* 315 */ -SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd) +NI_SYSCALL /* 0 */ +SYSCALL(sys_exit,compat_sys_exit) +SYSCALL(sys_fork,sys_fork) +SYSCALL(sys_read,compat_sys_s390_read) +SYSCALL(sys_write,compat_sys_s390_write) +SYSCALL(sys_open,compat_sys_open) /* 5 */ +SYSCALL(sys_close,compat_sys_close) +SYSCALL(sys_restart_syscall,sys_restart_syscall) +SYSCALL(sys_creat,compat_sys_creat) +SYSCALL(sys_link,compat_sys_link) +SYSCALL(sys_unlink,compat_sys_unlink) /* 10 */ +SYSCALL(sys_execve,compat_sys_execve) +SYSCALL(sys_chdir,compat_sys_chdir) +SYSCALL(sys_ni_syscall,compat_sys_time) /* old time syscall */ +SYSCALL(sys_mknod,compat_sys_mknod) +SYSCALL(sys_chmod,compat_sys_chmod) /* 15 */ +SYSCALL(sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/ +NI_SYSCALL /* old break syscall holder */ +NI_SYSCALL /* old stat syscall holder */ +SYSCALL(sys_lseek,compat_sys_lseek) +SYSCALL(sys_getpid,sys_getpid) /* 20 */ +SYSCALL(sys_mount,compat_sys_mount) +SYSCALL(sys_oldumount,compat_sys_oldumount) +SYSCALL(sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/ +SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/ +SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */ +SYSCALL(sys_ptrace,compat_sys_ptrace) +SYSCALL(sys_alarm,compat_sys_alarm) +NI_SYSCALL /* old fstat syscall */ +SYSCALL(sys_pause,sys_pause) +SYSCALL(sys_utime,compat_sys_utime) /* 30 */ +NI_SYSCALL /* old stty syscall */ +NI_SYSCALL /* old gtty syscall */ 
+SYSCALL(sys_access,compat_sys_access) +SYSCALL(sys_nice,compat_sys_nice) +NI_SYSCALL /* 35 old ftime syscall */ +SYSCALL(sys_sync,sys_sync) +SYSCALL(sys_kill,compat_sys_kill) +SYSCALL(sys_rename,compat_sys_rename) +SYSCALL(sys_mkdir,compat_sys_mkdir) +SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */ +SYSCALL(sys_dup,compat_sys_dup) +SYSCALL(sys_pipe,compat_sys_pipe) +SYSCALL(sys_times,compat_sys_times) +NI_SYSCALL /* old prof syscall */ +SYSCALL(sys_brk,compat_sys_brk) /* 45 */ +SYSCALL(sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/ +SYSCALL(sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/ +SYSCALL(sys_signal,compat_sys_signal) +SYSCALL(sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */ +SYSCALL(sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */ +SYSCALL(sys_acct,compat_sys_acct) +SYSCALL(sys_umount,compat_sys_umount) +NI_SYSCALL /* old lock syscall */ +SYSCALL(sys_ioctl,compat_sys_ioctl) +SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */ +NI_SYSCALL /* intel mpx syscall */ +SYSCALL(sys_setpgid,compat_sys_setpgid) +NI_SYSCALL /* old ulimit syscall */ +NI_SYSCALL /* old uname syscall */ +SYSCALL(sys_umask,compat_sys_umask) /* 60 */ +SYSCALL(sys_chroot,compat_sys_chroot) +SYSCALL(sys_ustat,compat_sys_ustat) +SYSCALL(sys_dup2,compat_sys_dup2) +SYSCALL(sys_getppid,sys_getppid) +SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */ +SYSCALL(sys_setsid,sys_setsid) +SYSCALL(sys_sigaction,compat_sys_sigaction) +NI_SYSCALL /* old sgetmask syscall*/ +NI_SYSCALL /* old ssetmask syscall*/ +SYSCALL(sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */ +SYSCALL(sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */ +SYSCALL(sys_sigsuspend,compat_sys_sigsuspend) +SYSCALL(sys_sigpending,compat_sys_sigpending) +SYSCALL(sys_sethostname,compat_sys_sethostname) +SYSCALL(sys_setrlimit,compat_sys_setrlimit) /* 75 */ +SYSCALL(sys_getrlimit,compat_sys_old_getrlimit) +SYSCALL(sys_getrusage,compat_sys_getrusage) +SYSCALL(sys_gettimeofday,compat_sys_gettimeofday) +SYSCALL(sys_settimeofday,compat_sys_settimeofday) +SYSCALL(sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */ +SYSCALL(sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */ +NI_SYSCALL /* old select syscall */ +SYSCALL(sys_symlink,compat_sys_symlink) +NI_SYSCALL /* old lstat syscall */ +SYSCALL(sys_readlink,compat_sys_readlink) /* 85 */ +SYSCALL(sys_uselib,compat_sys_uselib) +SYSCALL(sys_swapon,compat_sys_swapon) +SYSCALL(sys_reboot,compat_sys_reboot) +SYSCALL(sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */ +SYSCALL(sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */ +SYSCALL(sys_munmap,compat_sys_munmap) +SYSCALL(sys_truncate,compat_sys_truncate) +SYSCALL(sys_ftruncate,compat_sys_ftruncate) +SYSCALL(sys_fchmod,compat_sys_fchmod) +SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/ +SYSCALL(sys_getpriority,compat_sys_getpriority) +SYSCALL(sys_setpriority,compat_sys_setpriority) +NI_SYSCALL /* old profil syscall */ +SYSCALL(sys_statfs,compat_sys_statfs) +SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */ +NI_SYSCALL /* ioperm for i386 */ +SYSCALL(sys_socketcall,compat_sys_socketcall) +SYSCALL(sys_syslog,compat_sys_syslog) +SYSCALL(sys_setitimer,compat_sys_setitimer) +SYSCALL(sys_getitimer,compat_sys_getitimer) /* 105 */ +SYSCALL(sys_newstat,compat_sys_newstat) +SYSCALL(sys_newlstat,compat_sys_newlstat) +SYSCALL(sys_newfstat,compat_sys_newfstat) +NI_SYSCALL /* old uname syscall */ 
+SYSCALL(sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */ +SYSCALL(sys_vhangup,sys_vhangup) +NI_SYSCALL /* old "idle" system call */ +NI_SYSCALL /* vm86old for i386 */ +SYSCALL(sys_wait4,compat_sys_wait4) +SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */ +SYSCALL(sys_sysinfo,compat_sys_sysinfo) +SYSCALL(sys_s390_ipc,compat_sys_s390_ipc) +SYSCALL(sys_fsync,compat_sys_fsync) +SYSCALL(sys_sigreturn,compat_sys_sigreturn) +SYSCALL(sys_clone,compat_sys_clone) /* 120 */ +SYSCALL(sys_setdomainname,compat_sys_setdomainname) +SYSCALL(sys_newuname,compat_sys_newuname) +NI_SYSCALL /* modify_ldt for i386 */ +SYSCALL(sys_adjtimex,compat_sys_adjtimex) +SYSCALL(sys_mprotect,compat_sys_mprotect) /* 125 */ +SYSCALL(sys_sigprocmask,compat_sys_sigprocmask) +NI_SYSCALL /* old "create module" */ +SYSCALL(sys_init_module,compat_sys_init_module) +SYSCALL(sys_delete_module,compat_sys_delete_module) +NI_SYSCALL /* 130: old get_kernel_syms */ +SYSCALL(sys_quotactl,compat_sys_quotactl) +SYSCALL(sys_getpgid,compat_sys_getpgid) +SYSCALL(sys_fchdir,compat_sys_fchdir) +SYSCALL(sys_bdflush,compat_sys_bdflush) +SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */ +SYSCALL(sys_s390_personality,compat_sys_s390_personality) +NI_SYSCALL /* for afs_syscall */ +SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */ +SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */ +SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */ +SYSCALL(sys_getdents,compat_sys_getdents) +SYSCALL(sys_select,compat_sys_select) +SYSCALL(sys_flock,compat_sys_flock) +SYSCALL(sys_msync,compat_sys_msync) +SYSCALL(sys_readv,compat_sys_readv) /* 145 */ +SYSCALL(sys_writev,compat_sys_writev) +SYSCALL(sys_getsid,compat_sys_getsid) +SYSCALL(sys_fdatasync,compat_sys_fdatasync) +SYSCALL(sys_sysctl,compat_sys_sysctl) +SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */ +SYSCALL(sys_munlock,compat_sys_munlock) +SYSCALL(sys_mlockall,compat_sys_mlockall) +SYSCALL(sys_munlockall,sys_munlockall) +SYSCALL(sys_sched_setparam,compat_sys_sched_setparam) +SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */ +SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler) +SYSCALL(sys_sched_getscheduler,compat_sys_sched_getscheduler) +SYSCALL(sys_sched_yield,sys_sched_yield) +SYSCALL(sys_sched_get_priority_max,compat_sys_sched_get_priority_max) +SYSCALL(sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */ +SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval) +SYSCALL(sys_nanosleep,compat_sys_nanosleep) +SYSCALL(sys_mremap,compat_sys_mremap) +SYSCALL(sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */ +SYSCALL(sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */ +NI_SYSCALL /* for vm86 */ +NI_SYSCALL /* old sys_query_module */ +SYSCALL(sys_poll,compat_sys_poll) +NI_SYSCALL /* old nfsservctl */ +SYSCALL(sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */ +SYSCALL(sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */ +SYSCALL(sys_prctl,compat_sys_prctl) +SYSCALL(sys_rt_sigreturn,compat_sys_rt_sigreturn) +SYSCALL(sys_rt_sigaction,compat_sys_rt_sigaction) +SYSCALL(sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */ +SYSCALL(sys_rt_sigpending,compat_sys_rt_sigpending) +SYSCALL(sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait) +SYSCALL(sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo) +SYSCALL(sys_rt_sigsuspend,compat_sys_rt_sigsuspend) +SYSCALL(sys_pread64,compat_sys_s390_pread64) /* 180 */ 
+SYSCALL(sys_pwrite64,compat_sys_s390_pwrite64) +SYSCALL(sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */ +SYSCALL(sys_getcwd,compat_sys_getcwd) +SYSCALL(sys_capget,compat_sys_capget) +SYSCALL(sys_capset,compat_sys_capset) /* 185 */ +SYSCALL(sys_sigaltstack,compat_sys_sigaltstack) +SYSCALL(sys_sendfile64,compat_sys_sendfile) +NI_SYSCALL /* streams1 */ +NI_SYSCALL /* streams2 */ +SYSCALL(sys_vfork,sys_vfork) /* 190 */ +SYSCALL(sys_getrlimit,compat_sys_getrlimit) +SYSCALL(sys_mmap2,compat_sys_s390_mmap2) +SYSCALL(sys_ni_syscall,compat_sys_s390_truncate64) +SYSCALL(sys_ni_syscall,compat_sys_s390_ftruncate64) +SYSCALL(sys_ni_syscall,compat_sys_s390_stat64) /* 195 */ +SYSCALL(sys_ni_syscall,compat_sys_s390_lstat64) +SYSCALL(sys_ni_syscall,compat_sys_s390_fstat64) +SYSCALL(sys_lchown,compat_sys_lchown) +SYSCALL(sys_getuid,sys_getuid) +SYSCALL(sys_getgid,sys_getgid) /* 200 */ +SYSCALL(sys_geteuid,sys_geteuid) +SYSCALL(sys_getegid,sys_getegid) +SYSCALL(sys_setreuid,compat_sys_setreuid) +SYSCALL(sys_setregid,compat_sys_setregid) +SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */ +SYSCALL(sys_setgroups,compat_sys_setgroups) +SYSCALL(sys_fchown,compat_sys_fchown) +SYSCALL(sys_setresuid,compat_sys_setresuid) +SYSCALL(sys_getresuid,compat_sys_getresuid) +SYSCALL(sys_setresgid,compat_sys_setresgid) /* 210 */ +SYSCALL(sys_getresgid,compat_sys_getresgid) +SYSCALL(sys_chown,compat_sys_chown) +SYSCALL(sys_setuid,compat_sys_setuid) +SYSCALL(sys_setgid,compat_sys_setgid) +SYSCALL(sys_setfsuid,compat_sys_setfsuid) /* 215 */ +SYSCALL(sys_setfsgid,compat_sys_setfsgid) +SYSCALL(sys_pivot_root,compat_sys_pivot_root) +SYSCALL(sys_mincore,compat_sys_mincore) +SYSCALL(sys_madvise,compat_sys_madvise) +SYSCALL(sys_getdents64,compat_sys_getdents64) /* 220 */ +SYSCALL(sys_ni_syscall,compat_sys_fcntl64) +SYSCALL(sys_readahead,compat_sys_s390_readahead) +SYSCALL(sys_ni_syscall,compat_sys_sendfile64) +SYSCALL(sys_setxattr,compat_sys_setxattr) +SYSCALL(sys_lsetxattr,compat_sys_lsetxattr) /* 225 */ +SYSCALL(sys_fsetxattr,compat_sys_fsetxattr) +SYSCALL(sys_getxattr,compat_sys_getxattr) +SYSCALL(sys_lgetxattr,compat_sys_lgetxattr) +SYSCALL(sys_fgetxattr,compat_sys_fgetxattr) +SYSCALL(sys_listxattr,compat_sys_listxattr) /* 230 */ +SYSCALL(sys_llistxattr,compat_sys_llistxattr) +SYSCALL(sys_flistxattr,compat_sys_flistxattr) +SYSCALL(sys_removexattr,compat_sys_removexattr) +SYSCALL(sys_lremovexattr,compat_sys_lremovexattr) +SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */ +SYSCALL(sys_gettid,sys_gettid) +SYSCALL(sys_tkill,compat_sys_tkill) +SYSCALL(sys_futex,compat_sys_futex) +SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity) +SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */ +SYSCALL(sys_tgkill,compat_sys_tgkill) +NI_SYSCALL /* reserved for TUX */ +SYSCALL(sys_io_setup,compat_sys_io_setup) +SYSCALL(sys_io_destroy,compat_sys_io_destroy) +SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */ +SYSCALL(sys_io_submit,compat_sys_io_submit) +SYSCALL(sys_io_cancel,compat_sys_io_cancel) +SYSCALL(sys_exit_group,compat_sys_exit_group) +SYSCALL(sys_epoll_create,compat_sys_epoll_create) +SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */ +SYSCALL(sys_epoll_wait,compat_sys_epoll_wait) +SYSCALL(sys_set_tid_address,compat_sys_set_tid_address) +SYSCALL(sys_fadvise64_64,compat_sys_s390_fadvise64) +SYSCALL(sys_timer_create,compat_sys_timer_create) +SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */ +SYSCALL(sys_timer_gettime,compat_sys_timer_gettime) 
+SYSCALL(sys_timer_getoverrun,compat_sys_timer_getoverrun) +SYSCALL(sys_timer_delete,compat_sys_timer_delete) +SYSCALL(sys_clock_settime,compat_sys_clock_settime) +SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */ +SYSCALL(sys_clock_getres,compat_sys_clock_getres) +SYSCALL(sys_clock_nanosleep,compat_sys_clock_nanosleep) +NI_SYSCALL /* reserved for vserver */ +SYSCALL(sys_ni_syscall,compat_sys_s390_fadvise64_64) +SYSCALL(sys_statfs64,compat_sys_statfs64) +SYSCALL(sys_fstatfs64,compat_sys_fstatfs64) +SYSCALL(sys_remap_file_pages,compat_sys_remap_file_pages) +NI_SYSCALL /* 268 sys_mbind */ +NI_SYSCALL /* 269 sys_get_mempolicy */ +NI_SYSCALL /* 270 sys_set_mempolicy */ +SYSCALL(sys_mq_open,compat_sys_mq_open) +SYSCALL(sys_mq_unlink,compat_sys_mq_unlink) +SYSCALL(sys_mq_timedsend,compat_sys_mq_timedsend) +SYSCALL(sys_mq_timedreceive,compat_sys_mq_timedreceive) +SYSCALL(sys_mq_notify,compat_sys_mq_notify) /* 275 */ +SYSCALL(sys_mq_getsetattr,compat_sys_mq_getsetattr) +SYSCALL(sys_kexec_load,compat_sys_kexec_load) +SYSCALL(sys_add_key,compat_sys_add_key) +SYSCALL(sys_request_key,compat_sys_request_key) +SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */ +SYSCALL(sys_waitid,compat_sys_waitid) +SYSCALL(sys_ioprio_set,compat_sys_ioprio_set) +SYSCALL(sys_ioprio_get,compat_sys_ioprio_get) +SYSCALL(sys_inotify_init,sys_inotify_init) +SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */ +SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch) +NI_SYSCALL /* 287 sys_migrate_pages */ +SYSCALL(sys_openat,compat_sys_openat) +SYSCALL(sys_mkdirat,compat_sys_mkdirat) +SYSCALL(sys_mknodat,compat_sys_mknodat) /* 290 */ +SYSCALL(sys_fchownat,compat_sys_fchownat) +SYSCALL(sys_futimesat,compat_sys_futimesat) +SYSCALL(sys_newfstatat,compat_sys_s390_fstatat64) +SYSCALL(sys_unlinkat,compat_sys_unlinkat) +SYSCALL(sys_renameat,compat_sys_renameat) /* 295 */ +SYSCALL(sys_linkat,compat_sys_linkat) +SYSCALL(sys_symlinkat,compat_sys_symlinkat) +SYSCALL(sys_readlinkat,compat_sys_readlinkat) +SYSCALL(sys_fchmodat,compat_sys_fchmodat) +SYSCALL(sys_faccessat,compat_sys_faccessat) /* 300 */ +SYSCALL(sys_pselect6,compat_sys_pselect6) +SYSCALL(sys_ppoll,compat_sys_ppoll) +SYSCALL(sys_unshare,compat_sys_unshare) +SYSCALL(sys_set_robust_list,compat_sys_set_robust_list) +SYSCALL(sys_get_robust_list,compat_sys_get_robust_list) +SYSCALL(sys_splice,compat_sys_splice) +SYSCALL(sys_sync_file_range,compat_sys_s390_sync_file_range) +SYSCALL(sys_tee,compat_sys_tee) +SYSCALL(sys_vmsplice,compat_sys_vmsplice) +NI_SYSCALL /* 310 sys_move_pages */ +SYSCALL(sys_getcpu,compat_sys_getcpu) +SYSCALL(sys_epoll_pwait,compat_sys_epoll_pwait) +SYSCALL(sys_utimes,compat_sys_utimes) +SYSCALL(sys_fallocate,compat_sys_s390_fallocate) +SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */ +SYSCALL(sys_signalfd,compat_sys_signalfd) NI_SYSCALL /* 317 old sys_timer_fd */ -SYSCALL(sys_eventfd,sys_eventfd,compat_sys_eventfd) -SYSCALL(sys_timerfd_create,sys_timerfd_create,compat_sys_timerfd_create) -SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ -SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime) -SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4) -SYSCALL(sys_eventfd2,sys_eventfd2,compat_sys_eventfd2) -SYSCALL(sys_inotify_init1,sys_inotify_init1,compat_sys_inotify_init1) -SYSCALL(sys_pipe2,sys_pipe2,compat_sys_pipe2) /* 325 */ -SYSCALL(sys_dup3,sys_dup3,compat_sys_dup3) -SYSCALL(sys_epoll_create1,sys_epoll_create1,compat_sys_epoll_create1) 
-SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv) -SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev) -SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ -SYSCALL(sys_perf_event_open,sys_perf_event_open,compat_sys_perf_event_open) -SYSCALL(sys_fanotify_init,sys_fanotify_init,compat_sys_fanotify_init) -SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark) -SYSCALL(sys_prlimit64,sys_prlimit64,compat_sys_prlimit64) -SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */ -SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at) -SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime) -SYSCALL(sys_syncfs,sys_syncfs,compat_sys_syncfs) -SYSCALL(sys_setns,sys_setns,compat_sys_setns) -SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */ -SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev) -SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,compat_sys_s390_runtime_instr) -SYSCALL(sys_kcmp,sys_kcmp,compat_sys_kcmp) -SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module) -SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ -SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr) -SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2) -SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp) -SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) -SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ -SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) -SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) -SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) -SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat) +SYSCALL(sys_eventfd,compat_sys_eventfd) +SYSCALL(sys_timerfd_create,compat_sys_timerfd_create) +SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ +SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime) +SYSCALL(sys_signalfd4,compat_sys_signalfd4) +SYSCALL(sys_eventfd2,compat_sys_eventfd2) +SYSCALL(sys_inotify_init1,compat_sys_inotify_init1) +SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */ +SYSCALL(sys_dup3,compat_sys_dup3) +SYSCALL(sys_epoll_create1,compat_sys_epoll_create1) +SYSCALL(sys_preadv,compat_sys_preadv) +SYSCALL(sys_pwritev,compat_sys_pwritev) +SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ +SYSCALL(sys_perf_event_open,compat_sys_perf_event_open) +SYSCALL(sys_fanotify_init,compat_sys_fanotify_init) +SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark) +SYSCALL(sys_prlimit64,compat_sys_prlimit64) +SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */ +SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at) +SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime) +SYSCALL(sys_syncfs,compat_sys_syncfs) +SYSCALL(sys_setns,compat_sys_setns) +SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */ +SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev) +SYSCALL(sys_s390_runtime_instr,compat_sys_s390_runtime_instr) +SYSCALL(sys_kcmp,compat_sys_kcmp) +SYSCALL(sys_finit_module,compat_sys_finit_module) +SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ +SYSCALL(sys_sched_getattr,compat_sys_sched_getattr) +SYSCALL(sys_renameat2,compat_sys_renameat2) +SYSCALL(sys_seccomp,compat_sys_seccomp) +SYSCALL(sys_getrandom,compat_sys_getrandom) +SYSCALL(sys_memfd_create,compat_sys_memfd_create) /* 350 */ 
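(Editor's note) The updated header comment for arch/s390/kernel/syscalls.S describes the file as a single list of SYSCALL(64 bit syscall, 31 bit emulated syscall) entries that entry.S includes in order to build both dispatch tables. As a rough, stand-alone illustration of that X-macro pattern only, and not the kernel's actual entry.S mechanism, the C sketch below shows how one two-column list can expand into both a native table and a compat table; every identifier in it (sys_example_native, compat_sys_example, sys_ni_stub) is an invented stub.

	/*
	 * Editor's sketch of the two-column SYSCALL() pattern.  Not taken
	 * from the kernel; it only demonstrates how redefining the macro
	 * around two expansions of one list yields two separate tables.
	 */
	typedef long (*syscall_fn)(void);

	static long sys_example_native(void) { return 0; }   /* invented stub */
	static long compat_sys_example(void) { return 0; }    /* invented stub */
	static long sys_ni_stub(void)        { return -38; }  /* -ENOSYS */

	#define SYSCALL_LIST                                        \
		SYSCALL(sys_example_native, compat_sys_example)     \
		SYSCALL(sys_ni_stub,        sys_ni_stub)

	/* Native table: keep the first column of every entry. */
	#define SYSCALL(native, emu)	native,
	static const syscall_fn sys_call_table[]     = { SYSCALL_LIST };
	#undef SYSCALL

	/* Compat (31 bit emulation) table: keep the second column. */
	#define SYSCALL(native, emu)	emu,
	static const syscall_fn sys_call_table_emu[] = { SYSCALL_LIST };
	#undef SYSCALL

Because both tables are generated from the same list, dropping the old 31 bit column forces every entry to change, which is why this hunk rewrites the whole 365-line file.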
+SYSCALL(sys_bpf,compat_sys_bpf) +SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) +SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) +SYSCALL(sys_execveat,compat_sys_execveat) diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 14da43b..5728c5b 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -421,7 +421,7 @@ int topology_cpu_init(struct cpu *cpu) return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); } -const struct cpumask *cpu_thread_mask(int cpu) +static const struct cpumask *cpu_thread_mask(int cpu) { return &per_cpu(cpu_topology, cpu).thread_mask; } diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index f081cf1..4d96c9f 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -26,7 +26,6 @@ int show_unhandled_signals = 1; static inline void __user *get_trap_ip(struct pt_regs *regs) { -#ifdef CONFIG_64BIT unsigned long address; if (regs->int_code & 0x200) @@ -35,10 +34,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs) address = regs->psw.addr; return (void __user *) ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN); -#else - return (void __user *) - ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN); -#endif } static inline void report_user_fault(struct pt_regs *regs, int signr) @@ -153,11 +148,8 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, "privileged operation") DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, "special operation exception") - -#ifdef CONFIG_64BIT DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, "transaction constraint exception") -#endif static inline void do_fp_trap(struct pt_regs *regs, int fpc) { @@ -182,7 +174,7 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc) void translation_exception(struct pt_regs *regs) { /* May never happen. 
*/ - die(regs, "Translation exception"); + panic("Translation exception"); } void illegal_op(struct pt_regs *regs) @@ -211,29 +203,6 @@ void illegal_op(struct pt_regs *regs) } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) { is_uprobe_insn = 1; #endif -#ifdef CONFIG_MATHEMU - } else if (opcode[0] == 0xb3) { - if (get_user(*((__u16 *) (opcode+2)), location+1)) - return; - signal = math_emu_b3(opcode, regs); - } else if (opcode[0] == 0xed) { - if (get_user(*((__u32 *) (opcode+2)), - (__u32 __user *)(location+1))) - return; - signal = math_emu_ed(opcode, regs); - } else if (*((__u16 *) opcode) == 0xb299) { - if (get_user(*((__u16 *) (opcode+2)), location+1)) - return; - signal = math_emu_srnm(opcode, regs); - } else if (*((__u16 *) opcode) == 0xb29c) { - if (get_user(*((__u16 *) (opcode+2)), location+1)) - return; - signal = math_emu_stfpc(opcode, regs); - } else if (*((__u16 *) opcode) == 0xb29d) { - if (get_user(*((__u16 *) (opcode+2)), location+1)) - return; - signal = math_emu_lfpc(opcode, regs); -#endif } else signal = SIGILL; } @@ -247,71 +216,14 @@ void illegal_op(struct pt_regs *regs) 3, SIGTRAP) != NOTIFY_STOP) signal = SIGILL; } - -#ifdef CONFIG_MATHEMU - if (signal == SIGFPE) - do_fp_trap(regs, current->thread.fp_regs.fpc); - else if (signal == SIGSEGV) - do_trap(regs, signal, SEGV_MAPERR, "user address fault"); - else -#endif if (signal) do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); } NOKPROBE_SYMBOL(illegal_op); -#ifdef CONFIG_MATHEMU -void specification_exception(struct pt_regs *regs) -{ - __u8 opcode[6]; - __u16 __user *location = NULL; - int signal = 0; - - location = (__u16 __user *) get_trap_ip(regs); - - if (user_mode(regs)) { - get_user(*((__u16 *) opcode), location); - switch (opcode[0]) { - case 0x28: /* LDR Rx,Ry */ - signal = math_emu_ldr(opcode); - break; - case 0x38: /* LER Rx,Ry */ - signal = math_emu_ler(opcode); - break; - case 0x60: /* STD R,D(X,B) */ - get_user(*((__u16 *) (opcode+2)), location+1); - signal = math_emu_std(opcode, regs); - break; - case 0x68: /* LD R,D(X,B) */ - get_user(*((__u16 *) (opcode+2)), location+1); - signal = math_emu_ld(opcode, regs); - break; - case 0x70: /* STE R,D(X,B) */ - get_user(*((__u16 *) (opcode+2)), location+1); - signal = math_emu_ste(opcode, regs); - break; - case 0x78: /* LE R,D(X,B) */ - get_user(*((__u16 *) (opcode+2)), location+1); - signal = math_emu_le(opcode, regs); - break; - default: - signal = SIGILL; - break; - } - } else - signal = SIGILL; - - if (signal == SIGFPE) - do_fp_trap(regs, current->thread.fp_regs.fpc); - else if (signal) - do_trap(regs, signal, ILL_ILLOPN, "specification exception"); -} -#else DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, "specification exception"); -#endif -#ifdef CONFIG_64BIT int alloc_vector_registers(struct task_struct *tsk) { __vector128 *vxrs; @@ -377,7 +289,6 @@ static int __init disable_vector_extension(char *str) return 1; } __setup("novx", disable_vector_extension); -#endif void data_exception(struct pt_regs *regs) { @@ -386,65 +297,7 @@ void data_exception(struct pt_regs *regs) location = get_trap_ip(regs); - if (MACHINE_HAS_IEEE) - asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); - -#ifdef CONFIG_MATHEMU - else if (user_mode(regs)) { - __u8 opcode[6]; - get_user(*((__u16 *) opcode), location); - switch (opcode[0]) { - case 0x28: /* LDR Rx,Ry */ - signal = math_emu_ldr(opcode); - break; - case 0x38: /* LER Rx,Ry */ - signal = math_emu_ler(opcode); - break; - case 0x60: /* STD R,D(X,B) */ - get_user(*((__u16 *) (opcode+2)), 
location+1); - signal = math_emu_std(opcode, regs); - break; - case 0x68: /* LD R,D(X,B) */ - get_user(*((__u16 *) (opcode+2)), location+1); - signal = math_emu_ld(opcode, regs); - break; - case 0x70: /* STE R,D(X,B) */ - get_user(*((__u16 *) (opcode+2)), location+1); - signal = math_emu_ste(opcode, regs); - break; - case 0x78: /* LE R,D(X,B) */ - get_user(*((__u16 *) (opcode+2)), location+1); - signal = math_emu_le(opcode, regs); - break; - case 0xb3: - get_user(*((__u16 *) (opcode+2)), location+1); - signal = math_emu_b3(opcode, regs); - break; - case 0xed: - get_user(*((__u32 *) (opcode+2)), - (__u32 __user *)(location+1)); - signal = math_emu_ed(opcode, regs); - break; - case 0xb2: - if (opcode[1] == 0x99) { - get_user(*((__u16 *) (opcode+2)), location+1); - signal = math_emu_srnm(opcode, regs); - } else if (opcode[1] == 0x9c) { - get_user(*((__u16 *) (opcode+2)), location+1); - signal = math_emu_stfpc(opcode, regs); - } else if (opcode[1] == 0x9d) { - get_user(*((__u16 *) (opcode+2)), location+1); - signal = math_emu_lfpc(opcode, regs); - } else - signal = SIGILL; - break; - default: - signal = SIGILL; - break; - } - } -#endif -#ifdef CONFIG_64BIT + asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); /* Check for vector register enablement */ if (MACHINE_HAS_VX && !current->thread.vxrs && (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { @@ -454,13 +307,11 @@ void data_exception(struct pt_regs *regs) clear_pt_regs_flag(regs, PIF_PER_TRAP); return; } -#endif - if (current->thread.fp_regs.fpc & FPC_DXC_MASK) signal = SIGFPE; else signal = SIGILL; - if (signal == SIGFPE) + if (signal == SIGFPE) do_fp_trap(regs, current->thread.fp_regs.fpc); else if (signal) do_trap(regs, signal, ILL_ILLOPN, "data exception"); diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index cc73280..66956c0 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -188,7 +188,9 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len) else if (put_user(*(input), __ptr)) \ __rc = EMU_ADDRESSING; \ if (__rc == 0) \ - sim_stor_event(regs, __ptr, mask + 1); \ + sim_stor_event(regs, \ + (void __force *)__ptr, \ + mask + 1); \ __rc; \ }) diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 0bbb7e0..0d58269 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -32,19 +32,17 @@ #include <asm/vdso.h> #include <asm/facility.h> -#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) +#ifdef CONFIG_COMPAT extern char vdso32_start, vdso32_end; static void *vdso32_kbase = &vdso32_start; static unsigned int vdso32_pages; static struct page **vdso32_pagelist; #endif -#ifdef CONFIG_64BIT extern char vdso64_start, vdso64_end; static void *vdso64_kbase = &vdso64_start; static unsigned int vdso64_pages; static struct page **vdso64_pagelist; -#endif /* CONFIG_64BIT */ /* * Should the kernel map a VDSO page into processes and pass its @@ -87,7 +85,6 @@ static void vdso_init_data(struct vdso_data *vd) vd->ectg_available = test_facility(31); } -#ifdef CONFIG_64BIT /* * Allocate/free per cpu vdso data. 
*/ @@ -169,7 +166,6 @@ static void vdso_init_cr5(void) cr5 = offsetof(struct _lowcore, paste); __ctl_load(cr5, 5, 5); } -#endif /* CONFIG_64BIT */ /* * This is called from binfmt_elf, we create the special vma for the @@ -191,7 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) if (!uses_interp) return 0; -#ifdef CONFIG_64BIT vdso_pagelist = vdso64_pagelist; vdso_pages = vdso64_pages; #ifdef CONFIG_COMPAT @@ -200,11 +195,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_pages = vdso32_pages; } #endif -#else - vdso_pagelist = vdso32_pagelist; - vdso_pages = vdso32_pages; -#endif - /* * vDSO has a problem and was disabled, just don't "enable" it for * the process @@ -268,7 +258,7 @@ static int __init vdso_init(void) if (!vdso_enabled) return 0; vdso_init_data(vdso_data); -#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) +#ifdef CONFIG_COMPAT /* Calculate the size of the 32 bit vDSO */ vdso32_pages = ((&vdso32_end - &vdso32_start + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; @@ -287,7 +277,6 @@ static int __init vdso_init(void) vdso32_pagelist[vdso32_pages] = NULL; #endif -#ifdef CONFIG_64BIT /* Calculate the size of the 64 bit vDSO */ vdso64_pages = ((&vdso64_end - &vdso64_start + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; @@ -307,7 +296,6 @@ static int __init vdso_init(void) if (vdso_alloc_per_cpu(&S390_lowcore)) BUG(); vdso_init_cr5(); -#endif /* CONFIG_64BIT */ get_page(virt_to_page(vdso_data)); diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 35b13ed..445657f 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -6,17 +6,10 @@ #include <asm/page.h> #include <asm-generic/vmlinux.lds.h> -#ifndef CONFIG_64BIT -OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") -OUTPUT_ARCH(s390:31-bit) -ENTRY(startup) -jiffies = jiffies_64 + 4; -#else OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") OUTPUT_ARCH(s390:64-bit) ENTRY(startup) jiffies = jiffies_64; -#endif PHDRS { text PT_LOAD FLAGS(5); /* R_E */ diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index a01df23..0e8fefe 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -3,8 +3,7 @@ # lib-y += delay.o string.o uaccess.o find.o -obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o -obj-$(CONFIG_64BIT) += mem64.o +obj-y += mem.o lib-$(CONFIG_SMP) += spinlock.o lib-$(CONFIG_KPROBES) += probes.o lib-$(CONFIG_UPROBES) += probes.o diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c deleted file mode 100644 index 261152f..0000000 --- a/arch/s390/lib/div64.c +++ /dev/null @@ -1,147 +0,0 @@ -/* - * __div64_32 implementation for 31 bit. - * - * Copyright IBM Corp. 2006 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), - */ - -#include <linux/types.h> -#include <linux/module.h> - -#ifdef CONFIG_MARCH_G5 - -/* - * Function to divide an unsigned 64 bit integer by an unsigned - * 31 bit integer using signed 64/32 bit division. - */ -static uint32_t __div64_31(uint64_t *n, uint32_t base) -{ - register uint32_t reg2 asm("2"); - register uint32_t reg3 asm("3"); - uint32_t *words = (uint32_t *) n; - uint32_t tmp; - - /* Special case base==1, remainder = 0, quotient = n */ - if (base == 1) - return 0; - /* - * Special case base==0 will cause a fixed point divide exception - * on the dr instruction and may not happen anyway. For the - * following calculation we can assume base > 1. 
The first - * signed 64 / 32 bit division with an upper half of 0 will - * give the correct upper half of the 64 bit quotient. - */ - reg2 = 0UL; - reg3 = words[0]; - asm volatile( - " dr %0,%2\n" - : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); - words[0] = reg3; - reg3 = words[1]; - /* - * To get the lower half of the 64 bit quotient and the 32 bit - * remainder we have to use a little trick. Since we only have - * a signed division the quotient can get too big. To avoid this - * the 64 bit dividend is halved, then the signed division will - * work. Afterwards the quotient and the remainder are doubled. - * If the last bit of the dividend has been one the remainder - * is increased by one then checked against the base. If the - * remainder has overflown subtract base and increase the - * quotient. Simple, no ? - */ - asm volatile( - " nr %2,%1\n" - " srdl %0,1\n" - " dr %0,%3\n" - " alr %0,%0\n" - " alr %1,%1\n" - " alr %0,%2\n" - " clr %0,%3\n" - " jl 0f\n" - " slr %0,%3\n" - " ahi %1,1\n" - "0:\n" - : "+d" (reg2), "+d" (reg3), "=d" (tmp) - : "d" (base), "2" (1UL) : "cc" ); - words[1] = reg3; - return reg2; -} - -/* - * Function to divide an unsigned 64 bit integer by an unsigned - * 32 bit integer using the unsigned 64/31 bit division. - */ -uint32_t __div64_32(uint64_t *n, uint32_t base) -{ - uint32_t r; - - /* - * If the most significant bit of base is set, divide n by - * (base/2). That allows to use 64/31 bit division and gives a - * good approximation of the result: n = (base/2)*q + r. The - * result needs to be corrected with two simple transformations. - * If base is already < 2^31-1 __div64_31 can be used directly. - */ - r = __div64_31(n, ((signed) base < 0) ? (base/2) : base); - if ((signed) base < 0) { - uint64_t q = *n; - /* - * First transformation: - * n = (base/2)*q + r - * = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r - * Since r < (base/2), r + (base/2) < base. - * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0) - * n = ((base/2)*2)*q1 + r1 with r1 < base. - */ - if (q & 1) - r += base/2; - q >>= 1; - /* - * Second transformation. ((base/2)*2) could have lost the - * last bit. - * n = ((base/2)*2)*q1 + r1 - * = base*q1 - ((base&1) ? q1 : 0) + r1 - */ - if (base & 1) { - int64_t rx = r - q; - /* - * base is >= 2^31. The worst case for the while - * loop is n=2^64-1 base=2^31+1. That gives a - * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since - * base >= 2^31 the loop is finished after a maximum - * of three iterations. - */ - while (rx < 0) { - rx += base; - q--; - } - r = rx; - } - *n = q; - } - return r; -} - -#else /* MARCH_G5 */ - -uint32_t __div64_32(uint64_t *n, uint32_t base) -{ - register uint32_t reg2 asm("2"); - register uint32_t reg3 asm("3"); - uint32_t *words = (uint32_t *) n; - - reg2 = 0UL; - reg3 = words[0]; - asm volatile( - " dlr %0,%2\n" - : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); - words[0] = reg3; - reg3 = words[1]; - asm volatile( - " dlr %0,%2\n" - : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); - words[1] = reg3; - return reg2; -} - -#endif /* MARCH_G5 */ diff --git a/arch/s390/lib/mem64.S b/arch/s390/lib/mem.S index c6d553e..c6d553e 100644 --- a/arch/s390/lib/mem64.S +++ b/arch/s390/lib/mem.S diff --git a/arch/s390/lib/mem32.S b/arch/s390/lib/mem32.S deleted file mode 100644 index 14ca924..0000000 --- a/arch/s390/lib/mem32.S +++ /dev/null @@ -1,92 +0,0 @@ -/* - * String handling functions. - * - * Copyright IBM Corp. 
2012 - */ - -#include <linux/linkage.h> - -/* - * memset implementation - * - * This code corresponds to the C construct below. We do distinguish - * between clearing (c == 0) and setting a memory array (c != 0) simply - * because nearly all memset invocations in the kernel clear memory and - * the xc instruction is preferred in such cases. - * - * void *memset(void *s, int c, size_t n) - * { - * if (likely(c == 0)) - * return __builtin_memset(s, 0, n); - * return __builtin_memset(s, c, n); - * } - */ -ENTRY(memset) - basr %r5,%r0 -.Lmemset_base: - ltr %r4,%r4 - bzr %r14 - ltr %r3,%r3 - jnz .Lmemset_fill - ahi %r4,-1 - lr %r3,%r4 - srl %r3,8 - ltr %r3,%r3 - lr %r1,%r2 - je .Lmemset_clear_rest -.Lmemset_clear_loop: - xc 0(256,%r1),0(%r1) - la %r1,256(%r1) - brct %r3,.Lmemset_clear_loop -.Lmemset_clear_rest: - ex %r4,.Lmemset_xc-.Lmemset_base(%r5) - br %r14 -.Lmemset_fill: - stc %r3,0(%r2) - chi %r4,1 - lr %r1,%r2 - ber %r14 - ahi %r4,-2 - lr %r3,%r4 - srl %r3,8 - ltr %r3,%r3 - je .Lmemset_fill_rest -.Lmemset_fill_loop: - mvc 1(256,%r1),0(%r1) - la %r1,256(%r1) - brct %r3,.Lmemset_fill_loop -.Lmemset_fill_rest: - ex %r4,.Lmemset_mvc-.Lmemset_base(%r5) - br %r14 -.Lmemset_xc: - xc 0(1,%r1),0(%r1) -.Lmemset_mvc: - mvc 1(1,%r1),0(%r1) - -/* - * memcpy implementation - * - * void *memcpy(void *dest, const void *src, size_t n) - */ -ENTRY(memcpy) - basr %r5,%r0 -.Lmemcpy_base: - ltr %r4,%r4 - bzr %r14 - ahi %r4,-1 - lr %r0,%r4 - srl %r0,8 - ltr %r0,%r0 - lr %r1,%r2 - jnz .Lmemcpy_loop -.Lmemcpy_rest: - ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5) - br %r14 -.Lmemcpy_loop: - mvc 0(256,%r1),0(%r3) - la %r1,256(%r1) - la %r3,256(%r3) - brct %r0,.Lmemcpy_loop - j .Lmemcpy_rest -.Lmemcpy_mvc: - mvc 0(1,%r1),0(%r3) diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S deleted file mode 100644 index d321329..0000000 --- a/arch/s390/lib/qrnnd.S +++ /dev/null @@ -1,78 +0,0 @@ -# S/390 __udiv_qrnnd - -#include <linux/linkage.h> - -# r2 : &__r -# r3 : upper half of 64 bit word n -# r4 : lower half of 64 bit word n -# r5 : divisor d -# the reminder r of the division is to be stored to &__r and -# the quotient q is to be returned - - .text -ENTRY(__udiv_qrnnd) - st %r2,24(%r15) # store pointer to reminder for later - lr %r0,%r3 # reload n - lr %r1,%r4 - ltr %r2,%r5 # reload and test divisor - jp 5f - # divisor >= 0x80000000 - srdl %r0,2 # n/4 - srl %r2,1 # d/2 - slr %r1,%r2 # special case if last bit of d is set - brc 3,0f # (n/4) div (n/2) can overflow by 1 - ahi %r0,-1 # trick: subtract n/2, then divide -0: dr %r0,%r2 # signed division - ahi %r1,1 # trick part 2: add 1 to the quotient - # now (n >> 2) = (d >> 1) * %r1 + %r0 - lhi %r3,1 - nr %r3,%r1 # test last bit of q - jz 1f - alr %r0,%r2 # add (d>>1) to r -1: srl %r1,1 # q >>= 1 - # now (n >> 2) = (d&-2) * %r1 + %r0 - lhi %r3,1 - nr %r3,%r5 # test last bit of d - jz 2f - slr %r0,%r1 # r -= q - brc 3,2f # borrow ? - alr %r0,%r5 # r += d - ahi %r1,-1 -2: # now (n >> 2) = d * %r1 + %r0 - alr %r1,%r1 # q <<= 1 - alr %r0,%r0 # r <<= 1 - brc 12,3f # overflow on r ? - slr %r0,%r5 # r -= d - ahi %r1,1 # q += 1 -3: lhi %r3,2 - nr %r3,%r4 # test next to last bit of n - jz 4f - ahi %r0,1 # r += 1 -4: clr %r0,%r5 # r >= d ? - jl 6f - slr %r0,%r5 # r -= d - ahi %r1,1 # q += 1 - # now (n >> 1) = d * %r1 + %r0 - j 6f -5: # divisor < 0x80000000 - srdl %r0,1 - dr %r0,%r2 # signed division - # now (n >> 1) = d * %r1 + %r0 -6: alr %r1,%r1 # q <<= 1 - alr %r0,%r0 # r <<= 1 - brc 12,7f # overflow on r ? 
- slr %r0,%r5 # r -= d - ahi %r1,1 # q += 1 -7: lhi %r3,1 - nr %r3,%r4 # isolate last bit of n - alr %r0,%r3 # r += (n & 1) - clr %r0,%r5 # r >= d ? - jl 8f - slr %r0,%r5 # r -= d - ahi %r1,1 # q += 1 -8: # now n = d * %r1 + %r0 - l %r2,24(%r15) - st %r0,0(%r2) - lr %r2,%r1 - br %r14 - .end __udiv_qrnnd diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index 53dd5d7..4614d41 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -15,20 +15,6 @@ #include <asm/mmu_context.h> #include <asm/facility.h> -#ifndef CONFIG_64BIT -#define AHI "ahi" -#define ALR "alr" -#define CLR "clr" -#define LHI "lhi" -#define SLR "slr" -#else -#define AHI "aghi" -#define ALR "algr" -#define CLR "clgr" -#define LHI "lghi" -#define SLR "slgr" -#endif - static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE; static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, @@ -41,29 +27,29 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr asm volatile( "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" "9: jz 7f\n" - "1:"ALR" %0,%3\n" - " "SLR" %1,%3\n" - " "SLR" %2,%3\n" + "1: algr %0,%3\n" + " slgr %1,%3\n" + " slgr %2,%3\n" " j 0b\n" "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ - " "SLR" %4,%1\n" - " "CLR" %0,%4\n" /* copy crosses next page boundary? */ + " slgr %4,%1\n" + " clgr %0,%4\n" /* copy crosses next page boundary? */ " jnh 4f\n" "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" - "10:"SLR" %0,%4\n" - " "ALR" %2,%4\n" - "4:"LHI" %4,-1\n" - " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ + "10:slgr %0,%4\n" + " algr %2,%4\n" + "4: lghi %4,-1\n" + " algr %4,%0\n" /* copy remaining size, subtract 1 */ " bras %3,6f\n" /* memset loop */ " xc 0(1,%2),0(%2)\n" "5: xc 0(256,%2),0(%2)\n" " la %2,256(%2)\n" - "6:"AHI" %4,-256\n" + "6: aghi %4,-256\n" " jnm 5b\n" " ex %4,0(%3)\n" " j 8f\n" - "7:"SLR" %0,%0\n" + "7:slgr %0,%0\n" "8:\n" EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b) : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) @@ -82,32 +68,32 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr, " sacf 0\n" "0: mvcp 0(%0,%2),0(%1),%3\n" "10:jz 8f\n" - "1:"ALR" %0,%3\n" + "1: algr %0,%3\n" " la %1,256(%1)\n" " la %2,256(%2)\n" "2: mvcp 0(%0,%2),0(%1),%3\n" "11:jnz 1b\n" " j 8f\n" "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ - " "LHI" %3,-4096\n" + " lghi %3,-4096\n" " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ - " "SLR" %4,%1\n" - " "CLR" %0,%4\n" /* copy crosses next page boundary? */ + " slgr %4,%1\n" + " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ " jnh 5f\n" "4: mvcp 0(%4,%2),0(%1),%3\n" - "12:"SLR" %0,%4\n" - " "ALR" %2,%4\n" - "5:"LHI" %4,-1\n" - " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ + "12:slgr %0,%4\n" + " algr %2,%4\n" + "5: lghi %4,-1\n" + " algr %4,%0\n" /* copy remaining size, subtract 1 */ " bras %3,7f\n" /* memset loop */ " xc 0(1,%2),0(%2)\n" "6: xc 0(256,%2),0(%2)\n" " la %2,256(%2)\n" - "7:"AHI" %4,-256\n" + "7: aghi %4,-256\n" " jnm 6b\n" " ex %4,0(%3)\n" " j 9f\n" - "8:"SLR" %0,%0\n" + "8:slgr %0,%0\n" "9: sacf 768\n" EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b) EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b) @@ -134,19 +120,19 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x, asm volatile( "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" "6: jz 4f\n" - "1:"ALR" %0,%3\n" - " "SLR" %1,%3\n" - " "SLR" %2,%3\n" + "1: algr %0,%3\n" + " slgr %1,%3\n" + " slgr %2,%3\n" " j 0b\n" "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ - " "SLR" %4,%1\n" - " "CLR" %0,%4\n" /* copy crosses next page boundary? */ + " slgr %4,%1\n" + " clgr %0,%4\n" /* copy crosses next page boundary? */ " jnh 5f\n" "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" - "7:"SLR" %0,%4\n" + "7: slgr %0,%4\n" " j 5f\n" - "4:"SLR" %0,%0\n" + "4: slgr %0,%0\n" "5:\n" EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) @@ -165,22 +151,22 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x, " sacf 0\n" "0: mvcs 0(%0,%1),0(%2),%3\n" "7: jz 5f\n" - "1:"ALR" %0,%3\n" + "1: algr %0,%3\n" " la %1,256(%1)\n" " la %2,256(%2)\n" "2: mvcs 0(%0,%1),0(%2),%3\n" "8: jnz 1b\n" " j 5f\n" "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ - " "LHI" %3,-4096\n" + " lghi %3,-4096\n" " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ - " "SLR" %4,%1\n" - " "CLR" %0,%4\n" /* copy crosses next page boundary? */ + " slgr %4,%1\n" + " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ " jnh 6f\n" "4: mvcs 0(%4,%1),0(%2),%3\n" - "9:"SLR" %0,%4\n" + "9: slgr %0,%4\n" " j 6f\n" - "5:"SLR" %0,%0\n" + "5: slgr %0,%0\n" "6: sacf 768\n" EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) @@ -208,11 +194,11 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use asm volatile( "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" " jz 2f\n" - "1:"ALR" %0,%3\n" - " "SLR" %1,%3\n" - " "SLR" %2,%3\n" + "1: algr %0,%3\n" + " slgr %1,%3\n" + " slgr %2,%3\n" " j 0b\n" - "2:"SLR" %0,%0\n" + "2:slgr %0,%0\n" "3: \n" EX_TABLE(0b,3b) : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) @@ -228,23 +214,23 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user load_kernel_asce(); asm volatile( " sacf 256\n" - " "AHI" %0,-1\n" + " aghi %0,-1\n" " jo 5f\n" " bras %3,3f\n" - "0:"AHI" %0,257\n" + "0: aghi %0,257\n" "1: mvc 0(1,%1),0(%2)\n" " la %1,1(%1)\n" " la %2,1(%2)\n" - " "AHI" %0,-1\n" + " aghi %0,-1\n" " jnz 1b\n" " j 5f\n" "2: mvc 0(256,%1),0(%2)\n" " la %1,256(%1)\n" " la %2,256(%2)\n" - "3:"AHI" %0,-256\n" + "3: aghi %0,-256\n" " jnm 2b\n" "4: ex %0,1b-0b(%3)\n" - "5: "SLR" %0,%0\n" + "5: slgr %0,%0\n" "6: sacf 768\n" EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) @@ -269,18 +255,18 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size asm volatile( "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" " jz 4f\n" - "1:"ALR" %0,%2\n" - " "SLR" %1,%2\n" + "1: algr %0,%2\n" + " slgr %1,%2\n" " j 0b\n" "2: la %3,4095(%1)\n"/* %4 = to + 4095 */ " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */ - " "SLR" %3,%1\n" - " "CLR" %0,%3\n" /* copy crosses next page boundary? */ + " slgr %3,%1\n" + " clgr %0,%3\n" /* copy crosses next page boundary? */ " jnh 5f\n" "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" - " "SLR" %0,%3\n" + " slgr %0,%3\n" " j 5f\n" - "4:"SLR" %0,%0\n" + "4:slgr %0,%0\n" "5:\n" EX_TABLE(0b,2b) EX_TABLE(3b,5b) : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) @@ -295,28 +281,28 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size) load_kernel_asce(); asm volatile( " sacf 256\n" - " "AHI" %0,-1\n" + " aghi %0,-1\n" " jo 5f\n" " bras %3,3f\n" " xc 0(1,%1),0(%1)\n" - "0:"AHI" %0,257\n" + "0: aghi %0,257\n" " la %2,255(%1)\n" /* %2 = ptr + 255 */ " srl %2,12\n" " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */ - " "SLR" %2,%1\n" - " "CLR" %0,%2\n" /* clear crosses next page boundary? */ + " slgr %2,%1\n" + " clgr %0,%2\n" /* clear crosses next page boundary? 
*/ " jnh 5f\n" - " "AHI" %2,-1\n" + " aghi %2,-1\n" "1: ex %2,0(%3)\n" - " "AHI" %2,1\n" - " "SLR" %0,%2\n" + " aghi %2,1\n" + " slgr %0,%2\n" " j 5f\n" "2: xc 0(256,%1),0(%1)\n" " la %1,256(%1)\n" - "3:"AHI" %0,-256\n" + "3: aghi %0,-256\n" " jnm 2b\n" "4: ex %0,0(%3)\n" - "5: "SLR" %0,%0\n" + "5: slgr %0,%0\n" "6: sacf 768\n" EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) @@ -341,12 +327,12 @@ static inline unsigned long strnlen_user_srst(const char __user *src, asm volatile( " la %2,0(%1)\n" " la %3,0(%0,%1)\n" - " "SLR" %0,%0\n" + " slgr %0,%0\n" " sacf 256\n" "0: srst %3,%2\n" " jo 0b\n" " la %0,1(%3)\n" /* strnlen_user results includes \0 */ - " "SLR" %0,%1\n" + " slgr %0,%1\n" "1: sacf 768\n" EX_TABLE(0b,1b) : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2) @@ -399,7 +385,7 @@ early_param("uaccess_primary", parse_uaccess_pt); static int __init uaccess_init(void) { - if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27)) + if (!uaccess_primary && test_facility(27)) static_key_slow_inc(&have_mvcos); return 0; } diff --git a/arch/s390/lib/ucmpdi2.c b/arch/s390/lib/ucmpdi2.c deleted file mode 100644 index 3e05ff5..0000000 --- a/arch/s390/lib/ucmpdi2.c +++ /dev/null @@ -1,26 +0,0 @@ -#include <linux/module.h> - -union ull_union { - unsigned long long ull; - struct { - unsigned int high; - unsigned int low; - } ui; -}; - -int __ucmpdi2(unsigned long long a, unsigned long long b) -{ - union ull_union au = {.ull = a}; - union ull_union bu = {.ull = b}; - - if (au.ui.high < bu.ui.high) - return 0; - else if (au.ui.high > bu.ui.high) - return 2; - if (au.ui.low < bu.ui.low) - return 0; - else if (au.ui.low > bu.ui.low) - return 2; - return 1; -} -EXPORT_SYMBOL(__ucmpdi2); diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile deleted file mode 100644 index 51d3995..0000000 --- a/arch/s390/math-emu/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for the FPU instruction emulation. -# - -obj-$(CONFIG_MATHEMU) := math.o - -ccflags-y := -I$(src) -Iinclude/math-emu -w diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c deleted file mode 100644 index a6ba0d7..0000000 --- a/arch/s390/math-emu/math.c +++ /dev/null @@ -1,2255 +0,0 @@ -/* - * S390 version - * Copyright IBM Corp. 1999, 2001 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), - * - * 'math.c' emulates IEEE instructions on a S390 processor - * that does not have the IEEE fpu (all processors before G5). - */ - -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/mm.h> -#include <asm/uaccess.h> -#include <asm/lowcore.h> - -#include <asm/sfp-util.h> -#include <math-emu/soft-fp.h> -#include <math-emu/single.h> -#include <math-emu/double.h> -#include <math-emu/quad.h> - -#define FPC_VALID_MASK 0xF8F8FF03 - -/* - * I miss a macro to round a floating point number to the - * nearest integer in the same floating point format. - */ -#define _FP_TO_FPINT_ROUND(fs, wc, X) \ - do { \ - switch (X##_c) \ - { \ - case FP_CLS_NORMAL: \ - if (X##_e > _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs) \ - { /* floating point number has no bits after the dot. */ \ - } \ - else if (X##_e <= _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs && \ - X##_e > _FP_EXPBIAS_##fs) \ - { /* some bits before the dot, some after it. 
*/ \ - _FP_FRAC_SRS_##wc(X, _FP_WFRACBITS_##fs, \ - X##_e - _FP_EXPBIAS_##fs \ - + _FP_FRACBITS_##fs); \ - _FP_ROUND(wc, X); \ - _FP_FRAC_SLL_##wc(X, X##_e - _FP_EXPBIAS_##fs \ - + _FP_FRACBITS_##fs); \ - } \ - else \ - { /* all bits after the dot. */ \ - FP_SET_EXCEPTION(FP_EX_INEXACT); \ - X##_c = FP_CLS_ZERO; \ - } \ - break; \ - case FP_CLS_NAN: \ - case FP_CLS_INF: \ - case FP_CLS_ZERO: \ - break; \ - } \ - } while (0) - -#define FP_TO_FPINT_ROUND_S(X) _FP_TO_FPINT_ROUND(S,1,X) -#define FP_TO_FPINT_ROUND_D(X) _FP_TO_FPINT_ROUND(D,2,X) -#define FP_TO_FPINT_ROUND_Q(X) _FP_TO_FPINT_ROUND(Q,4,X) - -typedef union { - long double ld; - struct { - __u64 high; - __u64 low; - } w; -} mathemu_ldcv; - -#ifdef CONFIG_SYSCTL -int sysctl_ieee_emulation_warnings=1; -#endif - -#define mathemu_put_user(x, p) \ - do { \ - if (put_user((x),(p))) \ - return SIGSEGV; \ - } while (0) - -#define mathemu_get_user(x, p) \ - do { \ - if (get_user((x),(p))) \ - return SIGSEGV; \ - } while (0) - -#define mathemu_copy_from_user(d, s, n)\ - do { \ - if (copy_from_user((d),(s),(n)) != 0) \ - return SIGSEGV; \ - } while (0) - -#define mathemu_copy_to_user(d, s, n) \ - do { \ - if (copy_to_user((d),(s),(n)) != 0) \ - return SIGSEGV; \ - } while (0) - -static void display_emulation_not_implemented(struct pt_regs *regs, char *instr) -{ - __u16 *location; - -#ifdef CONFIG_SYSCTL - if(sysctl_ieee_emulation_warnings) -#endif - { - location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc); - printk("%s ieee fpu instruction not emulated " - "process name: %s pid: %d \n", - instr, current->comm, current->pid); - printk("%s's PSW: %08lx %08lx\n", instr, - (unsigned long) regs->psw.mask, - (unsigned long) location); - } -} - -static inline void emu_set_CC (struct pt_regs *regs, int cc) -{ - regs->psw.mask = (regs->psw.mask & 0xFFFFCFFF) | ((cc&3) << 12); -} - -/* - * Set the condition code in the user psw. - * 0 : Result is zero - * 1 : Result is less than zero - * 2 : Result is greater than zero - * 3 : Result is NaN or INF - */ -static inline void emu_set_CC_cs(struct pt_regs *regs, int class, int sign) -{ - switch (class) { - case FP_CLS_NORMAL: - case FP_CLS_INF: - emu_set_CC(regs, sign ? 
1 : 2); - break; - case FP_CLS_ZERO: - emu_set_CC(regs, 0); - break; - case FP_CLS_NAN: - emu_set_CC(regs, 3); - break; - } -} - -/* Add long double */ -static int emu_axbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QB, &cvt.ld); - FP_ADD_Q(QR, QA, QB); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - emu_set_CC_cs(regs, QR_c, QR_s); - return _fex; -} - -/* Add double */ -static int emu_adbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); - FP_ADD_D(DR, DA, DB); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - emu_set_CC_cs(regs, DR_c, DR_s); - return _fex; -} - -/* Add double */ -static int emu_adb (struct pt_regs *regs, int rx, double *val) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, val); - FP_ADD_D(DR, DA, DB); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - emu_set_CC_cs(regs, DR_c, DR_s); - return _fex; -} - -/* Add float */ -static int emu_aebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); - FP_ADD_S(SR, SA, SB); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - emu_set_CC_cs(regs, SR_c, SR_s); - return _fex; -} - -/* Add float */ -static int emu_aeb (struct pt_regs *regs, int rx, float *val) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, val); - FP_ADD_S(SR, SA, SB); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - emu_set_CC_cs(regs, SR_c, SR_s); - return _fex; -} - -/* Compare long double */ -static int emu_cxbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_Q(QB); - mathemu_ldcv cvt; - int IR; - - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; - FP_UNPACK_RAW_QP(QA, &cvt.ld); - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_RAW_QP(QB, &cvt.ld); - FP_CMP_Q(IR, QA, QB, 3); - /* - * IR == -1 if DA < DB, IR == 0 if DA == DB, - * IR == 1 if DA > DB and IR == 3 if unorderded - */ - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); - return 0; -} - -/* Compare double */ -static int emu_cdbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_D(DB); - int IR; - - FP_UNPACK_RAW_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_RAW_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); - FP_CMP_D(IR, DA, DB, 3); - /* - * IR == -1 if DA < DB, IR == 0 if DA == DB, - * IR == 1 if DA > DB and IR == 3 if unorderded - */ - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 
2 : IR); - return 0; -} - -/* Compare double */ -static int emu_cdb (struct pt_regs *regs, int rx, double *val) { - FP_DECL_D(DA); FP_DECL_D(DB); - int IR; - - FP_UNPACK_RAW_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_RAW_DP(DB, val); - FP_CMP_D(IR, DA, DB, 3); - /* - * IR == -1 if DA < DB, IR == 0 if DA == DB, - * IR == 1 if DA > DB and IR == 3 if unorderded - */ - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); - return 0; -} - -/* Compare float */ -static int emu_cebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_S(SB); - int IR; - - FP_UNPACK_RAW_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_RAW_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); - FP_CMP_S(IR, SA, SB, 3); - /* - * IR == -1 if DA < DB, IR == 0 if DA == DB, - * IR == 1 if DA > DB and IR == 3 if unorderded - */ - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); - return 0; -} - -/* Compare float */ -static int emu_ceb (struct pt_regs *regs, int rx, float *val) { - FP_DECL_S(SA); FP_DECL_S(SB); - int IR; - - FP_UNPACK_RAW_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_RAW_SP(SB, val); - FP_CMP_S(IR, SA, SB, 3); - /* - * IR == -1 if DA < DB, IR == 0 if DA == DB, - * IR == 1 if DA > DB and IR == 3 if unorderded - */ - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); - return 0; -} - -/* Compare and signal long double */ -static int emu_kxbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_Q(QB); - FP_DECL_EX; - mathemu_ldcv cvt; - int IR; - - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; - FP_UNPACK_RAW_QP(QA, &cvt.ld); - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QB, &cvt.ld); - FP_CMP_Q(IR, QA, QB, 3); - /* - * IR == -1 if DA < DB, IR == 0 if DA == DB, - * IR == 1 if DA > DB and IR == 3 if unorderded - */ - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); - if (IR == 3) - FP_SET_EXCEPTION (FP_EX_INVALID); - return _fex; -} - -/* Compare and signal double */ -static int emu_kdbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_D(DB); - FP_DECL_EX; - int IR; - - FP_UNPACK_RAW_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_RAW_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); - FP_CMP_D(IR, DA, DB, 3); - /* - * IR == -1 if DA < DB, IR == 0 if DA == DB, - * IR == 1 if DA > DB and IR == 3 if unorderded - */ - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); - if (IR == 3) - FP_SET_EXCEPTION (FP_EX_INVALID); - return _fex; -} - -/* Compare and signal double */ -static int emu_kdb (struct pt_regs *regs, int rx, double *val) { - FP_DECL_D(DA); FP_DECL_D(DB); - FP_DECL_EX; - int IR; - - FP_UNPACK_RAW_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_RAW_DP(DB, val); - FP_CMP_D(IR, DA, DB, 3); - /* - * IR == -1 if DA < DB, IR == 0 if DA == DB, - * IR == 1 if DA > DB and IR == 3 if unorderded - */ - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); - if (IR == 3) - FP_SET_EXCEPTION (FP_EX_INVALID); - return _fex; -} - -/* Compare and signal float */ -static int emu_kebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_S(SB); - FP_DECL_EX; - int IR; - - FP_UNPACK_RAW_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_RAW_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); - FP_CMP_S(IR, SA, SB, 3); - /* - * IR == -1 if DA < DB, IR == 0 if DA == DB, - * IR == 1 if DA > DB and IR == 3 if unorderded - */ - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 
2 : IR); - if (IR == 3) - FP_SET_EXCEPTION (FP_EX_INVALID); - return _fex; -} - -/* Compare and signal float */ -static int emu_keb (struct pt_regs *regs, int rx, float *val) { - FP_DECL_S(SA); FP_DECL_S(SB); - FP_DECL_EX; - int IR; - - FP_UNPACK_RAW_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_RAW_SP(SB, val); - FP_CMP_S(IR, SA, SB, 3); - /* - * IR == -1 if DA < DB, IR == 0 if DA == DB, - * IR == 1 if DA > DB and IR == 3 if unorderded - */ - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); - if (IR == 3) - FP_SET_EXCEPTION (FP_EX_INVALID); - return _fex; -} - -/* Convert from fixed long double */ -static int emu_cxfbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - __s32 si; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - si = regs->gprs[ry]; - FP_FROM_INT_Q(QR, si, 32, int); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - return _fex; -} - -/* Convert from fixed double */ -static int emu_cdfbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DR); - FP_DECL_EX; - __s32 si; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - si = regs->gprs[ry]; - FP_FROM_INT_D(DR, si, 32, int); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - return _fex; -} - -/* Convert from fixed float */ -static int emu_cefbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SR); - FP_DECL_EX; - __s32 si; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - si = regs->gprs[ry]; - FP_FROM_INT_S(SR, si, 32, int); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - return _fex; -} - -/* Convert to fixed long double */ -static int emu_cfxbr (struct pt_regs *regs, int rx, int ry, int mask) { - FP_DECL_Q(QA); - FP_DECL_EX; - mathemu_ldcv cvt; - __s32 si; - int mode; - - if (mask == 0) - mode = current->thread.fp_regs.fpc & 3; - else if (mask == 1) - mode = FP_RND_NEAREST; - else - mode = mask - 4; - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - FP_TO_INT_ROUND_Q(si, QA, 32, 1); - regs->gprs[rx] = si; - emu_set_CC_cs(regs, QA_c, QA_s); - return _fex; -} - -/* Convert to fixed double */ -static int emu_cfdbr (struct pt_regs *regs, int rx, int ry, int mask) { - FP_DECL_D(DA); - FP_DECL_EX; - __s32 si; - int mode; - - if (mask == 0) - mode = current->thread.fp_regs.fpc & 3; - else if (mask == 1) - mode = FP_RND_NEAREST; - else - mode = mask - 4; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); - FP_TO_INT_ROUND_D(si, DA, 32, 1); - regs->gprs[rx] = si; - emu_set_CC_cs(regs, DA_c, DA_s); - return _fex; -} - -/* Convert to fixed float */ -static int emu_cfebr (struct pt_regs *regs, int rx, int ry, int mask) { - FP_DECL_S(SA); - FP_DECL_EX; - __s32 si; - int mode; - - if (mask == 0) - mode = current->thread.fp_regs.fpc & 3; - else if (mask == 1) - mode = FP_RND_NEAREST; - else - mode = mask - 4; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); - FP_TO_INT_ROUND_S(si, SA, 32, 1); - regs->gprs[rx] = si; - emu_set_CC_cs(regs, SA_c, SA_s); - return _fex; -} - -/* Divide long double */ -static int emu_dxbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = 
current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QB, &cvt.ld); - FP_DIV_Q(QR, QA, QB); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - return _fex; -} - -/* Divide double */ -static int emu_ddbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); - FP_DIV_D(DR, DA, DB); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - return _fex; -} - -/* Divide double */ -static int emu_ddb (struct pt_regs *regs, int rx, double *val) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, val); - FP_DIV_D(DR, DA, DB); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - return _fex; -} - -/* Divide float */ -static int emu_debr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); - FP_DIV_S(SR, SA, SB); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - return _fex; -} - -/* Divide float */ -static int emu_deb (struct pt_regs *regs, int rx, float *val) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, val); - FP_DIV_S(SR, SA, SB); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - return _fex; -} - -/* Divide to integer double */ -static int emu_didbr (struct pt_regs *regs, int rx, int ry, int mask) { - display_emulation_not_implemented(regs, "didbr"); - return 0; -} - -/* Divide to integer float */ -static int emu_diebr (struct pt_regs *regs, int rx, int ry, int mask) { - display_emulation_not_implemented(regs, "diebr"); - return 0; -} - -/* Extract fpc */ -static int emu_efpc (struct pt_regs *regs, int rx, int ry) { - regs->gprs[rx] = current->thread.fp_regs.fpc; - return 0; -} - -/* Load and test long double */ -static int emu_ltxbr (struct pt_regs *regs, int rx, int ry) { - s390_fp_regs *fp_regs = ¤t->thread.fp_regs; - mathemu_ldcv cvt; - FP_DECL_Q(QA); - FP_DECL_EX; - - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; - fp_regs->fprs[rx+2].ui = fp_regs->fprs[ry+2].ui; - emu_set_CC_cs(regs, QA_c, QA_s); - return _fex; -} - -/* Load and test double */ -static int emu_ltdbr (struct pt_regs *regs, int rx, int ry) { - s390_fp_regs *fp_regs = ¤t->thread.fp_regs; - FP_DECL_D(DA); - FP_DECL_EX; - - FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d); - fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; - emu_set_CC_cs(regs, DA_c, DA_s); - return _fex; -} - -/* Load and test double */ -static int emu_ltebr (struct pt_regs *regs, int rx, int ry) { - s390_fp_regs *fp_regs = ¤t->thread.fp_regs; - FP_DECL_S(SA); - FP_DECL_EX; - - FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f); - fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; - emu_set_CC_cs(regs, SA_c, SA_s); - return _fex; -} - -/* Load complement long double */ -static int emu_lcxbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int 
mode; - - mode = current->thread.fp_regs.fpc & 3; - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - FP_NEG_Q(QR, QA); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - emu_set_CC_cs(regs, QR_c, QR_s); - return _fex; -} - -/* Load complement double */ -static int emu_lcdbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); - FP_NEG_D(DR, DA); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - emu_set_CC_cs(regs, DR_c, DR_s); - return _fex; -} - -/* Load complement float */ -static int emu_lcebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); - FP_NEG_S(SR, SA); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - emu_set_CC_cs(regs, SR_c, SR_s); - return _fex; -} - -/* Load floating point integer long double */ -static int emu_fixbr (struct pt_regs *regs, int rx, int ry, int mask) { - s390_fp_regs *fp_regs = ¤t->thread.fp_regs; - FP_DECL_Q(QA); - FP_DECL_EX; - mathemu_ldcv cvt; - __s32 si; - int mode; - - if (mask == 0) - mode = fp_regs->fpc & 3; - else if (mask == 1) - mode = FP_RND_NEAREST; - else - mode = mask - 4; - cvt.w.high = fp_regs->fprs[ry].ui; - cvt.w.low = fp_regs->fprs[ry+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - FP_TO_FPINT_ROUND_Q(QA); - FP_PACK_QP(&cvt.ld, QA); - fp_regs->fprs[rx].ui = cvt.w.high; - fp_regs->fprs[rx+2].ui = cvt.w.low; - return _fex; -} - -/* Load floating point integer double */ -static int emu_fidbr (struct pt_regs *regs, int rx, int ry, int mask) { - /* FIXME: rounding mode !! 
*/ - s390_fp_regs *fp_regs = ¤t->thread.fp_regs; - FP_DECL_D(DA); - FP_DECL_EX; - __s32 si; - int mode; - - if (mask == 0) - mode = fp_regs->fpc & 3; - else if (mask == 1) - mode = FP_RND_NEAREST; - else - mode = mask - 4; - FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d); - FP_TO_FPINT_ROUND_D(DA); - FP_PACK_DP(&fp_regs->fprs[rx].d, DA); - return _fex; -} - -/* Load floating point integer float */ -static int emu_fiebr (struct pt_regs *regs, int rx, int ry, int mask) { - s390_fp_regs *fp_regs = ¤t->thread.fp_regs; - FP_DECL_S(SA); - FP_DECL_EX; - __s32 si; - int mode; - - if (mask == 0) - mode = fp_regs->fpc & 3; - else if (mask == 1) - mode = FP_RND_NEAREST; - else - mode = mask - 4; - FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f); - FP_TO_FPINT_ROUND_S(SA); - FP_PACK_SP(&fp_regs->fprs[rx].f, SA); - return _fex; -} - -/* Load lengthened double to long double */ -static int emu_lxdbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); - FP_CONV (Q, D, 4, 2, QR, DA); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - return _fex; -} - -/* Load lengthened double to long double */ -static int emu_lxdb (struct pt_regs *regs, int rx, double *val) { - FP_DECL_D(DA); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, val); - FP_CONV (Q, D, 4, 2, QR, DA); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - return _fex; -} - -/* Load lengthened float to long double */ -static int emu_lxebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); - FP_CONV (Q, S, 4, 1, QR, SA); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - return _fex; -} - -/* Load lengthened float to long double */ -static int emu_lxeb (struct pt_regs *regs, int rx, float *val) { - FP_DECL_S(SA); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, val); - FP_CONV (Q, S, 4, 1, QR, SA); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - return _fex; -} - -/* Load lengthened float to double */ -static int emu_ldebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); - FP_CONV (D, S, 2, 1, DR, SA); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - return _fex; -} - -/* Load lengthened float to double */ -static int emu_ldeb (struct pt_regs *regs, int rx, float *val) { - FP_DECL_S(SA); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, val); - FP_CONV (D, S, 2, 1, DR, SA); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - return _fex; -} - -/* Load negative long double */ -static int emu_lnxbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - cvt.w.high = 
current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - if (QA_s == 0) { - FP_NEG_Q(QR, QA); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - } else { - current->thread.fp_regs.fprs[rx].ui = - current->thread.fp_regs.fprs[ry].ui; - current->thread.fp_regs.fprs[rx+2].ui = - current->thread.fp_regs.fprs[ry+2].ui; - } - emu_set_CC_cs(regs, QR_c, QR_s); - return _fex; -} - -/* Load negative double */ -static int emu_lndbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); - if (DA_s == 0) { - FP_NEG_D(DR, DA); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - } else - current->thread.fp_regs.fprs[rx].ui = - current->thread.fp_regs.fprs[ry].ui; - emu_set_CC_cs(regs, DR_c, DR_s); - return _fex; -} - -/* Load negative float */ -static int emu_lnebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); - if (SA_s == 0) { - FP_NEG_S(SR, SA); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - } else - current->thread.fp_regs.fprs[rx].ui = - current->thread.fp_regs.fprs[ry].ui; - emu_set_CC_cs(regs, SR_c, SR_s); - return _fex; -} - -/* Load positive long double */ -static int emu_lpxbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - if (QA_s != 0) { - FP_NEG_Q(QR, QA); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - } else{ - current->thread.fp_regs.fprs[rx].ui = - current->thread.fp_regs.fprs[ry].ui; - current->thread.fp_regs.fprs[rx+2].ui = - current->thread.fp_regs.fprs[ry+2].ui; - } - emu_set_CC_cs(regs, QR_c, QR_s); - return _fex; -} - -/* Load positive double */ -static int emu_lpdbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); - if (DA_s != 0) { - FP_NEG_D(DR, DA); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - } else - current->thread.fp_regs.fprs[rx].ui = - current->thread.fp_regs.fprs[ry].ui; - emu_set_CC_cs(regs, DR_c, DR_s); - return _fex; -} - -/* Load positive float */ -static int emu_lpebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); - if (SA_s != 0) { - FP_NEG_S(SR, SA); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - } else - current->thread.fp_regs.fprs[rx].ui = - current->thread.fp_regs.fprs[ry].ui; - emu_set_CC_cs(regs, SR_c, SR_s); - return _fex; -} - -/* Load rounded long double to double */ -static int emu_ldxbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_D(DR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - FP_CONV (D, Q, 2, 
4, DR, QA); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].f, DR); - return _fex; -} - -/* Load rounded long double to float */ -static int emu_lexbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_S(SR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - FP_CONV (S, Q, 1, 4, SR, QA); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - return _fex; -} - -/* Load rounded double to float */ -static int emu_ledbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); - FP_CONV (S, D, 1, 2, SR, DA); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - return _fex; -} - -/* Multiply long double */ -static int emu_mxbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QB, &cvt.ld); - FP_MUL_Q(QR, QA, QB); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - return _fex; -} - -/* Multiply double */ -static int emu_mdbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); - FP_MUL_D(DR, DA, DB); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - return _fex; -} - -/* Multiply double */ -static int emu_mdb (struct pt_regs *regs, int rx, double *val) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, val); - FP_MUL_D(DR, DA, DB); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - return _fex; -} - -/* Multiply double to long double */ -static int emu_mxdbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_CONV (Q, D, 4, 2, QA, DA); - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); - FP_CONV (Q, D, 4, 2, QB, DA); - FP_MUL_Q(QR, QA, QB); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - return _fex; -} - -/* Multiply double to long double */ -static int emu_mxdb (struct pt_regs *regs, int rx, long double *val) { - FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - FP_UNPACK_QP(QB, val); - FP_MUL_Q(QR, QA, QB); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - return _fex; -} - -/* Multiply float */ -static int 
emu_meebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); - FP_MUL_S(SR, SA, SB); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - return _fex; -} - -/* Multiply float */ -static int emu_meeb (struct pt_regs *regs, int rx, float *val) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, val); - FP_MUL_S(SR, SA, SB); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - return _fex; -} - -/* Multiply float to double */ -static int emu_mdebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_CONV (D, S, 2, 1, DA, SA); - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); - FP_CONV (D, S, 2, 1, DB, SA); - FP_MUL_D(DR, DA, DB); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - return _fex; -} - -/* Multiply float to double */ -static int emu_mdeb (struct pt_regs *regs, int rx, float *val) { - FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_CONV (D, S, 2, 1, DA, SA); - FP_UNPACK_SP(SA, val); - FP_CONV (D, S, 2, 1, DB, SA); - FP_MUL_D(DR, DA, DB); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - return _fex; -} - -/* Multiply and add double */ -static int emu_madbr (struct pt_regs *regs, int rx, int ry, int rz) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); - FP_UNPACK_DP(DC, ¤t->thread.fp_regs.fprs[rz].d); - FP_MUL_D(DR, DA, DB); - FP_ADD_D(DR, DR, DC); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rz].d, DR); - return _fex; -} - -/* Multiply and add double */ -static int emu_madb (struct pt_regs *regs, int rx, double *val, int rz) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, val); - FP_UNPACK_DP(DC, ¤t->thread.fp_regs.fprs[rz].d); - FP_MUL_D(DR, DA, DB); - FP_ADD_D(DR, DR, DC); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rz].d, DR); - return _fex; -} - -/* Multiply and add float */ -static int emu_maebr (struct pt_regs *regs, int rx, int ry, int rz) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); - FP_UNPACK_SP(SC, ¤t->thread.fp_regs.fprs[rz].f); - FP_MUL_S(SR, SA, SB); - FP_ADD_S(SR, SR, SC); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rz].f, SR); - return _fex; -} - -/* Multiply and add float */ -static int emu_maeb (struct pt_regs *regs, int rx, float *val, int rz) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, val); - FP_UNPACK_SP(SC, ¤t->thread.fp_regs.fprs[rz].f); - FP_MUL_S(SR, 
SA, SB); - FP_ADD_S(SR, SR, SC); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rz].f, SR); - return _fex; -} - -/* Multiply and subtract double */ -static int emu_msdbr (struct pt_regs *regs, int rx, int ry, int rz) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); - FP_UNPACK_DP(DC, ¤t->thread.fp_regs.fprs[rz].d); - FP_MUL_D(DR, DA, DB); - FP_SUB_D(DR, DR, DC); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rz].d, DR); - return _fex; -} - -/* Multiply and subtract double */ -static int emu_msdb (struct pt_regs *regs, int rx, double *val, int rz) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, val); - FP_UNPACK_DP(DC, ¤t->thread.fp_regs.fprs[rz].d); - FP_MUL_D(DR, DA, DB); - FP_SUB_D(DR, DR, DC); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rz].d, DR); - return _fex; -} - -/* Multiply and subtract float */ -static int emu_msebr (struct pt_regs *regs, int rx, int ry, int rz) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); - FP_UNPACK_SP(SC, ¤t->thread.fp_regs.fprs[rz].f); - FP_MUL_S(SR, SA, SB); - FP_SUB_S(SR, SR, SC); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rz].f, SR); - return _fex; -} - -/* Multiply and subtract float */ -static int emu_mseb (struct pt_regs *regs, int rx, float *val, int rz) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, val); - FP_UNPACK_SP(SC, ¤t->thread.fp_regs.fprs[rz].f); - FP_MUL_S(SR, SA, SB); - FP_SUB_S(SR, SR, SC); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rz].f, SR); - return _fex; -} - -/* Set floating point control word */ -static int emu_sfpc (struct pt_regs *regs, int rx, int ry) { - __u32 temp; - - temp = regs->gprs[rx]; - if ((temp & ~FPC_VALID_MASK) != 0) - return SIGILL; - current->thread.fp_regs.fpc = temp; - return 0; -} - -/* Square root long double */ -static int emu_sqxbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - FP_SQRT_Q(QR, QA); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - emu_set_CC_cs(regs, QR_c, QR_s); - return _fex; -} - -/* Square root double */ -static int emu_sqdbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[ry].d); - FP_SQRT_D(DR, DA); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - emu_set_CC_cs(regs, DR_c, DR_s); - return _fex; -} - -/* Square root double */ -static int emu_sqdb (struct pt_regs *regs, int rx, double *val) { - FP_DECL_D(DA); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, val); - FP_SQRT_D(DR, DA); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, 
DR); - emu_set_CC_cs(regs, DR_c, DR_s); - return _fex; -} - -/* Square root float */ -static int emu_sqebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[ry].f); - FP_SQRT_S(SR, SA); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - emu_set_CC_cs(regs, SR_c, SR_s); - return _fex; -} - -/* Square root float */ -static int emu_sqeb (struct pt_regs *regs, int rx, float *val) { - FP_DECL_S(SA); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, val); - FP_SQRT_S(SR, SA); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - emu_set_CC_cs(regs, SR_c, SR_s); - return _fex; -} - -/* Subtract long double */ -static int emu_sxbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); - FP_DECL_EX; - mathemu_ldcv cvt; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; - FP_UNPACK_QP(QA, &cvt.ld); - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; - FP_UNPACK_QP(QB, &cvt.ld); - FP_SUB_Q(QR, QA, QB); - FP_PACK_QP(&cvt.ld, QR); - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; - emu_set_CC_cs(regs, QR_c, QR_s); - return _fex; -} - -/* Subtract double */ -static int emu_sdbr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, ¤t->thread.fp_regs.fprs[ry].d); - FP_SUB_D(DR, DA, DB); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - emu_set_CC_cs(regs, DR_c, DR_s); - return _fex; -} - -/* Subtract double */ -static int emu_sdb (struct pt_regs *regs, int rx, double *val) { - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - FP_UNPACK_DP(DB, val); - FP_SUB_D(DR, DA, DB); - FP_PACK_DP(¤t->thread.fp_regs.fprs[rx].d, DR); - emu_set_CC_cs(regs, DR_c, DR_s); - return _fex; -} - -/* Subtract float */ -static int emu_sebr (struct pt_regs *regs, int rx, int ry) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, ¤t->thread.fp_regs.fprs[ry].f); - FP_SUB_S(SR, SA, SB); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - emu_set_CC_cs(regs, SR_c, SR_s); - return _fex; -} - -/* Subtract float */ -static int emu_seb (struct pt_regs *regs, int rx, float *val) { - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); - FP_DECL_EX; - int mode; - - mode = current->thread.fp_regs.fpc & 3; - FP_UNPACK_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - FP_UNPACK_SP(SB, val); - FP_SUB_S(SR, SA, SB); - FP_PACK_SP(¤t->thread.fp_regs.fprs[rx].f, SR); - emu_set_CC_cs(regs, SR_c, SR_s); - return _fex; -} - -/* Test data class long double */ -static int emu_tcxb (struct pt_regs *regs, int rx, long val) { - FP_DECL_Q(QA); - mathemu_ldcv cvt; - int bit; - - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; - FP_UNPACK_RAW_QP(QA, &cvt.ld); - switch (QA_e) { - default: - bit = 8; /* normalized number */ - break; - case 0: - if (_FP_FRAC_ZEROP_4(QA)) 
- bit = 10; /* zero */ - else - bit = 6; /* denormalized number */ - break; - case _FP_EXPMAX_Q: - if (_FP_FRAC_ZEROP_4(QA)) - bit = 4; /* infinity */ - else if (_FP_FRAC_HIGH_RAW_Q(QA) & _FP_QNANBIT_Q) - bit = 2; /* quiet NAN */ - else - bit = 0; /* signaling NAN */ - break; - } - if (!QA_s) - bit++; - emu_set_CC(regs, ((__u32) val >> bit) & 1); - return 0; -} - -/* Test data class double */ -static int emu_tcdb (struct pt_regs *regs, int rx, long val) { - FP_DECL_D(DA); - int bit; - - FP_UNPACK_RAW_DP(DA, ¤t->thread.fp_regs.fprs[rx].d); - switch (DA_e) { - default: - bit = 8; /* normalized number */ - break; - case 0: - if (_FP_FRAC_ZEROP_2(DA)) - bit = 10; /* zero */ - else - bit = 6; /* denormalized number */ - break; - case _FP_EXPMAX_D: - if (_FP_FRAC_ZEROP_2(DA)) - bit = 4; /* infinity */ - else if (_FP_FRAC_HIGH_RAW_D(DA) & _FP_QNANBIT_D) - bit = 2; /* quiet NAN */ - else - bit = 0; /* signaling NAN */ - break; - } - if (!DA_s) - bit++; - emu_set_CC(regs, ((__u32) val >> bit) & 1); - return 0; -} - -/* Test data class float */ -static int emu_tceb (struct pt_regs *regs, int rx, long val) { - FP_DECL_S(SA); - int bit; - - FP_UNPACK_RAW_SP(SA, ¤t->thread.fp_regs.fprs[rx].f); - switch (SA_e) { - default: - bit = 8; /* normalized number */ - break; - case 0: - if (_FP_FRAC_ZEROP_1(SA)) - bit = 10; /* zero */ - else - bit = 6; /* denormalized number */ - break; - case _FP_EXPMAX_S: - if (_FP_FRAC_ZEROP_1(SA)) - bit = 4; /* infinity */ - else if (_FP_FRAC_HIGH_RAW_S(SA) & _FP_QNANBIT_S) - bit = 2; /* quiet NAN */ - else - bit = 0; /* signaling NAN */ - break; - } - if (!SA_s) - bit++; - emu_set_CC(regs, ((__u32) val >> bit) & 1); - return 0; -} - -static inline void emu_load_regd(int reg) { - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ - return; - asm volatile( /* load reg from fp_regs.fprs[reg] */ - " bras 1,0f\n" - " ld 0,0(%1)\n" - "0: ex %0,0(1)" - : /* no output */ - : "a" (reg<<4),"a" (¤t->thread.fp_regs.fprs[reg].d) - : "1"); -} - -static inline void emu_load_rege(int reg) { - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ - return; - asm volatile( /* load reg from fp_regs.fprs[reg] */ - " bras 1,0f\n" - " le 0,0(%1)\n" - "0: ex %0,0(1)" - : /* no output */ - : "a" (reg<<4), "a" (¤t->thread.fp_regs.fprs[reg].f) - : "1"); -} - -static inline void emu_store_regd(int reg) { - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ - return; - asm volatile( /* store reg to fp_regs.fprs[reg] */ - " bras 1,0f\n" - " std 0,0(%1)\n" - "0: ex %0,0(1)" - : /* no output */ - : "a" (reg<<4), "a" (¤t->thread.fp_regs.fprs[reg].d) - : "1"); -} - - -static inline void emu_store_rege(int reg) { - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ - return; - asm volatile( /* store reg to fp_regs.fprs[reg] */ - " bras 1,0f\n" - " ste 0,0(%1)\n" - "0: ex %0,0(1)" - : /* no output */ - : "a" (reg<<4), "a" (¤t->thread.fp_regs.fprs[reg].f) - : "1"); -} - -int math_emu_b3(__u8 *opcode, struct pt_regs * regs) { - int _fex = 0; - static const __u8 format_table[256] = { - [0x00] = 0x03,[0x01] = 0x03,[0x02] = 0x03,[0x03] = 0x03, - [0x04] = 0x0f,[0x05] = 0x0d,[0x06] = 0x0e,[0x07] = 0x0d, - [0x08] = 0x03,[0x09] = 0x03,[0x0a] = 0x03,[0x0b] = 0x03, - [0x0c] = 0x0f,[0x0d] = 0x03,[0x0e] = 0x06,[0x0f] = 0x06, - [0x10] = 0x02,[0x11] = 0x02,[0x12] = 0x02,[0x13] = 0x02, - [0x14] = 0x03,[0x15] = 0x02,[0x16] = 0x01,[0x17] = 0x03, - [0x18] = 0x02,[0x19] = 0x02,[0x1a] = 0x02,[0x1b] = 0x02, - [0x1c] = 0x02,[0x1d] = 0x02,[0x1e] = 0x05,[0x1f] = 0x05, - [0x40] = 0x01,[0x41] = 0x01,[0x42] = 0x01,[0x43] = 0x01, - [0x44] = 
0x12,[0x45] = 0x0d,[0x46] = 0x11,[0x47] = 0x04, - [0x48] = 0x01,[0x49] = 0x01,[0x4a] = 0x01,[0x4b] = 0x01, - [0x4c] = 0x01,[0x4d] = 0x01,[0x53] = 0x06,[0x57] = 0x06, - [0x5b] = 0x05,[0x5f] = 0x05,[0x84] = 0x13,[0x8c] = 0x13, - [0x94] = 0x09,[0x95] = 0x08,[0x96] = 0x07,[0x98] = 0x0c, - [0x99] = 0x0b,[0x9a] = 0x0a - }; - static const void *jump_table[256]= { - [0x00] = emu_lpebr,[0x01] = emu_lnebr,[0x02] = emu_ltebr, - [0x03] = emu_lcebr,[0x04] = emu_ldebr,[0x05] = emu_lxdbr, - [0x06] = emu_lxebr,[0x07] = emu_mxdbr,[0x08] = emu_kebr, - [0x09] = emu_cebr, [0x0a] = emu_aebr, [0x0b] = emu_sebr, - [0x0c] = emu_mdebr,[0x0d] = emu_debr, [0x0e] = emu_maebr, - [0x0f] = emu_msebr,[0x10] = emu_lpdbr,[0x11] = emu_lndbr, - [0x12] = emu_ltdbr,[0x13] = emu_lcdbr,[0x14] = emu_sqebr, - [0x15] = emu_sqdbr,[0x16] = emu_sqxbr,[0x17] = emu_meebr, - [0x18] = emu_kdbr, [0x19] = emu_cdbr, [0x1a] = emu_adbr, - [0x1b] = emu_sdbr, [0x1c] = emu_mdbr, [0x1d] = emu_ddbr, - [0x1e] = emu_madbr,[0x1f] = emu_msdbr,[0x40] = emu_lpxbr, - [0x41] = emu_lnxbr,[0x42] = emu_ltxbr,[0x43] = emu_lcxbr, - [0x44] = emu_ledbr,[0x45] = emu_ldxbr,[0x46] = emu_lexbr, - [0x47] = emu_fixbr,[0x48] = emu_kxbr, [0x49] = emu_cxbr, - [0x4a] = emu_axbr, [0x4b] = emu_sxbr, [0x4c] = emu_mxbr, - [0x4d] = emu_dxbr, [0x53] = emu_diebr,[0x57] = emu_fiebr, - [0x5b] = emu_didbr,[0x5f] = emu_fidbr,[0x84] = emu_sfpc, - [0x8c] = emu_efpc, [0x94] = emu_cefbr,[0x95] = emu_cdfbr, - [0x96] = emu_cxfbr,[0x98] = emu_cfebr,[0x99] = emu_cfdbr, - [0x9a] = emu_cfxbr - }; - - switch (format_table[opcode[1]]) { - case 1: /* RRE format, long double operation */ - if (opcode[3] & 0x22) - return SIGILL; - emu_store_regd((opcode[3] >> 4) & 15); - emu_store_regd(((opcode[3] >> 4) & 15) + 2); - emu_store_regd(opcode[3] & 15); - emu_store_regd((opcode[3] & 15) + 2); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *,int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_regd((opcode[3] >> 4) & 15); - emu_load_regd(((opcode[3] >> 4) & 15) + 2); - emu_load_regd(opcode[3] & 15); - emu_load_regd((opcode[3] & 15) + 2); - break; - case 2: /* RRE format, double operation */ - emu_store_regd((opcode[3] >> 4) & 15); - emu_store_regd(opcode[3] & 15); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_regd((opcode[3] >> 4) & 15); - emu_load_regd(opcode[3] & 15); - break; - case 3: /* RRE format, float operation */ - emu_store_rege((opcode[3] >> 4) & 15); - emu_store_rege(opcode[3] & 15); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_rege((opcode[3] >> 4) & 15); - emu_load_rege(opcode[3] & 15); - break; - case 4: /* RRF format, long double operation */ - if (opcode[3] & 0x22) - return SIGILL; - emu_store_regd((opcode[3] >> 4) & 15); - emu_store_regd(((opcode[3] >> 4) & 15) + 2); - emu_store_regd(opcode[3] & 15); - emu_store_regd((opcode[3] & 15) + 2); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); - emu_load_regd((opcode[3] >> 4) & 15); - emu_load_regd(((opcode[3] >> 4) & 15) + 2); - emu_load_regd(opcode[3] & 15); - emu_load_regd((opcode[3] & 15) + 2); - break; - case 5: /* RRF format, double operation */ - emu_store_regd((opcode[2] >> 4) & 15); - emu_store_regd((opcode[3] >> 4) & 15); - 
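
All of the math_emu_b3() dispatch cases above and below pull their operands out of the same two opcode bytes before calling through jump_table. A schematic restatement of that decoding, assuming nothing beyond the expressions already visible in the listing (the helper name is descriptive, not from the source):

/*
 * Operand decoding used throughout math_emu_b3(): opcode[1] selects the
 * handler via format_table/jump_table, the last opcode byte carries both
 * register numbers, and the rounding-mode mask (RRF forms only) sits in
 * the high nibble of byte 2.
 */
static void decode_b3_operands(const unsigned char *opcode,
			       int *rx, int *ry, int *mask)
{
	*rx   = (opcode[3] >> 4) & 15;
	*ry   = opcode[3] & 15;
	*mask = (opcode[2] >> 4) & 15;	/* only meaningful for RRF cases */
}
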
emu_store_regd(opcode[3] & 15); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); - emu_load_regd((opcode[2] >> 4) & 15); - emu_load_regd((opcode[3] >> 4) & 15); - emu_load_regd(opcode[3] & 15); - break; - case 6: /* RRF format, float operation */ - emu_store_rege((opcode[2] >> 4) & 15); - emu_store_rege((opcode[3] >> 4) & 15); - emu_store_rege(opcode[3] & 15); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); - emu_load_rege((opcode[2] >> 4) & 15); - emu_load_rege((opcode[3] >> 4) & 15); - emu_load_rege(opcode[3] & 15); - break; - case 7: /* RRE format, cxfbr instruction */ - /* call the emulation function */ - if (opcode[3] & 0x20) - return SIGILL; - _fex = ((int (*)(struct pt_regs *, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_regd((opcode[3] >> 4) & 15); - emu_load_regd(((opcode[3] >> 4) & 15) + 2); - break; - case 8: /* RRE format, cdfbr instruction */ - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_regd((opcode[3] >> 4) & 15); - break; - case 9: /* RRE format, cefbr instruction */ - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_rege((opcode[3] >> 4) & 15); - break; - case 10: /* RRF format, cfxbr instruction */ - if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) - /* mask of { 2,3,8-15 } is invalid */ - return SIGILL; - if (opcode[3] & 2) - return SIGILL; - emu_store_regd(opcode[3] & 15); - emu_store_regd((opcode[3] & 15) + 2); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); - break; - case 11: /* RRF format, cfdbr instruction */ - if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) - /* mask of { 2,3,8-15 } is invalid */ - return SIGILL; - emu_store_regd(opcode[3] & 15); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); - break; - case 12: /* RRF format, cfebr instruction */ - if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) - /* mask of { 2,3,8-15 } is invalid */ - return SIGILL; - emu_store_rege(opcode[3] & 15); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); - break; - case 13: /* RRE format, ldxbr & mdxbr instruction */ - /* double store but long double load */ - if (opcode[3] & 0x20) - return SIGILL; - emu_store_regd((opcode[3] >> 4) & 15); - emu_store_regd(opcode[3] & 15); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_regd((opcode[3] >> 4) & 15); - emu_load_regd(((opcode[3] >> 4) & 15) + 2); - break; - case 14: /* RRE format, ldxbr & mdxbr instruction */ - /* float store but long double load */ - if (opcode[3] & 0x20) - return SIGILL; - emu_store_rege((opcode[3] >> 4) & 15); - emu_store_rege(opcode[3] & 15); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs 
*, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_regd((opcode[3] >> 4) & 15); - emu_load_regd(((opcode[3] >> 4) & 15) + 2); - break; - case 15: /* RRE format, ldebr & mdebr instruction */ - /* float store but double load */ - emu_store_rege((opcode[3] >> 4) & 15); - emu_store_rege(opcode[3] & 15); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_regd((opcode[3] >> 4) & 15); - break; - case 16: /* RRE format, ldxbr instruction */ - /* long double store but double load */ - if (opcode[3] & 2) - return SIGILL; - emu_store_regd(opcode[3] & 15); - emu_store_regd((opcode[3] & 15) + 2); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_regd((opcode[3] >> 4) & 15); - break; - case 17: /* RRE format, ldxbr instruction */ - /* long double store but float load */ - if (opcode[3] & 2) - return SIGILL; - emu_store_regd(opcode[3] & 15); - emu_store_regd((opcode[3] & 15) + 2); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_rege((opcode[3] >> 4) & 15); - break; - case 18: /* RRE format, ledbr instruction */ - /* double store but float load */ - emu_store_regd(opcode[3] & 15); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - emu_load_rege((opcode[3] >> 4) & 15); - break; - case 19: /* RRE format, efpc & sfpc instruction */ - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, int)) - jump_table[opcode[1]]) - (regs, opcode[3] >> 4, opcode[3] & 15); - break; - default: /* invalid operation */ - return SIGILL; - } - if (_fex != 0) { - current->thread.fp_regs.fpc |= _fex; - if (current->thread.fp_regs.fpc & (_fex << 8)) - return SIGFPE; - } - return 0; -} - -static void* calc_addr(struct pt_regs *regs, int rx, int rb, int disp) -{ - addr_t addr; - - rx &= 15; - rb &= 15; - addr = disp & 0xfff; - addr += (rx != 0) ? regs->gprs[rx] : 0; /* + index */ - addr += (rb != 0) ? 
regs->gprs[rb] : 0; /* + base */ - return (void*) addr; -} - -int math_emu_ed(__u8 *opcode, struct pt_regs * regs) { - int _fex = 0; - - static const __u8 format_table[256] = { - [0x04] = 0x06,[0x05] = 0x05,[0x06] = 0x07,[0x07] = 0x05, - [0x08] = 0x02,[0x09] = 0x02,[0x0a] = 0x02,[0x0b] = 0x02, - [0x0c] = 0x06,[0x0d] = 0x02,[0x0e] = 0x04,[0x0f] = 0x04, - [0x10] = 0x08,[0x11] = 0x09,[0x12] = 0x0a,[0x14] = 0x02, - [0x15] = 0x01,[0x17] = 0x02,[0x18] = 0x01,[0x19] = 0x01, - [0x1a] = 0x01,[0x1b] = 0x01,[0x1c] = 0x01,[0x1d] = 0x01, - [0x1e] = 0x03,[0x1f] = 0x03, - }; - static const void *jump_table[]= { - [0x04] = emu_ldeb,[0x05] = emu_lxdb,[0x06] = emu_lxeb, - [0x07] = emu_mxdb,[0x08] = emu_keb, [0x09] = emu_ceb, - [0x0a] = emu_aeb, [0x0b] = emu_seb, [0x0c] = emu_mdeb, - [0x0d] = emu_deb, [0x0e] = emu_maeb,[0x0f] = emu_mseb, - [0x10] = emu_tceb,[0x11] = emu_tcdb,[0x12] = emu_tcxb, - [0x14] = emu_sqeb,[0x15] = emu_sqdb,[0x17] = emu_meeb, - [0x18] = emu_kdb, [0x19] = emu_cdb, [0x1a] = emu_adb, - [0x1b] = emu_sdb, [0x1c] = emu_mdb, [0x1d] = emu_ddb, - [0x1e] = emu_madb,[0x1f] = emu_msdb - }; - - switch (format_table[opcode[5]]) { - case 1: /* RXE format, double constant */ { - __u64 *dxb, temp; - __u32 opc; - - emu_store_regd((opcode[1] >> 4) & 15); - opc = *((__u32 *) opcode); - dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); - mathemu_copy_from_user(&temp, dxb, 8); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, double *)) - jump_table[opcode[5]]) - (regs, opcode[1] >> 4, (double *) &temp); - emu_load_regd((opcode[1] >> 4) & 15); - break; - } - case 2: /* RXE format, float constant */ { - __u32 *dxb, temp; - __u32 opc; - - emu_store_rege((opcode[1] >> 4) & 15); - opc = *((__u32 *) opcode); - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); - mathemu_get_user(temp, dxb); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, float *)) - jump_table[opcode[5]]) - (regs, opcode[1] >> 4, (float *) &temp); - emu_load_rege((opcode[1] >> 4) & 15); - break; - } - case 3: /* RXF format, double constant */ { - __u64 *dxb, temp; - __u32 opc; - - emu_store_regd((opcode[1] >> 4) & 15); - emu_store_regd((opcode[4] >> 4) & 15); - opc = *((__u32 *) opcode); - dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); - mathemu_copy_from_user(&temp, dxb, 8); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, double *, int)) - jump_table[opcode[5]]) - (regs, opcode[1] >> 4, (double *) &temp, opcode[4] >> 4); - emu_load_regd((opcode[1] >> 4) & 15); - break; - } - case 4: /* RXF format, float constant */ { - __u32 *dxb, temp; - __u32 opc; - - emu_store_rege((opcode[1] >> 4) & 15); - emu_store_rege((opcode[4] >> 4) & 15); - opc = *((__u32 *) opcode); - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); - mathemu_get_user(temp, dxb); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, float *, int)) - jump_table[opcode[5]]) - (regs, opcode[1] >> 4, (float *) &temp, opcode[4] >> 4); - emu_load_rege((opcode[4] >> 4) & 15); - break; - } - case 5: /* RXE format, double constant */ - /* store double and load long double */ - { - __u64 *dxb, temp; - __u32 opc; - if ((opcode[1] >> 4) & 0x20) - return SIGILL; - emu_store_regd((opcode[1] >> 4) & 15); - opc = *((__u32 *) opcode); - dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); - mathemu_copy_from_user(&temp, dxb, 8); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, double *)) - jump_table[opcode[5]]) 
- (regs, opcode[1] >> 4, (double *) &temp); - emu_load_regd((opcode[1] >> 4) & 15); - emu_load_regd(((opcode[1] >> 4) & 15) + 2); - break; - } - case 6: /* RXE format, float constant */ - /* store float and load double */ - { - __u32 *dxb, temp; - __u32 opc; - emu_store_rege((opcode[1] >> 4) & 15); - opc = *((__u32 *) opcode); - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); - mathemu_get_user(temp, dxb); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, float *)) - jump_table[opcode[5]]) - (regs, opcode[1] >> 4, (float *) &temp); - emu_load_regd((opcode[1] >> 4) & 15); - break; - } - case 7: /* RXE format, float constant */ - /* store float and load long double */ - { - __u32 *dxb, temp; - __u32 opc; - if ((opcode[1] >> 4) & 0x20) - return SIGILL; - emu_store_rege((opcode[1] >> 4) & 15); - opc = *((__u32 *) opcode); - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); - mathemu_get_user(temp, dxb); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, float *)) - jump_table[opcode[5]]) - (regs, opcode[1] >> 4, (float *) &temp); - emu_load_regd((opcode[1] >> 4) & 15); - emu_load_regd(((opcode[1] >> 4) & 15) + 2); - break; - } - case 8: /* RXE format, RX address used as int value */ { - __u64 dxb; - __u32 opc; - - emu_store_rege((opcode[1] >> 4) & 15); - opc = *((__u32 *) opcode); - dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, long)) - jump_table[opcode[5]]) - (regs, opcode[1] >> 4, dxb); - break; - } - case 9: /* RXE format, RX address used as int value */ { - __u64 dxb; - __u32 opc; - - emu_store_regd((opcode[1] >> 4) & 15); - opc = *((__u32 *) opcode); - dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, long)) - jump_table[opcode[5]]) - (regs, opcode[1] >> 4, dxb); - break; - } - case 10: /* RXE format, RX address used as int value */ { - __u64 dxb; - __u32 opc; - - if ((opcode[1] >> 4) & 2) - return SIGILL; - emu_store_regd((opcode[1] >> 4) & 15); - emu_store_regd(((opcode[1] >> 4) & 15) + 2); - opc = *((__u32 *) opcode); - dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); - /* call the emulation function */ - _fex = ((int (*)(struct pt_regs *, int, long)) - jump_table[opcode[5]]) - (regs, opcode[1] >> 4, dxb); - break; - } - default: /* invalid operation */ - return SIGILL; - } - if (_fex != 0) { - current->thread.fp_regs.fpc |= _fex; - if (current->thread.fp_regs.fpc & (_fex << 8)) - return SIGFPE; - } - return 0; -} - -/* - * Emulate LDR Rx,Ry with Rx or Ry not in {0, 2, 4, 6} - */ -int math_emu_ldr(__u8 *opcode) { - s390_fp_regs *fp_regs = &current->thread.fp_regs; - __u16 opc = *((__u16 *) opcode); - - if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ - /* we got an exception therefore ry can't be in {0,2,4,6} */ - asm volatile( /* load rx from fp_regs.fprs[ry] */ - " bras 1,0f\n" - " ld 0,0(%1)\n" - "0: ex %0,0(1)" - : /* no output */ - : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].d) - : "1"); - } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ - asm volatile ( /* store ry to fp_regs.fprs[rx] */ - " bras 1,0f\n" - " std 0,0(%1)\n" - "0: ex %0,0(1)" - : /* no output */ - : "a" ((opc & 0xf) << 4), - "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d) - : "1"); - } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ - fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; - return 0; -} - -/* - * Emulate LER Rx,Ry with 
Rx or Ry not in {0, 2, 4, 6} - */ -int math_emu_ler(__u8 *opcode) { - s390_fp_regs *fp_regs = &current->thread.fp_regs; - __u16 opc = *((__u16 *) opcode); - - if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ - /* we got an exception therefore ry can't be in {0,2,4,6} */ - asm volatile( /* load rx from fp_regs.fprs[ry] */ - " bras 1,0f\n" - " le 0,0(%1)\n" - "0: ex %0,0(1)" - : /* no output */ - : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].f) - : "1"); - } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ - asm volatile( /* store ry to fp_regs.fprs[rx] */ - " bras 1,0f\n" - " ste 0,0(%1)\n" - "0: ex %0,0(1)" - : /* no output */ - : "a" ((opc & 0xf) << 4), - "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f) - : "1"); - } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ - fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; - return 0; -} - -/* - * Emulate LD R,D(X,B) with R not in {0, 2, 4, 6} - */ -int math_emu_ld(__u8 *opcode, struct pt_regs * regs) { - s390_fp_regs *fp_regs = &current->thread.fp_regs; - __u32 opc = *((__u32 *) opcode); - __u64 *dxb; - - dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); - mathemu_copy_from_user(&fp_regs->fprs[(opc >> 20) & 0xf].d, dxb, 8); - return 0; -} - -/* - * Emulate LE R,D(X,B) with R not in {0, 2, 4, 6} - */ -int math_emu_le(__u8 *opcode, struct pt_regs * regs) { - s390_fp_regs *fp_regs = &current->thread.fp_regs; - __u32 opc = *((__u32 *) opcode); - __u32 *mem, *dxb; - - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); - mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f); - mathemu_get_user(mem[0], dxb); - return 0; -} - -/* - * Emulate STD R,D(X,B) with R not in {0, 2, 4, 6} - */ -int math_emu_std(__u8 *opcode, struct pt_regs * regs) { - s390_fp_regs *fp_regs = &current->thread.fp_regs; - __u32 opc = *((__u32 *) opcode); - __u64 *dxb; - - dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); - mathemu_copy_to_user(dxb, &fp_regs->fprs[(opc >> 20) & 0xf].d, 8); - return 0; -} - -/* - * Emulate STE R,D(X,B) with R not in {0, 2, 4, 6} - */ -int math_emu_ste(__u8 *opcode, struct pt_regs * regs) { - s390_fp_regs *fp_regs = &current->thread.fp_regs; - __u32 opc = *((__u32 *) opcode); - __u32 *mem, *dxb; - - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); - mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f); - mathemu_put_user(mem[0], dxb); - return 0; -} - -/* - * Emulate LFPC D(B) - */ -int math_emu_lfpc(__u8 *opcode, struct pt_regs *regs) { - __u32 opc = *((__u32 *) opcode); - __u32 *dxb, temp; - - dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc); - mathemu_get_user(temp, dxb); - if ((temp & ~FPC_VALID_MASK) != 0) - return SIGILL; - current->thread.fp_regs.fpc = temp; - return 0; -} - -/* - * Emulate STFPC D(B) - */ -int math_emu_stfpc(__u8 *opcode, struct pt_regs *regs) { - __u32 opc = *((__u32 *) opcode); - __u32 *dxb; - - dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc); - mathemu_put_user(current->thread.fp_regs.fpc, dxb); - return 0; -} - -/* - * Emulate SRNM D(B) - */ -int math_emu_srnm(__u8 *opcode, struct pt_regs *regs) { - __u32 opc = *((__u32 *) opcode); - __u32 temp; - - temp = calc_addr(regs, 0, opc>>12, opc); - current->thread.fp_regs.fpc &= ~3; - current->thread.fp_regs.fpc |= (temp & 3); - return 0; -} - -/* broken compiler ... 
*/ -long long -__negdi2 (long long u) -{ - - union lll { - long long ll; - long s[2]; - }; - - union lll w,uu; - - uu.ll = u; - - w.s[1] = -uu.s[1]; - w.s[0] = -uu.s[0] - ((int) w.s[1] != 0); - - return w.ll; -} diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index d46cade..8556d6b 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -18,9 +18,7 @@ enum address_markers_idx { KERNEL_END_NR, VMEMMAP_NR, VMALLOC_NR, -#ifdef CONFIG_64BIT MODULES_NR, -#endif }; static struct addr_marker address_markers[] = { @@ -29,9 +27,7 @@ static struct addr_marker address_markers[] = { [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"}, [VMEMMAP_NR] = {0, "vmemmap Area"}, [VMALLOC_NR] = {0, "vmalloc Area"}, -#ifdef CONFIG_64BIT [MODULES_NR] = {0, "Modules Area"}, -#endif { -1, NULL } }; @@ -127,12 +123,6 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, } } -#ifdef CONFIG_64BIT -#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT -#else -#define _PMD_PROT_MASK 0 -#endif - static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t *pud, unsigned long addr) { @@ -145,7 +135,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pmd = pmd_offset(pud, addr); if (!pmd_none(*pmd)) { if (pmd_large(*pmd)) { - prot = pmd_val(*pmd) & _PMD_PROT_MASK; + prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT; note_page(m, st, prot, 3); } else walk_pte_level(m, st, pmd, addr); @@ -155,12 +145,6 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, } } -#ifdef CONFIG_64BIT -#define _PUD_PROT_MASK _REGION3_ENTRY_RO -#else -#define _PUD_PROT_MASK 0 -#endif - static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t *pgd, unsigned long addr) { @@ -173,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pud = pud_offset(pgd, addr); if (!pud_none(*pud)) if (pud_large(*pud)) { - prot = pud_val(*pud) & _PUD_PROT_MASK; + prot = pud_val(*pud) & _REGION3_ENTRY_RO; note_page(m, st, prot, 2); } else walk_pmd_level(m, st, pud, addr); @@ -230,13 +214,9 @@ static int pt_dump_init(void) * kernel ASCE. We need this to keep the page table walker functions * from accessing non-existent entries. 
*/ -#ifdef CONFIG_32BIT - max_addr = 1UL << 31; -#else max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; max_addr = 1UL << (max_addr * 11 + 31); address_markers[MODULES_NR].start_address = MODULES_VADDR; -#endif address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap; address_markers[VMALLOC_NR].start_address = VMALLOC_START; debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 519bba7..23c4969 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -51,7 +51,6 @@ struct qout64 { struct qrange range[6]; }; -#ifdef CONFIG_64BIT struct qrange_old { unsigned int start; /* last byte type */ unsigned int end; /* last byte reserved */ @@ -65,7 +64,6 @@ struct qout64_old { int segrcnt; struct qrange_old range[6]; }; -#endif struct qin64 { char qopcode; @@ -103,7 +101,6 @@ static int scode_set; static int dcss_set_subcodes(void) { -#ifdef CONFIG_64BIT char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA); unsigned long rx, ry; int rc; @@ -135,7 +132,6 @@ dcss_set_subcodes(void) segext_scode = DCSS_SEGEXTX; return 0; } -#endif /* Diag x'64' new subcodes are not supported, set to old subcodes */ loadshr_scode = DCSS_LOADNOLY; loadnsr_scode = DCSS_LOADNSR; @@ -208,7 +204,6 @@ dcss_diag(int *func, void *parameter, rx = (unsigned long) parameter; ry = (unsigned long) *func; -#ifdef CONFIG_64BIT /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ if (*func > DCSS_SEGEXT) asm volatile( @@ -225,13 +220,6 @@ dcss_diag(int *func, void *parameter, " ipm %2\n" " srl %2,28\n" : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); -#else - asm volatile( - " diag %0,%1,0x64\n" - " ipm %2\n" - " srl %2,28\n" - : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); -#endif *ret1 = rx; *ret2 = ry; return rc; @@ -281,7 +269,6 @@ query_segment_type (struct dcss_segment *seg) goto out_free; } -#ifdef CONFIG_64BIT /* Only old format of output area of Diagnose x'64' is supported, copy data for the new format. 
*/ if (segext_scode == DCSS_SEGEXT) { @@ -307,7 +294,6 @@ query_segment_type (struct dcss_segment *seg) } kfree(qout_old); } -#endif if (qout->segcnt > 6) { rc = -EOPNOTSUPP; goto out_free; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 3ff8653..76515bc 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -36,15 +36,9 @@ #include <asm/facility.h> #include "../kernel/entry.h" -#ifndef CONFIG_64BIT -#define __FAIL_ADDR_MASK 0x7ffff000 -#define __SUBCODE_MASK 0x0200 -#define __PF_RES_FIELD 0ULL -#else /* CONFIG_64BIT */ #define __FAIL_ADDR_MASK -4096L #define __SUBCODE_MASK 0x0600 #define __PF_RES_FIELD 0x8000000000000000ULL -#endif /* CONFIG_64BIT */ #define VM_FAULT_BADCONTEXT 0x010000 #define VM_FAULT_BADMAP 0x020000 @@ -54,7 +48,6 @@ static unsigned long store_indication __read_mostly; -#ifdef CONFIG_64BIT static int __init fault_init(void) { if (test_facility(75)) @@ -62,7 +55,6 @@ static int __init fault_init(void) return 0; } early_initcall(fault_init); -#endif static inline int notify_page_fault(struct pt_regs *regs) { @@ -133,7 +125,6 @@ static int bad_address(void *p) return probe_kernel_address((unsigned long *)p, dummy); } -#ifdef CONFIG_64BIT static void dump_pagetable(unsigned long asce, unsigned long address) { unsigned long *table = __va(asce & PAGE_MASK); @@ -187,33 +178,6 @@ bad: pr_cont("BAD\n"); } -#else /* CONFIG_64BIT */ - -static void dump_pagetable(unsigned long asce, unsigned long address) -{ - unsigned long *table = __va(asce & PAGE_MASK); - - pr_alert("AS:%08lx ", asce); - table = table + ((address >> 20) & 0x7ff); - if (bad_address(table)) - goto bad; - pr_cont("S:%08lx ", *table); - if (*table & _SEGMENT_ENTRY_INVALID) - goto out; - table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); - table = table + ((address >> 12) & 0xff); - if (bad_address(table)) - goto bad; - pr_cont("P:%08lx ", *table); -out: - pr_cont("\n"); - return; -bad: - pr_cont("BAD\n"); -} - -#endif /* CONFIG_64BIT */ - static void dump_fault_info(struct pt_regs *regs) { unsigned long asce; diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 5c586c7..1eb41bb 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -106,11 +106,9 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, pmd_t *pmdp, pmd; pmdp = (pmd_t *) pudp; -#ifdef CONFIG_64BIT if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) pmdp = (pmd_t *) pud_deref(pud); pmdp += pmd_index(addr); -#endif do { pmd = *pmdp; barrier(); @@ -145,11 +143,9 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, pud_t *pudp, pud; pudp = (pud_t *) pgdp; -#ifdef CONFIG_64BIT if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) pudp = (pud_t *) pgd_deref(pgd); pudp += pud_index(addr); -#endif do { pud = *pudp; barrier(); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index d35b151..80875c4 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -105,7 +105,6 @@ void __init paging_init(void) unsigned long pgd_type, asce_bits; init_mm.pgd = swapper_pg_dir; -#ifdef CONFIG_64BIT if (VMALLOC_END > (1UL << 42)) { asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH; pgd_type = _REGION2_ENTRY_EMPTY; @@ -113,10 +112,6 @@ void __init paging_init(void) asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; pgd_type = _REGION3_ENTRY_EMPTY; } -#else - asce_bits = _ASCE_TABLE_LENGTH; - pgd_type = _SEGMENT_ENTRY_EMPTY; -#endif S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; clear_table((unsigned long *) 
init_mm.pgd, pgd_type, sizeof(unsigned long)*2048); diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 2eb34bd..8a993a5 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -1,7 +1,7 @@ /* * Access kernel memory without faulting -- s390 specific implementation. * - * Copyright IBM Corp. 2009 + * Copyright IBM Corp. 2009, 2015 * * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, * @@ -16,51 +16,55 @@ #include <asm/ctl_reg.h> #include <asm/io.h> -/* - * This function writes to kernel memory bypassing DAT and possible - * write protection. It copies one to four bytes from src to dst - * using the stura instruction. - * Returns the number of bytes copied or -EFAULT. - */ -static long probe_kernel_write_odd(void *dst, const void *src, size_t size) +static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size) { - unsigned long count, aligned; - int offset, mask; - int rc = -EFAULT; + unsigned long aligned, offset, count; + char tmp[8]; - aligned = (unsigned long) dst & ~3UL; - offset = (unsigned long) dst & 3; - count = min_t(unsigned long, 4 - offset, size); - mask = (0xf << (4 - count)) & 0xf; - mask >>= offset; + aligned = (unsigned long) dst & ~7UL; + offset = (unsigned long) dst & 7UL; + size = min(8UL - offset, size); + count = size - 1; asm volatile( " bras 1,0f\n" - " icm 0,0,0(%3)\n" - "0: l 0,0(%1)\n" - " lra %1,0(%1)\n" - "1: ex %2,0(1)\n" - "2: stura 0,%1\n" - " la %0,0\n" - "3:\n" - EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b) - : "+d" (rc), "+a" (aligned) - : "a" (mask), "a" (src) : "cc", "memory", "0", "1"); - return rc ? rc : count; + " mvc 0(1,%4),0(%5)\n" + "0: mvc 0(8,%3),0(%0)\n" + " ex %1,0(1)\n" + " lg %1,0(%3)\n" + " lra %0,0(%0)\n" + " sturg %1,%0\n" + : "+&a" (aligned), "+&a" (count), "=m" (tmp) + : "a" (&tmp), "a" (&tmp[offset]), "a" (src) + : "cc", "memory", "1"); + return size; } -long probe_kernel_write(void *dst, const void *src, size_t size) +/* + * s390_kernel_write - write to kernel memory bypassing DAT + * @dst: destination address + * @src: source address + * @size: number of bytes to copy + * + * This function writes to kernel memory bypassing DAT and possible page table + * write protection. It writes to the destination using the sturg instruction. + * Therefore we have a read-modify-write sequence: the function reads eight + * bytes from destination at an eight byte boundary, modifies the bytes + * requested and writes the result back in a loop. + * + * Note: this means that this function may not be called concurrently on + * several cpus with overlapping words, since this may potentially + * cause data corruption. + */ +void notrace s390_kernel_write(void *dst, const void *src, size_t size) { - long copied = 0; + long copied; while (size) { - copied = probe_kernel_write_odd(dst, src, size); - if (copied < 0) - break; + copied = s390_kernel_write_odd(dst, src, size); dst += copied; src += copied; size -= copied; } - return copied < 0 ? 
-EFAULT : 0; } static int __memcpy_real(void *dest, void *src, size_t count) diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c index 5535cfe..0f36043 100644 --- a/arch/s390/mm/mem_detect.c +++ b/arch/s390/mm/mem_detect.c @@ -36,10 +36,6 @@ void __init detect_memory_memblock(void) memsize = rzm * rnmax; if (!rzm) rzm = 1ULL << 17; - if (IS_ENABLED(CONFIG_32BIT)) { - rzm = min(ADDR2G, rzm); - memsize = min(ADDR2G, memsize); - } max_physmem_end = memsize; addr = 0; /* keep memblock lists close to the kernel */ diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index bb3367c..6e552af 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -32,7 +32,7 @@ #include <asm/pgalloc.h> unsigned long mmap_rnd_mask; -unsigned long mmap_align_mask; +static unsigned long mmap_align_mask; static unsigned long stack_maxrandom_size(void) { @@ -177,34 +177,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, return addr; } -#ifndef CONFIG_64BIT - -/* - * This function, called very early during the creation of a new - * process VM image, sets up which VM layout function to use: - */ -void arch_pick_mmap_layout(struct mm_struct *mm) -{ - unsigned long random_factor = 0UL; - - if (current->flags & PF_RANDOMIZE) - random_factor = arch_mmap_rnd(); - - /* - * Fall back to the standard layout if the personality - * bit is set, or if the expected stack growth is unlimited: - */ - if (mmap_is_legacy()) { - mm->mmap_base = mmap_base_legacy(random_factor); - mm->get_unmapped_area = arch_get_unmapped_area; - } else { - mm->mmap_base = mmap_base(random_factor); - mm->get_unmapped_area = arch_get_unmapped_area_topdown; - } -} - -#else - int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) { if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) @@ -314,5 +286,3 @@ static int __init setup_mmap_rnd(void) return 0; } early_initcall(setup_mmap_rnd); - -#endif diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 426c9d4..749c984 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -109,7 +109,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr) { int i; - if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) { + if (test_facility(13)) { __ptep_ipte_range(address, nr - 1, pte); return; } diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index b2c1542..33f5894 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -27,14 +27,8 @@ #include <asm/tlbflush.h> #include <asm/mmu_context.h> -#ifndef CONFIG_64BIT -#define ALLOC_ORDER 1 -#define FRAG_MASK 0x0f -#else #define ALLOC_ORDER 2 #define FRAG_MASK 0x03 -#endif - unsigned long *crst_table_alloc(struct mm_struct *mm) { @@ -50,7 +44,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table) free_pages((unsigned long) table, ALLOC_ORDER); } -#ifdef CONFIG_64BIT static void __crst_table_upgrade(void *arg) { struct mm_struct *mm = arg; @@ -140,7 +133,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) if (current->active_mm == mm) set_user_asce(mm); } -#endif #ifdef CONFIG_PGSTE diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index b1593c2..ef7d6c8 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -38,12 +38,10 @@ static inline pud_t *vmem_pud_alloc(void) { pud_t *pud = NULL; -#ifdef CONFIG_64BIT pud = vmem_alloc_pages(2); if (!pud) return NULL; clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4); -#endif return pud; } @@ -51,12 +49,10 @@ static inline pmd_t 
*vmem_pmd_alloc(void) { pmd_t *pmd = NULL; -#ifdef CONFIG_64BIT pmd = vmem_alloc_pages(2); if (!pmd) return NULL; clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4); -#endif return pmd; } @@ -98,7 +94,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) pgd_populate(&init_mm, pg_dir, pu_dir); } pu_dir = pud_offset(pg_dir, address); -#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) +#ifndef CONFIG_DEBUG_PAGEALLOC if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { pud_val(*pu_dir) = __pa(address) | @@ -115,7 +111,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) pud_populate(&init_mm, pu_dir, pm_dir); } pm_dir = pmd_offset(pu_dir, address); -#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) +#ifndef CONFIG_DEBUG_PAGEALLOC if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address && !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { pmd_val(*pm_dir) = __pa(address) | @@ -222,7 +218,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) pm_dir = pmd_offset(pu_dir, address); if (pmd_none(*pm_dir)) { -#ifdef CONFIG_64BIT /* Use 1MB frames for vmemmap if available. We always * use large frames even if they are only partially * used. @@ -240,7 +235,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) address = (address + PMD_SIZE) & PMD_MASK; continue; } -#endif pt_dir = vmem_pte_alloc(address); if (!pt_dir) goto out; diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile index 524c4b61..1bd2301 100644 --- a/arch/s390/oprofile/Makefile +++ b/arch/s390/oprofile/Makefile @@ -7,4 +7,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ timer_int.o ) oprofile-y := $(DRIVER_OBJS) init.o backtrace.o -oprofile-$(CONFIG_64BIT) += hwsampler.o +oprofile-y += hwsampler.o diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 9ffe645..bc927a0 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -21,8 +21,6 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); -#ifdef CONFIG_64BIT - #include "hwsampler.h" #include "op_counter.h" @@ -495,14 +493,10 @@ static void oprofile_hwsampler_exit(void) hwsampler_shutdown(); } -#endif /* CONFIG_64BIT */ - int __init oprofile_arch_init(struct oprofile_operations *ops) { ops->backtrace = s390_backtrace; -#ifdef CONFIG_64BIT - /* * -ENODEV is not reported to the caller. 
The module itself * will use the timer mode sampling as fallback and this is @@ -511,14 +505,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) hwsampler_available = oprofile_hwsampler_init(ops) == 0; return 0; -#else - return -ENODEV; -#endif } void oprofile_arch_exit(void) { -#ifdef CONFIG_64BIT oprofile_hwsampler_exit(); -#endif } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index b2c76f6..9833620 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -913,8 +913,7 @@ static int __init pci_base_init(void) if (!s390_pci_probe) return 0; - if (!test_facility(2) || !test_facility(69) - || !test_facility(71) || !test_facility(72)) + if (!test_facility(69) || !test_facility(71) || !test_facility(72)) return 0; rc = zpci_debug_init(); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index be34ef4..26a51dc 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1237,7 +1237,6 @@ EXPORT_SYMBOL(dasd_smalloc_request); */ void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) { -#ifdef CONFIG_64BIT struct ccw1 *ccw; /* Clear any idals used for the request. */ @@ -1245,7 +1244,6 @@ void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) do { clear_normalized_cda(ccw); } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); -#endif kfree(cqr->cpaddr); kfree(cqr->data); kfree(cqr); @@ -2967,8 +2965,6 @@ enum blk_eh_timer_return dasd_times_out(struct request *req) */ static int dasd_alloc_queue(struct dasd_block *block) { - int rc; - block->request_queue = blk_init_queue(do_dasd_request, &block->request_queue_lock); if (block->request_queue == NULL) @@ -2976,14 +2972,7 @@ static int dasd_alloc_queue(struct dasd_block *block) block->request_queue->queuedata = block; - elevator_exit(block->request_queue->elevator); - block->request_queue->elevator = NULL; - mutex_lock(&block->request_queue->sysfs_lock); - rc = elevator_init(block->request_queue, "deadline"); - if (rc) - blk_cleanup_queue(block->request_queue); - mutex_unlock(&block->request_queue->sysfs_lock); - return rc; + return 0; } /* diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h index a803cc7..e84a546 100644 --- a/drivers/s390/block/dasd_diag.h +++ b/drivers/s390/block/dasd_diag.h @@ -38,8 +38,6 @@ struct dasd_diag_characteristics { u8 rdev_features; } __attribute__ ((packed, aligned(4))); - -#ifdef CONFIG_64BIT #define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT typedef u64 blocknum_t; @@ -80,43 +78,3 @@ struct dasd_diag_rw_io { struct dasd_diag_bio *bio_list; u8 spare4[8]; } __attribute__ ((packed, aligned(8))); -#else /* CONFIG_64BIT */ -#define DASD_DIAG_FLAGA_DEFAULT 0x0 - -typedef u32 blocknum_t; -typedef s32 sblocknum_t; - -struct dasd_diag_bio { - u8 type; - u8 status; - u16 spare1; - blocknum_t block_number; - u32 alet; - void *buffer; -} __attribute__ ((packed, aligned(8))); - -struct dasd_diag_init_io { - u16 dev_nr; - u8 flaga; - u8 spare1[21]; - u32 block_size; - blocknum_t offset; - sblocknum_t start_block; - blocknum_t end_block; - u8 spare2[24]; -} __attribute__ ((packed, aligned(8))); - -struct dasd_diag_rw_io { - u16 dev_nr; - u8 flaga; - u8 spare1[21]; - u8 key; - u8 flags; - u8 spare2[2]; - u32 block_count; - u32 alet; - struct dasd_diag_bio *bio_list; - u32 interrupt_params; - u8 spare3[20]; -} __attribute__ ((packed, aligned(8))); -#endif /* CONFIG_64BIT */ diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index d47f5b9..49b48a8 100644 --- 
a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1633,7 +1633,6 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device) static u32 get_fcx_max_data(struct dasd_device *device) { -#if defined(CONFIG_64BIT) int tpm, mdc; int fcx_in_css, fcx_in_gneq, fcx_in_features; struct dasd_eckd_private *private; @@ -1657,9 +1656,6 @@ static u32 get_fcx_max_data(struct dasd_device *device) return 0; } else return mdc * FCX_MAX_DATA_FACTOR; -#else - return 0; -#endif } /* @@ -2615,10 +2611,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( /* Eckd can only do full blocks. */ return ERR_PTR(-EINVAL); count += bv.bv_len >> (block->s2b_shift + 9); -#if defined(CONFIG_64BIT) if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) cidaw += bv.bv_len >> (block->s2b_shift + 9); -#endif } /* Paranoia. */ if (count != last_rec - first_rec + 1) diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 2c8e68b..c9262e7 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -287,10 +287,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, /* Fba can only do full blocks. */ return ERR_PTR(-EINVAL); count += bv.bv_len >> (block->s2b_shift + 9); -#if defined(CONFIG_64BIT) if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) cidaw += bv.bv_len / blksize; -#endif } /* Paranoia. */ if (count != last_rec - first_rec + 1) diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig index a5c6f7e..eaca3e0 100644 --- a/drivers/s390/char/Kconfig +++ b/drivers/s390/char/Kconfig @@ -115,7 +115,7 @@ config SCLP_ASYNC_ID config HMC_DRV def_tristate m prompt "Support for file transfers from HMC drive CD/DVD-ROM" - depends on S390 && 64BIT + depends on S390 select CRC16 help This option enables support for file transfers from a Hardware diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 561a041..eb7cb07 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -178,11 +178,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA; sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; sccb.evbuf.event_id = 4712; -#ifdef CONFIG_64BIT sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64; -#else - sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32; -#endif sccb.evbuf.event_status = 0; sccb.evbuf.blk_cnt = nr_blks; sccb.evbuf.asa = (unsigned long)dest; diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index efcf484..a68fcfd 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -212,11 +212,7 @@ static struct zcore_header zcore_header = { .dump_level = 0, .page_size = PAGE_SIZE, .mem_start = 0, -#ifdef CONFIG_64BIT .build_arch = DUMP_ARCH_S390X, -#else - .build_arch = DUMP_ARCH_S390, -#endif }; /* @@ -516,23 +512,6 @@ static const struct file_operations zcore_hsa_fops = { .llseek = no_llseek, }; -#ifdef CONFIG_32BIT - -static void __init set_lc_mask(struct save_area *map) -{ - memset(&map->ext_save, 0xff, sizeof(map->ext_save)); - memset(&map->timer, 0xff, sizeof(map->timer)); - memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp)); - memset(&map->psw, 0xff, sizeof(map->psw)); - memset(&map->pref_reg, 0xff, sizeof(map->pref_reg)); - memset(&map->acc_regs, 0xff, sizeof(map->acc_regs)); - memset(&map->fp_regs, 0xff, sizeof(map->fp_regs)); - memset(&map->gp_regs, 0xff, sizeof(map->gp_regs)); - memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs)); -} - -#else /* CONFIG_32BIT */ - static void 
__init set_lc_mask(struct save_area *map) { memset(&map->fp_regs, 0xff, sizeof(map->fp_regs)); @@ -547,8 +526,6 @@ static void __init set_lc_mask(struct save_area *map) memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs)); } -#endif /* CONFIG_32BIT */ - /* * Initialize dump globals for a given architecture */ @@ -688,21 +665,12 @@ static int __init zcore_init(void) if (rc) goto fail; -#ifdef CONFIG_64BIT if (arch == ARCH_S390) { pr_alert("The 64-bit dump tool cannot be used for a " "32-bit system\n"); rc = -EINVAL; goto fail; } -#else /* CONFIG_64BIT */ - if (arch == ARCH_S390X) { - pr_alert("The 32-bit dump tool cannot be used for a " - "64-bit system\n"); - rc = -EINVAL; - goto fail; - } -#endif /* CONFIG_64BIT */ rc = get_mem_info(&mem_size, &mem_end); if (rc) diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 3578105..07fc5d9 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -143,13 +143,11 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ orb->cmd.spnd = priv->options.suspend; orb->cmd.ssic = priv->options.suspend && priv->options.inter; orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; -#ifdef CONFIG_64BIT /* * for 64 bit we always support 64 bit IDAWs with 4k page size only */ orb->cmd.c64 = 1; orb->cmd.i2k = 0; -#endif orb->cmd.key = key >> 4; /* issue "Start Subchannel" */ orb->cmd.cpa = (__u32) __pa(cpa); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index a563e4c..7e70f92 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -84,7 +84,6 @@ enum qdio_irq_states { #define QDIO_SIGA_WRITEQ 0x04 #define QDIO_SIGA_QEBSM_FLAG 0x80 -#ifdef CONFIG_64BIT static inline int do_sqbs(u64 token, unsigned char state, int queue, int *start, int *count) { @@ -122,12 +121,6 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue, return (_ccq >> 32) & 0xff; } -#else -static inline int do_sqbs(u64 token, unsigned char state, int queue, - int *start, int *count) { return 0; } -static inline int do_eqbs(u64 token, unsigned char *state, int queue, - int *start, int *count, int ack) { return 0; } -#endif /* CONFIG_64BIT */ struct qdio_irq; diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index f76bff6..48b3866 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -91,10 +91,7 @@ EXPORT_SYMBOL_GPL(qdio_reset_buffers); */ static inline int qebsm_possible(void) { -#ifdef CONFIG_64BIT return css_general_characteristics.qebsm; -#endif - return 0; } /* diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 3d7f19f..f0b9871 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -165,7 +165,7 @@ static inline int ap_instructions_available(void) */ static int ap_interrupts_available(void) { - return test_facility(2) && test_facility(65); + return test_facility(65); } /** @@ -174,12 +174,10 @@ static int ap_interrupts_available(void) * * Returns 1 if AP configuration information is available. */ -#ifdef CONFIG_64BIT static int ap_configuration_available(void) { - return test_facility(2) && test_facility(12); + return test_facility(12); } -#endif /** * ap_test_queue(): Test adjunct processor queue. @@ -239,7 +237,6 @@ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) return reg1; } -#ifdef CONFIG_64BIT /** * ap_queue_interruption_control(): Enable interruption for a specific AP. 
* @qid: The AP queue number @@ -261,9 +258,7 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind) : "cc" ); return reg1_out; } -#endif -#ifdef CONFIG_64BIT static inline struct ap_queue_status __ap_query_functions(ap_qid_t qid, unsigned int *functions) { @@ -282,9 +277,7 @@ __ap_query_functions(ap_qid_t qid, unsigned int *functions) *functions = (unsigned int)(reg2 >> 32); return reg1; } -#endif -#ifdef CONFIG_64BIT static inline int __ap_query_configuration(struct ap_config_info *config) { register unsigned long reg0 asm ("0") = 0x04000000UL; @@ -302,7 +295,6 @@ static inline int __ap_query_configuration(struct ap_config_info *config) return reg1; } -#endif /** * ap_query_functions(): Query supported functions. @@ -317,7 +309,6 @@ static inline int __ap_query_configuration(struct ap_config_info *config) */ static int ap_query_functions(ap_qid_t qid, unsigned int *functions) { -#ifdef CONFIG_64BIT struct ap_queue_status status; int i; status = __ap_query_functions(qid, functions); @@ -348,9 +339,6 @@ static int ap_query_functions(ap_qid_t qid, unsigned int *functions) } } return -EBUSY; -#else - return -EINVAL; -#endif } /** @@ -364,7 +352,6 @@ static int ap_query_functions(ap_qid_t qid, unsigned int *functions) */ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind) { -#ifdef CONFIG_64BIT struct ap_queue_status status; int t_depth, t_device_type, rc, i; @@ -404,9 +391,6 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind) } } return rc; -#else - return -EINVAL; -#endif } /** @@ -1238,7 +1222,6 @@ static struct bus_attribute *const ap_bus_attrs[] = { */ static void ap_query_configuration(void) { -#ifdef CONFIG_64BIT if (ap_configuration_available()) { if (!ap_configuration) ap_configuration = @@ -1248,9 +1231,6 @@ static void ap_query_configuration(void) __ap_query_configuration(ap_configuration); } else ap_configuration = NULL; -#else - ap_configuration = NULL; -#endif } /** diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 2dbc77b..edf16bf 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -130,11 +130,7 @@ void ctcmpc_dumpit(char *buf, int len) __u32 ct, sw, rm, dup; char *ptr, *rptr; char tbuf[82], tdup[82]; - #ifdef CONFIG_64BIT char addr[22]; - #else - char addr[12]; - #endif char boff[12]; char bhex[82], duphex[82]; char basc[40]; @@ -147,11 +143,7 @@ void ctcmpc_dumpit(char *buf, int len) for (ct = 0; ct < len; ct++, ptr++, rptr++) { if (sw == 0) { - #ifdef CONFIG_64BIT sprintf(addr, "%16.16llx", (__u64)rptr); - #else - sprintf(addr, "%8.8X", (__u32)rptr); - #endif sprintf(boff, "%4.4X", (__u32)ct); bhex[0] = '\0'; @@ -162,11 +154,7 @@ void ctcmpc_dumpit(char *buf, int len) if (sw == 8) strcat(bhex, " "); - #if CONFIG_64BIT sprintf(tbuf, "%2.2llX", (__u64)*ptr); - #else - sprintf(tbuf, "%2.2X", (__u32)*ptr); - #endif tbuf[2] = '\0'; strcat(bhex, tbuf); diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c index 429494b..a9a5210 100644 --- a/drivers/watchdog/diag288_wdt.c +++ b/drivers/watchdog/diag288_wdt.c @@ -125,9 +125,7 @@ static int wdt_start(struct watchdog_device *dev) ret = __diag288_vm(func, dev->timeout, ebc_cmd, len); WARN_ON(ret != 0); kfree(ebc_cmd); - } - - if (MACHINE_IS_LPAR) { + } else { ret = __diag288_lpar(WDT_FUNC_INIT, dev->timeout, LPARWDT_RESTART); } @@ -136,7 +134,6 @@ static int wdt_start(struct watchdog_device *dev) pr_err("The watchdog cannot be activated\n"); return ret; } - pr_info("The watchdog was activated\n"); return 0; } @@ -145,7 +142,6 @@ 
static int wdt_stop(struct watchdog_device *dev) int ret; ret = __diag288(WDT_FUNC_CANCEL, 0, 0, 0); - pr_info("The watchdog was deactivated\n"); return ret; } @@ -177,10 +173,9 @@ static int wdt_ping(struct watchdog_device *dev) ret = __diag288_vm(func, dev->timeout, ebc_cmd, len); WARN_ON(ret != 0); kfree(ebc_cmd); - } - - if (MACHINE_IS_LPAR) + } else { ret = __diag288_lpar(WDT_FUNC_CHANGE, dev->timeout, 0); + } if (ret) pr_err("The watchdog timer cannot be started or reset\n"); @@ -202,7 +197,7 @@ static struct watchdog_ops wdt_ops = { }; static struct watchdog_info wdt_info = { - .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .firmware_version = 0, .identity = "z Watchdog", }; @@ -273,21 +268,16 @@ static int __init diag288_init(void) watchdog_set_nowayout(&wdt_dev, nowayout_info); if (MACHINE_IS_VM) { - pr_info("The watchdog device driver detected a z/VM environment\n"); if (__diag288_vm(WDT_FUNC_INIT, 15, ebc_begin, sizeof(ebc_begin)) != 0) { pr_err("The watchdog cannot be initialized\n"); return -EINVAL; } - } else if (MACHINE_IS_LPAR) { - pr_info("The watchdog device driver detected an LPAR environment\n"); + } else { if (__diag288_lpar(WDT_FUNC_INIT, 30, LPARWDT_RESTART)) { pr_err("The watchdog cannot be initialized\n"); return -EINVAL; } - } else { - pr_err("Linux runs in an environment that does not support the diag288 watchdog\n"); - return -ENODEV; } if (__diag288_lpar(WDT_FUNC_CANCEL, 0, 0)) { |
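As an illustration of the read-modify-write scheme that the new s390_kernel_write() comment in arch/s390/mm/maccess.c describes (read the aligned doubleword containing the target, patch only the requested bytes, store the whole doubleword back), here is a minimal plain-C sketch of that access pattern. It is for orientation only: it uses ordinary loads and stores, so unlike the patch's sturg-based assembly it cannot bypass DAT or page-table write protection, and the helper names are illustrative rather than taken from the patch.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Sketch of one read-modify-write step: handle the bytes of dst that fall
 * inside a single eight-byte-aligned doubleword and return how many bytes
 * were consumed.  Mirrors the looping/alignment logic only, not the real
 * DAT-bypassing store.
 */
static size_t write_odd_sketch(void *dst, const void *src, size_t size)
{
        uintptr_t aligned = (uintptr_t) dst & ~7UL;     /* doubleword base   */
        size_t offset = (uintptr_t) dst & 7UL;          /* offset inside it  */
        size_t count = size < 8 - offset ? size : 8 - offset;
        unsigned char tmp[8];

        memcpy(tmp, (void *) aligned, 8);               /* read whole word   */
        memcpy(tmp + offset, src, count);               /* patch asked bytes */
        memcpy((void *) aligned, tmp, 8);               /* aligned write-back*/
        return count;
}

/* Loop over the buffer exactly as s390_kernel_write() does. */
static void kernel_write_sketch(void *dst, const void *src, size_t size)
{
        while (size) {
                size_t copied = write_odd_sketch(dst, src, size);

                dst = (char *) dst + copied;
                src = (const char *) src + copied;
                size -= copied;
        }
}

Because every store covers a full aligned doubleword, the same caveat as in the patch's comment applies: two CPUs writing overlapping doublewords concurrently could corrupt each other's bytes.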