path: root/arch/s390
author    Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 08:19:39 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 08:19:39 -0700
commit    d0b8883800c913f5cc0eb273c052bcac94ad44d8 (patch)
tree      2c9610d6df3545beb916238b314466e6d0e74297 /arch/s390
parent    9d2da7af909e1cf529f3cac582aaae05b107aa1e (diff)
parent    1c21351b722c9101bacdb961f5b5711669c882a0 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 update from Martin Schwidefsky:
 "This is the first batch of s390 patches for the 3.10 merge window.
  Included are some performance enhancements: storage key initialization,
  zero page cache synonyms, system call micro optimization and the
  speedup patches for dasdfmt.

  Sebastian managed to get rid of the special casing for the console
  device in the cio layer.

  And the usual bunch of bug fixes."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (59 commits)
  s390/pci: use pci_scan_root_bus
  s390/scm_blk: fix memleak in init function
  s390/scm_blk: allow more cluster size values
  s390/cio: fix irq statistics
  s390/memory hotplug: prevent offline of active memory increments
  s390: remove small stack config option
  s390: system call path micro optimization
  s390: lowcore stack pointer offsets
  s390/uapi: change struct statfs[64] member types to unsigned values
  s390/pci: return correct dma address for offset > PAGE_SIZE
  s390/ptrace: remove empty ifdefs
  s390/compat: remove ptrace compat definitions from uapi header file
  s390/compat: fix compile error for !COMPAT
  s390/compat: fix compat_sys_statfs() memory corruption
  s390/zcore: Fix HSA copy length for last block
  s390/mm,gmap: segment mapping race
  s390/mm,gmap: implement gmap_translate()
  s390/pci: remove disable_device implementation
  s390/pci: disable per default
  s390/pci: return error after failed pci ops
  ...
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/Kconfig | 13
-rw-r--r-- | arch/s390/Makefile | 10
-rw-r--r-- | arch/s390/hypfs/hypfs_dbfs.c | 4
-rw-r--r-- | arch/s390/include/asm/bitops.h | 117
-rw-r--r-- | arch/s390/include/asm/ccwdev.h | 3
-rw-r--r-- | arch/s390/include/asm/cio.h | 2
-rw-r--r-- | arch/s390/include/asm/compat.h | 57
-rw-r--r-- | arch/s390/include/asm/elf.h | 23
-rw-r--r-- | arch/s390/include/asm/pci.h | 1
-rw-r--r-- | arch/s390/include/asm/pci_debug.h | 9
-rw-r--r-- | arch/s390/include/asm/pci_insn.h | 203
-rw-r--r-- | arch/s390/include/asm/pci_io.h | 16
-rw-r--r-- | arch/s390/include/asm/pgtable.h | 2
-rw-r--r-- | arch/s390/include/asm/processor.h | 3
-rw-r--r-- | arch/s390/include/asm/ptrace.h | 6
-rw-r--r-- | arch/s390/include/asm/syscall.h | 1
-rw-r--r-- | arch/s390/include/asm/thread_info.h | 6
-rw-r--r-- | arch/s390/include/uapi/asm/ptrace.h | 20
-rw-r--r-- | arch/s390/include/uapi/asm/statfs.h | 63
-rw-r--r-- | arch/s390/kernel/Makefile | 17
-rw-r--r-- | arch/s390/kernel/asm-offsets.c | 1
-rw-r--r-- | arch/s390/kernel/compat_signal.c | 2
-rw-r--r-- | arch/s390/kernel/dis.c | 9
-rw-r--r-- | arch/s390/kernel/dumpstack.c | 236
-rw-r--r-- | arch/s390/kernel/entry.S | 39
-rw-r--r-- | arch/s390/kernel/entry.h | 1
-rw-r--r-- | arch/s390/kernel/entry64.S | 43
-rw-r--r-- | arch/s390/kernel/machine_kexec.c | 30
-rw-r--r-- | arch/s390/kernel/setup.c | 9
-rw-r--r-- | arch/s390/kernel/smp.c | 15
-rw-r--r-- | arch/s390/kernel/suspend.c | 31
-rw-r--r-- | arch/s390/kernel/swsusp_asm64.S | 29
-rw-r--r-- | arch/s390/kernel/traps.c | 250
-rw-r--r-- | arch/s390/kvm/trace.h | 2
-rw-r--r-- | arch/s390/mm/cmm.c | 8
-rw-r--r-- | arch/s390/mm/fault.c | 9
-rw-r--r-- | arch/s390/mm/init.c | 10
-rw-r--r-- | arch/s390/mm/pageattr.c | 24
-rw-r--r-- | arch/s390/mm/pgtable.c | 235
-rw-r--r-- | arch/s390/net/bpf_jit_comp.c | 3
-rw-r--r-- | arch/s390/pci/Makefile | 4
-rw-r--r-- | arch/s390/pci/pci.c | 153
-rw-r--r-- | arch/s390/pci/pci_clp.c | 13
-rw-r--r-- | arch/s390/pci/pci_debug.c | 7
-rw-r--r-- | arch/s390/pci/pci_dma.c | 9
-rw-r--r-- | arch/s390/pci/pci_insn.c | 202
-rw-r--r-- | arch/s390/pci/pci_msi.c | 10
47 files changed, 1047 insertions, 913 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index eb8fb62..bda6ba6 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -375,19 +375,6 @@ config PACK_STACK
Say Y if you are unsure.
-config SMALL_STACK
- def_bool n
- prompt "Use 8kb for kernel stack instead of 16kb"
- depends on PACK_STACK && 64BIT && !LOCKDEP
- help
- If you say Y here and the compiler supports the -mkernel-backchain
- option the kernel will use a smaller kernel stack size. The reduced
- size is 8kb instead of 16kb. This allows to run more threads on a
- system and reduces the pressure on the memory management for higher
- order page allocations.
-
- Say N if you are unsure.
-
config CHECK_STACK
def_bool y
prompt "Detect kernel stack overflow"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 7e3ce78..a7d68a4 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -55,22 +55,12 @@ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
ifeq ($(call cc-option-yn,-mkernel-backchain),y)
cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
-cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-ifdef CONFIG_SMALL_STACK
-STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
-endif
endif
# new style option for packed stacks
ifeq ($(call cc-option-yn,-mpacked-stack),y)
cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
-cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-ifdef CONFIG_SMALL_STACK
-STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
-endif
endif
ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 9fd4a40..bb5dd49 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -105,9 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
int hypfs_dbfs_init(void)
{
dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
- if (IS_ERR(dbfs_dir))
- return PTR_ERR(dbfs_dir);
- return 0;
+ return PTR_RET(dbfs_dir);
}
void hypfs_dbfs_exit(void)
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 1542293..4d8604e 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -61,8 +61,6 @@ extern const char _sb_findmap[];
#ifndef CONFIG_64BIT
-#define __BITOPS_ALIGN 3
-#define __BITOPS_WORDSIZE 32
#define __BITOPS_OR "or"
#define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr"
@@ -81,8 +79,6 @@ extern const char _sb_findmap[];
#else /* CONFIG_64BIT */
-#define __BITOPS_ALIGN 7
-#define __BITOPS_WORDSIZE 64
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
@@ -101,8 +97,7 @@ extern const char _sb_findmap[];
#endif /* CONFIG_64BIT */
-#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
-#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
+#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#ifdef CONFIG_SMP
/*
@@ -114,9 +109,9 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make OR mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
@@ -130,9 +125,9 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make AND mask */
- mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
+ mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}
@@ -146,9 +141,9 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make XOR mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}
@@ -163,12 +158,12 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make OR/test mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
- __BITOPS_BARRIER();
+ barrier();
return (old & mask) != 0;
}
@@ -182,12 +177,12 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make AND/test mask */
- mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
+ mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
- __BITOPS_BARRIER();
+ barrier();
return (old ^ new) != 0;
}
@@ -201,12 +196,12 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make XOR/test mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
- __BITOPS_BARRIER();
+ barrier();
return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
@@ -218,7 +213,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" oc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -229,7 +224,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr |= 1 << (nr & 7);
}
@@ -246,7 +241,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" nc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
@@ -257,7 +252,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr &= ~(1 << (nr & 7));
}
@@ -273,7 +268,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" xc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -284,7 +279,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr ^= 1 << (nr & 7);
}
@@ -302,7 +297,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" oc %O0(1,%R0),%1"
@@ -321,7 +316,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" nc %O0(1,%R0),%1"
@@ -340,7 +335,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" xc %O0(1,%R0),%1"
@@ -376,7 +371,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(volatile unsigned char *) addr;
return (ch >> (nr & 7)) & 1;
}
@@ -384,7 +379,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
return (((volatile char *) addr)
- [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
+ [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
}
#define test_bit(nr,addr) \
@@ -693,18 +688,18 @@ static inline int find_next_bit_left(const unsigned long *addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
set = __flo_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit_left(p, size);
@@ -736,22 +731,22 @@ static inline int find_next_zero_bit (const unsigned long * addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * __ffz_word returns __BITOPS_WORDSIZE
+ * __ffz_word returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffz_word(bit, *p >> bit);
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_zero_bit(p, size);
@@ -773,22 +768,22 @@ static inline int find_next_bit (const unsigned long * addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * __ffs_word returns __BITOPS_WORDSIZE
+ * __ffs_word returns BITS_PER_LONG
* if no one bit is present in the word.
*/
set = __ffs_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit(p, size);
@@ -843,22 +838,22 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * s390 version of ffz returns __BITOPS_WORDSIZE
+ * s390 version of ffz returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_zero_bit_le(p, size);
@@ -885,22 +880,22 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * s390 version of ffz returns __BITOPS_WORDSIZE
+ * s390 version of ffz returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit_le(p, size);
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index e606161..f201af8 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -220,7 +220,8 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
extern struct ccw_device *ccw_device_probe_console(void);
-extern int ccw_device_force_console(void);
+extern void ccw_device_wait_idle(struct ccw_device *);
+extern int ccw_device_force_console(struct ccw_device *);
int ccw_device_siosl(struct ccw_device *);
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index ad2b924..ffb8989 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -296,8 +296,6 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
return 0;
}
-extern void wait_cons_dev(void);
-
extern void css_schedule_reprobe(void);
extern void reipl_ccw_dev(struct ccw_dev_id *id);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index f8c6df6..c1e7c64 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -70,6 +70,22 @@ typedef u32 compat_ulong_t;
typedef u64 compat_u64;
typedef u32 compat_uptr_t;
+typedef struct {
+ u32 mask;
+ u32 addr;
+} __aligned(8) psw_compat_t;
+
+typedef struct {
+ psw_compat_t psw;
+ u32 gprs[NUM_GPRS];
+ u32 acrs[NUM_ACRS];
+ u32 orig_gpr2;
+} s390_compat_regs;
+
+typedef struct {
+ u32 gprs_high[NUM_GPRS];
+} s390_compat_regs_high;
+
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
@@ -124,18 +140,33 @@ struct compat_flock64 {
};
struct compat_statfs {
- s32 f_type;
- s32 f_bsize;
- s32 f_blocks;
- s32 f_bfree;
- s32 f_bavail;
- s32 f_files;
- s32 f_ffree;
+ u32 f_type;
+ u32 f_bsize;
+ u32 f_blocks;
+ u32 f_bfree;
+ u32 f_bavail;
+ u32 f_files;
+ u32 f_ffree;
+ compat_fsid_t f_fsid;
+ u32 f_namelen;
+ u32 f_frsize;
+ u32 f_flags;
+ u32 f_spare[4];
+};
+
+struct compat_statfs64 {
+ u32 f_type;
+ u32 f_bsize;
+ u64 f_blocks;
+ u64 f_bfree;
+ u64 f_bavail;
+ u64 f_files;
+ u64 f_ffree;
compat_fsid_t f_fsid;
- s32 f_namelen;
- s32 f_frsize;
- s32 f_flags;
- s32 f_spare[5];
+ u32 f_namelen;
+ u32 f_frsize;
+ u32 f_flags;
+ u32 f_spare[4];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
@@ -248,8 +279,6 @@ static inline int is_compat_task(void)
return is_32bit_task();
}
-#endif
-
static inline void __user *arch_compat_alloc_user_space(long len)
{
unsigned long stack;
@@ -260,6 +289,8 @@ static inline void __user *arch_compat_alloc_user_space(long len)
return (void __user *) (stack - len);
}
+#endif
+
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1bfdf24..78f4f87 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -119,6 +119,8 @@
*/
#include <asm/ptrace.h>
+#include <asm/compat.h>
+#include <asm/syscall.h>
#include <asm/user.h>
typedef s390_fp_regs elf_fpregset_t;
@@ -180,18 +182,31 @@ extern unsigned long elf_hwcap;
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
-#ifdef CONFIG_64BIT
+#ifndef CONFIG_COMPAT
+#define SET_PERSONALITY(ex) \
+do { \
+ set_personality(PER_LINUX | \
+ (current->personality & (~PER_MASK))); \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table; \
+} while (0)
+#else /* CONFIG_COMPAT */
#define SET_PERSONALITY(ex) \
do { \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & ~PER_MASK)); \
- if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
set_thread_flag(TIF_31BIT); \
- else \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table_emu; \
+ } else { \
clear_thread_flag(TIF_31BIT); \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table; \
+ } \
} while (0)
-#endif /* CONFIG_64BIT */
+#endif /* CONFIG_COMPAT */
#define STACK_RND_MASK 0x7ffUL
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 05333b7..6c18012 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -140,6 +140,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
struct zpci_dev *zpci_alloc_device(void);
int zpci_create_device(struct zpci_dev *);
int zpci_enable_device(struct zpci_dev *);
+int zpci_disable_device(struct zpci_dev *);
void zpci_stop_device(struct zpci_dev *);
void zpci_free_device(struct zpci_dev *);
int zpci_scan_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 6bbec42..1ca5d10 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -7,14 +7,11 @@ extern debug_info_t *pci_debug_msg_id;
extern debug_info_t *pci_debug_err_id;
#ifdef CONFIG_PCI_DEBUG
-#define zpci_dbg(fmt, args...) \
- do { \
- if (pci_debug_msg_id->level >= 2) \
- debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\
- } while (0)
+#define zpci_dbg(imp, fmt, args...) \
+ debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
#else /* !CONFIG_PCI_DEBUG */
-#define zpci_dbg(fmt, args...) do { } while (0)
+#define zpci_dbg(imp, fmt, args...) do { } while (0)
#endif
#define zpci_err(text...) \
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 1486a98..e6a2bdd 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -1,10 +1,6 @@
#ifndef _ASM_S390_PCI_INSN_H
#define _ASM_S390_PCI_INSN_H
-#include <linux/delay.h>
-
-#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
-
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
#define ZPCI_PCI_ST_FUNC_IN_ERR 8
@@ -82,199 +78,12 @@ struct zpci_fib {
u64 reserved7;
} __packed;
-/* Modify PCI Function Controls */
-static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
-{
- u8 cc;
-
- asm volatile (
- " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
- : : "cc");
- *status = req >> 24 & 0xff;
- return cc;
-}
-
-static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
-{
- u8 cc, status;
-
- do {
- cc = __mpcifc(req, fib, &status);
- if (cc == 2)
- msleep(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
- __func__, cc, status);
- return (cc) ? -EIO : 0;
-}
-
-/* Refresh PCI Translations */
-static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
-{
- register u64 __addr asm("2") = addr;
- register u64 __range asm("3") = range;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d30000,%[fn],%[addr]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [fn] "+d" (fn)
- : [addr] "d" (__addr), "d" (__range)
- : "cc");
- *status = fn >> 24 & 0xff;
- return cc;
-}
-
-static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
-{
- u8 cc, status;
-
- do {
- cc = __rpcit(fn, addr, range, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
- __func__, cc, status, addr, range);
- return (cc) ? -EIO : 0;
-}
-
-/* Store PCI function controls */
-static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
-{
- u64 fn = (u64) handle << 32 | space << 16;
- u8 cc;
-
- asm volatile (
- " .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
- : : "cc");
- *status = fn >> 24 & 0xff;
- return cc;
-}
-
-/* Set Interruption Controls */
-static inline void sic_instr(u16 ctl, char *unused, u8 isc)
-{
- asm volatile (
- " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
- : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
-}
-
-/* PCI Load */
-static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
-{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
- u64 __data;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d20000,%[data],%[req]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
- : "d" (__offset)
- : "cc");
- *status = __req >> 24 & 0xff;
- *data = __data;
- return cc;
-}
-
-static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcilg(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc) {
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- /* TODO: on IO errors set data to 0xff...
- * here or in users of pcilg (le conversion)?
- */
- }
- return (cc) ? -EIO : 0;
-}
-
-/* PCI Store */
-static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
-{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d00000,%[data],%[req]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (__req)
- : "d" (__offset), [data] "d" (data)
- : "cc");
- *status = __req >> 24 & 0xff;
- return cc;
-}
-
-static inline int pcistg_instr(u64 data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcistg(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- return (cc) ? -EIO : 0;
-}
-
-/* PCI Store Block */
-static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
-{
- u8 cc;
-
- asm volatile (
- " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (req)
- : [offset] "d" (offset), [data] "Q" (*data)
- : "cc");
- *status = req >> 24 & 0xff;
- return cc;
-}
-
-static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcistb(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- return (cc) ? -EIO : 0;
-}
+int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
+int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
+int s390pci_load(u64 *data, u64 req, u64 offset);
+int s390pci_store(u64 data, u64 req, u64 offset);
+int s390pci_store_block(const u64 *data, u64 req, u64 offset);
+void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 5fd81f3..83a9caa 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
u64 data; \
int rc; \
\
- rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \
+ rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \
if (rc) \
data = -1ULL; \
return (RETTYPE) data; \
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data = (VALTYPE) val; \
\
- pcistg_instr(data, req, ZPCI_OFFSET(addr)); \
+ s390pci_store(data, req, ZPCI_OFFSET(addr)); \
}
zpci_read(8, u64)
@@ -83,15 +83,18 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
val = 0; /* let FW report error */
break;
}
- return pcistg_instr(val, req, offset);
+ return s390pci_store(val, req, offset);
}
static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
{
u64 data;
- u8 cc;
+ int cc;
+
+ cc = s390pci_load(&data, req, offset);
+ if (cc)
+ goto out;
- cc = pcilg_instr(&data, req, offset);
switch (len) {
case 1:
*((u8 *) dst) = (u8) data;
@@ -106,12 +109,13 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
*((u64 *) dst) = (u64) data;
break;
}
+out:
return cc;
}
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
- return pcistb_instr(data, req, offset);
+ return s390pci_store_block(data, req, offset);
}
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3cb47cf..4a64c0e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -764,6 +764,8 @@ void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long __gmap_translate(unsigned long address, struct gmap *);
+unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 94e749c..6b49987 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -161,7 +161,8 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
extern void show_code(struct pt_regs *regs);
extern void print_fn_code(unsigned char *code, unsigned long len);
-extern int insn_to_mnemonic(unsigned char *instruction, char buf[8]);
+extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
+ unsigned int len);
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 3ee5da3..559512a 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -9,9 +9,7 @@
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
-#ifndef __s390x__
-#else /* __s390x__ */
-#endif /* __s390x__ */
+
extern long psw_kernel_bits;
extern long psw_user_bits;
@@ -77,8 +75,6 @@ struct per_struct_kernel {
#define PER_CONTROL_SUSPENSION 0x00400000UL
#define PER_CONTROL_ALTERATION 0x00200000UL
-#ifdef __s390x__
-#endif /* __s390x__ */
/*
* These are defined as per linux/ptrace.h, which see.
*/
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index fe7b997..cd29d2f 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -23,6 +23,7 @@
* type here is what we want [need] for both 32 bit and 64 bit systems.
*/
extern const unsigned int sys_call_table[];
+extern const unsigned int sys_call_table_emu[];
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 9e2cfe0..eb5f64d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -14,13 +14,8 @@
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
#else /* CONFIG_64BIT */
-#ifndef __SMALL_STACK
#define THREAD_ORDER 2
#define ASYNC_ORDER 2
-#else
-#define THREAD_ORDER 1
-#define ASYNC_ORDER 1
-#endif
#endif /* CONFIG_64BIT */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
@@ -41,6 +36,7 @@ struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
+ unsigned long sys_call_table; /* System call table address */
unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct restart_block restart_block;
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index a5ca214..3aa9f1e 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -215,12 +215,6 @@ typedef struct
unsigned long addr;
} __attribute__ ((aligned(8))) psw_t;
-typedef struct
-{
- __u32 mask;
- __u32 addr;
-} __attribute__ ((aligned(8))) psw_compat_t;
-
#ifndef __s390x__
#define PSW_MASK_PER 0x40000000UL
@@ -295,20 +289,6 @@ typedef struct
unsigned long orig_gpr2;
} s390_regs;
-typedef struct
-{
- psw_compat_t psw;
- __u32 gprs[NUM_GPRS];
- __u32 acrs[NUM_ACRS];
- __u32 orig_gpr2;
-} s390_compat_regs;
-
-typedef struct
-{
- __u32 gprs_high[NUM_GPRS];
-} s390_compat_regs_high;
-
-
/*
* Now for the user space program event recording (trace) definitions.
* The following structures are used only for the ptrace interface, don't
diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h
index 5acca0a..a61d538 100644
--- a/arch/s390/include/uapi/asm/statfs.h
+++ b/arch/s390/include/uapi/asm/statfs.h
@@ -7,9 +7,6 @@
#ifndef _S390_STATFS_H
#define _S390_STATFS_H
-#ifndef __s390x__
-#include <asm-generic/statfs.h>
-#else
/*
* We can't use <asm-generic/statfs.h> because in 64-bit mode
* we mix ints of different sizes in our struct statfs.
@@ -21,49 +18,33 @@ typedef __kernel_fsid_t fsid_t;
#endif
struct statfs {
- int f_type;
- int f_bsize;
- long f_blocks;
- long f_bfree;
- long f_bavail;
- long f_files;
- long f_ffree;
+ unsigned int f_type;
+ unsigned int f_bsize;
+ unsigned long f_blocks;
+ unsigned long f_bfree;
+ unsigned long f_bavail;
+ unsigned long f_files;
+ unsigned long f_ffree;
__kernel_fsid_t f_fsid;
- int f_namelen;
- int f_frsize;
- int f_flags;
- int f_spare[4];
+ unsigned int f_namelen;
+ unsigned int f_frsize;
+ unsigned int f_flags;
+ unsigned int f_spare[4];
};
struct statfs64 {
- int f_type;
- int f_bsize;
- long f_blocks;
- long f_bfree;
- long f_bavail;
- long f_files;
- long f_ffree;
+ unsigned int f_type;
+ unsigned int f_bsize;
+ unsigned long f_blocks;
+ unsigned long f_bfree;
+ unsigned long f_bavail;
+ unsigned long f_files;
+ unsigned long f_ffree;
__kernel_fsid_t f_fsid;
- int f_namelen;
- int f_frsize;
- int f_flags;
- int f_spare[4];
+ unsigned int f_namelen;
+ unsigned int f_frsize;
+ unsigned int f_flags;
+ unsigned int f_spare[4];
};
-struct compat_statfs64 {
- __u32 f_type;
- __u32 f_bsize;
- __u64 f_blocks;
- __u64 f_bfree;
- __u64 f_bavail;
- __u64 f_files;
- __u64 f_ffree;
- __kernel_fsid_t f_fsid;
- __u32 f_namelen;
- __u32 f_frsize;
- __u32 f_flags;
- __u32 f_spare[4];
-};
-
-#endif /* __s390x__ */
#endif
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2ac311e..1386fca 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -14,16 +14,25 @@ endif
CFLAGS_smp.o := -Wno-nonnull
#
+# Disable tailcall optimizations for stack / callchain walking functions
+# since this might generate broken code when accessing register 15 and
+# passing its content to other functions.
+#
+CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
+CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
+
+#
# Pass UTS_MACHINE for user_regset definition
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
-obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
- processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
- debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
- sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
+obj-y += debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o
+obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y += dumpstack.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fface87..7a82f9f 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -35,6 +35,7 @@ int main(void)
DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
DEFINE(__TI_flags, offsetof(struct thread_info, flags));
+ DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6de049f..c439ac9 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -362,6 +362,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
+ regs->gprs[6] = task_thread_info(current)->last_break;
}
/* Place signal number on stack to allow backtrace from handler. */
@@ -421,6 +422,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[2] = map_signal(sig);
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
+ regs->gprs[5] = task_thread_info(current)->last_break;
return 0;
give_sigsegv:
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 3ad5e95..7f4a4a8 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1696,14 +1696,15 @@ static struct insn *find_insn(unsigned char *code)
* insn_to_mnemonic - decode an s390 instruction
* @instruction: instruction to decode
* @buf: buffer to fill with mnemonic
+ * @len: length of buffer
*
* Decode the instruction at @instruction and store the corresponding
- * mnemonic into @buf.
+ * mnemonic into @buf of length @len.
* @buf is left unchanged if the instruction could not be decoded.
* Returns:
* %0 on success, %-ENOENT if the instruction was not found.
*/
-int insn_to_mnemonic(unsigned char *instruction, char buf[8])
+int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
{
struct insn *insn;
@@ -1711,10 +1712,10 @@ int insn_to_mnemonic(unsigned char *instruction, char buf[8])
if (!insn)
return -ENOENT;
if (insn->name[0] == '\0')
- snprintf(buf, 8, "%s",
+ snprintf(buf, len, "%s",
long_insn_name[(int) insn->name[1]]);
else
- snprintf(buf, 8, "%.5s", insn->name);
+ snprintf(buf, len, "%.5s", insn->name);
return 0;
}
EXPORT_SYMBOL_GPL(insn_to_mnemonic);
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
new file mode 100644
index 0000000..03dce39
--- /dev/null
+++ b/arch/s390/kernel/dumpstack.c
@@ -0,0 +1,236 @@
+/*
+ * Stack dumping functions
+ *
+ * Copyright IBM Corp. 1999, 2013
+ */
+
+#include <linux/kallsyms.h>
+#include <linux/hardirq.h>
+#include <linux/kprobes.h>
+#include <linux/utsname.h>
+#include <linux/export.h>
+#include <linux/kdebug.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/debug.h>
+#include <asm/ipl.h>
+
+#ifndef CONFIG_64BIT
+#define LONG "%08lx "
+#define FOURLONG "%08lx %08lx %08lx %08lx\n"
+static int kstack_depth_to_print = 12;
+#else /* CONFIG_64BIT */
+#define LONG "%016lx "
+#define FOURLONG "%016lx %016lx %016lx %016lx\n"
+static int kstack_depth_to_print = 20;
+#endif /* CONFIG_64BIT */
+
+/*
+ * For show_trace we have three different stacks to consider:
+ * - the panic stack which is used if the kernel stack has overflown
+ * - the asynchronous interrupt stack (cpu related)
+ * - the synchronous kernel stack (process related)
+ * The stack trace can start at any of the three stacks and can potentially
+ * touch all of them. The order is: panic stack, async stack, sync stack.
+ */
+static unsigned long
+__show_trace(unsigned long sp, unsigned long low, unsigned long high)
+{
+ struct stack_frame *sf;
+ struct pt_regs *regs;
+
+ while (1) {
+ sp = sp & PSW_ADDR_INSN;
+ if (sp < low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+ printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+ print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
+ /* Follow the backchain. */
+ while (1) {
+ low = sp;
+ sp = sf->back_chain & PSW_ADDR_INSN;
+ if (!sp)
+ break;
+ if (sp <= low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+ printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+ print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
+ }
+ /* Zero backchain detected, check for interrupt frame. */
+ sp = (unsigned long) (sf + 1);
+ if (sp <= low || sp > high - sizeof(*regs))
+ return sp;
+ regs = (struct pt_regs *) sp;
+ printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
+ print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
+ low = sp;
+ sp = regs->gprs[15];
+ }
+}
+
+static void show_trace(struct task_struct *task, unsigned long *stack)
+{
+ register unsigned long __r15 asm ("15");
+ unsigned long sp;
+
+ sp = (unsigned long) stack;
+ if (!sp)
+ sp = task ? task->thread.ksp : __r15;
+ printk("Call Trace:\n");
+#ifdef CONFIG_CHECK_STACK
+ sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
+ S390_lowcore.panic_stack);
+#endif
+ sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
+ S390_lowcore.async_stack);
+ if (task)
+ __show_trace(sp, (unsigned long) task_stack_page(task),
+ (unsigned long) task_stack_page(task) + THREAD_SIZE);
+ else
+ __show_trace(sp, S390_lowcore.thread_info,
+ S390_lowcore.thread_info + THREAD_SIZE);
+ if (!task)
+ task = current;
+ debug_show_held_locks(task);
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+ register unsigned long *__r15 asm ("15");
+ unsigned long *stack;
+ int i;
+
+ if (!sp)
+ stack = task ? (unsigned long *) task->thread.ksp : __r15;
+ else
+ stack = sp;
+
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
+ break;
+ if ((i * sizeof(long) % 32) == 0)
+ printk("%s ", i == 0 ? "" : "\n");
+ printk(LONG, *stack++);
+ }
+ printk("\n");
+ show_trace(task, sp);
+}
+
+static void show_last_breaking_event(struct pt_regs *regs)
+{
+#ifdef CONFIG_64BIT
+ printk("Last Breaking-Event-Address:\n");
+ printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
+ print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
+#endif
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+ printk("CPU: %d %s %s %.*s\n",
+ task_thread_info(current)->cpu, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+ current->comm, current->pid, current,
+ (void *) current->thread.ksp);
+ show_stack(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
+{
+ return (regs->psw.mask & bits) / ((~bits + 1) & bits);
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ char *mode;
+
+ mode = user_mode(regs) ? "User" : "Krnl";
+ printk("%s PSW : %p %p",
+ mode, (void *) regs->psw.mask,
+ (void *) regs->psw.addr);
+ print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
+ printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
+ "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
+ mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
+ mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
+ mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
+ mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
+ mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
+#ifdef CONFIG_64BIT
+ printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
+#endif
+ printk("\n%s GPRS: " FOURLONG, mode,
+ regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
+ printk(" " FOURLONG,
+ regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
+ printk(" " FOURLONG,
+ regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
+ printk(" " FOURLONG,
+ regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
+ show_code(regs);
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ printk("CPU: %d %s %s %.*s\n",
+ task_thread_info(current)->cpu, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+ current->comm, current->pid, current,
+ (void *) current->thread.ksp);
+ show_registers(regs);
+ /* Show stack backtrace if pt_regs is from kernel mode */
+ if (!user_mode(regs))
+ show_trace(NULL, (unsigned long *) regs->gprs[15]);
+ show_last_breaking_event(regs);
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(struct pt_regs *regs, const char *str)
+{
+ static int die_counter;
+
+ oops_enter();
+ lgr_info_log();
+ debug_stop_all();
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+ printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
+#ifdef CONFIG_PREEMPT
+ printk("PREEMPT ");
+#endif
+#ifdef CONFIG_SMP
+ printk("SMP ");
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ printk("DEBUG_PAGEALLOC");
+#endif
+ printk("\n");
+ notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
+ print_modules();
+ show_regs(regs);
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irq(&die_lock);
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception: panic_on_oops");
+ oops_exit();
+ do_exit(SIGSEGV);
+}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 94feff7..4d5e6f8 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -45,6 +45,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
#define BASED(name) name-system_call(%r13)
@@ -97,10 +98,10 @@ STACK_SIZE = 1 << STACK_SHIFT
sra %r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
+ ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: l %r15,\stack # load target stack
-2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro ADD64 high,low,timer
@@ -150,7 +151,7 @@ ENTRY(__switch_to)
l %r4,__THREAD_info(%r2) # get thread_info of prev
l %r5,__THREAD_info(%r3) # get thread_info of next
lr %r15,%r5
- ahi %r15,STACK_SIZE # end of kernel stack of next
+ ahi %r15,STACK_INIT # end of kernel stack of next
st %r3,__LC_CURRENT # store task struct of next
st %r5,__LC_THREAD_INFO # store thread info of next
st %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -178,7 +179,6 @@ sysc_stm:
l %r13,__LC_SVC_NEW_PSW+4
sysc_per:
l %r15,__LC_KERNEL_STACK
- ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
@@ -188,6 +188,7 @@ sysc_vtime:
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
oi __TI_flags+3(%r12),_TIF_SYSCALL
+ l %r10,__TI_sysc_table(%r12) # 31 bit system call table
lh %r8,__PT_INT_CODE+2(%r11)
sla %r8,2 # shift and test for svc0
jnz sysc_nr_ok
@@ -198,7 +199,6 @@ sysc_do_svc:
lr %r8,%r1
sla %r8,2
sysc_nr_ok:
- l %r10,BASED(.Lsys_call_table) # 31 bit system call table
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
st %r2,__PT_ORIG_GPR2(%r11)
st %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -359,11 +359,11 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz pgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+ ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
l %r15,__LC_KERNEL_STACK
-2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
stm %r8,%r9,__PT_PSW(%r11)
@@ -485,7 +485,6 @@ io_work:
#
io_work_user:
l %r1,__LC_KERNEL_STACK
- ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -646,7 +645,6 @@ mcck_skip:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return
l %r1,__LC_KERNEL_STACK # switch to kernel stack
- ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r15)
@@ -674,6 +672,7 @@ mcck_panic:
sra %r14,PAGE_SHIFT
jz 0f
l %r15,__LC_PANIC_STACK
+ j mcck_skip
0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j mcck_skip
@@ -714,12 +713,10 @@ ENTRY(restart_int_handler)
*/
stack_overflow:
l %r15,__LC_PANIC_STACK # change to panic stack
- ahi %r15,-__PT_SIZE # create pt_regs
- stm %r0,%r7,__PT_R0(%r15)
- stm %r8,%r9,__PT_PSW(%r15)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stm %r0,%r7,__PT_R0(%r11)
+ stm %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(32,%r11),0(%r14)
- lr %r15,%r11
- ahi %r15,-STACK_FRAME_OVERHEAD
l %r1,BASED(1f)
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
lr %r2,%r11 # pass pointer to pt_regs
@@ -799,15 +796,14 @@ cleanup_system_call:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
# set up saved register 11
l %r15,__LC_KERNEL_STACK
- ahi %r15,-__PT_SIZE
- st %r15,12(%r11) # r11 pt_regs pointer
+ la %r9,STACK_FRAME_OVERHEAD(%r15)
+ st %r9,12(%r11) # r11 pt_regs pointer
# fill pt_regs
- mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
- stm %r0,%r7,__PT_R0(%r15)
- mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
+ mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
+ stm %r0,%r7,__PT_R0(%r9)
+ mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
# setup saved register 15
- ahi %r15,-STACK_FRAME_OVERHEAD
st %r15,28(%r11) # r15 stack pointer
# set new psw address and exit
l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
@@ -910,7 +906,6 @@ cleanup_idle_wait:
.Ltrace_enter: .long do_syscall_trace_enter
.Ltrace_exit: .long do_syscall_trace_exit
.Lschedule_tail: .long schedule_tail
-.Lsys_call_table: .long sys_call_table
.Lsysc_per: .long sysc_per + 0x80000000
#ifdef CONFIG_TRACE_IRQFLAGS
.Lhardirqs_on: .long trace_hardirqs_on_caller
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index c3a736a..aa0ab02e 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -7,6 +7,7 @@
#include <asm/cputime.h>
extern void *restart_stack;
+extern unsigned long suspend_zero_pages;
void system_call(void);
void pgm_check_handler(void);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2e6d60c..4c17eec 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -39,6 +39,7 @@ __PT_R15 = __PT_GPRS + 120
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_PER_TRAP )
@@ -124,10 +125,10 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
srag %r14,%r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: lg %r15,\stack # load target stack
-2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro UPDATE_VTIME scratch,enter_timer
@@ -177,7 +178,7 @@ ENTRY(__switch_to)
lg %r4,__THREAD_info(%r2) # get thread_info of prev
lg %r5,__THREAD_info(%r3) # get thread_info of next
lgr %r15,%r5
- aghi %r15,STACK_SIZE # end of kernel stack of next
+ aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -203,10 +204,8 @@ sysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
sysc_per:
lg %r15,__LC_KERNEL_STACK
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
@@ -217,6 +216,7 @@ sysc_vtime:
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
oi __TI_flags+7(%r12),_TIF_SYSCALL
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz sysc_nr_ok
@@ -227,13 +227,6 @@ sysc_do_svc:
sth %r1,__PT_INT_CODE+2(%r11)
slag %r8,%r1,2
sysc_nr_ok:
- larl %r10,sys_call_table # 64 bit system call table
-#ifdef CONFIG_COMPAT
- tm __TI_flags+5(%r12),(_TIF_31BIT>>16)
- jno sysc_noemu
- larl %r10,sys_call_table_emu # 31 bit system call table
-sysc_noemu:
-#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -389,6 +382,7 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz pgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
LAST_BREAK %r14
@@ -398,8 +392,7 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 2f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
@@ -526,7 +519,6 @@ io_work:
#
io_work_user:
lg %r1,__LC_KERNEL_STACK
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -688,7 +680,6 @@ mcck_skip:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -755,14 +746,12 @@ ENTRY(restart_int_handler)
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
- lg %r11,__LC_PANIC_STACK # change to panic stack
- aghi %r11,-__PT_SIZE # create pt_regs
+ lg %r15,__LC_PANIC_STACK # change to panic stack
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(64,%r11),0(%r14)
stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
- lgr %r15,%r11
- aghi %r15,-STACK_FRAME_OVERHEAD
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_overflow
@@ -846,15 +835,14 @@ cleanup_system_call:
mvc __TI_last_break(8,%r12),16(%r11)
0: # set up saved register r11
lg %r15,__LC_KERNEL_STACK
- aghi %r15,-__PT_SIZE
- stg %r15,24(%r11) # r11 pt_regs pointer
+ la %r9,STACK_FRAME_OVERHEAD(%r15)
+ stg %r9,24(%r11) # r11 pt_regs pointer
# fill pt_regs
- mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC
- stmg %r0,%r7,__PT_R0(%r15)
- mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
+ mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
+ stmg %r0,%r7,__PT_R0(%r9)
+ mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
# setup saved register r15
- aghi %r15,-STACK_FRAME_OVERHEAD
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,sysc_do_svc
@@ -1011,6 +999,7 @@ sys_call_table:
#ifdef CONFIG_COMPAT
#define SYSCALL(esa,esame,emu) .long emu
+ .globl sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b3de277..ac21781 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -13,6 +13,7 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
+#include <linux/suspend.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
@@ -67,6 +68,35 @@ void setup_regs(void)
memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
+/*
+ * PM notifier callback for kdump
+ */
+static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
+ void *ptr)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ if (crashk_res.start)
+ crash_map_reserved_pages();
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ if (crashk_res.start)
+ crash_unmap_reserved_pages();
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static int __init machine_kdump_pm_init(void)
+{
+ pm_notifier(machine_kdump_pm_cb, 0);
+ return 0;
+}
+arch_initcall(machine_kdump_pm_init);
#endif
/*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 2926885..0f419c5 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -377,11 +377,14 @@ static void __init setup_lowcore(void)
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
lc->clock_comparator = -1ULL;
- lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
+ lc->kernel_stack = ((unsigned long) &init_thread_union)
+ + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
- __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
+ __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+ + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
- __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
+ __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+ + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
lc->machine_flags = S390_lowcore.machine_flags;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 549c9d1..8bde89e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -181,8 +181,10 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
- lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
- lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
+ lc->async_stack = pcpu->async_stack + ASYNC_SIZE
+ - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+ lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
+ - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
@@ -253,7 +255,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
struct _lowcore *lc = pcpu->lowcore;
struct thread_info *ti = task_thread_info(tsk);
- lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
+ lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->thread_info = (unsigned long) task_thread_info(tsk);
lc->current_task = (unsigned long) tsk;
lc->user_timer = ti->user_timer;
@@ -810,8 +813,10 @@ void __init smp_prepare_boot_cpu(void)
pcpu->state = CPU_STATE_CONFIGURED;
pcpu->address = boot_cpu_address;
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
- pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
- pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
+ pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
+ + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
+ + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
set_cpu_present(0, true);
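The setup.c and smp.c hunks above fold the STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) offset into the lowcore stack pointers at initialization time, which is why the entry64.S hunks earlier in this diff can drop the per-entry aghi adjustments; smp_prepare_boot_cpu() adds the same offset back to recover the original allocation base. A minimal sketch of the arithmetic, assuming a hypothetical stack_top input (the helper name below is illustrative, not taken from the patch):

	/*
	 * Sketch only: STACK_FRAME_OVERHEAD and struct pt_regs are the
	 * kernel's own definitions; lowcore_stack_ptr() is a made-up name.
	 */
	static unsigned long lowcore_stack_ptr(unsigned long stack_top)
	{
		/*
		 * Old scheme: lowcore held stack_top and each interrupt
		 * entry subtracted STACK_FRAME_OVERHEAD + __PT_SIZE itself.
		 * New scheme: the offset is applied once here, so the entry
		 * code can use the value directly as the pt_regs and stack
		 * frame base.
		 */
		return stack_top - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	}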
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index aa1494d..c479d2f 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -41,6 +41,7 @@ struct page_key_data {
static struct page_key_data *page_key_data;
static struct page_key_data *page_key_rp, *page_key_wp;
static unsigned long page_key_rx, page_key_wx;
+unsigned long suspend_zero_pages;
/*
* For each page in the hibernation image one additional byte is
@@ -149,6 +150,36 @@ int pfn_is_nosave(unsigned long pfn)
return 0;
}
+/*
+ * PM notifier callback for suspend
+ */
+static int suspend_pm_cb(struct notifier_block *nb, unsigned long action,
+ void *ptr)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER);
+ if (!suspend_zero_pages)
+ return NOTIFY_BAD;
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ free_pages(suspend_zero_pages, LC_ORDER);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static int __init suspend_pm_init(void)
+{
+ pm_notifier(suspend_pm_cb, 0);
+ return 0;
+}
+arch_initcall(suspend_pm_init);
+
void save_processor_state(void)
{
/* swsusp_arch_suspend() actually saves all cpu register contents.
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index d4ca4e0..c487be4 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -36,8 +36,8 @@ ENTRY(swsusp_arch_suspend)
/* Store prefix register on stack */
stpx __SF_EMPTY(%r15)
- /* Save prefix register contents for lowcore */
- llgf %r4,__SF_EMPTY(%r15)
+ /* Save prefix register contents for lowcore copy */
+ llgf %r10,__SF_EMPTY(%r15)
/* Get pointer to save area */
lghi %r1,0x1000
@@ -91,7 +91,18 @@ ENTRY(swsusp_arch_suspend)
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
+ /* Save absolute zero pages */
+ larl %r2,suspend_zero_pages
+ lg %r2,0(%r2)
+ lghi %r4,0
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
+
+ /* Copy lowcore to absolute zero lowcore */
lghi %r2,0
+ lgr %r4,%r10
lghi %r3,2*PAGE_SIZE
lghi %r5,2*PAGE_SIZE
1: mvcle %r2,%r4,0
@@ -248,8 +259,20 @@ restore_registers:
/* Load old stack */
lg %r15,0x2f8(%r13)
+ /* Save prefix register */
+ mvc __SF_EMPTY(4,%r15),0x318(%r13)
+
+ /* Restore absolute zero pages */
+ lghi %r2,0
+ larl %r4,suspend_zero_pages
+ lg %r4,0(%r4)
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
+
/* Restore prefix register */
- spx 0x318(%r13)
+ spx __SF_EMPTY(%r15)
/* Activate DAT */
stosm __SF_EMPTY(%r15),0x04
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 13dd63f..c576232 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -12,49 +12,16 @@
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
#include <linux/ptrace.h>
-#include <linux/timer.h>
+#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/seq_file.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/kdebug.h>
-#include <linux/kallsyms.h>
-#include <linux/reboot.h>
-#include <linux/kprobes.h>
-#include <linux/bug.h>
-#include <linux/utsname.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <asm/mathemu.h>
-#include <asm/cpcmd.h>
-#include <asm/lowcore.h>
-#include <asm/debug.h>
-#include <asm/ipl.h>
#include "entry.h"
int show_unhandled_signals = 1;
-#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
-
-#ifndef CONFIG_64BIT
-#define LONG "%08lx "
-#define FOURLONG "%08lx %08lx %08lx %08lx\n"
-static int kstack_depth_to_print = 12;
-#else /* CONFIG_64BIT */
-#define LONG "%016lx "
-#define FOURLONG "%016lx %016lx %016lx %016lx\n"
-static int kstack_depth_to_print = 20;
-#endif /* CONFIG_64BIT */
-
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
@@ -72,215 +39,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
#endif
}
-/*
- * For show_trace we have tree different stack to consider:
- * - the panic stack which is used if the kernel stack has overflown
- * - the asynchronous interrupt stack (cpu related)
- * - the synchronous kernel stack (process related)
- * The stack trace can start at any of the three stack and can potentially
- * touch all of them. The order is: panic stack, async stack, sync stack.
- */
-static unsigned long
-__show_trace(unsigned long sp, unsigned long low, unsigned long high)
-{
- struct stack_frame *sf;
- struct pt_regs *regs;
-
- while (1) {
- sp = sp & PSW_ADDR_INSN;
- if (sp < low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
- print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
- /* Follow the backchain. */
- while (1) {
- low = sp;
- sp = sf->back_chain & PSW_ADDR_INSN;
- if (!sp)
- break;
- if (sp <= low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
- print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
- }
- /* Zero backchain detected, check for interrupt frame. */
- sp = (unsigned long) (sf + 1);
- if (sp <= low || sp > high - sizeof(*regs))
- return sp;
- regs = (struct pt_regs *) sp;
- printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
- print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
- low = sp;
- sp = regs->gprs[15];
- }
-}
-
-static void show_trace(struct task_struct *task, unsigned long *stack)
-{
- register unsigned long __r15 asm ("15");
- unsigned long sp;
-
- sp = (unsigned long) stack;
- if (!sp)
- sp = task ? task->thread.ksp : __r15;
- printk("Call Trace:\n");
-#ifdef CONFIG_CHECK_STACK
- sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
- S390_lowcore.panic_stack);
-#endif
- sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
- S390_lowcore.async_stack);
- if (task)
- __show_trace(sp, (unsigned long) task_stack_page(task),
- (unsigned long) task_stack_page(task) + THREAD_SIZE);
- else
- __show_trace(sp, S390_lowcore.thread_info,
- S390_lowcore.thread_info + THREAD_SIZE);
- if (!task)
- task = current;
- debug_show_held_locks(task);
-}
-
-void show_stack(struct task_struct *task, unsigned long *sp)
-{
- register unsigned long * __r15 asm ("15");
- unsigned long *stack;
- int i;
-
- if (!sp)
- stack = task ? (unsigned long *) task->thread.ksp : __r15;
- else
- stack = sp;
-
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
- break;
- if ((i * sizeof(long) % 32) == 0)
- printk("%s ", i == 0 ? "" : "\n");
- printk(LONG, *stack++);
- }
- printk("\n");
- show_trace(task, sp);
-}
-
-static void show_last_breaking_event(struct pt_regs *regs)
-{
-#ifdef CONFIG_64BIT
- printk("Last Breaking-Event-Address:\n");
- printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
- print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
-#endif
-}
-
-/*
- * The architecture-independent dump_stack generator
- */
-void dump_stack(void)
-{
- printk("CPU: %d %s %s %.*s\n",
- task_thread_info(current)->cpu, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
- current->comm, current->pid, current,
- (void *) current->thread.ksp);
- show_stack(NULL, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
-
-static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
-{
- return (regs->psw.mask & bits) / ((~bits + 1) & bits);
-}
-
-void show_registers(struct pt_regs *regs)
-{
- char *mode;
-
- mode = user_mode(regs) ? "User" : "Krnl";
- printk("%s PSW : %p %p",
- mode, (void *) regs->psw.mask,
- (void *) regs->psw.addr);
- print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
- printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
- "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
- mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
- mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
- mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
- mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
- mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
-#ifdef CONFIG_64BIT
- printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
-#endif
- printk("\n%s GPRS: " FOURLONG, mode,
- regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
- printk(" " FOURLONG,
- regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
- printk(" " FOURLONG,
- regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
- printk(" " FOURLONG,
- regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
-
- show_code(regs);
-}
-
-void show_regs(struct pt_regs *regs)
-{
- printk("CPU: %d %s %s %.*s\n",
- task_thread_info(current)->cpu, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
- current->comm, current->pid, current,
- (void *) current->thread.ksp);
- show_registers(regs);
- /* Show stack backtrace if pt_regs is from kernel mode */
- if (!user_mode(regs))
- show_trace(NULL, (unsigned long *) regs->gprs[15]);
- show_last_breaking_event(regs);
-}
-
-static DEFINE_SPINLOCK(die_lock);
-
-void die(struct pt_regs *regs, const char *str)
-{
- static int die_counter;
-
- oops_enter();
- lgr_info_log();
- debug_stop_all();
- console_verbose();
- spin_lock_irq(&die_lock);
- bust_spinlocks(1);
- printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
-#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
- printk("SMP ");
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
- printk("DEBUG_PAGEALLOC");
-#endif
- printk("\n");
- notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
- print_modules();
- show_regs(regs);
- bust_spinlocks(0);
- add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- spin_unlock_irq(&die_lock);
- if (in_interrupt())
- panic("Fatal exception in interrupt");
- if (panic_on_oops)
- panic("Fatal exception: panic_on_oops");
- oops_exit();
- do_exit(SIGSEGV);
-}
-
static inline void report_user_fault(struct pt_regs *regs, int signr)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index 2b29e62..53252d2d 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -117,7 +117,7 @@ TRACE_EVENT(kvm_s390_intercept_instruction,
__entry->instruction,
insn_to_mnemonic((unsigned char *)
&__entry->instruction,
- __entry->insn) ?
+ __entry->insn, sizeof(__entry->insn)) ?
"unknown" : __entry->insn)
);
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 479e9428..9d84a1f 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -458,12 +458,10 @@ static int __init cmm_init(void)
if (rc)
goto out_pm;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
- rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
- if (rc)
- goto out_kthread;
- return 0;
+ if (!IS_ERR(cmm_thread_ptr))
+ return 0;
-out_kthread:
+ rc = PTR_ERR(cmm_thread_ptr);
unregister_pm_notifier(&cmm_power_notifier);
out_pm:
unregister_oom_notifier(&cmm_oom_nb);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2fb9e63..047c3e4 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
int fault;
trans_exc_code = regs->int_parm_long;
- /* Protection exception is suppressing, decrement psw address. */
- regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
+ /*
+ * Protection exceptions are suppressing, decrement psw address.
+ * The exception to this rule are aborted transactions, for these
+ * the PSW already points to the correct location.
+ */
+ if (!(regs->int_code & 0x200))
+ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 49ce6bb2..9f9c315 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -63,10 +63,18 @@ static unsigned long __init setup_zero_pages(void)
break;
case 0x2097: /* z10 */
case 0x2098: /* z10 */
- default:
+ case 0x2817: /* z196 */
+ case 0x2818: /* z196 */
order = 2;
break;
+ case 0x2827: /* zEC12 */
+ default:
+ order = 5;
+ break;
}
+ /* Limit number of empty zero pages for small memory sizes */
+ if (order > 2 && totalram_pages <= 16384)
+ order = 2;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
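The reworked setup_zero_pages() above raises the default allocation to order 5, i.e. 2^5 = 32 pages or 128 KB of empty zero pages with the usual 4 KB PAGE_SIZE, up from order 2 (16 KB); the new totalram_pages <= 16384 check caps it back to order 2 on machines with at most 16384 * 4 KB = 64 MB of memory, so small configurations do not pay for the larger zero-page set.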
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d21040e..80adfbf 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -9,31 +9,25 @@
#include <asm/pgtable.h>
#include <asm/page.h>
+static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
+{
+ asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+ : [addr] "+a" (addr) : [skey] "d" (skey));
+ return addr;
+}
+
void storage_key_init_range(unsigned long start, unsigned long end)
{
- unsigned long boundary, function, size;
+ unsigned long boundary, size;
while (start < end) {
- if (MACHINE_HAS_EDAT2) {
- /* set storage keys for a 2GB frame */
- function = 0x22000 | PAGE_DEFAULT_KEY;
- size = 1UL << 31;
- boundary = (start + size) & ~(size - 1);
- if (boundary <= end) {
- do {
- start = pfmf(function, start);
- } while (start < boundary);
- continue;
- }
- }
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
- function = 0x21000 | PAGE_DEFAULT_KEY;
size = 1UL << 20;
boundary = (start + size) & ~(size - 1);
if (boundary <= end) {
do {
- start = pfmf(function, start);
+ start = sske_frame(start, PAGE_DEFAULT_KEY);
} while (start < boundary);
continue;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ae44d2a..bd954e9 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -379,75 +379,183 @@ out_unmap:
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
-/*
- * this function is assumed to be called with mmap_sem held
- */
-unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
- unsigned long *table, vmaddr, segment;
- struct mm_struct *mm;
- struct gmap_pgtable *mp;
- struct gmap_rmap *rmap;
- struct vm_area_struct *vma;
- struct page *page;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
+ unsigned long *table;
- current->thread.gmap_addr = address;
- mm = gmap->mm;
- /* Walk the gmap address space page table */
table = gmap->table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff);
+ return table;
+}
+
+/**
+ * __gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ * The mmap_sem of the mm that belongs to the address space must be held
+ * when this function gets called.
+ */
+unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, vmaddr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return PTR_ERR(segment_ptr);
/* Convert the gmap address to an mm address. */
- segment = *table;
- if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INV)) {
page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK);
} else if (segment & _SEGMENT_ENTRY_RO) {
vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
- vma = find_vma(mm, vmaddr);
- if (!vma || vma->vm_start > vmaddr)
- return -EFAULT;
-
- /* Walk the parent mm page table */
- pgd = pgd_offset(mm, vmaddr);
- pud = pud_alloc(mm, pgd, vmaddr);
- if (!pud)
- return -ENOMEM;
- pmd = pmd_alloc(mm, pud, vmaddr);
- if (!pmd)
- return -ENOMEM;
- if (!pmd_present(*pmd) &&
- __pte_alloc(mm, vma, pmd, vmaddr))
- return -ENOMEM;
- /* pmd now points to a valid segment table entry. */
- rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
- if (!rmap)
- return -ENOMEM;
- /* Link gmap segment table entry location to page table. */
- page = pmd_page(*pmd);
- mp = (struct gmap_pgtable *) page->index;
- rmap->entry = table;
- spin_lock(&mm->page_table_lock);
+ return vmaddr | (address & ~PMD_MASK);
+ }
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__gmap_translate);
+
+/**
+ * gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ */
+unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long rc;
+
+ down_read(&gmap->mm->mmap_sem);
+ rc = __gmap_translate(address, gmap);
+ up_read(&gmap->mm->mmap_sem);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_translate);
+
+static int gmap_connect_pgtable(unsigned long segment,
+ unsigned long *segment_ptr,
+ struct gmap *gmap)
+{
+ unsigned long vmaddr;
+ struct vm_area_struct *vma;
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct mm_struct *mm;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ mm = gmap->mm;
+ vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+ vma = find_vma(mm, vmaddr);
+ if (!vma || vma->vm_start > vmaddr)
+ return -EFAULT;
+ /* Walk the parent mm page table */
+ pgd = pgd_offset(mm, vmaddr);
+ pud = pud_alloc(mm, pgd, vmaddr);
+ if (!pud)
+ return -ENOMEM;
+ pmd = pmd_alloc(mm, pud, vmaddr);
+ if (!pmd)
+ return -ENOMEM;
+ if (!pmd_present(*pmd) &&
+ __pte_alloc(mm, vma, pmd, vmaddr))
+ return -ENOMEM;
+ /* pmd now points to a valid segment table entry. */
+ rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+ if (!rmap)
+ return -ENOMEM;
+ /* Link gmap segment table entry location to page table. */
+ page = pmd_page(*pmd);
+ mp = (struct gmap_pgtable *) page->index;
+ rmap->entry = segment_ptr;
+ spin_lock(&mm->page_table_lock);
+ if (*segment_ptr == segment) {
list_add(&rmap->list, &mp->mapper);
- spin_unlock(&mm->page_table_lock);
/* Set gmap segment table entry to page table. */
- *table = pmd_val(*pmd) & PAGE_MASK;
- return vmaddr | (address & ~PMD_MASK);
+ *segment_ptr = pmd_val(*pmd) & PAGE_MASK;
+ rmap = NULL;
+ }
+ spin_unlock(&mm->page_table_lock);
+ kfree(rmap);
+ return 0;
+}
+
+static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
+{
+ struct gmap_rmap *rmap, *next;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int flush;
+
+ flush = 0;
+ spin_lock(&mm->page_table_lock);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+ *rmap->entry =
+ _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+ list_del(&rmap->list);
+ kfree(rmap);
+ flush = 1;
+ }
+ spin_unlock(&mm->page_table_lock);
+ if (flush)
+ __tlb_flush_global();
+}
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int rc;
+
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return -EFAULT;
+ /* Convert the gmap address to an mm address. */
+ while (1) {
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INV)) {
+ /* Page table is present */
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ return mp->vmaddr | (address & ~PMD_MASK);
+ }
+ if (!(segment & _SEGMENT_ENTRY_RO))
+ /* Nothing mapped in the gmap address space. */
+ break;
+ rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
+ if (rc)
+ return rc;
}
return -EFAULT;
}
@@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
}
EXPORT_SYMBOL_GPL(gmap_discard);
-void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
-{
- struct gmap_rmap *rmap, *next;
- struct gmap_pgtable *mp;
- struct page *page;
- int flush;
-
- flush = 0;
- spin_lock(&mm->page_table_lock);
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- mp = (struct gmap_pgtable *) page->index;
- list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
- *rmap->entry =
- _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
- list_del(&rmap->list);
- kfree(rmap);
- flush = 1;
- }
- spin_unlock(&mm->page_table_lock);
- if (flush)
- __tlb_flush_global();
-}
-
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
unsigned long vmaddr)
{
@@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table)
{
}
-static inline void gmap_unmap_notifier(struct mm_struct *mm,
- unsigned long *table)
+static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
+ unsigned long *table)
{
}
@@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
unsigned int bit, mask;
if (mm_has_pgste(mm)) {
- gmap_unmap_notifier(mm, table);
+ gmap_disconnect_pgtable(mm, table);
return page_table_free_pgste(table);
}
/* Free 1K/2K page table fragment of a 4K page */
@@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
mm = tlb->mm;
if (mm_has_pgste(mm)) {
- gmap_unmap_notifier(mm, table);
+ gmap_disconnect_pgtable(mm, table);
table = (unsigned long *) (__pa(table) | FRAG_MASK);
tlb_remove_table(tlb, table);
return;
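The gmap_translate()/__gmap_translate() pair added above differ only in locking: per their comments, the double-underscore variant expects the caller to already hold mmap_sem of the backing mm, while the plain variant takes and releases it itself, and neither establishes missing page table entries. A minimal usage sketch, assuming a struct gmap and guest address obtained elsewhere (example_translate() is a hypothetical caller, not part of the patch):

	/* Sketch only: both calls return the user space address that
	 * corresponds to guest_addr, or -EFAULT if nothing is mapped. */
	static unsigned long example_translate(struct gmap *gmap,
					       unsigned long guest_addr)
	{
		unsigned long uaddr;

		/* unlocked caller: gmap_translate() takes mmap_sem itself */
		uaddr = gmap_translate(guest_addr, gmap);

		/* a caller already holding mmap_sem uses the __ variant */
		down_read(&gmap->mm->mmap_sem);
		uaddr = __gmap_translate(guest_addr, gmap);
		up_read(&gmap->mm->mmap_sem);

		return uaddr;
	}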
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0972e91..82f165f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -747,10 +747,9 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
- addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL);
+ addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
return;
- memset(addrs, 0, fp->len * sizeof(*addrs));
memset(&jit, 0, sizeof(cjit));
memset(&cjit, 0, sizeof(cjit));
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index f0f426a..086a2e3 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -2,5 +2,5 @@
# Makefile for the s390 PCI subsystem.
#
-obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \
- pci_sysfs.o pci_event.o pci_debug.o
+obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \
+ pci_event.o pci_debug.o pci_insn.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 27b4c17..e6f15b5 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -99,9 +99,6 @@ static int __read_mostly aisb_max;
static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;
-debug_info_t *pci_debug_msg_id;
-debug_info_t *pci_debug_err_id;
-
static inline int irq_to_msi_nr(unsigned int irq)
{
return irq & ZPCI_MSI_MASK;
@@ -179,7 +176,7 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
fib->aisb = (u64) bucket->aisb + aisb / 8;
fib->aisbo = aisb & ZPCI_MSI_MASK;
- rc = mpcifc_instr(req, fib);
+ rc = s390pci_mod_fc(req, fib);
pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
free_page((unsigned long) fib);
@@ -209,7 +206,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
fib->iota = args->iota;
fib->fmb_addr = args->fmb_addr;
- rc = mpcifc_instr(req, fib);
+ rc = s390pci_mod_fc(req, fib);
free_page((unsigned long) fib);
return rc;
}
@@ -249,10 +246,9 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
if (zdev->fmb)
return -EINVAL;
- zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL);
+ zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
if (!zdev->fmb)
return -ENOMEM;
- memset(zdev->fmb, 0, sizeof(*zdev->fmb));
WARN_ON((u64) zdev->fmb & 0xf);
args.fmb_addr = virt_to_phys(zdev->fmb);
@@ -284,12 +280,12 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
u64 data;
int rc;
- rc = pcilg_instr(&data, req, offset);
- data = data << ((8 - len) * 8);
- data = le64_to_cpu(data);
- if (!rc)
+ rc = s390pci_load(&data, req, offset);
+ if (!rc) {
+ data = data << ((8 - len) * 8);
+ data = le64_to_cpu(data);
*val = (u32) data;
- else
+ } else
*val = 0xffffffff;
return rc;
}
@@ -302,7 +298,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
data = cpu_to_le64(data);
data = data >> ((8 - len) * 8);
- rc = pcistg_instr(data, req, offset);
+ rc = s390pci_store(data, req, offset);
return rc;
}
@@ -409,20 +405,28 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
struct zpci_dev *zdev = get_zdev_by_bus(bus);
+ int ret;
if (!zdev || devfn != ZPCI_DEVFN)
- return 0;
- return zpci_cfg_load(zdev, where, val, size);
+ ret = -ENODEV;
+ else
+ ret = zpci_cfg_load(zdev, where, val, size);
+
+ return ret;
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
struct zpci_dev *zdev = get_zdev_by_bus(bus);
+ int ret;
if (!zdev || devfn != ZPCI_DEVFN)
- return 0;
- return zpci_cfg_store(zdev, where, val, size);
+ ret = -ENODEV;
+ else
+ ret = zpci_cfg_store(zdev, where, val, size);
+
+ return ret;
}
static struct pci_ops pci_root_ops = {
@@ -474,7 +478,7 @@ scan:
}
/* enable interrupts again */
- sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
/* check again to not lose initiative */
rmb();
@@ -596,19 +600,6 @@ static void zpci_map_resources(struct zpci_dev *zdev)
}
};
-static void zpci_unmap_resources(struct pci_dev *pdev)
-{
- resource_size_t len;
- int i;
-
- for (i = 0; i < PCI_BAR_COUNT; i++) {
- len = pci_resource_len(pdev, i);
- if (!len)
- continue;
- pci_iounmap(pdev, (void *) pdev->resource[i].start);
- }
-};
-
struct zpci_dev *zpci_alloc_device(void)
{
struct zpci_dev *zdev;
@@ -636,32 +627,6 @@ void zpci_free_device(struct zpci_dev *zdev)
kfree(zdev);
}
-/* Called on removal of pci_dev, leaves zpci and bus device */
-static void zpci_remove_device(struct pci_dev *pdev)
-{
- struct zpci_dev *zdev = get_zdev(pdev);
-
- dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
- zdev->state = ZPCI_FN_STATE_CONFIGURED;
- zpci_dma_exit_device(zdev);
- zpci_fmb_disable_device(zdev);
- zpci_sysfs_remove_device(&pdev->dev);
- zpci_unmap_resources(pdev);
- list_del(&zdev->entry); /* can be called from init */
- zdev->pdev = NULL;
-}
-
-static void zpci_scan_devices(void)
-{
- struct zpci_dev *zdev;
-
- mutex_lock(&zpci_list_lock);
- list_for_each_entry(zdev, &zpci_list, entry)
- if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
- zpci_scan_device(zdev);
- mutex_unlock(&zpci_list_lock);
-}
-
/*
* Too late for any s390 specific setup, since interrupts must be set up
* already which requires DMA setup too and the pci scan will access the
@@ -688,12 +653,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
return 0;
}
-void pcibios_disable_device(struct pci_dev *pdev)
-{
- zpci_remove_device(pdev);
- pdev->sysdata = NULL;
-}
-
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
return zpci_sysfs_add_device(&pdev->dev);
@@ -789,7 +748,7 @@ static int __init zpci_irq_init(void)
spin_lock_init(&bucket->lock);
/* set summary to 1 to be called every time for the ISC */
*zpci_irq_si = 1;
- sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
return 0;
out_ai:
@@ -872,7 +831,19 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
spin_unlock(&zpci_iomap_lock);
}
-static int zpci_create_device_bus(struct zpci_dev *zdev)
+int pcibios_add_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+
+ zdev->pdev = pdev;
+ zpci_debug_init_device(zdev);
+ zpci_fmb_enable_device(zdev);
+ zpci_map_resources(zdev);
+
+ return 0;
+}
+
+static int zpci_scan_bus(struct zpci_dev *zdev)
{
struct resource *res;
LIST_HEAD(resources);
@@ -909,8 +880,8 @@ static int zpci_create_device_bus(struct zpci_dev *zdev)
pci_add_resource(&resources, res);
}
- zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
- zdev, &resources);
+ zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
+ zdev, &resources);
if (!zdev->bus)
return -EIO;
@@ -959,6 +930,13 @@ out:
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
+int zpci_disable_device(struct zpci_dev *zdev)
+{
+ zpci_dma_exit_device(zdev);
+ return clp_disable_fh(zdev);
+}
+EXPORT_SYMBOL_GPL(zpci_disable_device);
+
int zpci_create_device(struct zpci_dev *zdev)
{
int rc;
@@ -967,9 +945,16 @@ int zpci_create_device(struct zpci_dev *zdev)
if (rc)
goto out;
- rc = zpci_create_device_bus(zdev);
+ if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
+ rc = zpci_enable_device(zdev);
+ if (rc)
+ goto out_free;
+
+ zdev->state = ZPCI_FN_STATE_ONLINE;
+ }
+ rc = zpci_scan_bus(zdev);
if (rc)
- goto out_bus;
+ goto out_disable;
mutex_lock(&zpci_list_lock);
list_add_tail(&zdev->entry, &zpci_list);
@@ -977,21 +962,12 @@ int zpci_create_device(struct zpci_dev *zdev)
hotplug_ops->create_slot(zdev);
mutex_unlock(&zpci_list_lock);
- if (zdev->state == ZPCI_FN_STATE_STANDBY)
- return 0;
-
- rc = zpci_enable_device(zdev);
- if (rc)
- goto out_start;
return 0;
-out_start:
- mutex_lock(&zpci_list_lock);
- list_del(&zdev->entry);
- if (hotplug_ops)
- hotplug_ops->remove_slot(zdev);
- mutex_unlock(&zpci_list_lock);
-out_bus:
+out_disable:
+ if (zdev->state == ZPCI_FN_STATE_ONLINE)
+ zpci_disable_device(zdev);
+out_free:
zpci_free_domain(zdev);
out:
return rc;
@@ -1016,15 +992,9 @@ int zpci_scan_device(struct zpci_dev *zdev)
goto out;
}
- zpci_debug_init_device(zdev);
- zpci_fmb_enable_device(zdev);
- zpci_map_resources(zdev);
pci_bus_add_devices(zdev->bus);
- /* now that pdev was added to the bus mark it as used */
- zdev->state = ZPCI_FN_STATE_ONLINE;
return 0;
-
out:
zpci_dma_exit_device(zdev);
clp_disable_fh(zdev);
@@ -1087,13 +1057,13 @@ void zpci_deregister_hp_ops(void)
}
EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
-unsigned int s390_pci_probe = 1;
+unsigned int s390_pci_probe;
EXPORT_SYMBOL_GPL(s390_pci_probe);
char * __init pcibios_setup(char *str)
{
- if (!strcmp(str, "off")) {
- s390_pci_probe = 0;
+ if (!strcmp(str, "on")) {
+ s390_pci_probe = 1;
return NULL;
}
return str;
@@ -1138,7 +1108,6 @@ static int __init pci_base_init(void)
if (rc)
goto out_find;
- zpci_scan_devices();
return 0;
out_find:
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index f339fe2f..bd34359 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
/*
@@ -144,6 +145,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
struct zpci_dev *zdev;
int rc;
+ zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
zdev = zpci_alloc_device();
if (IS_ERR(zdev))
return PTR_ERR(zdev);
@@ -204,8 +206,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
*fh = rrb->response.fh;
else {
- pr_err("Set PCI FN failed with response: %x cc: %d\n",
- rrb->response.hdr.rsp, rc);
+ zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
+ rrb->response.hdr.rsp);
rc = -EIO;
}
clp_free_block(rrb);
@@ -221,6 +223,8 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
if (!rc)
/* Success -> store enabled handle in zdev */
zdev->fh = fh;
+
+ zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
@@ -237,9 +241,8 @@ int clp_disable_fh(struct zpci_dev *zdev)
if (!rc)
/* Success -> store disabled handle in zdev */
zdev->fh = fh;
- else
- dev_err(&zdev->pdev->dev,
- "Failed to disable fn handle: 0x%x\n", fh);
+
+ zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index a5d07bc..771b823 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -11,12 +11,17 @@
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <asm/debug.h>
#include <asm/pci_dma.h>
static struct dentry *debugfs_root;
+debug_info_t *pci_debug_msg_id;
+EXPORT_SYMBOL_GPL(pci_debug_msg_id);
+debug_info_t *pci_debug_err_id;
+EXPORT_SYMBOL_GPL(pci_debug_err_id);
static char *pci_perf_names[] = {
/* hardware counters */
@@ -168,7 +173,6 @@ int __init zpci_debug_init(void)
return -EINVAL;
debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
debug_set_level(pci_debug_msg_id, 3);
- zpci_dbg("Debug view initialized\n");
/* error log */
pci_debug_err_id = debug_register("pci_error", 2, 1, 16);
@@ -176,7 +180,6 @@ int __init zpci_debug_init(void)
return -EINVAL;
debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
debug_set_level(pci_debug_err_id, 6);
- zpci_err("Debug view initialized\n");
debugfs_root = debugfs_create_dir("pci", NULL);
return 0;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index a547419..f8e69d5 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -169,8 +169,9 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
* needs to be redone!
*/
goto no_refresh;
- rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
- nr_pages * PAGE_SIZE);
+
+ rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
+ nr_pages * PAGE_SIZE);
no_refresh:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -268,8 +269,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
int flags = ZPCI_PTE_VALID;
dma_addr_t dma_addr;
- WARN_ON_ONCE(offset > PAGE_SIZE);
-
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
@@ -291,7 +290,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
- return dma_addr + offset;
+ return dma_addr + (offset & ~PAGE_MASK);
}
out_free:
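The s390_dma_map_pages() hunk above fixes the returned address for offsets larger than PAGE_SIZE: iommu_num_pages() already sizes the mapping to cover any whole pages contained in the offset, so only the within-page remainder may be added to the mapped base. With the usual 4 KB PAGE_SIZE, an offset of 0x1100 now contributes 0x1100 & ~PAGE_MASK = 0x100 to the returned dma address instead of the full 0x1100, which previously pointed one page past the intended location; the dropped WARN_ON_ONCE(offset > PAGE_SIZE) reflects that such offsets are now handled rather than flagged.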
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
new file mode 100644
index 0000000..22eeb9d
--- /dev/null
+++ b/arch/s390/pci/pci_insn.c
@@ -0,0 +1,202 @@
+/*
+ * s390 specific pci instructions
+ *
+ * Copyright IBM Corp. 2013
+ */
+
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <asm/pci_insn.h>
+#include <asm/processor.h>
+
+#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
+
+/* Modify PCI Function Controls */
+static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
+{
+ u8 cc;
+
+ asm volatile (
+ " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
+ : : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_mod_fc(u64 req, struct zpci_fib *fib)
+{
+ u8 cc, status;
+
+ do {
+ cc = __mpcifc(req, fib, &status);
+ if (cc == 2)
+ msleep(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
+ __func__, cc, status);
+ return (cc) ? -EIO : 0;
+}
+
+/* Refresh PCI Translations */
+static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
+{
+ register u64 __addr asm("2") = addr;
+ register u64 __range asm("3") = range;
+ u8 cc;
+
+ asm volatile (
+ " .insn rre,0xb9d30000,%[fn],%[addr]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [fn] "+d" (fn)
+ : [addr] "d" (__addr), "d" (__range)
+ : "cc");
+ *status = fn >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
+{
+ u8 cc, status;
+
+ do {
+ cc = __rpcit(fn, addr, range, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
+ __func__, cc, status, addr, range);
+ return (cc) ? -EIO : 0;
+}
+
+/* Set Interruption Controls */
+void set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+{
+ asm volatile (
+ " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
+ : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+}
+
+/* PCI Load */
+static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ int cc = -ENXIO;
+ u64 __data;
+
+ asm volatile (
+ " .insn rre,0xb9d20000,%[data],%[req]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
+ : "d" (__offset)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ if (!cc)
+ *data = __data;
+
+ return cc;
+}
+
+int s390pci_load(u64 *data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcilg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_load);
+
+/* PCI Store */
+static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rre,0xb9d00000,%[data],%[req]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [req] "+d" (__req)
+ : "d" (__offset), [data] "d" (data)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_store(u64 data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcistg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_store);
+
+/* PCI Store Block */
+static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
+{
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [req] "+d" (req)
+ : [offset] "d" (offset), [data] "Q" (*data)
+ : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_store_block(const u64 *data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcistb(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_store_block);
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
index 0297931..b097aed 100644
--- a/arch/s390/pci/pci_msi.c
+++ b/arch/s390/pci/pci_msi.c
@@ -18,8 +18,9 @@
/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;
-static unsigned int msihash_shift = 6;
-#define msi_hashfn(nr) hash_long(nr, msihash_shift)
+static const unsigned int msi_hash_bits = 8;
+#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
+#define msi_hashfn(nr) hash_long(nr, msi_hash_bits)
static DEFINE_SPINLOCK(msi_map_lock);
@@ -74,6 +75,7 @@ int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
map->irq = nr;
map->msi = msi;
zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
+ INIT_HLIST_NODE(&map->msi_chain);
pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
__func__, nr, msi_hashfn(nr));
@@ -125,11 +127,11 @@ int __init zpci_msihash_init(void)
{
unsigned int i;
- msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
+ msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
if (!msi_hash)
return -ENOMEM;
- for (i = 0; i < (1U << msihash_shift); i++)
+ for (i = 0; i < MSI_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(&msi_hash[i]);
return 0;
}
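The pci_msi.c changes above make the hash sizing self-consistent: with msi_hash_bits = 8, MSI_HASH_BUCKETS is 1 << 8 = 256, so the kmalloc() size, the initialization loop, and msi_hashfn() now all refer to the same 256 buckets, whereas before the table was allocated with 256 entries but only 1 << 6 = 64 of them were initialized and addressed by hash_long(). The added INIT_HLIST_NODE() presumably puts the map's msi_chain node into a defined state before it is linked into a bucket.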