Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                     | 121
-rw-r--r--  arch/s390/appldata/appldata_mem.c     |   2
-rw-r--r--  arch/s390/appldata/appldata_net_sum.c |   2
-rw-r--r--  arch/s390/appldata/appldata_os.c      |   2
-rw-r--r--  arch/s390/hypfs/hypfs_vm.c            |   2
-rw-r--r--  arch/s390/include/asm/barrier.h       |   9
-rw-r--r--  arch/s390/include/asm/clp.h           |   2
-rw-r--r--  arch/s390/include/asm/cpu_mf.h        |   4
-rw-r--r--  arch/s390/include/asm/dma-mapping.h   |   8
-rw-r--r--  arch/s390/include/asm/irq.h           |   1
-rw-r--r--  arch/s390/include/asm/kvm_host.h      |  15
-rw-r--r--  arch/s390/include/asm/mman.h          |   4
-rw-r--r--  arch/s390/include/asm/page.h          |  22
-rw-r--r--  arch/s390/include/asm/pci.h           |  11
-rw-r--r--  arch/s390/include/asm/pgtable.h       | 144
-rw-r--r--  arch/s390/include/asm/sclp.h          |   1
-rw-r--r--  arch/s390/include/asm/setup.h         |  22
-rw-r--r--  arch/s390/include/asm/signal.h        |  19
-rw-r--r--  arch/s390/include/asm/timex.h         |  18
-rw-r--r--  arch/s390/include/asm/unistd.h        |   3
-rw-r--r--  arch/s390/include/uapi/asm/socket.h   |   4
-rw-r--r--  arch/s390/kernel/compat_linux.c       |  80
-rw-r--r--  arch/s390/kernel/compat_linux.h       |  34
-rw-r--r--  arch/s390/kernel/compat_signal.c      | 134
-rw-r--r--  arch/s390/kernel/compat_wrapper.S     | 144
-rw-r--r--  arch/s390/kernel/debug.c              |   2
-rw-r--r--  arch/s390/kernel/dis.c                |   1
-rw-r--r--  arch/s390/kernel/early.c              |   8
-rw-r--r--  arch/s390/kernel/entry.h              |   4
-rw-r--r--  arch/s390/kernel/entry64.S            |  10
-rw-r--r--  arch/s390/kernel/ipl.c                |  16
-rw-r--r--  arch/s390/kernel/irq.c                |   1
-rw-r--r--  arch/s390/kernel/module.c             | 143
-rw-r--r--  arch/s390/kernel/nmi.c                |   2
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c       |  13
-rw-r--r--  arch/s390/kernel/signal.c             |  56
-rw-r--r--  arch/s390/kernel/smp.c                |  10
-rw-r--r--  arch/s390/kernel/syscalls.S           |  46
-rw-r--r--  arch/s390/kernel/time.c               |  29
-rw-r--r--  arch/s390/kernel/traps.c              |   2
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S        |   4
-rw-r--r--  arch/s390/kernel/vtime.c              |   8
-rw-r--r--  arch/s390/kvm/Kconfig                 |   2
-rw-r--r--  arch/s390/kvm/intercept.c             |  45
-rw-r--r--  arch/s390/kvm/interrupt.c             | 270
-rw-r--r--  arch/s390/kvm/kvm-s390.c              |  52
-rw-r--r--  arch/s390/kvm/kvm-s390.h              |  46
-rw-r--r--  arch/s390/kvm/priv.c                  | 316
-rw-r--r--  arch/s390/kvm/sigp.c                  |  10
-rw-r--r--  arch/s390/kvm/trace-s390.h            |  26
-rw-r--r--  arch/s390/lib/delay.c                 |  16
-rw-r--r--  arch/s390/lib/uaccess_pt.c            |   2
-rw-r--r--  arch/s390/mm/init.c                   |  12
-rw-r--r--  arch/s390/mm/mmap.c                   |   9
-rw-r--r--  arch/s390/mm/pageattr.c               |   2
-rw-r--r--  arch/s390/mm/vmem.c                   |  28
-rw-r--r--  arch/s390/net/bpf_jit_comp.c          |  21
-rw-r--r--  arch/s390/pci/pci.c                   |  35
-rw-r--r--  arch/s390/pci/pci_clp.c               |  14
59 files changed, 1184 insertions, 885 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b5ea38c..f09ae7b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -60,86 +60,88 @@ config PCI_QUIRKS
config S390
def_bool y
- select USE_GENERIC_SMP_HELPERS if SMP
- select GENERIC_CPU_DEVICES if !SMP
- select HAVE_SYSCALL_WRAPPERS
- select HAVE_FUNCTION_TRACER
- select HAVE_FUNCTION_TRACE_MCOUNT_TEST
- select HAVE_FTRACE_MCOUNT_RECORD
- select HAVE_C_RECORDMCOUNT
- select HAVE_SYSCALL_TRACEPOINTS
- select SYSCTL_EXCEPTION_TRACE
- select HAVE_DYNAMIC_FTRACE
- select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_OPROFILE
- select HAVE_KPROBES
- select HAVE_KRETPROBES
- select HAVE_KVM if 64BIT
- select HAVE_ARCH_TRACEHOOK
- select INIT_ALL_POSSIBLE
- select HAVE_IRQ_WORK
- select HAVE_PERF_EVENTS
- select ARCH_HAVE_NMI_SAFE_CMPXCHG
- select HAVE_DEBUG_KMEMLEAK
- select HAVE_KERNEL_GZIP
- select HAVE_KERNEL_BZIP2
- select HAVE_KERNEL_LZMA
- select HAVE_KERNEL_LZO
- select HAVE_KERNEL_XZ
- select HAVE_ARCH_MUTEX_CPU_RELAX
- select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
- select HAVE_BPF_JIT if 64BIT && PACK_STACK
- select ARCH_SAVE_PAGE_KEYS if HIBERNATION
- select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- select HAVE_MEMBLOCK
- select HAVE_MEMBLOCK_NODE_MAP
- select HAVE_CMPXCHG_LOCAL
- select HAVE_CMPXCHG_DOUBLE
- select HAVE_ALIGNED_STRUCT_PAGE if SLUB
- select HAVE_VIRT_CPU_ACCOUNTING
- select VIRT_CPU_ACCOUNTING
select ARCH_DISCARD_MEMBLOCK
- select BUILDTIME_EXTABLE_SORT
- select ARCH_INLINE_SPIN_TRYLOCK
- select ARCH_INLINE_SPIN_TRYLOCK_BH
- select ARCH_INLINE_SPIN_LOCK
- select ARCH_INLINE_SPIN_LOCK_BH
- select ARCH_INLINE_SPIN_LOCK_IRQ
- select ARCH_INLINE_SPIN_LOCK_IRQSAVE
- select ARCH_INLINE_SPIN_UNLOCK
- select ARCH_INLINE_SPIN_UNLOCK_BH
- select ARCH_INLINE_SPIN_UNLOCK_IRQ
- select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
- select ARCH_INLINE_READ_TRYLOCK
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
select ARCH_INLINE_READ_LOCK_BH
select ARCH_INLINE_READ_LOCK_IRQ
select ARCH_INLINE_READ_LOCK_IRQSAVE
+ select ARCH_INLINE_READ_TRYLOCK
select ARCH_INLINE_READ_UNLOCK
select ARCH_INLINE_READ_UNLOCK_BH
select ARCH_INLINE_READ_UNLOCK_IRQ
select ARCH_INLINE_READ_UNLOCK_IRQRESTORE
- select ARCH_INLINE_WRITE_TRYLOCK
+ select ARCH_INLINE_SPIN_LOCK
+ select ARCH_INLINE_SPIN_LOCK_BH
+ select ARCH_INLINE_SPIN_LOCK_IRQ
+ select ARCH_INLINE_SPIN_LOCK_IRQSAVE
+ select ARCH_INLINE_SPIN_TRYLOCK
+ select ARCH_INLINE_SPIN_TRYLOCK_BH
+ select ARCH_INLINE_SPIN_UNLOCK
+ select ARCH_INLINE_SPIN_UNLOCK_BH
+ select ARCH_INLINE_SPIN_UNLOCK_IRQ
+ select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
select ARCH_INLINE_WRITE_LOCK
select ARCH_INLINE_WRITE_LOCK_BH
select ARCH_INLINE_WRITE_LOCK_IRQ
select ARCH_INLINE_WRITE_LOCK_IRQSAVE
+ select ARCH_INLINE_WRITE_TRYLOCK
select ARCH_INLINE_WRITE_UNLOCK
select ARCH_INLINE_WRITE_UNLOCK_BH
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
- select HAVE_UID16 if 32BIT
+ select ARCH_SAVE_PAGE_KEYS if HIBERNATION
select ARCH_WANT_IPC_PARSE_VERSION
- select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
+ select BUILDTIME_EXTABLE_SORT
+ select CLONE_BACKWARDS2
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CPU_DEVICES if !SMP
+ select GENERIC_KERNEL_THREAD
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL_OLD
- select GENERIC_CLOCKEVENTS
- select KTIME_SCALAR if 32BIT
+ select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+ select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+ select HAVE_ARCH_MUTEX_CPU_RELAX
select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
+ select HAVE_BPF_JIT if 64BIT && PACK_STACK
+ select HAVE_CMPXCHG_DOUBLE
+ select HAVE_CMPXCHG_LOCAL
+ select HAVE_C_RECORDMCOUNT
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZMA
+ select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_XZ
+ select HAVE_KPROBES
+ select HAVE_KRETPROBES
+ select HAVE_KVM if 64BIT
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_OPROFILE
+ select HAVE_PERF_EVENTS
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_SYSCALL_WRAPPERS
+ select HAVE_UID16 if 32BIT
+ select HAVE_VIRT_CPU_ACCOUNTING
+ select INIT_ALL_POSSIBLE
+ select KTIME_SCALAR if 32BIT
select MODULES_USE_ELF_RELA
- select CLONE_BACKWARDS2
+ select OLD_SIGSUSPEND3
+ select OLD_SIGACTION
+ select SYSCTL_EXCEPTION_TRACE
+ select USE_GENERIC_SMP_HELPERS if SMP
+ select VIRT_CPU_ACCOUNTING
config SCHED_OMIT_FRAME_POINTER
def_bool y
@@ -249,6 +251,7 @@ config COMPAT
depends on 64BIT
select COMPAT_BINFMT_ELF if BINFMT_ELF
select ARCH_WANT_OLD_COMPAT_IPC
+ select COMPAT_OLD_SIGACTION
help
Select this option if you want to enable your system kernel to
handle system-calls from ELF binaries for 31 bit ESA. This option
@@ -718,8 +721,8 @@ source "arch/s390/kvm/Kconfig"
config S390_GUEST
def_bool y
- prompt "s390 support for virtio devices (EXPERIMENTAL)"
- depends on 64BIT && EXPERIMENTAL
+ prompt "s390 support for virtio devices"
+ depends on 64BIT
select VIRTUALIZATION
select VIRTIO
select VIRTIO_CONSOLE
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 02d9a1c..7ef60b5 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -108,7 +108,7 @@ static void appldata_get_mem_data(void *data)
mem_data->totalswap = P2K(val.totalswap);
mem_data->freeswap = P2K(val.freeswap);
- mem_data->timestamp = get_clock();
+ mem_data->timestamp = get_tod_clock();
mem_data->sync_count_2++;
}
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 1370e35..2d224b9 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -111,7 +111,7 @@ static void appldata_get_net_sum_data(void *data)
net_data->tx_dropped = tx_dropped;
net_data->collisions = collisions;
- net_data->timestamp = get_clock();
+ net_data->timestamp = get_tod_clock();
net_data->sync_count_2++;
}
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 87521ba..de8e2b3 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -156,7 +156,7 @@ static void appldata_get_os_data(void *data)
}
ops.size = new_size;
}
- os_data->timestamp = get_clock();
+ os_data->timestamp = get_tod_clock();
os_data->sync_count_2++;
}
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 4f6afaa..f364dcf 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -245,7 +245,7 @@ static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
if (IS_ERR(d2fc))
return PTR_ERR(d2fc);
- get_clock_ext(d2fc->hdr.tod_ext);
+ get_tod_clock_ext(d2fc->hdr.tod_ext);
d2fc->hdr.len = count * sizeof(struct diag2fc_data);
d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
d2fc->hdr.count = count;
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 10a5088..16760ee 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -13,15 +13,12 @@
* to devices.
*/
-static inline void mb(void)
-{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- /* Fast-BCR without checkpoint synchronization */
- asm volatile("bcr 14,0" : : : "memory");
+/* Fast-BCR without checkpoint synchronization */
+#define mb() do { asm volatile("bcr 14,0" : : : "memory"); } while (0)
#else
- asm volatile("bcr 15,0" : : : "memory");
+#define mb() do { asm volatile("bcr 15,0" : : : "memory"); } while (0)
#endif
-}
#define rmb() mb()
#define wmb() mb()
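
Note on the barrier.h change above: mb() turns from an inline function into a preprocessor macro, with the barrier instruction still chosen at build time. On CONFIG_HAVE_MARCH_Z196_FEATURES builds it expands to the fast BCR ("bcr 14,0", serialization without checkpoint synchronization), otherwise to the classic "bcr 15,0". A minimal usage sketch, not taken from this patch:

    /* Publish data, then raise a flag; mb() keeps the two stores
     * ordered as seen by other CPUs. */
    static int data, ready;

    void publish(int v)
    {
        data = v;
        mb();       /* expands to bcr 14,0 or bcr 15,0 */
        ready = 1;
    }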
diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h
index 6c3aecc..a0e71a5 100644
--- a/arch/s390/include/asm/clp.h
+++ b/arch/s390/include/asm/clp.h
@@ -2,7 +2,7 @@
#define _ASM_S390_CLP_H
/* CLP common request & response block size */
-#define CLP_BLK_SIZE (PAGE_SIZE * 2)
+#define CLP_BLK_SIZE PAGE_SIZE
struct clp_req_hdr {
u16 len;
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 35f0020..f1eddd1 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -34,12 +34,12 @@
/* CPU measurement facility support */
static inline int cpum_cf_avail(void)
{
- return MACHINE_HAS_SPP && test_facility(67);
+ return MACHINE_HAS_LPP && test_facility(67);
}
static inline int cpum_sf_avail(void)
{
- return MACHINE_HAS_SPP && test_facility(68);
+ return MACHINE_HAS_LPP && test_facility(68);
}
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 8a32f7d..9411db65 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -19,9 +19,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
}
extern int dma_set_mask(struct device *dev, u64 mask);
-extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction);
+
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 7def773..87c17bf 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -41,6 +41,7 @@ enum interruption_class {
IRQIO_CSC,
IRQIO_PCI,
IRQIO_MSI,
+ IRQIO_VIR,
NMI_NMI,
CPU_RST,
NR_ARCH_IRQS
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index b784154..16bd5d1 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -20,9 +20,7 @@
#include <asm/cpu.h>
#define KVM_MAX_VCPUS 64
-#define KVM_MEMORY_SLOTS 32
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 32
struct sca_entry {
atomic_t scn;
@@ -76,8 +74,11 @@ struct kvm_s390_sie_block {
__u64 epoch; /* 0x0038 */
__u8 reserved40[4]; /* 0x0040 */
#define LCTL_CR0 0x8000
+#define LCTL_CR6 0x0200
+#define LCTL_CR14 0x0002
__u16 lctl; /* 0x0044 */
__s16 icpua; /* 0x0046 */
+#define ICTL_LPSW 0x00400000
__u32 ictl; /* 0x0048 */
__u32 eca; /* 0x004c */
__u8 icptcode; /* 0x0050 */
@@ -127,6 +128,7 @@ struct kvm_vcpu_stat {
u32 deliver_prefix_signal;
u32 deliver_restart_signal;
u32 deliver_program_int;
+ u32 deliver_io_int;
u32 exit_wait_state;
u32 instruction_stidp;
u32 instruction_spx;
@@ -187,6 +189,11 @@ struct kvm_s390_emerg_info {
__u16 code;
};
+struct kvm_s390_mchk_info {
+ __u64 cr14;
+ __u64 mcic;
+};
+
struct kvm_s390_interrupt_info {
struct list_head list;
u64 type;
@@ -197,6 +204,7 @@ struct kvm_s390_interrupt_info {
struct kvm_s390_emerg_info emerg;
struct kvm_s390_extcall_info extcall;
struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_mchk_info mchk;
};
};
@@ -254,6 +262,7 @@ struct kvm_arch{
debug_info_t *dbf;
struct kvm_s390_float_interrupt float_int;
struct gmap *gmap;
+ int css_support;
};
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 0e47a57..9977e08 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -9,7 +9,7 @@
#include <uapi/asm/mman.h>
#if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
-int s390_mmap_check(unsigned long addr, unsigned long len);
-#define arch_mmap_check(addr,len,flags) s390_mmap_check(addr,len)
+int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
+#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags)
#endif
#endif /* __S390_MMAN_H__ */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a86ad40840..75ce9b0 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -155,28 +155,6 @@ static inline int page_reset_referenced(unsigned long addr)
#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
/*
- * Test and clear dirty bit in storage key.
- * We can't clear the changed bit atomically. This is a potential
- * race against modification of the referenced bit. This function
- * should therefore only be called if it is not mapped in any
- * address space.
- *
- * Note that the bit gets set whenever page content is changed. That means
- * also when the page is modified by DMA or from inside the kernel.
- */
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
-{
- unsigned char skey;
-
- skey = page_get_storage_key(pfn << PAGE_SHIFT);
- if (!(skey & _PAGE_CHANGED))
- return 0;
- page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
- return 1;
-}
-
-/*
* Test and clear referenced bit in storage key.
*/
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index b1fa93c..05333b7 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -160,9 +160,14 @@ void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
int zpci_msihash_init(void);
void zpci_msihash_exit(void);
+#ifdef CONFIG_PCI
/* Error handling and recovery */
void zpci_event_error(void *);
void zpci_event_availability(void *);
+#else /* CONFIG_PCI */
+static inline void zpci_event_error(void *e) {}
+static inline void zpci_event_availability(void *e) {}
+#endif /* CONFIG_PCI */
/* Helpers */
struct zpci_dev *get_zdev(struct pci_dev *);
@@ -180,8 +185,10 @@ void zpci_dma_exit(void);
/* Hotplug */
extern struct mutex zpci_list_lock;
extern struct list_head zpci_list;
-extern struct pci_hp_callback_ops hotplug_ops;
-extern unsigned int pci_probe;
+extern unsigned int s390_pci_probe;
+
+void zpci_register_hp_ops(struct pci_hp_callback_ops *);
+void zpci_deregister_hp_ops(void);
/* FMB */
int zpci_fmb_enable_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c1d7930..97de120 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -29,6 +29,7 @@
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
+#include <linux/page-flags.h>
#include <asm/bug.h>
#include <asm/page.h>
@@ -221,13 +222,15 @@ extern unsigned long MODULES_END;
/* Software bits in the page table entry */
#define _PAGE_SWT 0x001 /* SW pte type bit t */
#define _PAGE_SWX 0x002 /* SW pte type bit x */
-#define _PAGE_SWC 0x004 /* SW pte changed bit (for KVM) */
-#define _PAGE_SWR 0x008 /* SW pte referenced bit (for KVM) */
-#define _PAGE_SPECIAL 0x010 /* SW associated with special page */
+#define _PAGE_SWC 0x004 /* SW pte changed bit */
+#define _PAGE_SWR 0x008 /* SW pte referenced bit */
+#define _PAGE_SWW 0x010 /* SW pte write bit */
+#define _PAGE_SPECIAL 0x020 /* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL
/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
+ _PAGE_SWC | _PAGE_SWR)
/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY 0x400
@@ -321,6 +324,7 @@ extern unsigned long MODULES_END;
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
+#define _REGION_ENTRY_RO 0x200 /* region protection bit */
#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
@@ -382,9 +386,11 @@ extern unsigned long MODULES_END;
*/
#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
-#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
+#define PAGE_RW __pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
+#define PAGE_RWC __pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)
-#define PAGE_KERNEL PAGE_RW
+#define PAGE_KERNEL PAGE_RWC
+#define PAGE_SHARED PAGE_KERNEL
#define PAGE_COPY PAGE_RO
/*
@@ -631,23 +637,23 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
/* Clear page changed & referenced bit in the storage key */
if (bits & _PAGE_CHANGED)
- page_set_storage_key(address, skey ^ bits, 1);
+ page_set_storage_key(address, skey ^ bits, 0);
else if (bits)
page_reset_referenced(address);
/* Transfer page changed & referenced bit to guest bits in pgste */
pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
/* Get host changed & referenced bits from pgste */
bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
- /* Clear host bits in pgste. */
+ /* Transfer page changed & referenced bit to kvm user bits */
+ pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */
+ /* Clear relevant host bits in pgste. */
pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
/* Copy page access key and fetch protection bit to pgste */
pgste_val(pgste) |=
(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
- /* Transfer changed and referenced to kvm user bits */
- pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */
- /* Transfer changed & referenced to pte sofware bits */
- pte_val(*ptep) |= bits << 1; /* _PAGE_SWR & _PAGE_SWC */
+ /* Transfer referenced bit to pte */
+ pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
#endif
return pgste;
@@ -660,20 +666,25 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
if (!pte_present(*ptep))
return pgste;
+ /* Get referenced bit from storage key */
young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
- /* Transfer page referenced bit to pte software bit (host view) */
- if (young || (pgste_val(pgste) & RCP_HR_BIT))
+ if (young)
+ pgste_val(pgste) |= RCP_GR_BIT;
+ /* Get host referenced bit from pgste */
+ if (pgste_val(pgste) & RCP_HR_BIT) {
+ pgste_val(pgste) &= ~RCP_HR_BIT;
+ young = 1;
+ }
+ /* Transfer referenced bit to kvm user bits and pte */
+ if (young) {
+ pgste_val(pgste) |= KVM_UR_BIT;
pte_val(*ptep) |= _PAGE_SWR;
- /* Clear host referenced bit in pgste. */
- pgste_val(pgste) &= ~RCP_HR_BIT;
- /* Transfer page referenced bit to guest bit in pgste */
- pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
+ }
#endif
return pgste;
-
}
-static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
+static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
unsigned long address;
@@ -687,10 +698,23 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
/* Set page access key and fetch protection bit from pgste */
nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
if (okey != nkey)
- page_set_storage_key(address, nkey, 1);
+ page_set_storage_key(address, nkey, 0);
#endif
}
+static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
+{
+ if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
+ /*
+ * Without enhanced suppression-on-protection force
+ * the dirty bit on for all writable ptes.
+ */
+ pte_val(entry) |= _PAGE_SWC;
+ pte_val(entry) &= ~_PAGE_RO;
+ }
+ *ptep = entry;
+}
+
/**
* struct gmap_struct - guest address space
* @mm: pointer to the parent mm_struct
@@ -749,11 +773,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
- pgste_set_pte(ptep, pgste, entry);
- *ptep = entry;
+ pgste_set_key(ptep, pgste, entry);
+ pgste_set_pte(ptep, entry);
pgste_set_unlock(ptep, pgste);
- } else
+ } else {
+ if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
+ pte_val(entry) |= _PAGE_CO;
*ptep = entry;
+ }
}
/*
@@ -762,16 +789,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
*/
static inline int pte_write(pte_t pte)
{
- return (pte_val(pte) & _PAGE_RO) == 0;
+ return (pte_val(pte) & _PAGE_SWW) != 0;
}
static inline int pte_dirty(pte_t pte)
{
-#ifdef CONFIG_PGSTE
- if (pte_val(pte) & _PAGE_SWC)
- return 1;
-#endif
- return 0;
+ return (pte_val(pte) & _PAGE_SWC) != 0;
}
static inline int pte_young(pte_t pte)
@@ -821,11 +844,14 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) &= _PAGE_CHG_MASK;
pte_val(pte) |= pgprot_val(newprot);
+ if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
+ pte_val(pte) &= ~_PAGE_RO;
return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
+ pte_val(pte) &= ~_PAGE_SWW;
/* Do not clobber _PAGE_TYPE_NONE pages! */
if (!(pte_val(pte) & _PAGE_INVALID))
pte_val(pte) |= _PAGE_RO;
@@ -834,20 +860,26 @@ static inline pte_t pte_wrprotect(pte_t pte)
static inline pte_t pte_mkwrite(pte_t pte)
{
- pte_val(pte) &= ~_PAGE_RO;
+ pte_val(pte) |= _PAGE_SWW;
+ if (pte_val(pte) & _PAGE_SWC)
+ pte_val(pte) &= ~_PAGE_RO;
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
-#ifdef CONFIG_PGSTE
pte_val(pte) &= ~_PAGE_SWC;
-#endif
+ /* Do not clobber _PAGE_TYPE_NONE pages! */
+ if (!(pte_val(pte) & _PAGE_INVALID))
+ pte_val(pte) |= _PAGE_RO;
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
+ pte_val(pte) |= _PAGE_SWC;
+ if (pte_val(pte) & _PAGE_SWW)
+ pte_val(pte) &= ~_PAGE_RO;
return pte;
}
@@ -885,10 +917,10 @@ static inline pte_t pte_mkhuge(pte_t pte)
pte_val(pte) |= _SEGMENT_ENTRY_INV;
}
/*
- * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
- * table entry.
+ * Clear SW pte bits, there are no SW bits in a segment table entry.
*/
- pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
+ pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC |
+ _PAGE_SWR | _PAGE_SWW);
/*
* Also set the change-override bit because we don't need dirty bit
* tracking for hugetlbfs pages.
@@ -1040,9 +1072,11 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
unsigned long address,
pte_t *ptep, pte_t pte)
{
- *ptep = pte;
- if (mm_has_pgste(mm))
+ if (mm_has_pgste(mm)) {
+ pgste_set_pte(ptep, pte);
pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
+ } else
+ *ptep = pte;
}
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
@@ -1110,10 +1144,13 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
if (!mm_exclusive(mm))
__ptep_ipte(address, ptep);
- *ptep = pte_wrprotect(pte);
+ pte = pte_wrprotect(pte);
- if (mm_has_pgste(mm))
+ if (mm_has_pgste(mm)) {
+ pgste_set_pte(ptep, pte);
pgste_set_unlock(ptep, pgste);
+ } else
+ *ptep = pte;
}
return pte;
}
@@ -1131,10 +1168,12 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
pgste = pgste_get_lock(ptep);
__ptep_ipte(address, ptep);
- *ptep = entry;
- if (mm_has_pgste(vma->vm_mm))
+ if (mm_has_pgste(vma->vm_mm)) {
+ pgste_set_pte(ptep, entry);
pgste_set_unlock(ptep, pgste);
+ } else
+ *ptep = entry;
return 1;
}
@@ -1152,8 +1191,13 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
unsigned long physpage = page_to_phys(page);
+ pte_t __pte = mk_pte_phys(physpage, pgprot);
- return mk_pte_phys(physpage, pgprot);
+ if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
+ pte_val(__pte) |= _PAGE_SWC;
+ pte_val(__pte) &= ~_PAGE_RO;
+ }
+ return __pte;
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
@@ -1245,6 +1289,8 @@ static inline int pmd_trans_splitting(pmd_t pmd)
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry)
{
+ if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+ pmd_val(entry) |= _SEGMENT_ENTRY_CO;
*pmdp = entry;
}
@@ -1365,6 +1411,18 @@ static inline void pmdp_invalidate(struct vm_area_struct *vma,
__pmd_idte(address, pmdp);
}
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+
+ if (pmd_write(pmd)) {
+ __pmd_idte(address, pmdp);
+ set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
+ }
+}
+
static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
pmd_t __pmd;
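
Note on the pgtable.h changes above: dirty and referenced state moves into the software pte bits _PAGE_SWC/_PAGE_SWR, and the new _PAGE_SWW bit records write permission. A writable but clean pte now stays hardware read-only, so the first store faults, and the fault path is what marks the pte dirty and drops the protection. A rough sketch of that life cycle, using only helpers defined in this file:

    pte_t pte = mk_pte_phys(physpage, PAGE_RO); /* clean, read-only  */
    pte = pte_mkwrite(pte);  /* _PAGE_SWW set, still HW read-only    */
    /* first store faults; the fault path then effectively does:     */
    pte = pte_mkdirty(pte);  /* _PAGE_SWC set, _PAGE_RO cleared      */
    pte = pte_mkclean(pte);  /* _PAGE_SWC cleared, _PAGE_RO re-set   */

On machines without ESOP, pgste_set_pte() instead forces the dirty bit on for all writable ptes up front, as the comment in the hunk notes.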
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 8337886..06a1361 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -46,7 +46,6 @@ int sclp_cpu_deconfigure(u8 cpu);
void sclp_facilities_detect(void);
unsigned long long sclp_get_rnmax(void);
unsigned long long sclp_get_rzm(void);
-u8 sclp_get_fac85(void);
int sclp_sdias_blk_count(void);
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index f69f76b..ff67d73 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -64,17 +64,18 @@ extern unsigned int s390_user_mode;
#define MACHINE_FLAG_VM (1UL << 0)
#define MACHINE_FLAG_IEEE (1UL << 1)
-#define MACHINE_FLAG_CSP (1UL << 3)
-#define MACHINE_FLAG_MVPG (1UL << 4)
-#define MACHINE_FLAG_DIAG44 (1UL << 5)
-#define MACHINE_FLAG_IDTE (1UL << 6)
-#define MACHINE_FLAG_DIAG9C (1UL << 7)
-#define MACHINE_FLAG_MVCOS (1UL << 8)
-#define MACHINE_FLAG_KVM (1UL << 9)
+#define MACHINE_FLAG_CSP (1UL << 2)
+#define MACHINE_FLAG_MVPG (1UL << 3)
+#define MACHINE_FLAG_DIAG44 (1UL << 4)
+#define MACHINE_FLAG_IDTE (1UL << 5)
+#define MACHINE_FLAG_DIAG9C (1UL << 6)
+#define MACHINE_FLAG_MVCOS (1UL << 7)
+#define MACHINE_FLAG_KVM (1UL << 8)
+#define MACHINE_FLAG_ESOP (1UL << 9)
#define MACHINE_FLAG_EDAT1 (1UL << 10)
#define MACHINE_FLAG_EDAT2 (1UL << 11)
#define MACHINE_FLAG_LPAR (1UL << 12)
-#define MACHINE_FLAG_SPP (1UL << 13)
+#define MACHINE_FLAG_LPP (1UL << 13)
#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
#define MACHINE_FLAG_TE (1UL << 15)
#define MACHINE_FLAG_RRBM (1UL << 16)
@@ -84,6 +85,7 @@ extern unsigned int s390_user_mode;
#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
+#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1
#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1
@@ -96,7 +98,7 @@ extern unsigned int s390_user_mode;
#define MACHINE_HAS_MVCOS (0)
#define MACHINE_HAS_EDAT1 (0)
#define MACHINE_HAS_EDAT2 (0)
-#define MACHINE_HAS_SPP (0)
+#define MACHINE_HAS_LPP (0)
#define MACHINE_HAS_TOPOLOGY (0)
#define MACHINE_HAS_TE (0)
#define MACHINE_HAS_RRBM (0)
@@ -109,7 +111,7 @@ extern unsigned int s390_user_mode;
#define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
-#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
+#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h
index db7ddfa..abf9e57 100644
--- a/arch/s390/include/asm/signal.h
+++ b/arch/s390/include/asm/signal.h
@@ -21,22 +21,5 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-struct old_sigaction {
- __sighandler_t sa_handler;
- old_sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
-
+#define __ARCH_HAS_SA_RESTORER
#endif
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 4c060bb..8ad8af9 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -15,7 +15,7 @@
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
/* Inline functions for clock register access. */
-static inline int set_clock(__u64 time)
+static inline int set_tod_clock(__u64 time)
{
int cc;
@@ -27,7 +27,7 @@ static inline int set_clock(__u64 time)
return cc;
}
-static inline int store_clock(__u64 *time)
+static inline int store_tod_clock(__u64 *time)
{
int cc;
@@ -71,7 +71,7 @@ static inline void local_tick_enable(unsigned long long comp)
typedef unsigned long long cycles_t;
-static inline unsigned long long get_clock(void)
+static inline unsigned long long get_tod_clock(void)
{
unsigned long long clk;
@@ -83,21 +83,21 @@ static inline unsigned long long get_clock(void)
return clk;
}
-static inline void get_clock_ext(char *clk)
+static inline void get_tod_clock_ext(char *clk)
{
asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
}
-static inline unsigned long long get_clock_xt(void)
+static inline unsigned long long get_tod_clock_xt(void)
{
unsigned char clk[16];
- get_clock_ext(clk);
+ get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
}
static inline cycles_t get_cycles(void)
{
- return (cycles_t) get_clock() >> 2;
+ return (cycles_t) get_tod_clock() >> 2;
}
int get_sync_clock(unsigned long long *clock);
@@ -123,9 +123,9 @@ extern u64 sched_clock_base_cc;
* function, otherwise the returned value is not guaranteed to
* be monotonic.
*/
-static inline unsigned long long get_clock_monotonic(void)
+static inline unsigned long long get_tod_clock_monotonic(void)
{
- return get_clock_xt() - sched_clock_base_cc;
+ return get_tod_clock_xt() - sched_clock_base_cc;
}
/**
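
Note on the timex.h change above: this is a pure rename (get_clock* becomes get_tod_clock*, and set_clock/store_clock follow suit); the callers converted throughout this series match it. A minimal timing sketch with the new name, where do_work() is a hypothetical stand-in:

    /* In the s390 TOD format bit 51 ticks once per microsecond,
     * so shifting a clock delta right by 12 yields microseconds. */
    unsigned long long start, delta_us;

    start = get_tod_clock();
    do_work();
    delta_us = (get_tod_clock() - start) >> 12;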
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 6365308..a6667a9 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -43,15 +43,12 @@
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
# ifndef CONFIG_64BIT
# define __ARCH_WANT_STAT64
# define __ARCH_WANT_SYS_TIME
# endif
# ifdef CONFIG_COMPAT
# define __ARCH_WANT_COMPAT_SYS_TIME
-# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
# endif
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 436d07c..f99eea7 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -28,7 +28,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -76,4 +76,6 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 65cca95..19f26de 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -352,86 +352,6 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned
return sys_ftruncate(fd, (high << 32) | low);
}
-asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval)
-{
- struct timespec t;
- int ret;
- mm_segment_t old_fs = get_fs ();
-
- set_fs (KERNEL_DS);
- ret = sys_sched_rr_get_interval(pid,
- (struct timespec __force __user *) &t);
- set_fs (old_fs);
- if (put_compat_timespec(&t, interval))
- return -EFAULT;
- return ret;
-}
-
-asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
- compat_sigset_t __user *oset, size_t sigsetsize)
-{
- sigset_t s;
- compat_sigset_t s32;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- if (set) {
- if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
- return -EFAULT;
- s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
- }
- set_fs (KERNEL_DS);
- ret = sys_rt_sigprocmask(how,
- set ? (sigset_t __force __user *) &s : NULL,
- oset ? (sigset_t __force __user *) &s : NULL,
- sigsetsize);
- set_fs (old_fs);
- if (ret) return ret;
- if (oset) {
- s32.sig[1] = (s.sig[0] >> 32);
- s32.sig[0] = s.sig[0];
- if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
- return -EFAULT;
- }
- return 0;
-}
-
-asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
- size_t sigsetsize)
-{
- sigset_t s;
- compat_sigset_t s32;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- set_fs (KERNEL_DS);
- ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
- set_fs (old_fs);
- if (!ret) {
- s32.sig[1] = (s.sig[0] >> 32);
- s32.sig[0] = s.sig[0];
- if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
- return -EFAULT;
- }
- return ret;
-}
-
-asmlinkage long
-sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
-{
- siginfo_t info;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- if (copy_siginfo_from_user32(&info, uinfo))
- return -EFAULT;
- set_fs (KERNEL_DS);
- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info);
- set_fs (old_fs);
- return ret;
-}
-
asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf,
size_t count, u32 poshi, u32 poslo)
{
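
Note on the compat_linux.c deletions above: the removed wrappers all follow the same 31-bit compat pattern — copy the compat structure in, fold the two 32-bit sigset words into the native 64-bit word, invoke the real syscall under KERNEL_DS, and split the result back out — which the generic compat syscalls now handle. The fold/split step, quoted here for reference:

    /* compat_sigset_t carries two 32-bit words; sigset_t on
     * 64-bit s390 packs them into a single unsigned long. */
    s.sig[0] = s32.sig[0] | (((long) s32.sig[1]) << 32);  /* fold  */
    s32.sig[1] = (s.sig[0] >> 32);                        /* split */
    s32.sig[0] = s.sig[0];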
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index d4d0239..00d92a5 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -17,13 +17,6 @@ struct ipc_kludge_32 {
__s32 msgtyp;
};
-struct old_sigaction32 {
- __u32 sa_handler; /* Really a pointer, but need to deal with 32 bits */
- compat_old_sigset_t sa_mask; /* A 32 bit mask */
- __u32 sa_flags;
- __u32 sa_restorer; /* Another 32 bit pointer */
-};
-
/* asm/sigcontext.h */
typedef union
{
@@ -68,24 +61,12 @@ struct sigcontext32
};
/* asm/signal.h */
-struct sigaction32 {
- __u32 sa_handler; /* pointer */
- __u32 sa_flags;
- __u32 sa_restorer; /* pointer */
- compat_sigset_t sa_mask; /* mask last for extensibility */
-};
-
-typedef struct {
- __u32 ss_sp; /* pointer */
- int ss_flags;
- compat_size_t ss_size;
-} stack_t32;
/* asm/ucontext.h */
struct ucontext32 {
__u32 uc_flags;
__u32 uc_link; /* pointer */
- stack_t32 uc_stack;
+ compat_stack_t uc_stack;
_sigregs32 uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
@@ -93,8 +74,6 @@ struct ucontext32 {
struct stat64_emu31;
struct mmap_arg_struct_emu31;
struct fadvise64_64_args;
-struct old_sigaction32;
-struct old_sigaction32;
long sys32_chown16(const char __user * filename, u16 user, u16 group);
long sys32_lchown16(const char __user * filename, u16 user, u16 group);
@@ -119,12 +98,6 @@ long sys32_ipc(u32 call, int first, int second, int third, u32 ptr);
long sys32_truncate64(const char __user * path, unsigned long high,
unsigned long low);
long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low);
-long sys32_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval);
-long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
- compat_sigset_t __user *oset, size_t sigsetsize);
-long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize);
-long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo);
long sys32_init_module(void __user *umod, unsigned long len,
const char __user *uargs);
long sys32_delete_module(const char __user *name_user, unsigned int flags);
@@ -149,9 +122,4 @@ long sys32_read(unsigned int fd, char __user * buf, size_t count);
long sys32_write(unsigned int fd, const char __user * buf, size_t count);
long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise);
long sys32_fadvise64_64(struct fadvise64_64_args __user *args);
-long sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
- struct old_sigaction32 __user *oact);
-long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
- struct sigaction32 __user *oact, size_t sigsetsize);
-long sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss);
#endif /* _ASM_S390X_S390_H */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 593fcc9..3e71194 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -157,122 +157,6 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
return err;
}
-asmlinkage long
-sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
- struct old_sigaction32 __user *oact)
-{
- struct k_sigaction new_ka, old_ka;
- unsigned long sa_handler, sa_restorer;
- int ret;
-
- if (act) {
- compat_old_sigset_t mask;
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(sa_handler, &act->sa_handler) ||
- __get_user(sa_restorer, &act->sa_restorer) ||
- __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
- __get_user(mask, &act->sa_mask))
- return -EFAULT;
- new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
- new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer;
- siginitset(&new_ka.sa.sa_mask, mask);
- }
-
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
- if (!ret && oact) {
- sa_handler = (unsigned long) old_ka.sa.sa_handler;
- sa_restorer = (unsigned long) old_ka.sa.sa_restorer;
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(sa_handler, &oact->sa_handler) ||
- __put_user(sa_restorer, &oact->sa_restorer) ||
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
- return -EFAULT;
- }
-
- return ret;
-}
-
-asmlinkage long
-sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
- struct sigaction32 __user *oact, size_t sigsetsize)
-{
- struct k_sigaction new_ka, old_ka;
- unsigned long sa_handler;
- int ret;
- compat_sigset_t set32;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(compat_sigset_t))
- return -EINVAL;
-
- if (act) {
- ret = get_user(sa_handler, &act->sa_handler);
- ret |= __copy_from_user(&set32, &act->sa_mask,
- sizeof(compat_sigset_t));
- new_ka.sa.sa_mask.sig[0] =
- set32.sig[0] | (((long)set32.sig[1]) << 32);
- ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-
- if (ret)
- return -EFAULT;
- new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
- }
-
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
- if (!ret && oact) {
- set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
- set32.sig[0] = old_ka.sa.sa_mask.sig[0];
- ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler);
- ret |= __copy_to_user(&oact->sa_mask, &set32,
- sizeof(compat_sigset_t));
- ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- }
-
- return ret;
-}
-
-asmlinkage long
-sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss)
-{
- struct pt_regs *regs = task_pt_regs(current);
- stack_t kss, koss;
- unsigned long ss_sp;
- int ret, err = 0;
- mm_segment_t old_fs = get_fs();
-
- if (uss) {
- if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
- return -EFAULT;
- err |= __get_user(ss_sp, &uss->ss_sp);
- err |= __get_user(kss.ss_size, &uss->ss_size);
- err |= __get_user(kss.ss_flags, &uss->ss_flags);
- if (err)
- return -EFAULT;
- kss.ss_sp = (void __user *) ss_sp;
- }
-
- set_fs (KERNEL_DS);
- ret = do_sigaltstack((stack_t __force __user *) (uss ? &kss : NULL),
- (stack_t __force __user *) (uoss ? &koss : NULL),
- regs->gprs[15]);
- set_fs (old_fs);
-
- if (!ret && uoss) {
- if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
- return -EFAULT;
- ss_sp = (unsigned long) koss.ss_sp;
- err |= __put_user(ss_sp, &uoss->ss_sp);
- err |= __put_user(koss.ss_size, &uoss->ss_size);
- err |= __put_user(koss.ss_flags, &uoss->ss_flags);
- if (err)
- return -EFAULT;
- }
- return ret;
-}
-
static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
_s390_regs_common32 regs32;
@@ -380,10 +264,6 @@ asmlinkage long sys32_rt_sigreturn(void)
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
sigset_t set;
- stack_t st;
- __u32 ss_sp;
- int err;
- mm_segment_t old_fs = get_fs();
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@@ -394,15 +274,8 @@ asmlinkage long sys32_rt_sigreturn(void)
goto badframe;
if (restore_sigregs_gprs_high(regs, frame->gprs_high))
goto badframe;
- err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
- st.ss_sp = compat_ptr(ss_sp);
- err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
- err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
- if (err)
+ if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
- set_fs (KERNEL_DS);
- do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
- set_fs (old_fs);
return regs->gprs[2];
badframe:
force_sig(SIGSEGV, current);
@@ -530,10 +403,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Create the ucontext. */
err |= __put_user(UC_EXTENDED, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->gprs[15]),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]);
err |= save_sigregs32(regs, &frame->uc.uc_mcontext);
err |= save_sigregs_gprs_high(regs, frame->gprs_high);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 9b9a805..c14faf3 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -24,12 +24,6 @@ ENTRY(sys32_write_wrapper)
llgfr %r4,%r4 # size_t
jg sys32_write # branch to system call
-ENTRY(sys32_open_wrapper)
- llgtr %r2,%r2 # const char *
- lgfr %r3,%r3 # int
- lgfr %r4,%r4 # int
- jg compat_sys_open # branch to system call
-
ENTRY(sys32_close_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_close # branch to system call
@@ -226,12 +220,6 @@ ENTRY(sys32_dup2_wrapper)
#sys32_setsid_wrapper # void
-ENTRY(sys32_sigaction_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const struct old_sigaction *
- llgtr %r4,%r4 # struct old_sigaction32 *
- jg sys32_sigaction # branch to system call
-
ENTRY(sys32_setreuid16_wrapper)
llgfr %r2,%r2 # __kernel_old_uid_emu31_t
llgfr %r3,%r3 # __kernel_old_uid_emu31_t
@@ -396,17 +384,6 @@ ENTRY(sys32_syslog_wrapper)
lgfr %r4,%r4 # int
jg sys_syslog # branch to system call
-ENTRY(compat_sys_setitimer_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # struct itimerval_emu31 *
- llgtr %r4,%r4 # struct itimerval_emu31 *
- jg compat_sys_setitimer # branch to system call
-
-ENTRY(compat_sys_getitimer_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # struct itimerval_emu31 *
- jg compat_sys_getitimer # branch to system call
-
ENTRY(compat_sys_newstat_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct stat_emu31 *
@@ -424,13 +401,6 @@ ENTRY(compat_sys_newfstat_wrapper)
#sys32_vhangup_wrapper # void
-ENTRY(compat_sys_wait4_wrapper)
- lgfr %r2,%r2 # pid_t
- llgtr %r3,%r3 # unsigned int *
- lgfr %r4,%r4 # int
- llgtr %r5,%r5 # struct rusage *
- jg compat_sys_wait4 # branch to system call
-
ENTRY(sys32_swapoff_wrapper)
llgtr %r2,%r2 # const char *
jg sys_swapoff # branch to system call
@@ -474,12 +444,6 @@ ENTRY(sys32_mprotect_wrapper)
llgfr %r4,%r4 # unsigned long
jg sys_mprotect # branch to system call
-ENTRY(compat_sys_sigprocmask_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # compat_old_sigset_t *
- llgtr %r4,%r4 # compat_old_sigset_t *
- jg compat_sys_sigprocmask # branch to system call
-
ENTRY(sys_init_module_wrapper)
llgtr %r2,%r2 # void *
llgfr %r3,%r3 # unsigned long
@@ -628,11 +592,6 @@ ENTRY(sys32_sched_get_priority_min_wrapper)
lgfr %r2,%r2 # int
jg sys_sched_get_priority_min # branch to system call
-ENTRY(sys32_sched_rr_get_interval_wrapper)
- lgfr %r2,%r2 # pid_t
- llgtr %r3,%r3 # struct compat_timespec *
- jg sys32_sched_rr_get_interval # branch to system call
-
ENTRY(compat_sys_nanosleep_wrapper)
llgtr %r2,%r2 # struct compat_timespec *
llgtr %r3,%r3 # struct compat_timespec *
@@ -686,43 +645,6 @@ ENTRY(sys32_prctl_wrapper)
#sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue
-ENTRY(sys32_rt_sigaction_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # const struct sigaction_emu31 *
- llgtr %r4,%r4 # const struct sigaction_emu31 *
- llgfr %r5,%r5 # size_t
- jg sys32_rt_sigaction # branch to system call
-
-ENTRY(sys32_rt_sigprocmask_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # old_sigset_emu31 *
- llgtr %r4,%r4 # old_sigset_emu31 *
- llgfr %r5,%r5 # size_t
- jg sys32_rt_sigprocmask # branch to system call
-
-ENTRY(sys32_rt_sigpending_wrapper)
- llgtr %r2,%r2 # sigset_emu31 *
- llgfr %r3,%r3 # size_t
- jg sys32_rt_sigpending # branch to system call
-
-ENTRY(compat_sys_rt_sigtimedwait_wrapper)
- llgtr %r2,%r2 # const sigset_emu31_t *
- llgtr %r3,%r3 # siginfo_emu31_t *
- llgtr %r4,%r4 # const struct compat_timespec *
- llgfr %r5,%r5 # size_t
- jg compat_sys_rt_sigtimedwait # branch to system call
-
-ENTRY(sys32_rt_sigqueueinfo_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- llgtr %r4,%r4 # siginfo_emu31_t *
- jg sys32_rt_sigqueueinfo # branch to system call
-
-ENTRY(compat_sys_rt_sigsuspend_wrapper)
- llgtr %r2,%r2 # compat_sigset_t *
- llgfr %r3,%r3 # compat_size_t
- jg compat_sys_rt_sigsuspend
-
ENTRY(sys32_pread64_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
@@ -760,11 +682,6 @@ ENTRY(sys32_capset_wrapper)
llgtr %r3,%r3 # const cap_user_data_t
jg sys_capset # branch to system call
-ENTRY(sys32_sigaltstack_wrapper)
- llgtr %r2,%r2 # const stack_emu31_t *
- llgtr %r3,%r3 # stack_emu31_t *
- jg sys32_sigaltstack
-
ENTRY(sys32_sendfile_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
@@ -921,16 +838,6 @@ ENTRY(sys32_fstat64_wrapper)
llgtr %r3,%r3 # struct stat64 *
jg sys32_fstat64 # branch to system call
-ENTRY(compat_sys_futex_wrapper)
- llgtr %r2,%r2 # u32 *
- lgfr %r3,%r3 # int
- lgfr %r4,%r4 # int
- llgtr %r5,%r5 # struct compat_timespec *
- llgtr %r6,%r6 # u32 *
- lgf %r0,164(%r15) # int
- stg %r0,160(%r15)
- jg compat_sys_futex # branch to system call
-
ENTRY(sys32_setxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
@@ -1216,14 +1123,6 @@ ENTRY(sys32_remap_file_pages_wrapper)
llgfr %r6,%r6 # unsigned long
jg sys_remap_file_pages
-ENTRY(compat_sys_waitid_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # pid_t
- llgtr %r4,%r4 # siginfo_emu31_t *
- lgfr %r5,%r5 # int
- llgtr %r6,%r6 # struct rusage_emu31 *
- jg compat_sys_waitid
-
ENTRY(compat_sys_kexec_load_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # unsigned long
@@ -1253,13 +1152,6 @@ ENTRY(sys_inotify_rm_watch_wrapper)
llgfr %r3,%r3 # u32
jg sys_inotify_rm_watch
-ENTRY(compat_sys_openat_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgtr %r3,%r3 # const char *
- lgfr %r4,%r4 # int
- lgfr %r5,%r5 # int
- jg compat_sys_openat
-
ENTRY(sys_mkdirat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
@@ -1362,17 +1254,6 @@ ENTRY(sys_unshare_wrapper)
llgfr %r2,%r2 # unsigned long
jg sys_unshare
-ENTRY(compat_sys_set_robust_list_wrapper)
- llgtr %r2,%r2 # struct compat_robust_list_head *
- llgfr %r3,%r3 # size_t
- jg compat_sys_set_robust_list
-
-ENTRY(compat_sys_get_robust_list_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # compat_uptr_t_t *
- llgtr %r4,%r4 # compat_size_t *
- jg compat_sys_get_robust_list
-
ENTRY(sys_splice_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # loff_t *
@@ -1458,18 +1339,6 @@ ENTRY(sys_timerfd_create_wrapper)
lgfr %r3,%r3 # int
jg sys_timerfd_create
-ENTRY(compat_sys_timerfd_settime_wrapper)
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- llgtr %r4,%r4 # struct compat_itimerspec *
- llgtr %r5,%r5 # struct compat_itimerspec *
- jg compat_sys_timerfd_settime
-
-ENTRY(compat_sys_timerfd_gettime_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # struct compat_itimerspec *
- jg compat_sys_timerfd_gettime
-
ENTRY(compat_sys_signalfd4_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_sigset_t *
@@ -1550,13 +1419,6 @@ ENTRY(compat_sys_pwritev_wrapper)
llgfr %r6,%r6 # u32
jg compat_sys_pwritev # branch to system call
-ENTRY(compat_sys_rt_tgsigqueueinfo_wrapper)
- lgfr %r2,%r2 # compat_pid_t
- lgfr %r3,%r3 # compat_pid_t
- lgfr %r4,%r4 # int
- llgtr %r5,%r5 # struct compat_siginfo *
- jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call
-
ENTRY(sys_perf_event_open_wrapper)
llgtr %r2,%r2 # const struct perf_event_attr *
lgfr %r3,%r3 # pid_t
@@ -1607,12 +1469,6 @@ ENTRY(sys_name_to_handle_at_wrapper)
lgfr %r6,%r6 # int
jg sys_name_to_handle_at
-ENTRY(compat_sys_open_by_handle_at_wrapper)
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # struct file_handle __user *
- lgfr %r4,%r4 # int
- jg compat_sys_open_by_handle_at
-
ENTRY(compat_sys_clock_adjtime_wrapper)
lgfr %r2,%r2 # clockid_t (int)
llgtr %r3,%r3 # struct compat_timex __user *
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 19dcf13..f1279dc 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -867,7 +867,7 @@ static inline void
debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
int exception)
{
- active->id.stck = get_clock();
+ active->id.stck = get_tod_clock();
active->id.fields.cpuid = smp_processor_id();
active->caller = __builtin_return_address(0);
active->id.fields.exception = exception;
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index a7f9abd..c50665f 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -840,7 +840,6 @@ static struct insn opcode_b2[] = {
{ "stcke", 0x78, INSTR_S_RD },
{ "sacf", 0x79, INSTR_S_RD },
{ "stsi", 0x7d, INSTR_S_RD },
- { "spp", 0x80, INSTR_S_RD },
{ "srnm", 0x99, INSTR_S_RD },
{ "stfpc", 0x9c, INSTR_S_RD },
{ "lfpc", 0x9d, INSTR_S_RD },
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 1f0eee9..bda011e 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -47,10 +47,10 @@ static void __init reset_tod_clock(void)
{
u64 time;
- if (store_clock(&time) == 0)
+ if (store_tod_clock(&time) == 0)
return;
/* TOD clock not running. Set the clock to Unix Epoch. */
- if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
+ if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
disabled_wait(0);
sched_clock_base_cc = TOD_UNIX_EPOCH;
@@ -173,7 +173,7 @@ static noinline __init void create_kernel_nss(void)
}
/* re-initialize cputime accounting. */
- sched_clock_base_cc = get_clock();
+ sched_clock_base_cc = get_tod_clock();
S390_lowcore.last_update_clock = sched_clock_base_cc;
S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
S390_lowcore.user_timer = 0;
@@ -381,7 +381,7 @@ static __init void detect_machine_facilities(void)
if (test_facility(27))
S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
if (test_facility(40))
- S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
if (test_facility(50) && test_facility(73))
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
if (test_facility(66))
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 2711936..c3a736a 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -73,10 +73,6 @@ long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high,
u32 len_low);
-long sys_sigsuspend(int history0, int history1, old_sigset_t mask);
-long sys_sigaction(int sig, const struct old_sigaction __user *act,
- struct old_sigaction __user *oact);
-long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss);
long sys_sigreturn(void);
long sys_rt_sigreturn(void);
long sys32_sigreturn(void);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 6d34e0c..9c837c1 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -72,9 +72,9 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
#endif
.endm
- .macro SPP newpp
+ .macro LPP newpp
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
- tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
+ tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
jz .+8
.insn s,0xb2800000,\newpp
#endif
@@ -96,7 +96,7 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
jhe .+22
.endif
lg %r9,BASED(.Lsie_loop)
- SPP BASED(.Lhost_id) # set host id
+ LPP BASED(.Lhost_id) # set host id
#endif
.endm
@@ -967,10 +967,10 @@ sie_loop:
lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
sie_gmap:
lg %r14,__SF_EMPTY(%r15) # get control block pointer
- SPP __SF_EMPTY(%r15) # set guest id
+ LPP __SF_EMPTY(%r15) # set guest id
sie 0(%r14)
sie_done:
- SPP __SF_EMPTY+16(%r15) # set host id
+ LPP __SF_EMPTY+16(%r15) # set host id
lg %r14,__LC_THREAD_INFO # pointer thread_info struct
sie_exit:
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 6ffcd320..d8a6a38 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1414,6 +1414,16 @@ static struct kobj_attribute dump_type_attr =
static struct kset *dump_kset;
+static void diag308_dump(void *dump_block)
+{
+ diag308(DIAG308_SET, dump_block);
+ while (1) {
+ if (diag308(DIAG308_DUMP, NULL) != 0x302)
+ break;
+ udelay_simple(USEC_PER_SEC);
+ }
+}
+
static void __dump_run(void *unused)
{
struct ccw_dev_id devid;
@@ -1432,12 +1442,10 @@ static void __dump_run(void *unused)
__cpcmd(buf, NULL, 0, NULL);
break;
case DUMP_METHOD_CCW_DIAG:
- diag308(DIAG308_SET, dump_block_ccw);
- diag308(DIAG308_DUMP, NULL);
+ diag308_dump(dump_block_ccw);
break;
case DUMP_METHOD_FCP_DIAG:
- diag308(DIAG308_SET, dump_block_fcp);
- diag308(DIAG308_DUMP, NULL);
+ diag308_dump(dump_block_fcp);
break;
default:
break;
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 9df824e..1630f43 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -81,6 +81,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
[IRQIO_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
[IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
[IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
+ [IRQIO_VIR] = {.name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
[CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
};
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 4610dea..f750bd7 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -65,8 +65,7 @@ void module_free(struct module *mod, void *module_region)
vfree(module_region);
}
-static void
-check_rela(Elf_Rela *rela, struct module *me)
+static void check_rela(Elf_Rela *rela, struct module *me)
{
struct mod_arch_syminfo *info;
@@ -115,9 +114,8 @@ check_rela(Elf_Rela *rela, struct module *me)
* Account for GOT and PLT relocations. We can't add sections for
* got and plt but we can increase the core module size.
*/
-int
-module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
- char *secstrings, struct module *me)
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *me)
{
Elf_Shdr *symtab;
Elf_Sym *symbols;
@@ -179,13 +177,52 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
return 0;
}
-static int
-apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- struct module *me)
+static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
+ int sign, int bits, int shift)
+{
+ unsigned long umax;
+ long min, max;
+
+ if (val & ((1UL << shift) - 1))
+ return -ENOEXEC;
+ if (sign) {
+ val = (Elf_Addr)(((long) val) >> shift);
+ min = -(1L << (bits - 1));
+ max = (1L << (bits - 1)) - 1;
+ if ((long) val < min || (long) val > max)
+ return -ENOEXEC;
+ } else {
+ val >>= shift;
+ umax = ((1UL << (bits - 1)) << 1) - 1;
+ if ((unsigned long) val > umax)
+ return -ENOEXEC;
+ }
+
+ if (bits == 8)
+ *(unsigned char *) loc = val;
+ else if (bits == 12)
+ *(unsigned short *) loc = (val & 0xfff) |
+ (*(unsigned short *) loc & 0xf000);
+ else if (bits == 16)
+ *(unsigned short *) loc = val;
+ else if (bits == 20)
+ *(unsigned int *) loc = (val & 0xfff) << 16 |
+ (val & 0xff000) >> 4 |
+ (*(unsigned int *) loc & 0xf00000ff);
+ else if (bits == 32)
+ *(unsigned int *) loc = val;
+ else if (bits == 64)
+ *(unsigned long *) loc = val;
+ return 0;
+}
+
+static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ const char *strtab, struct module *me)
{
struct mod_arch_syminfo *info;
Elf_Addr loc, val;
int r_type, r_sym;
+ int rc;
/* This is where to make the change */
loc = base + rela->r_offset;
@@ -197,6 +234,9 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val = symtab[r_sym].st_value;
switch (r_type) {
+ case R_390_NONE: /* No relocation. */
+ rc = 0;
+ break;
case R_390_8: /* Direct 8 bit. */
case R_390_12: /* Direct 12 bit. */
case R_390_16: /* Direct 16 bit. */
@@ -205,20 +245,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_64: /* Direct 64 bit. */
val += rela->r_addend;
if (r_type == R_390_8)
- *(unsigned char *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 8, 0);
else if (r_type == R_390_12)
- *(unsigned short *) loc = (val & 0xfff) |
- (*(unsigned short *) loc & 0xf000);
+ rc = apply_rela_bits(loc, val, 0, 12, 0);
else if (r_type == R_390_16)
- *(unsigned short *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 16, 0);
else if (r_type == R_390_20)
- *(unsigned int *) loc =
- (*(unsigned int *) loc & 0xf00000ff) |
- (val & 0xfff) << 16 | (val & 0xff000) >> 4;
+ rc = apply_rela_bits(loc, val, 1, 20, 0);
else if (r_type == R_390_32)
- *(unsigned int *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 32, 0);
else if (r_type == R_390_64)
- *(unsigned long *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 64, 0);
break;
case R_390_PC16: /* PC relative 16 bit. */
case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
@@ -227,15 +264,15 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PC64: /* PC relative 64 bit. */
val += rela->r_addend - loc;
if (r_type == R_390_PC16)
- *(unsigned short *) loc = val;
+ rc = apply_rela_bits(loc, val, 1, 16, 0);
else if (r_type == R_390_PC16DBL)
- *(unsigned short *) loc = val >> 1;
+ rc = apply_rela_bits(loc, val, 1, 16, 1);
else if (r_type == R_390_PC32DBL)
- *(unsigned int *) loc = val >> 1;
+ rc = apply_rela_bits(loc, val, 1, 32, 1);
else if (r_type == R_390_PC32)
- *(unsigned int *) loc = val;
+ rc = apply_rela_bits(loc, val, 1, 32, 0);
else if (r_type == R_390_PC64)
- *(unsigned long *) loc = val;
+ rc = apply_rela_bits(loc, val, 1, 64, 0);
break;
case R_390_GOT12: /* 12 bit GOT offset. */
case R_390_GOT16: /* 16 bit GOT offset. */
@@ -260,26 +297,24 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val = info->got_offset + rela->r_addend;
if (r_type == R_390_GOT12 ||
r_type == R_390_GOTPLT12)
- *(unsigned short *) loc = (val & 0xfff) |
- (*(unsigned short *) loc & 0xf000);
+ rc = apply_rela_bits(loc, val, 0, 12, 0);
else if (r_type == R_390_GOT16 ||
r_type == R_390_GOTPLT16)
- *(unsigned short *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 16, 0);
else if (r_type == R_390_GOT20 ||
r_type == R_390_GOTPLT20)
- *(unsigned int *) loc =
- (*(unsigned int *) loc & 0xf00000ff) |
- (val & 0xfff) << 16 | (val & 0xff000) >> 4;
+ rc = apply_rela_bits(loc, val, 1, 20, 0);
else if (r_type == R_390_GOT32 ||
r_type == R_390_GOTPLT32)
- *(unsigned int *) loc = val;
- else if (r_type == R_390_GOTENT ||
- r_type == R_390_GOTPLTENT)
- *(unsigned int *) loc =
- (val + (Elf_Addr) me->module_core - loc) >> 1;
+ rc = apply_rela_bits(loc, val, 0, 32, 0);
else if (r_type == R_390_GOT64 ||
r_type == R_390_GOTPLT64)
- *(unsigned long *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 64, 0);
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT) {
+ val += (Elf_Addr) me->module_core - loc;
+ rc = apply_rela_bits(loc, val, 1, 32, 1);
+ }
break;
case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */
@@ -321,17 +356,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val += rela->r_addend - loc;
}
if (r_type == R_390_PLT16DBL)
- *(unsigned short *) loc = val >> 1;
+ rc = apply_rela_bits(loc, val, 1, 16, 1);
else if (r_type == R_390_PLTOFF16)
- *(unsigned short *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 16, 0);
else if (r_type == R_390_PLT32DBL)
- *(unsigned int *) loc = val >> 1;
+ rc = apply_rela_bits(loc, val, 1, 32, 1);
else if (r_type == R_390_PLT32 ||
r_type == R_390_PLTOFF32)
- *(unsigned int *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 32, 0);
else if (r_type == R_390_PLT64 ||
r_type == R_390_PLTOFF64)
- *(unsigned long *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 64, 0);
break;
case R_390_GOTOFF16: /* 16 bit offset to GOT. */
case R_390_GOTOFF32: /* 32 bit offset to GOT. */
@@ -339,20 +374,20 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val = val + rela->r_addend -
((Elf_Addr) me->module_core + me->arch.got_offset);
if (r_type == R_390_GOTOFF16)
- *(unsigned short *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 16, 0);
else if (r_type == R_390_GOTOFF32)
- *(unsigned int *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 32, 0);
else if (r_type == R_390_GOTOFF64)
- *(unsigned long *) loc = val;
+ rc = apply_rela_bits(loc, val, 0, 64, 0);
break;
case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
val = (Elf_Addr) me->module_core + me->arch.got_offset +
rela->r_addend - loc;
if (r_type == R_390_GOTPC)
- *(unsigned int *) loc = val;
+ rc = apply_rela_bits(loc, val, 1, 32, 0);
else if (r_type == R_390_GOTPCDBL)
- *(unsigned int *) loc = val >> 1;
+ rc = apply_rela_bits(loc, val, 1, 32, 1);
break;
case R_390_COPY:
case R_390_GLOB_DAT: /* Create GOT entry. */
@@ -360,19 +395,25 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_RELATIVE: /* Adjust by program base. */
/* Only needed if we want to support loading of
modules linked with -shared. */
- break;
+ return -ENOEXEC;
default:
- printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+ printk(KERN_ERR "module %s: unknown relocation: %u\n",
me->name, r_type);
return -ENOEXEC;
}
+ if (rc) {
+ printk(KERN_ERR "module %s: relocation error for symbol %s "
+ "(r_type %i, value 0x%lx)\n",
+ me->name, strtab + symtab[r_sym].st_name,
+ r_type, (unsigned long) val);
+ return rc;
+ }
return 0;
}
-int
-apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec,
- struct module *me)
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
{
Elf_Addr base;
Elf_Sym *symtab;
@@ -388,7 +429,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);
for (i = 0; i < n; i++, rela++) {
- rc = apply_rela(rela, base, symtab, me);
+ rc = apply_rela(rela, base, symtab, strtab, me);
if (rc)
return rc;
}
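
apply_rela_bits() centralizes the range check and bit splicing that every relocation case previously open-coded, which is also what lets the new code report out-of-range values as -ENOEXEC instead of silently truncating them. The least obvious case is the 20-bit splice, where the low 12 bits and the high 8 bits of the value land in different parts of the word while the mask 0xf00000ff preserves the untouched instruction bits. A standalone illustration (not the kernel function itself):

    #include <assert.h>
    #include <stdint.h>

    /* Splice a 20-bit value into a word the way R_390_20 does:
     * low 12 bits -> 0x0fff0000, high 8 bits -> 0x0000ff00,
     * everything under 0xf00000ff left alone. */
    static uint32_t splice20(uint32_t word, uint32_t val)
    {
            return (word & 0xf00000ff) |
                   ((val & 0x00fff) << 16) |
                   ((val & 0xff000) >> 4);
    }

    int main(void)
    {
            /* 0x12345: low 12 bits 0x345, high 8 bits 0x12 */
            assert(splice20(0xe0000000, 0x12345) == 0xe3451200);
            return 0;
    }
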
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 7918fbe..504175e 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -293,7 +293,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
* retry this instruction.
*/
spin_lock(&ipd_lock);
- tmp = get_clock();
+ tmp = get_tod_clock();
if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
ipd_count++;
else
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 86ec744..390d9ae 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -367,13 +367,6 @@ static int __hw_perf_event_init(struct perf_event *event)
if (ev >= PERF_CPUM_CF_MAX_CTR)
return -EINVAL;
- /* The CPU measurement counter facility does not have any interrupts
- * to do sampling. Sampling must be provided by external means,
- * for example, by timers.
- */
- if (hwc->sample_period)
- return -EINVAL;
-
/* Use the hardware perf event structure to store the counter number
* in 'config' member and the counter set to which the counter belongs
* in the 'config_base'. The counter set (config_base) is then used
@@ -418,6 +411,12 @@ static int cpumf_pmu_event_init(struct perf_event *event)
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
case PERF_TYPE_RAW:
+ /* The CPU measurement counter facility does not have overflow
+ * interrupts to do sampling. Sampling must be provided by
+ * external means, for example, by timers.
+ */
+ if (is_sampling_event(event))
+ return -ENOENT;
err = __hw_perf_event_init(event);
break;
default:
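
Moving the sampling check out of __hw_perf_event_init() and returning -ENOENT instead of -EINVAL is a behavioral fix, not just a cleanup: from event_init, -ENOENT tells the perf core "not my event, try the next PMU", while -EINVAL fails event creation outright. is_sampling_event() itself is just a test for a non-zero sample period, roughly:

    /* Paraphrased from include/linux/perf_event.h: */
    static inline bool is_sampling_event(struct perf_event *event)
    {
            return event->attr.sample_period != 0;
    }
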
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index c3ff70a..9c6e747 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -48,54 +48,6 @@ typedef struct
struct ucontext uc;
} rt_sigframe;
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask)
-{
- sigset_t blocked;
- siginitset(&blocked, mask);
- return sigsuspend(&blocked);
-}
-
-SYSCALL_DEFINE3(sigaction, int, sig, const struct old_sigaction __user *, act,
- struct old_sigaction __user *, oact)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
-
- if (act) {
- old_sigset_t mask;
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
- __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
- __get_user(mask, &act->sa_mask))
- return -EFAULT;
- siginitset(&new_ka.sa.sa_mask, mask);
- }
-
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
- if (!ret && oact) {
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
- return -EFAULT;
- }
-
- return ret;
-}
-
-SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss,
- stack_t __user *, uoss)
-{
- struct pt_regs *regs = task_pt_regs(current);
- return do_sigaltstack(uss, uoss, regs->gprs[15]);
-}
-
/* Returns non-zero on fault. */
static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
@@ -190,8 +142,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
set_current_blocked(&set);
if (restore_sigregs(regs, &frame->uc.uc_mcontext))
goto badframe;
- if (do_sigaltstack(&frame->uc.uc_stack, NULL,
- regs->gprs[15]) == -EFAULT)
+ if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return regs->gprs[2];
badframe:
@@ -325,10 +276,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(NULL, &frame->uc.uc_link);
- err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->gprs[15]),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->gprs[15]);
err |= save_sigregs(regs, &frame->uc.uc_mcontext);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
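
The conversion to restore_altstack()/__save_altstack() drops the open-coded sigaltstack handling in favor of the generic helpers; on sigreturn the saved stack_t is pulled back from the user frame and run through the common sigaltstack bookkeeping. A rough sketch of what restore_altstack() amounts to, with do_sigaltstack_common() standing in for the generic update (hypothetical name):

    /* Sketch only; the real helper lives in kernel/signal.c. */
    static int restore_altstack_sketch(const stack_t __user *uss)
    {
            stack_t new;

            if (copy_from_user(&new, uss, sizeof(new)))
                    return -EFAULT;
            return do_sigaltstack_common(&new);     /* hypothetical */
    }
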
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7433a2f..549c9d1 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -365,16 +365,16 @@ void smp_emergency_stop(cpumask_t *cpumask)
u64 end;
int cpu;
- end = get_clock() + (1000000UL << 12);
+ end = get_tod_clock() + (1000000UL << 12);
for_each_cpu(cpu, cpumask) {
struct pcpu *pcpu = pcpu_devices + cpu;
set_bit(ec_stop_cpu, &pcpu->ec_mask);
while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
0, NULL) == SIGP_CC_BUSY &&
- get_clock() < end)
+ get_tod_clock() < end)
cpu_relax();
}
- while (get_clock() < end) {
+ while (get_tod_clock() < end) {
for_each_cpu(cpu, cpumask)
if (pcpu_stopped(pcpu_devices + cpu))
cpumask_clear_cpu(cpu, cpumask);
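
The timeout constant is worth decoding: bit 51 of the TOD clock ticks once per microsecond, so one microsecond equals 1 << 12 TOD units and (1000000UL << 12) is exactly a one-second deadline in TOD units:

    /* 1 us == 1 << 12 TOD units, so: */
    u64 one_second = 1000000UL << 12;       /* 10^6 us in TOD units */
    u64 end = get_tod_clock() + one_second;
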
@@ -694,7 +694,7 @@ static void __init smp_detect_cpus(void)
*/
static void __cpuinit smp_start_secondary(void *cpuvoid)
{
- S390_lowcore.last_update_clock = get_clock();
+ S390_lowcore.last_update_clock = get_tod_clock();
S390_lowcore.restart_stack = (unsigned long) restart_stack;
S390_lowcore.restart_fn = (unsigned long) do_restart;
S390_lowcore.restart_data = 0;
@@ -947,7 +947,7 @@ static ssize_t show_idle_time(struct device *dev,
unsigned int sequence;
do {
- now = get_clock();
+ now = get_tod_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_time = ACCESS_ONCE(idle->idle_time);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 6a6c61f..aaac708 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -13,7 +13,7 @@ SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)
SYSCALL(sys_fork,sys_fork,sys_fork)
SYSCALL(sys_read,sys_read,sys32_read_wrapper)
SYSCALL(sys_write,sys_write,sys32_write_wrapper)
-SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */
+SYSCALL(sys_open,sys_open,compat_sys_open) /* 5 */
SYSCALL(sys_close,sys_close,sys32_close_wrapper)
SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
@@ -75,7 +75,7 @@ SYSCALL(sys_dup2,sys_dup2,sys32_dup2_wrapper)
SYSCALL(sys_getppid,sys_getppid,sys_getppid)
SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */
SYSCALL(sys_setsid,sys_setsid,sys_setsid)
-SYSCALL(sys_sigaction,sys_sigaction,sys32_sigaction_wrapper)
+SYSCALL(sys_sigaction,sys_sigaction,compat_sys_sigaction)
NI_SYSCALL /* old sgetmask syscall*/
NI_SYSCALL /* old ssetmask syscall*/
SYSCALL(sys_setreuid16,sys_ni_syscall,sys32_setreuid16_wrapper) /* old setreuid16 syscall */
@@ -112,8 +112,8 @@ SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs_wrapper) /* 100 */
NI_SYSCALL /* ioperm for i386 */
SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall_wrapper)
SYSCALL(sys_syslog,sys_syslog,sys32_syslog_wrapper)
-SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer_wrapper)
-SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer_wrapper) /* 105 */
+SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer)
+SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer) /* 105 */
SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat_wrapper)
SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper)
SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper)
@@ -122,7 +122,7 @@ SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,sys32_lookup_dcookie_wrapper) /* 1
SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
NI_SYSCALL /* old "idle" system call */
NI_SYSCALL /* vm86old for i386 */
-SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper)
+SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4)
SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
SYSCALL(sys_s390_ipc,sys_s390_ipc,sys32_ipc_wrapper)
@@ -134,7 +134,7 @@ SYSCALL(sys_newuname,sys_newuname,sys32_newuname_wrapper)
NI_SYSCALL /* modify_ldt for i386 */
SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */
-SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper)
+SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask)
NI_SYSCALL /* old "create module" */
SYSCALL(sys_init_module,sys_init_module,sys_init_module_wrapper)
SYSCALL(sys_delete_module,sys_delete_module,sys_delete_module_wrapper)
@@ -169,7 +169,7 @@ SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,sys32_sched_getscheduler_w
SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield)
SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper)
SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper) /* 160 */
-SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,sys32_sched_rr_get_interval_wrapper)
+SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep_wrapper)
SYSCALL(sys_mremap,sys_mremap,sys32_mremap_wrapper)
SYSCALL(sys_setresuid16,sys_ni_syscall,sys32_setresuid16_wrapper) /* old setresuid16 syscall */
@@ -182,19 +182,19 @@ SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old set
SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */
SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn)
-SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper)
-SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */
-SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper)
-SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait_wrapper)
-SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,sys32_rt_sigqueueinfo_wrapper)
-SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend_wrapper)
+SYSCALL(sys_rt_sigaction,sys_rt_sigaction,compat_sys_rt_sigaction)
+SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */
+SYSCALL(sys_rt_sigpending,sys_rt_sigpending,compat_sys_rt_sigpending)
+SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait)
+SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo)
+SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend)
SYSCALL(sys_pread64,sys_pread64,sys32_pread64_wrapper) /* 180 */
SYSCALL(sys_pwrite64,sys_pwrite64,sys32_pwrite64_wrapper)
SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall */
SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */
-SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack_wrapper)
+SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack)
SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
NI_SYSCALL /* streams1 */
NI_SYSCALL /* streams2 */
@@ -246,7 +246,7 @@ SYSCALL(sys_lremovexattr,sys_lremovexattr,sys32_lremovexattr_wrapper)
SYSCALL(sys_fremovexattr,sys_fremovexattr,sys32_fremovexattr_wrapper) /* 235 */
SYSCALL(sys_gettid,sys_gettid,sys_gettid)
SYSCALL(sys_tkill,sys_tkill,sys_tkill_wrapper)
-SYSCALL(sys_futex,sys_futex,compat_sys_futex_wrapper)
+SYSCALL(sys_futex,sys_futex,compat_sys_futex)
SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,sys32_sched_setaffinity_wrapper)
SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,sys32_sched_getaffinity_wrapper) /* 240 */
SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill_wrapper)
@@ -289,14 +289,14 @@ SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load_wrapper)
SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key_wrapper)
SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key_wrapper)
SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl_wrapper) /* 280 */
-SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid_wrapper)
+SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid)
SYSCALL(sys_ioprio_set,sys_ioprio_set,sys_ioprio_set_wrapper)
SYSCALL(sys_ioprio_get,sys_ioprio_get,sys_ioprio_get_wrapper)
SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init)
SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,sys_inotify_add_watch_wrapper) /* 285 */
SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,sys_inotify_rm_watch_wrapper)
NI_SYSCALL /* 287 sys_migrate_pages */
-SYSCALL(sys_openat,sys_openat,compat_sys_openat_wrapper)
+SYSCALL(sys_openat,sys_openat,compat_sys_openat)
SYSCALL(sys_mkdirat,sys_mkdirat,sys_mkdirat_wrapper)
SYSCALL(sys_mknodat,sys_mknodat,sys_mknodat_wrapper) /* 290 */
SYSCALL(sys_fchownat,sys_fchownat,sys_fchownat_wrapper)
@@ -312,8 +312,8 @@ SYSCALL(sys_faccessat,sys_faccessat,sys_faccessat_wrapper) /* 300 */
SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6_wrapper)
SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll_wrapper)
SYSCALL(sys_unshare,sys_unshare,sys_unshare_wrapper)
-SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list_wrapper)
-SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list_wrapper)
+SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list)
+SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list)
SYSCALL(sys_splice,sys_splice,sys_splice_wrapper)
SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper)
SYSCALL(sys_tee,sys_tee,sys_tee_wrapper)
@@ -328,8 +328,8 @@ SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper)
NI_SYSCALL /* 317 old sys_timer_fd */
SYSCALL(sys_eventfd,sys_eventfd,sys_eventfd_wrapper)
SYSCALL(sys_timerfd_create,sys_timerfd_create,sys_timerfd_create_wrapper)
-SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime_wrapper) /* 320 */
-SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime_wrapper)
+SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
+SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime)
SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4_wrapper)
SYSCALL(sys_eventfd2,sys_eventfd2,sys_eventfd2_wrapper)
SYSCALL(sys_inotify_init1,sys_inotify_init1,sys_inotify_init1_wrapper)
@@ -338,13 +338,13 @@ SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper)
SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
-SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
+SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper)
SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper)
SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper)
SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */
-SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at_wrapper)
+SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a5f4f5a..876546b 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
*/
unsigned long long notrace __kprobes sched_clock(void)
{
- return tod_to_ns(get_clock_monotonic());
+ return tod_to_ns(get_tod_clock_monotonic());
}
/*
@@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires,
nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
do_div(nsecs, 125);
S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+ /* Program the maximum value if we have an overflow (== year 2042) */
+ if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
+ S390_lowcore.clock_comparator = -1ULL;
set_clock_comparator(S390_lowcore.clock_comparator);
return 0;
}
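
The surrounding conversion explains why the overflow clamp is needed: the comparator is programmed in TOD units, and do_div(nsecs, 125) followed by << 9 multiplies nanoseconds by 512/125 = 4.096, i.e. 4096 TOD units per microsecond, without risking a 64-bit multiply overflow. For a far-future expiry the final addition itself can still wrap the 64-bit TOD value (the current epoch runs out in 2042), which is what the new check catches. The conversion, verified in isolation:

    #include <assert.h>
    #include <stdint.h>

    /* ns -> TOD units: tod = ns * 4096 / 1000 == (ns / 125) << 9 */
    static uint64_t ns_to_tod(uint64_t ns)
    {
            return (ns / 125) << 9;
    }

    int main(void)
    {
            /* 1 s = 1e9 ns = 1e6 us = 1e6 * 4096 TOD units */
            assert(ns_to_tod(1000000000ULL) == 4096000000ULL);
            return 0;
    }
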
@@ -191,7 +194,7 @@ static void stp_reset(void);
void read_persistent_clock(struct timespec *ts)
{
- tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts);
+ tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
}
void read_boot_clock(struct timespec *ts)
@@ -201,7 +204,7 @@ void read_boot_clock(struct timespec *ts)
static cycle_t read_tod_clock(struct clocksource *cs)
{
- return get_clock();
+ return get_tod_clock();
}
static struct clocksource clocksource_tod = {
@@ -339,7 +342,7 @@ int get_sync_clock(unsigned long long *clock)
sw_ptr = &get_cpu_var(clock_sync_word);
sw0 = atomic_read(sw_ptr);
- *clock = get_clock();
+ *clock = get_tod_clock();
sw1 = atomic_read(sw_ptr);
put_cpu_var(clock_sync_word);
if (sw0 == sw1 && (sw0 & 0x80000000U))
@@ -483,7 +486,7 @@ static void etr_reset(void)
.p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
.es = 0, .sl = 0 };
if (etr_setr(&etr_eacr) == 0) {
- etr_tolec = get_clock();
+ etr_tolec = get_tod_clock();
set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
if (etr_port0_online && etr_port1_online)
set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
@@ -765,8 +768,8 @@ static int etr_sync_clock(void *data)
__ctl_set_bit(14, 21);
__ctl_set_bit(0, 29);
clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
- old_clock = get_clock();
- if (set_clock(clock) == 0) {
+ old_clock = get_tod_clock();
+ if (set_tod_clock(clock) == 0) {
__udelay(1); /* Wait for the clock to start. */
__ctl_clear_bit(0, 29);
__ctl_clear_bit(14, 21);
@@ -842,7 +845,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
* assume that this can have caused a stepping
* port switch.
*/
- etr_tolec = get_clock();
+ etr_tolec = get_tod_clock();
eacr.p0 = etr_port0_online;
if (!eacr.p0)
eacr.e0 = 0;
@@ -855,7 +858,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
* assume that this can have caused a stepping
* port switch.
*/
- etr_tolec = get_clock();
+ etr_tolec = get_tod_clock();
eacr.p1 = etr_port1_online;
if (!eacr.p1)
eacr.e1 = 0;
@@ -971,7 +974,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
etr_eacr = eacr;
etr_setr(&etr_eacr);
if (dp_changed)
- etr_tolec = get_clock();
+ etr_tolec = get_tod_clock();
}
/*
@@ -1009,7 +1012,7 @@ static void etr_work_fn(struct work_struct *work)
/* Store aib to get the current ETR status word. */
BUG_ON(etr_stetr(&aib) != 0);
etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
- now = get_clock();
+ now = get_tod_clock();
/*
* Update the port information if the last stepping port change
@@ -1534,10 +1537,10 @@ static int stp_sync_clock(void *data)
if (stp_info.todoff[0] || stp_info.todoff[1] ||
stp_info.todoff[2] || stp_info.todoff[3] ||
stp_info.tmd != 2) {
- old_clock = get_clock();
+ old_clock = get_tod_clock();
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
if (rc == 0) {
- delta = adjust_time(old_clock, get_clock(), 0);
+ delta = adjust_time(old_clock, get_tod_clock(), 0);
fixup_clock_comparator(delta);
rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi));
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 70ecfc5..13dd63f 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -271,7 +271,7 @@ void die(struct pt_regs *regs, const char *str)
print_modules();
show_regs(regs);
bust_spinlocks(0);
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 79cb51a..35b13ed 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -75,6 +75,10 @@ SECTIONS
EXIT_TEXT
}
+ .exit.data : {
+ EXIT_DATA
+ }
+
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e84b8b6..a0042ac 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -127,7 +127,7 @@ void vtime_account_user(struct task_struct *tsk)
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
-void vtime_account(struct task_struct *tsk)
+void vtime_account_irq_enter(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
u64 timer, system;
@@ -145,10 +145,10 @@ void vtime_account(struct task_struct *tsk)
virt_timer_forward(system);
}
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
void vtime_account_system(struct task_struct *tsk)
-__attribute__((alias("vtime_account")));
+__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);
void __kprobes vtime_stop_cpu(void)
@@ -191,7 +191,7 @@ cputime64_t s390_get_idle_time(int cpu)
unsigned int sequence;
do {
- now = get_clock();
+ now = get_tod_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index b58dd86..60f9f8a 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -18,7 +18,7 @@ if VIRTUALIZATION
config KVM
def_tristate y
prompt "Kernel-based Virtual Machine (KVM) support"
- depends on HAVE_KVM && EXPERIMENTAL
+ depends on HAVE_KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 22798ec..f26ff1e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -26,27 +26,20 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
- ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
u64 useraddr;
int reg, rc;
vcpu->stat.instruction_lctlg++;
- if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
- return -EOPNOTSUPP;
- useraddr = disp2;
- if (base2)
- useraddr += vcpu->run->s.regs.gprs[base2];
+ useraddr = kvm_s390_get_base_disp_rsy(vcpu);
if (useraddr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
reg = reg1;
- VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
- disp2);
+ VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
+ useraddr);
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
do {
@@ -68,23 +61,19 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 useraddr;
u32 val = 0;
int reg, rc;
vcpu->stat.instruction_lctl++;
- useraddr = disp2;
- if (base2)
- useraddr += vcpu->run->s.regs.gprs[base2];
+ useraddr = kvm_s390_get_base_disp_rs(vcpu);
if (useraddr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
- disp2);
+ VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
+ useraddr);
trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
reg = reg1;
@@ -104,14 +93,31 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
return 0;
}
-static intercept_handler_t instruction_handlers[256] = {
+static const intercept_handler_t eb_handlers[256] = {
+ [0x2f] = handle_lctlg,
+ [0x8a] = kvm_s390_handle_priv_eb,
+};
+
+static int handle_eb(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
+ if (handler)
+ return handler(vcpu);
+ return -EOPNOTSUPP;
+}
+
+static const intercept_handler_t instruction_handlers[256] = {
[0x01] = kvm_s390_handle_01,
+ [0x82] = kvm_s390_handle_lpsw,
[0x83] = kvm_s390_handle_diag,
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_b2,
[0xb7] = handle_lctl,
+ [0xb9] = kvm_s390_handle_b9,
[0xe5] = kvm_s390_handle_e5,
- [0xeb] = handle_lctlg,
+ [0xeb] = handle_eb,
};
static int handle_noop(struct kvm_vcpu *vcpu)
@@ -258,6 +264,7 @@ static const intercept_handler_t intercept_funcs[] = {
[0x0C >> 2] = handle_instruction_and_prog,
[0x10 >> 2] = handle_noop,
[0x14 >> 2] = handle_noop,
+ [0x18 >> 2] = handle_noop,
[0x1C >> 2] = kvm_s390_handle_wait,
[0x20 >> 2] = handle_validity,
[0x28 >> 2] = handle_stop,
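
handle_eb() replaces the single hard-wired 0xeb entry with a second dispatch level keyed on the low byte of ipb, mirroring the top-level instruction_handlers table; unknown sub-opcodes fall back to -EOPNOTSUPP so they still reach userspace. The pattern, reduced to its essentials:

    typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);

    /* Second-level table: index by the instruction's low opcode byte. */
    static const intercept_handler_t eb_sub[256] = {
            [0x2f] = handle_lctlg,
            [0x8a] = kvm_s390_handle_priv_eb,
    };

    static int dispatch_eb(struct kvm_vcpu *vcpu)
    {
            intercept_handler_t h = eb_sub[vcpu->arch.sie_block->ipb & 0xff];

            return h ? h(vcpu) : -EOPNOTSUPP;
    }
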
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 82c481d..37116a7 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -21,11 +21,31 @@
#include "gaccess.h"
#include "trace-s390.h"
+#define IOINT_SCHID_MASK 0x0000ffff
+#define IOINT_SSID_MASK 0x00030000
+#define IOINT_CSSID_MASK 0x03fc0000
+#define IOINT_AI_MASK 0x04000000
+
+static int is_ioint(u64 type)
+{
+ return ((type & 0xfffe0000u) != 0xfffe0000u);
+}
+
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}
+static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
+{
+ return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
+}
+
+static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
+{
+ return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
+}
+
static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
@@ -35,6 +55,13 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
return 1;
}
+static u64 int_word_to_isc_bits(u32 int_word)
+{
+ u8 isc = (int_word & 0x38000000) >> 27;
+
+ return (0x80 >> isc) << 24;
+}
+
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt_info *inti)
{
@@ -67,7 +94,22 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
case KVM_S390_SIGP_SET_PREFIX:
case KVM_S390_RESTART:
return 1;
+ case KVM_S390_MCHK:
+ if (psw_mchk_disabled(vcpu))
+ return 0;
+ if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
+ return 1;
+ return 0;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ if (psw_ioint_disabled(vcpu))
+ return 0;
+ if (vcpu->arch.sie_block->gcr[6] &
+ int_word_to_isc_bits(inti->io.io_int_word))
+ return 1;
+ return 0;
default:
+ printk(KERN_WARNING "illegal interrupt type %llx\n",
+ inti->type);
BUG();
}
return 0;
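
int_word_to_isc_bits() converts the interruption subclass (bits 2-4 of the I/O interruption word, in s390 big-endian bit numbering) into the matching CR6 mask bit, with ISC 0 occupying the leftmost bit of the mask byte. A self-contained check of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t int_word_to_isc_bits(uint32_t int_word)
    {
            uint8_t isc = (int_word & 0x38000000) >> 27;

            return (uint64_t)(0x80 >> isc) << 24;
    }

    int main(void)
    {
            assert(int_word_to_isc_bits(0u << 27) == 0x80000000); /* ISC 0 */
            assert(int_word_to_isc_bits(3u << 27) == 0x10000000); /* ISC 3 */
            return 0;
    }
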
@@ -93,6 +135,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
&vcpu->arch.sie_block->cpuflags);
vcpu->arch.sie_block->lctl = 0x0000;
+ vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
}
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
@@ -116,6 +159,18 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
case KVM_S390_SIGP_STOP:
__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
break;
+ case KVM_S390_MCHK:
+ if (psw_mchk_disabled(vcpu))
+ vcpu->arch.sie_block->ictl |= ICTL_LPSW;
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR14;
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ if (psw_ioint_disabled(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_IO_INT);
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR6;
+ break;
default:
BUG();
}
@@ -297,6 +352,73 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
exception = 1;
break;
+ case KVM_S390_MCHK:
+ VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+ inti->mchk.mcic);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+ inti->mchk.cr14,
+ inti->mchk.mcic);
+ rc = kvm_s390_vcpu_store_status(vcpu,
+ KVM_S390_STORE_STATUS_PREFIXED);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u64(vcpu, __LC_MCCK_CODE, inti->mchk.mcic);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_MCK_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ {
+ __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
+ inti->io.subchannel_nr;
+ __u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
+ inti->io.io_int_word;
+ VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+ vcpu->stat.deliver_io_int++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
+ param0, param1);
+ rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_ID,
+ inti->io.subchannel_id);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u16(vcpu, __LC_SUBCHANNEL_NR,
+ inti->io.subchannel_nr);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u32(vcpu, __LC_IO_INT_PARM,
+ inti->io.io_int_parm);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u32(vcpu, __LC_IO_INT_WORD,
+ inti->io.io_int_word);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_IO_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_IO_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+ }
default:
BUG();
}
@@ -362,7 +484,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
}
if ((!rc) && (vcpu->arch.sie_block->ckc <
- get_clock() + vcpu->arch.sie_block->epoch)) {
+ get_tod_clock() + vcpu->arch.sie_block->epoch)) {
if ((!psw_extint_disabled(vcpu)) &&
(vcpu->arch.sie_block->gcr[0] & 0x800ul))
rc = 1;
@@ -402,7 +524,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
goto no_timer;
}
- now = get_clock() + vcpu->arch.sie_block->epoch;
+ now = get_tod_clock() + vcpu->arch.sie_block->epoch;
if (vcpu->arch.sie_block->ckc < now) {
__unset_cpu_idle(vcpu);
return 0;
@@ -492,7 +614,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
}
if ((vcpu->arch.sie_block->ckc <
- get_clock() + vcpu->arch.sie_block->epoch))
+ get_tod_clock() + vcpu->arch.sie_block->epoch))
__try_deliver_ckc_interrupt(vcpu);
if (atomic_read(&fi->active)) {
@@ -518,6 +640,61 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
}
}
+void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
+ struct kvm_s390_interrupt_info *n, *inti = NULL;
+ int deliver;
+
+ __reset_intercept_indicators(vcpu);
+ if (atomic_read(&li->active)) {
+ do {
+ deliver = 0;
+ spin_lock_bh(&li->lock);
+ list_for_each_entry_safe(inti, n, &li->list, list) {
+ if ((inti->type == KVM_S390_MCHK) &&
+ __interrupt_is_deliverable(vcpu, inti)) {
+ list_del(&inti->list);
+ deliver = 1;
+ break;
+ }
+ __set_intercept_indicator(vcpu, inti);
+ }
+ if (list_empty(&li->list))
+ atomic_set(&li->active, 0);
+ spin_unlock_bh(&li->lock);
+ if (deliver) {
+ __do_deliver_interrupt(vcpu, inti);
+ kfree(inti);
+ }
+ } while (deliver);
+ }
+
+ if (atomic_read(&fi->active)) {
+ do {
+ deliver = 0;
+ spin_lock(&fi->lock);
+ list_for_each_entry_safe(inti, n, &fi->list, list) {
+ if ((inti->type == KVM_S390_MCHK) &&
+ __interrupt_is_deliverable(vcpu, inti)) {
+ list_del(&inti->list);
+ deliver = 1;
+ break;
+ }
+ __set_intercept_indicator(vcpu, inti);
+ }
+ if (list_empty(&fi->list))
+ atomic_set(&fi->active, 0);
+ spin_unlock(&fi->lock);
+ if (deliver) {
+ __do_deliver_interrupt(vcpu, inti);
+ kfree(inti);
+ }
+ } while (deliver);
+ }
+}
+
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -540,12 +717,50 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
return 0;
}
+struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+ u64 cr6, u64 schid)
+{
+ struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_interrupt_info *inti, *iter;
+
+ if ((!schid && !cr6) || (schid && cr6))
+ return NULL;
+ mutex_lock(&kvm->lock);
+ fi = &kvm->arch.float_int;
+ spin_lock(&fi->lock);
+ inti = NULL;
+ list_for_each_entry(iter, &fi->list, list) {
+ if (!is_ioint(iter->type))
+ continue;
+ if (cr6 &&
+ ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
+ continue;
+ if (schid) {
+ if (((schid & 0x00000000ffff0000) >> 16) !=
+ iter->io.subchannel_id)
+ continue;
+ if ((schid & 0x000000000000ffff) !=
+ iter->io.subchannel_nr)
+ continue;
+ }
+ inti = iter;
+ break;
+ }
+ if (inti)
+ list_del_init(&inti->list);
+ if (list_empty(&fi->list))
+ atomic_set(&fi->active, 0);
+ spin_unlock(&fi->lock);
+ mutex_unlock(&kvm->lock);
+ return inti;
+}
+
int kvm_s390_inject_vm(struct kvm *kvm,
struct kvm_s390_interrupt *s390int)
{
struct kvm_s390_local_interrupt *li;
struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *inti;
+ struct kvm_s390_interrupt_info *inti, *iter;
int sigcpu;
inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -569,6 +784,29 @@ int kvm_s390_inject_vm(struct kvm *kvm,
case KVM_S390_SIGP_STOP:
case KVM_S390_INT_EXTERNAL_CALL:
case KVM_S390_INT_EMERGENCY:
+ kfree(inti);
+ return -EINVAL;
+ case KVM_S390_MCHK:
+ VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
+ s390int->parm64);
+ inti->type = s390int->type;
+ inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
+ inti->mchk.mcic = s390int->parm64;
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ if (s390int->type & IOINT_AI_MASK)
+ VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
+ else
+ VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
+ s390int->type & IOINT_CSSID_MASK,
+ s390int->type & IOINT_SSID_MASK,
+ s390int->type & IOINT_SCHID_MASK);
+ inti->type = s390int->type;
+ inti->io.subchannel_id = s390int->parm >> 16;
+ inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
+ inti->io.io_int_parm = s390int->parm64 >> 32;
+ inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
+ break;
default:
kfree(inti);
return -EINVAL;
@@ -579,7 +817,22 @@ int kvm_s390_inject_vm(struct kvm *kvm,
mutex_lock(&kvm->lock);
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
- list_add_tail(&inti->list, &fi->list);
+ if (!is_ioint(inti->type))
+ list_add_tail(&inti->list, &fi->list);
+ else {
+ u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
+
+ /* Keep I/O interrupts sorted in isc order. */
+ list_for_each_entry(iter, &fi->list, list) {
+ if (!is_ioint(iter->type))
+ continue;
+ if (int_word_to_isc_bits(iter->io.io_int_word)
+ <= isc_bits)
+ continue;
+ break;
+ }
+ list_add_tail(&inti->list, &iter->list);
+ }
atomic_set(&fi->active, 1);
sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
if (sigcpu == KVM_MAX_VCPUS) {
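
The insertion walk above keeps the float list sorted: the new I/O interrupt is queued after every entry whose ISC bit value is less than or equal to its own, so interrupts within the same ISC stay FIFO. It also leans on a list_for_each_entry() property: if the loop finishes without a break, the cursor sits on the list head, and list_add_tail() against it appends at the very end. The shape in isolation:

    /* Sorted insert, same shape as the patch (key() is illustrative): */
    list_for_each_entry(iter, &fi->list, list) {
            if (key(iter) <= key(inti))
                    continue;       /* skip lower-or-equal keys */
            break;                  /* first strictly larger key */
    }
    list_add_tail(&inti->list, &iter->list); /* insert before iter */
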
@@ -651,8 +904,15 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
inti->type = s390int->type;
inti->emerg.code = s390int->parm;
break;
+ case KVM_S390_MCHK:
+ VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+ s390int->parm64);
+ inti->type = s390int->type;
+ inti->mchk.mcic = s390int->parm64;
+ break;
case KVM_S390_INT_VIRTIO:
case KVM_S390_INT_SERVICE:
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
default:
kfree(inti);
return -EINVAL;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f090e81..4cf35a0 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -140,6 +140,8 @@ int kvm_dev_ioctl_check_extension(long ext)
#endif
case KVM_CAP_SYNC_REGS:
case KVM_CAP_ONE_REG:
+ case KVM_CAP_ENABLE_CAP:
+ case KVM_CAP_S390_CSS_SUPPORT:
r = 1;
break;
case KVM_CAP_NR_VCPUS:
@@ -147,7 +149,7 @@ int kvm_dev_ioctl_check_extension(long ext)
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_S390_COW:
- r = sclp_get_fac85() & 0x2;
+ r = MACHINE_HAS_ESOP;
break;
default:
r = 0;
@@ -234,6 +236,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.gmap)
goto out_nogmap;
}
+
+ kvm->arch.css_support = 0;
+
return 0;
out_nogmap:
debug_unregister(kvm->arch.dbf);
@@ -659,6 +664,7 @@ rerun_vcpu:
case KVM_EXIT_INTR:
case KVM_EXIT_S390_RESET:
case KVM_EXIT_S390_UCONTROL:
+ case KVM_EXIT_S390_TSCH:
break;
default:
BUG();
@@ -766,6 +772,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
} else
prefix = 0;
+ /*
+ * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
+ * copying in vcpu load/put. Let's update our copies before we save
+ * them into the save area.
+ */
+ save_fp_regs(&vcpu->arch.guest_fpregs);
+ save_access_regs(vcpu->run->s.regs.acrs);
+
if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
vcpu->arch.guest_fpregs.fprs, 128, prefix))
return -EFAULT;
@@ -810,6 +824,29 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return 0;
}
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+ struct kvm_enable_cap *cap)
+{
+ int r;
+
+ if (cap->flags)
+ return -EINVAL;
+
+ switch (cap->cap) {
+ case KVM_CAP_S390_CSS_SUPPORT:
+ if (!vcpu->kvm->arch.css_support) {
+ vcpu->kvm->arch.css_support = 1;
+ trace_kvm_s390_enable_css(vcpu->kvm);
+ }
+ r = 0;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ return r;
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -896,6 +933,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = 0;
break;
}
+ case KVM_ENABLE_CAP:
+ {
+ struct kvm_enable_cap cap;
+ r = -EFAULT;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ break;
+ r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+ break;
+ }
default:
r = -ENOTTY;
}
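
With KVM_CAP_ENABLE_CAP now advertised, userspace opts a vcpu into in-kernel channel-subsystem support through the standard enable-cap ioctl; per the handler above, cap->flags must be zero or the call fails with -EINVAL. A minimal userspace sketch (setup and error handling omitted):

    struct kvm_enable_cap cap = {
            .cap = KVM_CAP_S390_CSS_SUPPORT,
            /* .flags and .args stay zero */
    };

    if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
            perror("KVM_ENABLE_CAP");
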
@@ -930,7 +976,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
- int user_alloc)
+ bool user_alloc)
{
/* A few sanity checks. We can have exactly one memory slot which has
to start at guest virtual zero and which has to be located at a
@@ -960,7 +1006,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
- int user_alloc)
+ bool user_alloc)
{
int rc;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index d75bc5e..4d89d64 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -65,21 +65,67 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
vcpu->arch.sie_block->ihcpu = 0xffff;
}
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
+{
+ u32 base2 = vcpu->arch.sie_block->ipb >> 28;
+ u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+
+ return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+}
+
+static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
+ u64 *address1, u64 *address2)
+{
+ u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
+ u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
+ u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
+ u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
+
+ *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
+ *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+}
+
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
+{
+ u32 base2 = vcpu->arch.sie_block->ipb >> 28;
+ u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
+ ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
+ /* The displacement is a 20-bit _SIGNED_ value */
+ if (disp2 & 0x80000)
+ disp2 += 0xfff00000;
+
+ return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
+}
+
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
+{
+ u32 base2 = vcpu->arch.sie_block->ipb >> 28;
+ u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+
+ return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+}
+
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
void kvm_s390_tasklet(unsigned long parm);
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
+void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
int kvm_s390_inject_vm(struct kvm *kvm,
struct kvm_s390_interrupt *s390int);
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt *s390int);
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
+struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+ u64 cr6, u64 schid);
/* implemented in priv.c */
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu);
/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
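
Of the new base/displacement helpers, only the RSY form needs care: its displacement is a signed 20-bit quantity assembled from DL and DH, so bit 19 triggers a manual sign extension before the value joins the 64-bit address computation. Worked through on its own:

    #include <assert.h>
    #include <stdint.h>

    /* Sign-extend a 20-bit RSY displacement, as the helper does. */
    static int64_t sext20(uint32_t disp2)
    {
            if (disp2 & 0x80000)
                    disp2 += 0xfff00000;
            return (int64_t)(int32_t)disp2;
    }

    int main(void)
    {
            assert(sext20(0xffffe) == -2);      /* 20-bit -2 */
            assert(sext20(0x7ffff) == 524287);  /* largest positive */
            return 0;
    }
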
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d768906..0ef9894 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -18,23 +18,21 @@
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
+#include <asm/ptrace.h>
+#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 operand2;
u32 address = 0;
u8 tmp;
vcpu->stat.instruction_spx++;
- operand2 = disp2;
- if (base2)
- operand2 += vcpu->run->s.regs.gprs[base2];
+ operand2 = kvm_s390_get_base_disp_s(vcpu);
/* must be word boundary */
if (operand2 & 3) {
@@ -67,15 +65,12 @@ out:
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 operand2;
u32 address;
vcpu->stat.instruction_stpx++;
- operand2 = disp2;
- if (base2)
- operand2 += vcpu->run->s.regs.gprs[base2];
+
+ operand2 = kvm_s390_get_base_disp_s(vcpu);
/* must be word boundary */
if (operand2 & 3) {
@@ -100,15 +95,12 @@ out:
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 useraddr;
int rc;
vcpu->stat.instruction_stap++;
- useraddr = disp2;
- if (base2)
- useraddr += vcpu->run->s.regs.gprs[base2];
+
+ useraddr = kvm_s390_get_base_disp_s(vcpu);
if (useraddr & 1) {
kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -135,24 +127,96 @@ static int handle_skey(struct kvm_vcpu *vcpu)
return 0;
}
-static int handle_stsch(struct kvm_vcpu *vcpu)
+static int handle_tpi(struct kvm_vcpu *vcpu)
{
- vcpu->stat.instruction_stsch++;
- VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
- /* condition code 3 */
+ u64 addr;
+ struct kvm_s390_interrupt_info *inti;
+ int cc;
+
+ addr = kvm_s390_get_base_disp_s(vcpu);
+
+ inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
+ if (inti) {
+ if (addr) {
+ /*
+ * Store the two-word I/O interruption code into the
+ * provided area.
+ */
+ put_guest_u16(vcpu, addr, inti->io.subchannel_id);
+ put_guest_u16(vcpu, addr + 2, inti->io.subchannel_nr);
+ put_guest_u32(vcpu, addr + 4, inti->io.io_int_parm);
+ } else {
+ /*
+ * Store the three-word I/O interruption code into
+ * the appropriate lowcore area.
+ */
+ put_guest_u16(vcpu, 184, inti->io.subchannel_id);
+ put_guest_u16(vcpu, 186, inti->io.subchannel_nr);
+ put_guest_u32(vcpu, 188, inti->io.io_int_parm);
+ put_guest_u32(vcpu, 192, inti->io.io_int_word);
+ }
+ cc = 1;
+ } else
+ cc = 0;
+ kfree(inti);
+ /* Set condition code and we're done. */
vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
- vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+ vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
return 0;
}
-static int handle_chsc(struct kvm_vcpu *vcpu)
+static int handle_tsch(struct kvm_vcpu *vcpu)
{
- vcpu->stat.instruction_chsc++;
- VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
- /* condition code 3 */
- vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
- vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
- return 0;
+ struct kvm_s390_interrupt_info *inti;
+
+ inti = kvm_s390_get_io_int(vcpu->kvm, 0,
+ vcpu->run->s.regs.gprs[1]);
+
+ /*
+ * Prepare exit to userspace.
+ * We indicate whether we dequeued a pending I/O interrupt
+ * so that userspace can re-inject it if the instruction gets
+ * a program check. While this may re-order the pending I/O
+ * interrupts, this is no problem since the priority is kept
+ * intact.
+ */
+ vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
+ vcpu->run->s390_tsch.dequeued = !!inti;
+ if (inti) {
+ vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
+ vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
+ vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
+ vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
+ }
+ vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
+ kfree(inti);
+ return -EREMOTE;
+}
+
+static int handle_io_inst(struct kvm_vcpu *vcpu)
+{
+ VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
+
+ if (vcpu->kvm->arch.css_support) {
+ /*
+ * Most I/O instructions will be handled by userspace.
+ * Exceptions are tpi and the interrupt portion of tsch.
+ */
+ if (vcpu->arch.sie_block->ipa == 0xb236)
+ return handle_tpi(vcpu);
+ if (vcpu->arch.sie_block->ipa == 0xb235)
+ return handle_tsch(vcpu);
+ /* Handle in userspace. */
+ return -EOPNOTSUPP;
+ } else {
+ /*
+ * Set condition code 3 to stop the guest from issuing channel
+ * I/O instructions.
+ */
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+ return 0;
+ }
}
static int handle_stfl(struct kvm_vcpu *vcpu)
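
handle_tpi() above follows the architected split for TEST PENDING INTERRUPTION: with a non-zero operand it stores the two-word interruption code at that address, with a zero operand it stores the three-word code into the fixed lowcore, and the condition code tells the guest whether anything was dequeued. Summarized from the handler:

    /* TPI storage, as implemented above:
     *   operand != 0: at the operand address
     *     +0 subchannel id (u16), +2 subchannel nr (u16), +4 int parm (u32)
     *   operand == 0: in the fixed lowcore
     *     184 subchannel id, 186 subchannel nr, 188 int parm, 192 int word
     *   cc 1: an interruption code was stored; cc 0: none pending. */
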
@@ -176,17 +240,107 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
return 0;
}
+static void handle_new_psw(struct kvm_vcpu *vcpu)
+{
+ /* Check whether the new psw is enabled for machine checks. */
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
+ kvm_s390_deliver_pending_machine_checks(vcpu);
+}
+
+#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
+#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
+#define PSW_ADDR_24 0x00000000000fffffUL
+#define PSW_ADDR_31 0x000000007fffffffUL
+
+int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
+{
+ u64 addr;
+ psw_compat_t new_psw;
+
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu,
+ PGM_PRIVILEGED_OPERATION);
+
+ addr = kvm_s390_get_base_disp_s(vcpu);
+
+ if (addr & 7) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ if (!(new_psw.mask & PSW32_MASK_BASE)) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ vcpu->arch.sie_block->gpsw.mask =
+ (new_psw.mask & ~PSW32_MASK_BASE) << 32;
+ vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
+
+ if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
+ (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
+ (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
+ ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+ PSW_MASK_EA)) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ handle_new_psw(vcpu);
+out:
+ return 0;
+}
+
+static int handle_lpswe(struct kvm_vcpu *vcpu)
+{
+ u64 addr;
+ psw_t new_psw;
+
+ addr = kvm_s390_get_base_disp_s(vcpu);
+
+ if (addr & 7) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
+ vcpu->arch.sie_block->gpsw.addr = new_psw.addr;
+
+ if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
+ (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+ PSW_MASK_BA) &&
+ (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
+ (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
+ (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
+ ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
+ PSW_MASK_EA)) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ handle_new_psw(vcpu);
+out:
+ return 0;
+}
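
The mask/address validity rules appear twice above. As a hedged consolidation (the predicate name is hypothetical; the body is the superset of checks used by handle_lpswe()):

/* Illustrative: the PSW validity rules checked after loading a new PSW. */
static int example_is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;	/* unassigned mask bits must be zero */
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA &&
	    (psw->addr & ~PSW_ADDR_31))
		return 0;	/* 31-bit mode: address must fit 31 bits */
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;	/* 24-bit mode: address must fit 24 bits */
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;	/* EA=1 with BA=0 is invalid */
	return 1;
}
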
+
static int handle_stidp(struct kvm_vcpu *vcpu)
{
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 operand2;
int rc;
vcpu->stat.instruction_stidp++;
- operand2 = disp2;
- if (base2)
- operand2 += vcpu->run->s.regs.gprs[base2];
+
+ operand2 = kvm_s390_get_base_disp_s(vcpu);
if (operand2 & 7) {
kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
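
The removed lines show what the new helper centralizes. A hedged sketch of what kvm_s390_get_base_disp_s() presumably computes for S-format operands (the function name below is illustrative; the body mirrors the open-coded version deleted above):

/* Illustrative: S-format operand = disp2 + (base2 ? gpr[base2] : 0). */
static u64 example_get_base_disp_s(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;

	return disp2 + (base2 ? vcpu->run->s.regs.gprs[base2] : 0);
}
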
@@ -240,17 +394,13 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u64 operand2;
unsigned long mem;
vcpu->stat.instruction_stsi++;
VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
- operand2 = disp2;
- if (base2)
- operand2 += vcpu->run->s.regs.gprs[base2];
+ operand2 = kvm_s390_get_base_disp_s(vcpu);
if (operand2 & 0xfff && fc > 0)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -297,7 +447,7 @@ out_fail:
return 0;
}
-static intercept_handler_t priv_handlers[256] = {
+static const intercept_handler_t b2_handlers[256] = {
[0x02] = handle_stidp,
[0x10] = handle_set_prefix,
[0x11] = handle_store_prefix,
@@ -305,10 +455,25 @@ static intercept_handler_t priv_handlers[256] = {
[0x29] = handle_skey,
[0x2a] = handle_skey,
[0x2b] = handle_skey,
- [0x34] = handle_stsch,
- [0x5f] = handle_chsc,
+ [0x30] = handle_io_inst,
+ [0x31] = handle_io_inst,
+ [0x32] = handle_io_inst,
+ [0x33] = handle_io_inst,
+ [0x34] = handle_io_inst,
+ [0x35] = handle_io_inst,
+ [0x36] = handle_io_inst,
+ [0x37] = handle_io_inst,
+ [0x38] = handle_io_inst,
+ [0x39] = handle_io_inst,
+ [0x3a] = handle_io_inst,
+ [0x3b] = handle_io_inst,
+ [0x3c] = handle_io_inst,
+ [0x5f] = handle_io_inst,
+ [0x74] = handle_io_inst,
+ [0x76] = handle_io_inst,
[0x7d] = handle_stsi,
[0xb1] = handle_stfl,
+ [0xb2] = handle_lpswe,
};
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
@@ -322,7 +487,7 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
* state bit and (a) handle the instruction or (b) send a code 2
* program check.
* Anything else goes to userspace.*/
- handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+ handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
if (handler) {
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu,
@@ -333,19 +498,74 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
+static int handle_epsw(struct kvm_vcpu *vcpu)
+{
+ int reg1, reg2;
+
+ reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
+ reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
+
+ /* This basically extracts the mask half of the psw. */
+ vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
+ vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
+ if (reg2) {
+ vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
+ vcpu->run->s.regs.gprs[reg2] |=
+ vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
+ }
+ return 0;
+}
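
A hedged decode check for handle_epsw() above; note that the mask 0x00f00000 pairs with a shift of 20 (a shift of 24 would always yield zero). With ipb == 0x00340000 this gives reg1 == 3 and reg2 == 4.

/* Illustrative: extract the two 4-bit register fields from the ipb. */
static void example_decode_rre(u32 ipb, int *reg1, int *reg2)
{
	*reg1 = (ipb & 0x00f00000) >> 20;	/* e.g. 0x00340000 -> 3 */
	*reg2 = (ipb & 0x000f0000) >> 16;	/* e.g. 0x00340000 -> 4 */
}
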
+
+static const intercept_handler_t b9_handlers[256] = {
+ [0x8d] = handle_epsw,
+ [0x9c] = handle_io_inst,
+};
+
+int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ /* This is handled just as for the B2 instructions. */
+ handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+ if (handler) {
+ if ((handler != handle_epsw) &&
+ (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
+ return kvm_s390_inject_program_int(vcpu,
+ PGM_PRIVILEGED_OPERATION);
+ else
+ return handler(vcpu);
+ }
+ return -EOPNOTSUPP;
+}
+
+static const intercept_handler_t eb_handlers[256] = {
+ [0x8a] = handle_io_inst,
+};
+
+int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ /* All eb instructions that end up here are privileged. */
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu,
+ PGM_PRIVILEGED_OPERATION);
+ handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
+ if (handler)
+ return handler(vcpu);
+ return -EOPNOTSUPP;
+}
+
static int handle_tprot(struct kvm_vcpu *vcpu)
{
- int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
- int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
- int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
- int disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
- u64 address1 = disp1 + base1 ? vcpu->run->s.regs.gprs[base1] : 0;
- u64 address2 = disp2 + base2 ? vcpu->run->s.regs.gprs[base2] : 0;
+ u64 address1, address2;
struct vm_area_struct *vma;
unsigned long user_address;
vcpu->stat.instruction_tprot++;
+ kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
+
/* we only handle the Linux memory detection case:
* access key == 0
* guest DAT == off
@@ -405,7 +625,7 @@ static int handle_sckpf(struct kvm_vcpu *vcpu)
return 0;
}
-static intercept_handler_t x01_handlers[256] = {
+static const intercept_handler_t x01_handlers[256] = {
[0x07] = handle_sckpf,
};
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 566ddf6..1c48ab2 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -137,8 +137,10 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
inti->type = KVM_S390_SIGP_STOP;
spin_lock_bh(&li->lock);
- if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED))
+ if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
+ kfree(inti);
goto out;
+ }
list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1);
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
@@ -324,8 +326,6 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int r3 = vcpu->arch.sie_block->ipa & 0x000f;
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u32 parameter;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
u8 order_code;
@@ -336,9 +336,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu,
PGM_PRIVILEGED_OPERATION);
- order_code = disp2;
- if (base2)
- order_code += vcpu->run->s.regs.gprs[base2];
+ order_code = kvm_s390_get_base_disp_rs(vcpu);
if (r1 % 2)
parameter = vcpu->run->s.regs.gprs[r1];
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 90fdf85..13f30f5 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -141,13 +141,13 @@ TRACE_EVENT(kvm_s390_inject_vcpu,
* Trace point for the actual delivery of interrupts.
*/
TRACE_EVENT(kvm_s390_deliver_interrupt,
- TP_PROTO(unsigned int id, __u64 type, __u32 data0, __u64 data1),
+ TP_PROTO(unsigned int id, __u64 type, __u64 data0, __u64 data1),
TP_ARGS(id, type, data0, data1),
TP_STRUCT__entry(
__field(int, id)
__field(__u32, inttype)
- __field(__u32, data0)
+ __field(__u64, data0)
__field(__u64, data1)
),
@@ -159,7 +159,7 @@ TRACE_EVENT(kvm_s390_deliver_interrupt,
),
TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \
- "data:%08x %016llx",
+ "data:%08llx %016llx",
__entry->id, __entry->inttype,
__print_symbolic(__entry->inttype, kvm_s390_int_type),
__entry->data0, __entry->data1)
@@ -204,6 +204,26 @@ TRACE_EVENT(kvm_s390_stop_request,
);
+/*
+ * Trace point for enabling channel I/O instruction support.
+ */
+TRACE_EVENT(kvm_s390_enable_css,
+ TP_PROTO(void *kvm),
+ TP_ARGS(kvm),
+
+ TP_STRUCT__entry(
+ __field(void *, kvm)
+ ),
+
+ TP_fast_assign(
+ __entry->kvm = kvm;
+ ),
+
+ TP_printk("enabling channel I/O support (kvm @ %p)\n",
+ __entry->kvm)
+ );
+
+
#endif /* _TRACE_KVMS390_H */
/* This part must be outside protection */
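
Hedged usage note, assuming standard TRACE_EVENT code generation: the kvm_s390_enable_css event above expands to a trace_kvm_s390_enable_css() function that the css-enabling path can call. The example_ wrapper below is illustrative only.

/* Sketch: fire the new trace point when channel I/O support is enabled. */
static void example_enable_css(struct kvm *kvm)
{
	trace_kvm_s390_enable_css(kvm);	/* generated by TRACE_EVENT above */
	kvm->arch.css_support = 1;	/* flag tested in handle_io_inst() */
}
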
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 42d0cf8..c61b9fa 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -32,7 +32,7 @@ static void __udelay_disabled(unsigned long long usecs)
unsigned long cr0, cr6, new;
u64 clock_saved, end;
- end = get_clock() + (usecs << 12);
+ end = get_tod_clock() + (usecs << 12);
clock_saved = local_tick_disable();
__ctl_store(cr0, 0, 0);
__ctl_store(cr6, 6, 6);
@@ -45,7 +45,7 @@ static void __udelay_disabled(unsigned long long usecs)
set_clock_comparator(end);
vtime_stop_cpu();
local_irq_disable();
- } while (get_clock() < end);
+ } while (get_tod_clock() < end);
lockdep_on();
__ctl_load(cr0, 0, 0);
__ctl_load(cr6, 6, 6);
@@ -56,7 +56,7 @@ static void __udelay_enabled(unsigned long long usecs)
{
u64 clock_saved, end;
- end = get_clock() + (usecs << 12);
+ end = get_tod_clock() + (usecs << 12);
do {
clock_saved = 0;
if (end < S390_lowcore.clock_comparator) {
@@ -67,7 +67,7 @@ static void __udelay_enabled(unsigned long long usecs)
local_irq_disable();
if (clock_saved)
local_tick_enable(clock_saved);
- } while (get_clock() < end);
+ } while (get_tod_clock() < end);
}
/*
@@ -111,8 +111,8 @@ void udelay_simple(unsigned long long usecs)
{
u64 end;
- end = get_clock() + (usecs << 12);
- while (get_clock() < end)
+ end = get_tod_clock() + (usecs << 12);
+ while (get_tod_clock() < end)
cpu_relax();
}
@@ -122,10 +122,10 @@ void __ndelay(unsigned long long nsecs)
nsecs <<= 9;
do_div(nsecs, 125);
- end = get_clock() + nsecs;
+ end = get_tod_clock() + nsecs;
if (nsecs & ~0xfffUL)
__udelay(nsecs >> 12);
- while (get_clock() < end)
+ while (get_tod_clock() < end)
barrier();
}
EXPORT_SYMBOL(__ndelay);
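
A hedged sketch of the unit arithmetic behind the conversions above, relying on the architectural fact that TOD-clock bit 51 ticks once per microsecond, i.e. 1 us == 4096 TOD units; that is why the code shifts microseconds left by 12, and why __ndelay() uses "nsecs <<= 9; do_div(nsecs, 125)" (512/125 == 4096/1000).

/* Illustrative conversions; helper names are not part of the patch. */
static inline u64 example_usecs_to_tod(u64 usecs)
{
	return usecs << 12;		/* 1 us == 4096 TOD units */
}

static inline u64 example_nsecs_to_tod(u64 nsecs)
{
	return (nsecs << 9) / 125;	/* 512/125 == 4096/1000 */
}
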
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 9017a63..a70ee84 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -50,7 +50,7 @@ static __always_inline unsigned long follow_table(struct mm_struct *mm,
ptep = pte_offset_map(pmd, addr);
if (!pte_present(*ptep))
return -0x11UL;
- if (write && !pte_write(*ptep))
+ if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
return -0x04UL;
return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
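
Hedged reading of the tightened check above (the rationale is an assumption drawn from s390's software dirty-bit tracking): a PTE may be writable in the Linux sense yet still write-protected in hardware while the page is clean, so a software write access must also require pte_dirty() and otherwise fall back to the fault path, where the dirty state gets recorded.

/* Illustrative predicate mirroring the new follow_table() condition. */
static inline int example_pte_allows_sw_write(pte_t pte)
{
	return pte_write(pte) && pte_dirty(pte);
}
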
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ae672f4..49ce6bb2 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -228,4 +228,16 @@ int arch_add_memory(int nid, u64 start, u64 size)
vmem_remove_mapping(start, size);
return rc;
}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int arch_remove_memory(u64 start, u64 size)
+{
+ /*
+ * There is no hardware or firmware interface which could trigger a
+ * hot memory remove on s390. So there is nothing that needs to be
+ * implemented.
+ */
+ return -EBUSY;
+}
+#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index c59a5ef..06bafec 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,12 +101,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
#else
-int s390_mmap_check(unsigned long addr, unsigned long len)
+int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
int rc;
- if (!is_compat_task() &&
- len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+ if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
+ return 0;
+ if (!(flags & MAP_FIXED))
+ addr = 0;
+ if ((addr + len) >= TASK_SIZE) {
rc = crst_table_upgrade(current->mm, 1UL << 53);
if (rc)
return rc;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 29ccee3..d21040e 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -127,7 +127,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
pte_val(*pte) = _PAGE_TYPE_EMPTY;
continue;
}
- *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
+ pte_val(*pte) = __pa(address);
}
}
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 6ed1426..e21aaf4 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -85,11 +85,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
- pte_t pte;
int ret = -ENOMEM;
while (address < end) {
- pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
@@ -101,9 +99,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
- pte_val(pte) |= _REGION3_ENTRY_LARGE;
- pte_val(pte) |= _REGION_ENTRY_TYPE_R3;
- pud_val(*pu_dir) = pte_val(pte);
+ pud_val(*pu_dir) = __pa(address) |
+ _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
+ (ro ? _REGION_ENTRY_RO : 0);
address += PUD_SIZE;
continue;
}
@@ -118,8 +116,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
- pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
- pmd_val(*pm_dir) = pte_val(pte);
+ pmd_val(*pm_dir) = __pa(address) |
+ _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
+ (ro ? _SEGMENT_ENTRY_RO : 0);
address += PMD_SIZE;
continue;
}
@@ -132,7 +131,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
}
pt_dir = pte_offset_kernel(pm_dir, address);
- *pt_dir = pte;
+ pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0);
address += PAGE_SIZE;
}
ret = 0;
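
Both large-mapping branches above, EDAT2 (2 GB region entries, PUD_SIZE) and EDAT1 (1 MB segment entries, PMD_SIZE), use the same guard. A hedged sketch of the shared shape, with an illustrative name:

/* True if [addr, end) can start with one naturally aligned large page. */
static int example_can_map_large(unsigned long addr, unsigned long end,
				 unsigned long large_size)
{
	return addr &&				/* never map page 0 large */
	       !(addr & (large_size - 1)) &&	/* natural alignment */
	       addr + large_size <= end;	/* range covers the page */
}
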
@@ -199,7 +198,6 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
- pte_t pte;
int ret = -ENOMEM;
start_addr = (unsigned long) start;
@@ -237,9 +235,8 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
new_page = vmemmap_alloc_block(PMD_SIZE, node);
if (!new_page)
goto out;
- pte = mk_pte_phys(__pa(new_page), PAGE_RW);
- pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
- pmd_val(*pm_dir) = pte_val(pte);
+ pmd_val(*pm_dir) = __pa(new_page) |
+ _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
@@ -260,8 +257,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
new_page =__pa(vmem_alloc_pages(0));
if (!new_page)
goto out;
- pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
- *pt_dir = pte;
+ pte_val(*pt_dir) = __pa(new_page);
}
address += PAGE_SIZE;
}
@@ -272,6 +268,10 @@ out:
return ret;
}
+void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+{
+}
+
/*
* Add memory segment to the segment list if it doesn't overlap with
* an already present segment.
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index bb28441..0972e91 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -7,6 +7,7 @@
*/
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
#include <linux/filter.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
@@ -254,6 +255,8 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
case BPF_S_ANC_HATYPE:
case BPF_S_ANC_RXHASH:
case BPF_S_ANC_CPU:
+ case BPF_S_ANC_VLAN_TAG:
+ case BPF_S_ANC_VLAN_TAG_PRESENT:
case BPF_S_RET_K:
/* first instruction sets A register */
break;
@@ -699,6 +702,24 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
/* l %r5,<d(rxhash)>(%r2) */
EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
break;
+ case BPF_S_ANC_VLAN_TAG:
+ case BPF_S_ANC_VLAN_TAG_PRESENT:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+ BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+ /* lhi %r5,0 */
+ EMIT4(0xa7580000);
+ /* icm %r5,3,<d(vlan_tci)>(%r2) */
+ EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
+ if (filter->code == BPF_S_ANC_VLAN_TAG) {
+ /* nill %r5,0xefff */
+ EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
+ } else {
+ /* nill %r5,0x1000 */
+ EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT);
+ /* srl %r5,12 */
+ EMIT4_DISP(0x88500000, 12);
+ }
+ break;
case BPF_S_ANC_CPU: /* A = smp_processor_id() */
#ifdef CONFIG_SMP
/* l %r5,<d(cpu_nr)> */
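
Hedged C equivalent of the instruction sequence the JIT emits above: vlan_tci carries the 12-bit tag with VLAN_TAG_PRESENT (0x1000) folded in, and the two ancillary loads split it apart. The function names are illustrative.

/* What BPF_S_ANC_VLAN_TAG computes: the tag without the present bit. */
static u32 example_vlan_tag(const struct sk_buff *skb)
{
	return skb->vlan_tci & ~VLAN_TAG_PRESENT;	/* nill 0xefff */
}

/* What BPF_S_ANC_VLAN_TAG_PRESENT computes: 0 or 1. */
static u32 example_vlan_tag_present(const struct sk_buff *skb)
{
	return (skb->vlan_tci & VLAN_TAG_PRESENT) >> 12; /* nill + srl 12 */
}
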
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 60e0372..27b4c17 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -51,8 +51,7 @@ EXPORT_SYMBOL_GPL(zpci_list);
DEFINE_MUTEX(zpci_list_lock);
EXPORT_SYMBOL_GPL(zpci_list_lock);
-struct pci_hp_callback_ops hotplug_ops;
-EXPORT_SYMBOL_GPL(hotplug_ops);
+static struct pci_hp_callback_ops *hotplug_ops;
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);
@@ -974,8 +973,8 @@ int zpci_create_device(struct zpci_dev *zdev)
mutex_lock(&zpci_list_lock);
list_add_tail(&zdev->entry, &zpci_list);
- if (hotplug_ops.create_slot)
- hotplug_ops.create_slot(zdev);
+ if (hotplug_ops)
+ hotplug_ops->create_slot(zdev);
mutex_unlock(&zpci_list_lock);
if (zdev->state == ZPCI_FN_STATE_STANDBY)
@@ -989,8 +988,8 @@ int zpci_create_device(struct zpci_dev *zdev)
out_start:
mutex_lock(&zpci_list_lock);
list_del(&zdev->entry);
- if (hotplug_ops.remove_slot)
- hotplug_ops.remove_slot(zdev);
+ if (hotplug_ops)
+ hotplug_ops->remove_slot(zdev);
mutex_unlock(&zpci_list_lock);
out_bus:
zpci_free_domain(zdev);
@@ -1072,13 +1071,29 @@ static void zpci_mem_exit(void)
kmem_cache_destroy(zdev_fmb_cache);
}
-unsigned int pci_probe = 1;
-EXPORT_SYMBOL_GPL(pci_probe);
+void zpci_register_hp_ops(struct pci_hp_callback_ops *ops)
+{
+ mutex_lock(&zpci_list_lock);
+ hotplug_ops = ops;
+ mutex_unlock(&zpci_list_lock);
+}
+EXPORT_SYMBOL_GPL(zpci_register_hp_ops);
+
+void zpci_deregister_hp_ops(void)
+{
+ mutex_lock(&zpci_list_lock);
+ hotplug_ops = NULL;
+ mutex_unlock(&zpci_list_lock);
+}
+EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
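
Hedged usage sketch for the new registration interface; the my_* names and callback bodies are hypothetical, while the ops members follow the create_slot/remove_slot calls visible above. Since zpci_create_device() invokes the callbacks unconditionally once the pointer is set, a registering driver would supply both.

static void my_create_slot(struct zpci_dev *zdev) { /* hypothetical */ }
static void my_remove_slot(struct zpci_dev *zdev) { /* hypothetical */ }

static struct pci_hp_callback_ops my_hp_ops = {
	.create_slot = my_create_slot,
	.remove_slot = my_remove_slot,
};

static int __init my_hp_init(void)
{
	zpci_register_hp_ops(&my_hp_ops);	/* publish under zpci_list_lock */
	return 0;
}

static void __exit my_hp_exit(void)
{
	zpci_deregister_hp_ops();		/* clear before module unload */
}

Taking zpci_list_lock in register/deregister serializes the pointer update against the list walks in zpci_create_device(), which is presumably why the exported global struct was replaced by a private pointer.
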
+
+unsigned int s390_pci_probe = 1;
+EXPORT_SYMBOL_GPL(s390_pci_probe);
char * __init pcibios_setup(char *str)
{
if (!strcmp(str, "off")) {
- pci_probe = 0;
+ s390_pci_probe = 0;
return NULL;
}
return str;
@@ -1088,7 +1103,7 @@ static int __init pci_base_init(void)
{
int rc;
- if (!pci_probe)
+ if (!s390_pci_probe)
return 0;
if (!test_facility(2) || !test_facility(69)
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 2c847143..f339fe2f 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -19,25 +19,25 @@
* Call Logical Processor
* Retry logic is handled by the caller.
*/
-static inline u8 clp_instr(void *req)
+static inline u8 clp_instr(void *data)
{
- u64 ilpm;
+ struct { u8 _[CLP_BLK_SIZE]; } *req = data;
+ u64 ignored;
u8 cc;
asm volatile (
- " .insn rrf,0xb9a00000,%[ilpm],%[req],0x0,0x2\n"
+ " .insn rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
- : [cc] "=d" (cc), [ilpm] "=d" (ilpm)
+ : [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
: [req] "a" (req)
- : "cc", "memory");
+ : "cc");
return cc;
}
static void *clp_alloc_block(void)
{
- struct page *page = alloc_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
- return (page) ? page_address(page) : NULL;
+ return (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
}
static void clp_free_block(void *ptr)