Diffstat (limited to 'arch/s390')
68 files changed, 1050 insertions, 955 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 32425af..f09ae7b 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -60,88 +60,88 @@ config PCI_QUIRKS config S390 def_bool y - select USE_GENERIC_SMP_HELPERS if SMP - select GENERIC_CPU_DEVICES if !SMP - select HAVE_SYSCALL_WRAPPERS - select HAVE_FUNCTION_TRACER - select HAVE_FUNCTION_TRACE_MCOUNT_TEST - select HAVE_FTRACE_MCOUNT_RECORD - select HAVE_C_RECORDMCOUNT - select HAVE_SYSCALL_TRACEPOINTS - select SYSCTL_EXCEPTION_TRACE - select HAVE_DYNAMIC_FTRACE - select HAVE_FUNCTION_GRAPH_TRACER - select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_OPROFILE - select HAVE_KPROBES - select HAVE_KRETPROBES - select HAVE_KVM if 64BIT - select HAVE_ARCH_TRACEHOOK - select INIT_ALL_POSSIBLE - select HAVE_IRQ_WORK - select HAVE_PERF_EVENTS - select ARCH_HAVE_NMI_SAFE_CMPXCHG - select HAVE_DEBUG_KMEMLEAK - select HAVE_KERNEL_GZIP - select HAVE_KERNEL_BZIP2 - select HAVE_KERNEL_LZMA - select HAVE_KERNEL_LZO - select HAVE_KERNEL_XZ - select HAVE_ARCH_MUTEX_CPU_RELAX - select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 - select HAVE_BPF_JIT if 64BIT && PACK_STACK - select ARCH_SAVE_PAGE_KEYS if HIBERNATION - select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE - select HAVE_MEMBLOCK - select HAVE_MEMBLOCK_NODE_MAP - select HAVE_CMPXCHG_LOCAL - select HAVE_CMPXCHG_DOUBLE - select HAVE_ALIGNED_STRUCT_PAGE if SLUB - select HAVE_VIRT_CPU_ACCOUNTING - select VIRT_CPU_ACCOUNTING select ARCH_DISCARD_MEMBLOCK - select BUILDTIME_EXTABLE_SORT - select ARCH_INLINE_SPIN_TRYLOCK - select ARCH_INLINE_SPIN_TRYLOCK_BH - select ARCH_INLINE_SPIN_LOCK - select ARCH_INLINE_SPIN_LOCK_BH - select ARCH_INLINE_SPIN_LOCK_IRQ - select ARCH_INLINE_SPIN_LOCK_IRQSAVE - select ARCH_INLINE_SPIN_UNLOCK - select ARCH_INLINE_SPIN_UNLOCK_BH - select ARCH_INLINE_SPIN_UNLOCK_IRQ - select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE - select ARCH_INLINE_READ_TRYLOCK + select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE + select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK select ARCH_INLINE_READ_LOCK_BH select ARCH_INLINE_READ_LOCK_IRQ select ARCH_INLINE_READ_LOCK_IRQSAVE + select ARCH_INLINE_READ_TRYLOCK select ARCH_INLINE_READ_UNLOCK select ARCH_INLINE_READ_UNLOCK_BH select ARCH_INLINE_READ_UNLOCK_IRQ select ARCH_INLINE_READ_UNLOCK_IRQRESTORE - select ARCH_INLINE_WRITE_TRYLOCK + select ARCH_INLINE_SPIN_LOCK + select ARCH_INLINE_SPIN_LOCK_BH + select ARCH_INLINE_SPIN_LOCK_IRQ + select ARCH_INLINE_SPIN_LOCK_IRQSAVE + select ARCH_INLINE_SPIN_TRYLOCK + select ARCH_INLINE_SPIN_TRYLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK + select ARCH_INLINE_SPIN_UNLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK_IRQ + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE select ARCH_INLINE_WRITE_LOCK select ARCH_INLINE_WRITE_LOCK_BH select ARCH_INLINE_WRITE_LOCK_IRQ select ARCH_INLINE_WRITE_LOCK_IRQSAVE + select ARCH_INLINE_WRITE_TRYLOCK select ARCH_INLINE_WRITE_UNLOCK select ARCH_INLINE_WRITE_UNLOCK_BH select ARCH_INLINE_WRITE_UNLOCK_IRQ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE - select HAVE_UID16 if 32BIT + select ARCH_SAVE_PAGE_KEYS if HIBERNATION select ARCH_WANT_IPC_PARSE_VERSION - select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT + select BUILDTIME_EXTABLE_SORT + select CLONE_BACKWARDS2 + select GENERIC_CLOCKEVENTS + select GENERIC_CPU_DEVICES if !SMP + select GENERIC_KERNEL_THREAD select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL_OLD - select GENERIC_CLOCKEVENTS - select KTIME_SCALAR if 32BIT + select HAVE_ALIGNED_STRUCT_PAGE if SLUB + select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 + select HAVE_ARCH_MUTEX_CPU_RELAX select 
HAVE_ARCH_SECCOMP_FILTER - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE + select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT + select HAVE_BPF_JIT if 64BIT && PACK_STACK + select HAVE_CMPXCHG_DOUBLE + select HAVE_CMPXCHG_LOCAL + select HAVE_C_RECORDMCOUNT + select HAVE_DEBUG_KMEMLEAK + select HAVE_DYNAMIC_FTRACE + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_TRACE_MCOUNT_TEST + select HAVE_KERNEL_BZIP2 + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZMA + select HAVE_KERNEL_LZO + select HAVE_KERNEL_XZ + select HAVE_KPROBES + select HAVE_KRETPROBES + select HAVE_KVM if 64BIT + select HAVE_MEMBLOCK + select HAVE_MEMBLOCK_NODE_MAP select HAVE_MOD_ARCH_SPECIFIC + select HAVE_OPROFILE + select HAVE_PERF_EVENTS + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_SYSCALL_TRACEPOINTS + select HAVE_SYSCALL_WRAPPERS + select HAVE_UID16 if 32BIT + select HAVE_VIRT_CPU_ACCOUNTING + select INIT_ALL_POSSIBLE + select KTIME_SCALAR if 32BIT select MODULES_USE_ELF_RELA - select CLONE_BACKWARDS2 + select OLD_SIGSUSPEND3 + select OLD_SIGACTION + select SYSCTL_EXCEPTION_TRACE + select USE_GENERIC_SMP_HELPERS if SMP + select VIRT_CPU_ACCOUNTING config SCHED_OMIT_FRAME_POINTER def_bool y @@ -251,6 +251,7 @@ config COMPAT depends on 64BIT select COMPAT_BINFMT_ELF if BINFMT_ELF select ARCH_WANT_OLD_COMPAT_IPC + select COMPAT_OLD_SIGACTION help Select this option if you want to enable your system kernel to handle system-calls from ELF binaries for 31 bit ESA. This option @@ -720,8 +721,8 @@ source "arch/s390/kvm/Kconfig" config S390_GUEST def_bool y - prompt "s390 support for virtio devices (EXPERIMENTAL)" - depends on 64BIT && EXPERIMENTAL + prompt "s390 support for virtio devices" + depends on 64BIT select VIRTUALIZATION select VIRTIO select VIRTIO_CONSOLE diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 4b8e08b..7e3ce78 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -24,8 +24,8 @@ CHECKFLAGS += -D__s390__ -msize-long else LD_BFD := elf64-s390 LDFLAGS := -m elf64_s390 -KBUILD_AFLAGS_MODULE += -fpic -D__PIC__ -KBUILD_CFLAGS_MODULE += -fpic -D__PIC__ +KBUILD_AFLAGS_MODULE += -fPIC +KBUILD_CFLAGS_MODULE += -fPIC KBUILD_CFLAGS += -m64 KBUILD_AFLAGS += -m64 UTS_MACHINE := s390x diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c index 02d9a1c..7ef60b5 100644 --- a/arch/s390/appldata/appldata_mem.c +++ b/arch/s390/appldata/appldata_mem.c @@ -108,7 +108,7 @@ static void appldata_get_mem_data(void *data) mem_data->totalswap = P2K(val.totalswap); mem_data->freeswap = P2K(val.freeswap); - mem_data->timestamp = get_clock(); + mem_data->timestamp = get_tod_clock(); mem_data->sync_count_2++; } diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c index 1370e35..2d224b9 100644 --- a/arch/s390/appldata/appldata_net_sum.c +++ b/arch/s390/appldata/appldata_net_sum.c @@ -111,7 +111,7 @@ static void appldata_get_net_sum_data(void *data) net_data->tx_dropped = tx_dropped; net_data->collisions = collisions; - net_data->timestamp = get_clock(); + net_data->timestamp = get_tod_clock(); net_data->sync_count_2++; } diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index 87521ba..de8e2b3 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -156,7 +156,7 @@ static void appldata_get_os_data(void *data) } ops.size = new_size; } - os_data->timestamp = get_clock(); + 
os_data->timestamp = get_tod_clock(); os_data->sync_count_2++; } diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index 4f6afaa..f364dcf 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -245,7 +245,7 @@ static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size) d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr)); if (IS_ERR(d2fc)) return PTR_ERR(d2fc); - get_clock_ext(d2fc->hdr.tod_ext); + get_tod_clock_ext(d2fc->hdr.tod_ext); d2fc->hdr.len = count * sizeof(struct diag2fc_data); d2fc->hdr.version = DBFS_D2FC_HDR_VERSION; d2fc->hdr.count = count; diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 10a5088..16760ee 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -13,15 +13,12 @@ * to devices. */ -static inline void mb(void) -{ #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES - /* Fast-BCR without checkpoint synchronization */ - asm volatile("bcr 14,0" : : : "memory"); +/* Fast-BCR without checkpoint synchronization */ +#define mb() do { asm volatile("bcr 14,0" : : : "memory"); } while (0) #else - asm volatile("bcr 15,0" : : : "memory"); +#define mb() do { asm volatile("bcr 15,0" : : : "memory"); } while (0) #endif -} #define rmb() mb() #define wmb() mb() diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h index 6c3aecc..a0e71a5 100644 --- a/arch/s390/include/asm/clp.h +++ b/arch/s390/include/asm/clp.h @@ -2,7 +2,7 @@ #define _ASM_S390_CLP_H /* CLP common request & response block size */ -#define CLP_BLK_SIZE (PAGE_SIZE * 2) +#define CLP_BLK_SIZE PAGE_SIZE struct clp_req_hdr { u16 len; diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 18cd6b5..f8c6df6 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -7,6 +7,9 @@ #include <linux/sched.h> #include <linux/thread_info.h> +#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64)) +#define __SC_DELOUSE(t,v) (t)(__TYPE_IS_PTR(t) ? 
((v) & 0x7fffffff) : (v)) + #define PSW32_MASK_PER 0x40000000UL #define PSW32_MASK_DAT 0x04000000UL #define PSW32_MASK_IO 0x02000000UL diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index 35f0020..f1eddd1 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -34,12 +34,12 @@ /* CPU measurement facility support */ static inline int cpum_cf_avail(void) { - return MACHINE_HAS_SPP && test_facility(67); + return MACHINE_HAS_LPP && test_facility(67); } static inline int cpum_sf_avail(void) { - return MACHINE_HAS_SPP && test_facility(68); + return MACHINE_HAS_LPP && test_facility(68); } diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 8a32f7d..9411db65 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -19,9 +19,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) } extern int dma_set_mask(struct device *dev, u64 mask); -extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); -extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction); + +static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction) +{ +} #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h index de015d8..bb9bdcd 100644 --- a/arch/s390/include/asm/dma.h +++ b/arch/s390/include/asm/dma.h @@ -10,4 +10,10 @@ */ #define MAX_DMA_ADDRESS 0x80000000 +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + #endif /* _ASM_S390_DMA_H */ diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 16c3eb1..27cb321 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -85,6 +85,11 @@ static inline void iounmap(volatile void __iomem *addr) #define __raw_writel zpci_write_u32 #define __raw_writeq zpci_write_u64 +#define readb_relaxed readb +#define readw_relaxed readw +#define readl_relaxed readl +#define readq_relaxed readq + #endif /* CONFIG_PCI */ #include <asm-generic/io.h> diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index aa6d0d7..87c17bf 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h @@ -2,44 +2,62 @@ #define _ASM_IRQ_H #include <linux/hardirq.h> +#include <linux/percpu.h> +#include <linux/cache.h> #include <linux/types.h> -enum interruption_class { +enum interruption_main_class { EXTERNAL_INTERRUPT, IO_INTERRUPT, - EXTINT_CLK, - EXTINT_EXC, - EXTINT_EMS, - EXTINT_TMR, - EXTINT_TLA, - EXTINT_PFL, - EXTINT_DSD, - EXTINT_VRT, - EXTINT_SCP, - EXTINT_IUC, - EXTINT_CMS, - EXTINT_CMC, - EXTINT_CMR, - IOINT_CIO, - IOINT_QAI, - IOINT_DAS, - IOINT_C15, - IOINT_C70, - IOINT_TAP, - IOINT_VMR, - IOINT_LCS, - IOINT_CLW, - IOINT_CTC, - IOINT_APB, - IOINT_ADM, - IOINT_CSC, - IOINT_PCI, - IOINT_MSI, - IOINT_VIR, + NR_IRQS +}; + +enum interruption_class { + IRQEXT_CLK, + IRQEXT_EXC, + IRQEXT_EMS, + IRQEXT_TMR, + IRQEXT_TLA, + IRQEXT_PFL, + IRQEXT_DSD, + IRQEXT_VRT, + IRQEXT_SCP, + IRQEXT_IUC, + IRQEXT_CMS, + IRQEXT_CMC, + IRQEXT_CMR, + IRQIO_CIO, + IRQIO_QAI, + IRQIO_DAS, + IRQIO_C15, + IRQIO_C70, + IRQIO_TAP, + IRQIO_VMR, + IRQIO_LCS, + IRQIO_CLW, + IRQIO_CTC, + IRQIO_APB, + IRQIO_ADM, + IRQIO_CSC, + IRQIO_PCI, + IRQIO_MSI, + IRQIO_VIR, NMI_NMI, - NR_IRQS, + CPU_RST, + 
NR_ARCH_IRQS }; +struct irq_stat { + unsigned int irqs[NR_ARCH_IRQS]; +}; + +DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); + +static __always_inline void inc_irq_stat(enum interruption_class irq) +{ + __get_cpu_var(irq_stat).irqs[irq]++; +} + struct ext_code { unsigned short subcode; unsigned short code; diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h index 0e47a57..9977e08 100644 --- a/arch/s390/include/asm/mman.h +++ b/arch/s390/include/asm/mman.h @@ -9,7 +9,7 @@ #include <uapi/asm/mman.h> #if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) -int s390_mmap_check(unsigned long addr, unsigned long len); -#define arch_mmap_check(addr,len,flags) s390_mmap_check(addr,len) +int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); +#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags) #endif #endif /* __S390_MMAN_H__ */ diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index a86ad40840..75ce9b0 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -155,28 +155,6 @@ static inline int page_reset_referenced(unsigned long addr) #define _PAGE_ACC_BITS 0xf0 /* HW access control bits */ /* - * Test and clear dirty bit in storage key. - * We can't clear the changed bit atomically. This is a potential - * race against modification of the referenced bit. This function - * should therefore only be called if it is not mapped in any - * address space. - * - * Note that the bit gets set whenever page content is changed. That means - * also when the page is modified by DMA or from inside the kernel. - */ -#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY -static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped) -{ - unsigned char skey; - - skey = page_get_storage_key(pfn << PAGE_SHIFT); - if (!(skey & _PAGE_CHANGED)) - return 0; - page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped); - return 1; -} - -/* * Test and clear referenced bit in storage key. 
*/ #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index a6175ad..05333b7 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -9,6 +9,7 @@ #include <asm-generic/pci.h> #include <asm-generic/pci-dma-compat.h> #include <asm/pci_clp.h> +#include <asm/pci_debug.h> #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM 0x10000000 @@ -33,6 +34,25 @@ int pci_proc_domain(struct pci_bus *); #define ZPCI_FC_BLOCKED 0x20 #define ZPCI_FC_DMA_ENABLED 0x10 +struct zpci_fmb { + u32 format : 8; + u32 dma_valid : 1; + u32 : 23; + u32 samples; + u64 last_update; + /* hardware counters */ + u64 ld_ops; + u64 st_ops; + u64 stb_ops; + u64 rpcit_ops; + u64 dma_rbytes; + u64 dma_wbytes; + /* software counters */ + atomic64_t allocated_pages; + atomic64_t mapped_pages; + atomic64_t unmapped_pages; +} __packed __aligned(16); + struct msi_map { unsigned long irq; struct msi_desc *msi; @@ -92,7 +112,15 @@ struct zpci_dev { u64 end_dma; /* End of available DMA addresses */ u64 dma_mask; /* DMA address space mask */ + /* Function measurement block */ + struct zpci_fmb *fmb; + u16 fmb_update; /* update interval */ + enum pci_bus_speed max_bus_speed; + + struct dentry *debugfs_dev; + struct dentry *debugfs_perf; + struct dentry *debugfs_debug; }; struct pci_hp_callback_ops { @@ -132,9 +160,14 @@ void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *); int zpci_msihash_init(void); void zpci_msihash_exit(void); +#ifdef CONFIG_PCI /* Error handling and recovery */ void zpci_event_error(void *); void zpci_event_availability(void *); +#else /* CONFIG_PCI */ +static inline void zpci_event_error(void *e) {} +static inline void zpci_event_availability(void *e) {} +#endif /* CONFIG_PCI */ /* Helpers */ struct zpci_dev *get_zdev(struct pci_dev *); @@ -152,7 +185,20 @@ void zpci_dma_exit(void); /* Hotplug */ extern struct mutex zpci_list_lock; extern struct list_head zpci_list; -extern struct pci_hp_callback_ops hotplug_ops; -extern unsigned int pci_probe; +extern unsigned int s390_pci_probe; + +void zpci_register_hp_ops(struct pci_hp_callback_ops *); +void zpci_deregister_hp_ops(void); + +/* FMB */ +int zpci_fmb_enable_device(struct zpci_dev *); +int zpci_fmb_disable_device(struct zpci_dev *); + +/* Debug */ +int zpci_debug_init(void); +void zpci_debug_exit(void); +void zpci_debug_init_device(struct zpci_dev *); +void zpci_debug_exit_device(struct zpci_dev *); +void zpci_debug_info(struct zpci_dev *, struct seq_file *); #endif diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h new file mode 100644 index 0000000..6bbec42 --- /dev/null +++ b/arch/s390/include/asm/pci_debug.h @@ -0,0 +1,36 @@ +#ifndef _S390_ASM_PCI_DEBUG_H +#define _S390_ASM_PCI_DEBUG_H + +#include <asm/debug.h> + +extern debug_info_t *pci_debug_msg_id; +extern debug_info_t *pci_debug_err_id; + +#ifdef CONFIG_PCI_DEBUG +#define zpci_dbg(fmt, args...) \ + do { \ + if (pci_debug_msg_id->level >= 2) \ + debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\ + } while (0) + +#else /* !CONFIG_PCI_DEBUG */ +#define zpci_dbg(fmt, args...) do { } while (0) +#endif + +#define zpci_err(text...) 
\ + do { \ + char debug_buffer[16]; \ + snprintf(debug_buffer, 16, text); \ + debug_text_event(pci_debug_err_id, 0, debug_buffer); \ + } while (0) + +static inline void zpci_err_hex(void *addr, int len) +{ + while (len > 0) { + debug_event(pci_debug_err_id, 0, (void *) addr, len); + len -= pci_debug_err_id->buf_size; + addr += pci_debug_err_id->buf_size; + } +} + +#endif diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index c928dc1..97de120 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -29,6 +29,7 @@ #ifndef __ASSEMBLY__ #include <linux/sched.h> #include <linux/mm_types.h> +#include <linux/page-flags.h> #include <asm/bug.h> #include <asm/page.h> @@ -221,13 +222,15 @@ extern unsigned long MODULES_END; /* Software bits in the page table entry */ #define _PAGE_SWT 0x001 /* SW pte type bit t */ #define _PAGE_SWX 0x002 /* SW pte type bit x */ -#define _PAGE_SWC 0x004 /* SW pte changed bit (for KVM) */ -#define _PAGE_SWR 0x008 /* SW pte referenced bit (for KVM) */ -#define _PAGE_SPECIAL 0x010 /* SW associated with special page */ +#define _PAGE_SWC 0x004 /* SW pte changed bit */ +#define _PAGE_SWR 0x008 /* SW pte referenced bit */ +#define _PAGE_SWW 0x010 /* SW pte write bit */ +#define _PAGE_SPECIAL 0x020 /* SW associated with special page */ #define __HAVE_ARCH_PTE_SPECIAL /* Set of bits not changed in pte_modify */ -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR) +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \ + _PAGE_SWC | _PAGE_SWR) /* Six different types of pages. */ #define _PAGE_TYPE_EMPTY 0x400 @@ -321,6 +324,7 @@ extern unsigned long MODULES_END; /* Bits in the region table entry */ #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ +#define _REGION_ENTRY_RO 0x200 /* region protection bit */ #define _REGION_ENTRY_INV 0x20 /* invalid region table entry */ #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ @@ -382,9 +386,11 @@ extern unsigned long MODULES_END; */ #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) #define PAGE_RO __pgprot(_PAGE_TYPE_RO) -#define PAGE_RW __pgprot(_PAGE_TYPE_RW) +#define PAGE_RW __pgprot(_PAGE_TYPE_RO | _PAGE_SWW) +#define PAGE_RWC __pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC) -#define PAGE_KERNEL PAGE_RW +#define PAGE_KERNEL PAGE_RWC +#define PAGE_SHARED PAGE_KERNEL #define PAGE_COPY PAGE_RO /* @@ -631,23 +637,23 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); /* Clear page changed & referenced bit in the storage key */ if (bits & _PAGE_CHANGED) - page_set_storage_key(address, skey ^ bits, 1); + page_set_storage_key(address, skey ^ bits, 0); else if (bits) page_reset_referenced(address); /* Transfer page changed & referenced bit to guest bits in pgste */ pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */ /* Get host changed & referenced bits from pgste */ bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52; - /* Clear host bits in pgste. */ + /* Transfer page changed & referenced bit to kvm user bits */ + pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */ + /* Clear relevant host bits in pgste. 
*/ pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT); pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT); /* Copy page access key and fetch protection bit to pgste */ pgste_val(pgste) |= (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; - /* Transfer changed and referenced to kvm user bits */ - pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */ - /* Transfer changed & referenced to pte sofware bits */ - pte_val(*ptep) |= bits << 1; /* _PAGE_SWR & _PAGE_SWC */ + /* Transfer referenced bit to pte */ + pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1; #endif return pgste; @@ -660,20 +666,25 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) if (!pte_present(*ptep)) return pgste; + /* Get referenced bit from storage key */ young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); - /* Transfer page referenced bit to pte software bit (host view) */ - if (young || (pgste_val(pgste) & RCP_HR_BIT)) + if (young) + pgste_val(pgste) |= RCP_GR_BIT; + /* Get host referenced bit from pgste */ + if (pgste_val(pgste) & RCP_HR_BIT) { + pgste_val(pgste) &= ~RCP_HR_BIT; + young = 1; + } + /* Transfer referenced bit to kvm user bits and pte */ + if (young) { + pgste_val(pgste) |= KVM_UR_BIT; pte_val(*ptep) |= _PAGE_SWR; - /* Clear host referenced bit in pgste. */ - pgste_val(pgste) &= ~RCP_HR_BIT; - /* Transfer page referenced bit to guest bit in pgste */ - pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */ + } #endif return pgste; - } -static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry) +static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) { #ifdef CONFIG_PGSTE unsigned long address; @@ -687,10 +698,23 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry) /* Set page access key and fetch protection bit from pgste */ nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56; if (okey != nkey) - page_set_storage_key(address, nkey, 1); + page_set_storage_key(address, nkey, 0); #endif } +static inline void pgste_set_pte(pte_t *ptep, pte_t entry) +{ + if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) { + /* + * Without enhanced suppression-on-protection force + * the dirty bit on for all writable ptes. 
+ */ + pte_val(entry) |= _PAGE_SWC; + pte_val(entry) &= ~_PAGE_RO; + } + *ptep = entry; +} + /** * struct gmap_struct - guest address space * @mm: pointer to the parent mm_struct @@ -749,11 +773,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - pgste_set_pte(ptep, pgste, entry); - *ptep = entry; + pgste_set_key(ptep, pgste, entry); + pgste_set_pte(ptep, entry); pgste_set_unlock(ptep, pgste); - } else + } else { + if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1) + pte_val(entry) |= _PAGE_CO; *ptep = entry; + } } /* @@ -762,16 +789,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, */ static inline int pte_write(pte_t pte) { - return (pte_val(pte) & _PAGE_RO) == 0; + return (pte_val(pte) & _PAGE_SWW) != 0; } static inline int pte_dirty(pte_t pte) { -#ifdef CONFIG_PGSTE - if (pte_val(pte) & _PAGE_SWC) - return 1; -#endif - return 0; + return (pte_val(pte) & _PAGE_SWC) != 0; } static inline int pte_young(pte_t pte) @@ -821,11 +844,14 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte_val(pte) &= _PAGE_CHG_MASK; pte_val(pte) |= pgprot_val(newprot); + if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW)) + pte_val(pte) &= ~_PAGE_RO; return pte; } static inline pte_t pte_wrprotect(pte_t pte) { + pte_val(pte) &= ~_PAGE_SWW; /* Do not clobber _PAGE_TYPE_NONE pages! */ if (!(pte_val(pte) & _PAGE_INVALID)) pte_val(pte) |= _PAGE_RO; @@ -834,20 +860,26 @@ static inline pte_t pte_wrprotect(pte_t pte) static inline pte_t pte_mkwrite(pte_t pte) { - pte_val(pte) &= ~_PAGE_RO; + pte_val(pte) |= _PAGE_SWW; + if (pte_val(pte) & _PAGE_SWC) + pte_val(pte) &= ~_PAGE_RO; return pte; } static inline pte_t pte_mkclean(pte_t pte) { -#ifdef CONFIG_PGSTE pte_val(pte) &= ~_PAGE_SWC; -#endif + /* Do not clobber _PAGE_TYPE_NONE pages! */ + if (!(pte_val(pte) & _PAGE_INVALID)) + pte_val(pte) |= _PAGE_RO; return pte; } static inline pte_t pte_mkdirty(pte_t pte) { + pte_val(pte) |= _PAGE_SWC; + if (pte_val(pte) & _PAGE_SWW) + pte_val(pte) &= ~_PAGE_RO; return pte; } @@ -885,10 +917,10 @@ static inline pte_t pte_mkhuge(pte_t pte) pte_val(pte) |= _SEGMENT_ENTRY_INV; } /* - * Clear SW pte bits SWT and SWX, there are no SW bits in a segment - * table entry. + * Clear SW pte bits, there are no SW bits in a segment table entry. */ - pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX); + pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC | + _PAGE_SWR | _PAGE_SWW); /* * Also set the change-override bit because we don't need dirty bit * tracking for hugetlbfs pages. 
@@ -1040,9 +1072,11 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long address, pte_t *ptep, pte_t pte) { - *ptep = pte; - if (mm_has_pgste(mm)) + if (mm_has_pgste(mm)) { + pgste_set_pte(ptep, pte); pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE)); + } else + *ptep = pte; } #define __HAVE_ARCH_PTEP_CLEAR_FLUSH @@ -1110,10 +1144,13 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm, if (!mm_exclusive(mm)) __ptep_ipte(address, ptep); - *ptep = pte_wrprotect(pte); + pte = pte_wrprotect(pte); - if (mm_has_pgste(mm)) + if (mm_has_pgste(mm)) { + pgste_set_pte(ptep, pte); pgste_set_unlock(ptep, pgste); + } else + *ptep = pte; } return pte; } @@ -1131,10 +1168,12 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, pgste = pgste_get_lock(ptep); __ptep_ipte(address, ptep); - *ptep = entry; - if (mm_has_pgste(vma->vm_mm)) + if (mm_has_pgste(vma->vm_mm)) { + pgste_set_pte(ptep, entry); pgste_set_unlock(ptep, pgste); + } else + *ptep = entry; return 1; } @@ -1152,8 +1191,13 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) { unsigned long physpage = page_to_phys(page); + pte_t __pte = mk_pte_phys(physpage, pgprot); - return mk_pte_phys(physpage, pgprot); + if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) { + pte_val(__pte) |= _PAGE_SWC; + pte_val(__pte) &= ~_PAGE_RO; + } + return __pte; } #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) @@ -1245,6 +1289,8 @@ static inline int pmd_trans_splitting(pmd_t pmd) static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t entry) { + if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) + pmd_val(entry) |= _SEGMENT_ENTRY_CO; *pmdp = entry; } @@ -1365,6 +1411,18 @@ static inline void pmdp_invalidate(struct vm_area_struct *vma, __pmd_idte(address, pmdp); } +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + + if (pmd_write(pmd)) { + __pmd_idte(address, pmdp); + set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd)); + } +} + static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) { pmd_t __pmd; @@ -1387,10 +1445,7 @@ static inline int has_transparent_hugepage(void) static inline unsigned long pmd_pfn(pmd_t pmd) { - if (pmd_trans_huge(pmd)) - return pmd_val(pmd) >> HPAGE_SHIFT; - else - return pmd_val(pmd) >> PAGE_SHIFT; + return pmd_val(pmd) >> PAGE_SHIFT; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 8337886..06a1361 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -46,7 +46,6 @@ int sclp_cpu_deconfigure(u8 cpu); void sclp_facilities_detect(void); unsigned long long sclp_get_rnmax(void); unsigned long long sclp_get_rzm(void); -u8 sclp_get_fac85(void); int sclp_sdias_blk_count(void); int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); int sclp_chp_configure(struct chp_id chpid); diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index f69f76b..ff67d73 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -64,17 +64,18 @@ extern unsigned int s390_user_mode; #define MACHINE_FLAG_VM (1UL << 0) #define MACHINE_FLAG_IEEE (1UL << 1) -#define MACHINE_FLAG_CSP (1UL << 3) -#define MACHINE_FLAG_MVPG (1UL << 4) -#define MACHINE_FLAG_DIAG44 (1UL << 5) -#define 
MACHINE_FLAG_IDTE (1UL << 6) -#define MACHINE_FLAG_DIAG9C (1UL << 7) -#define MACHINE_FLAG_MVCOS (1UL << 8) -#define MACHINE_FLAG_KVM (1UL << 9) +#define MACHINE_FLAG_CSP (1UL << 2) +#define MACHINE_FLAG_MVPG (1UL << 3) +#define MACHINE_FLAG_DIAG44 (1UL << 4) +#define MACHINE_FLAG_IDTE (1UL << 5) +#define MACHINE_FLAG_DIAG9C (1UL << 6) +#define MACHINE_FLAG_MVCOS (1UL << 7) +#define MACHINE_FLAG_KVM (1UL << 8) +#define MACHINE_FLAG_ESOP (1UL << 9) #define MACHINE_FLAG_EDAT1 (1UL << 10) #define MACHINE_FLAG_EDAT2 (1UL << 11) #define MACHINE_FLAG_LPAR (1UL << 12) -#define MACHINE_FLAG_SPP (1UL << 13) +#define MACHINE_FLAG_LPP (1UL << 13) #define MACHINE_FLAG_TOPOLOGY (1UL << 14) #define MACHINE_FLAG_TE (1UL << 15) #define MACHINE_FLAG_RRBM (1UL << 16) @@ -84,6 +85,7 @@ extern unsigned int s390_user_mode; #define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR) #define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) +#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP) #define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1 #define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1 @@ -96,7 +98,7 @@ extern unsigned int s390_user_mode; #define MACHINE_HAS_MVCOS (0) #define MACHINE_HAS_EDAT1 (0) #define MACHINE_HAS_EDAT2 (0) -#define MACHINE_HAS_SPP (0) +#define MACHINE_HAS_LPP (0) #define MACHINE_HAS_TOPOLOGY (0) #define MACHINE_HAS_TE (0) #define MACHINE_HAS_RRBM (0) @@ -109,7 +111,7 @@ extern unsigned int s390_user_mode; #define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS) #define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) #define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) -#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) +#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) #define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) #define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) #define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM) diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h index db7ddfa..abf9e57 100644 --- a/arch/s390/include/asm/signal.h +++ b/arch/s390/include/asm/signal.h @@ -21,22 +21,5 @@ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; -struct old_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -}; - -struct sigaction { - __sighandler_t sa_handler; - unsigned long sa_flags; - void (*sa_restorer)(void); - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -}; - +#define __ARCH_HAS_SA_RESTORER #endif diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index fba4d66..8ad8af9 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -15,7 +15,7 @@ #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL /* Inline functions for clock register access. 
*/ -static inline int set_clock(__u64 time) +static inline int set_tod_clock(__u64 time) { int cc; @@ -27,7 +27,7 @@ static inline int set_clock(__u64 time) return cc; } -static inline int store_clock(__u64 *time) +static inline int store_tod_clock(__u64 *time) { int cc; @@ -71,7 +71,7 @@ static inline void local_tick_enable(unsigned long long comp) typedef unsigned long long cycles_t; -static inline unsigned long long get_clock(void) +static inline unsigned long long get_tod_clock(void) { unsigned long long clk; @@ -83,21 +83,21 @@ static inline unsigned long long get_clock(void) return clk; } -static inline void get_clock_ext(char *clk) +static inline void get_tod_clock_ext(char *clk) { asm volatile("stcke %0" : "=Q" (*clk) : : "cc"); } -static inline unsigned long long get_clock_xt(void) +static inline unsigned long long get_tod_clock_xt(void) { unsigned char clk[16]; - get_clock_ext(clk); + get_tod_clock_ext(clk); return *((unsigned long long *)&clk[1]); } static inline cycles_t get_cycles(void) { - return (cycles_t) get_clock() >> 2; + return (cycles_t) get_tod_clock() >> 2; } int get_sync_clock(unsigned long long *clock); @@ -123,9 +123,37 @@ extern u64 sched_clock_base_cc; * function, otherwise the returned value is not guaranteed to * be monotonic. */ -static inline unsigned long long get_clock_monotonic(void) +static inline unsigned long long get_tod_clock_monotonic(void) { - return get_clock_xt() - sched_clock_base_cc; + return get_tod_clock_xt() - sched_clock_base_cc; +} + +/** + * tod_to_ns - convert a TOD format value to nanoseconds + * @todval: to be converted TOD format value + * Returns: number of nanoseconds that correspond to the TOD format value + * + * Converting a 64 Bit TOD format value to nanoseconds means that the value + * must be divided by 4.096. In order to achieve that we multiply with 125 + * and divide by 512: + * + * ns = (todval * 125) >> 9; + * + * In order to avoid an overflow with the multiplication we can rewrite this. 
+ * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits) + * we end up with + * + * ns = ((2^32 * th + tl) * 125 ) >> 9; + * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9); + * + */ +static inline unsigned long long tod_to_ns(unsigned long long todval) +{ + unsigned long long ns; + + ns = ((todval >> 32) << 23) * 125; + ns += ((todval & 0xffffffff) * 125) >> 9; + return ns; } #endif diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 086bb8e..a6667a9 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -43,17 +43,13 @@ #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_SYS_RT_SIGACTION -#define __ARCH_WANT_SYS_RT_SIGSUSPEND # ifndef CONFIG_64BIT # define __ARCH_WANT_STAT64 # define __ARCH_WANT_SYS_TIME # endif # ifdef CONFIG_COMPAT # define __ARCH_WANT_COMPAT_SYS_TIME -# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND # endif -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/s390/include/uapi/asm/signal.h b/arch/s390/include/uapi/asm/signal.h index 8c6a49e..2f43cfb 100644 --- a/arch/s390/include/uapi/asm/signal.h +++ b/arch/s390/include/uapi/asm/signal.h @@ -90,12 +90,6 @@ typedef unsigned long sigset_t; #define SA_RESTORER 0x04000000 -/* - * sigaltstack controls - */ -#define SS_ONSTACK 1 -#define SS_DISABLE 2 - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index 436d07c..f99eea7 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -28,7 +28,7 @@ #define SO_PRIORITY 12 #define SO_LINGER 13 #define SO_BSDCOMPAT 14 -/* To add :#define SO_REUSEPORT 15 */ +#define SO_REUSEPORT 15 #define SO_PASSCRED 16 #define SO_PEERCRED 17 #define SO_RCVLOWAT 18 @@ -76,4 +76,6 @@ /* Instruct lower device to use last 4-bytes of skb data as FCS */ #define SO_NOFCS 43 +#define SO_LOCK_FILTER 44 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 63e6078..864f693 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -279,7 +279,8 @@ #define __NR_process_vm_writev 341 #define __NR_s390_runtime_instr 342 #define __NR_kcmp 343 -#define NR_syscalls 344 +#define __NR_finit_module 344 +#define NR_syscalls 345 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 65cca95..19f26de 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -352,86 +352,6 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned return sys_ftruncate(fd, (high << 32) | low); } -asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid, - struct compat_timespec __user *interval) -{ - struct timespec t; - int ret; - mm_segment_t old_fs = get_fs (); - - set_fs (KERNEL_DS); - ret = sys_sched_rr_get_interval(pid, - (struct timespec __force __user *) &t); - set_fs (old_fs); - if (put_compat_timespec(&t, interval)) - return -EFAULT; - return ret; -} - -asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, - compat_sigset_t __user *oset, size_t sigsetsize) -{ - sigset_t s; - compat_sigset_t s32; - int ret; - mm_segment_t old_fs = get_fs(); - - if (set) { - if (copy_from_user (&s32, set, 
sizeof(compat_sigset_t))) - return -EFAULT; - s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); - } - set_fs (KERNEL_DS); - ret = sys_rt_sigprocmask(how, - set ? (sigset_t __force __user *) &s : NULL, - oset ? (sigset_t __force __user *) &s : NULL, - sigsetsize); - set_fs (old_fs); - if (ret) return ret; - if (oset) { - s32.sig[1] = (s.sig[0] >> 32); - s32.sig[0] = s.sig[0]; - if (copy_to_user (oset, &s32, sizeof(compat_sigset_t))) - return -EFAULT; - } - return 0; -} - -asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, - size_t sigsetsize) -{ - sigset_t s; - compat_sigset_t s32; - int ret; - mm_segment_t old_fs = get_fs(); - - set_fs (KERNEL_DS); - ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize); - set_fs (old_fs); - if (!ret) { - s32.sig[1] = (s.sig[0] >> 32); - s32.sig[0] = s.sig[0]; - if (copy_to_user (set, &s32, sizeof(compat_sigset_t))) - return -EFAULT; - } - return ret; -} - -asmlinkage long -sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo) -{ - siginfo_t info; - int ret; - mm_segment_t old_fs = get_fs(); - - if (copy_siginfo_from_user32(&info, uinfo)) - return -EFAULT; - set_fs (KERNEL_DS); - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info); - set_fs (old_fs); - return ret; -} - asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count, u32 poshi, u32 poslo) { diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h index d4d0239..00d92a5 100644 --- a/arch/s390/kernel/compat_linux.h +++ b/arch/s390/kernel/compat_linux.h @@ -17,13 +17,6 @@ struct ipc_kludge_32 { __s32 msgtyp; }; -struct old_sigaction32 { - __u32 sa_handler; /* Really a pointer, but need to deal with 32 bits */ - compat_old_sigset_t sa_mask; /* A 32 bit mask */ - __u32 sa_flags; - __u32 sa_restorer; /* Another 32 bit pointer */ -}; - /* asm/sigcontext.h */ typedef union { @@ -68,24 +61,12 @@ struct sigcontext32 }; /* asm/signal.h */ -struct sigaction32 { - __u32 sa_handler; /* pointer */ - __u32 sa_flags; - __u32 sa_restorer; /* pointer */ - compat_sigset_t sa_mask; /* mask last for extensibility */ -}; - -typedef struct { - __u32 ss_sp; /* pointer */ - int ss_flags; - compat_size_t ss_size; -} stack_t32; /* asm/ucontext.h */ struct ucontext32 { __u32 uc_flags; __u32 uc_link; /* pointer */ - stack_t32 uc_stack; + compat_stack_t uc_stack; _sigregs32 uc_mcontext; compat_sigset_t uc_sigmask; /* mask last for extensibility */ }; @@ -93,8 +74,6 @@ struct ucontext32 { struct stat64_emu31; struct mmap_arg_struct_emu31; struct fadvise64_64_args; -struct old_sigaction32; -struct old_sigaction32; long sys32_chown16(const char __user * filename, u16 user, u16 group); long sys32_lchown16(const char __user * filename, u16 user, u16 group); @@ -119,12 +98,6 @@ long sys32_ipc(u32 call, int first, int second, int third, u32 ptr); long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low); long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low); -long sys32_sched_rr_get_interval(compat_pid_t pid, - struct compat_timespec __user *interval); -long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, - compat_sigset_t __user *oset, size_t sigsetsize); -long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize); -long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo); long sys32_init_module(void __user *umod, unsigned long len, const char __user *uargs); long sys32_delete_module(const char __user *name_user, unsigned int 
flags); @@ -149,9 +122,4 @@ long sys32_read(unsigned int fd, char __user * buf, size_t count); long sys32_write(unsigned int fd, const char __user * buf, size_t count); long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise); long sys32_fadvise64_64(struct fadvise64_64_args __user *args); -long sys32_sigaction(int sig, const struct old_sigaction32 __user *act, - struct old_sigaction32 __user *oact); -long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, - struct sigaction32 __user *oact, size_t sigsetsize); -long sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss); #endif /* _ASM_S390X_S390_H */ diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 593fcc9..3e71194 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -157,122 +157,6 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) return err; } -asmlinkage long -sys32_sigaction(int sig, const struct old_sigaction32 __user *act, - struct old_sigaction32 __user *oact) -{ - struct k_sigaction new_ka, old_ka; - unsigned long sa_handler, sa_restorer; - int ret; - - if (act) { - compat_old_sigset_t mask; - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || - __get_user(sa_handler, &act->sa_handler) || - __get_user(sa_restorer, &act->sa_restorer) || - __get_user(new_ka.sa.sa_flags, &act->sa_flags) || - __get_user(mask, &act->sa_mask)) - return -EFAULT; - new_ka.sa.sa_handler = (__sighandler_t) sa_handler; - new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer; - siginitset(&new_ka.sa.sa_mask, mask); - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); - - if (!ret && oact) { - sa_handler = (unsigned long) old_ka.sa.sa_handler; - sa_restorer = (unsigned long) old_ka.sa.sa_restorer; - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(sa_handler, &oact->sa_handler) || - __put_user(sa_restorer, &oact->sa_restorer) || - __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) - return -EFAULT; - } - - return ret; -} - -asmlinkage long -sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, - struct sigaction32 __user *oact, size_t sigsetsize) -{ - struct k_sigaction new_ka, old_ka; - unsigned long sa_handler; - int ret; - compat_sigset_t set32; - - /* XXX: Don't preclude handling different sized sigset_t's. */ - if (sigsetsize != sizeof(compat_sigset_t)) - return -EINVAL; - - if (act) { - ret = get_user(sa_handler, &act->sa_handler); - ret |= __copy_from_user(&set32, &act->sa_mask, - sizeof(compat_sigset_t)); - new_ka.sa.sa_mask.sig[0] = - set32.sig[0] | (((long)set32.sig[1]) << 32); - ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); - - if (ret) - return -EFAULT; - new_ka.sa.sa_handler = (__sighandler_t) sa_handler; - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); - - if (!ret && oact) { - set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); - set32.sig[0] = old_ka.sa.sa_mask.sig[0]; - ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler); - ret |= __copy_to_user(&oact->sa_mask, &set32, - sizeof(compat_sigset_t)); - ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); - } - - return ret; -} - -asmlinkage long -sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss) -{ - struct pt_regs *regs = task_pt_regs(current); - stack_t kss, koss; - unsigned long ss_sp; - int ret, err = 0; - mm_segment_t old_fs = get_fs(); - - if (uss) { - if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) - return -EFAULT; - err |= __get_user(ss_sp, &uss->ss_sp); - err |= __get_user(kss.ss_size, &uss->ss_size); - err |= __get_user(kss.ss_flags, &uss->ss_flags); - if (err) - return -EFAULT; - kss.ss_sp = (void __user *) ss_sp; - } - - set_fs (KERNEL_DS); - ret = do_sigaltstack((stack_t __force __user *) (uss ? &kss : NULL), - (stack_t __force __user *) (uoss ? &koss : NULL), - regs->gprs[15]); - set_fs (old_fs); - - if (!ret && uoss) { - if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) - return -EFAULT; - ss_sp = (unsigned long) koss.ss_sp; - err |= __put_user(ss_sp, &uoss->ss_sp); - err |= __put_user(koss.ss_size, &uoss->ss_size); - err |= __put_user(koss.ss_flags, &uoss->ss_flags); - if (err) - return -EFAULT; - } - return ret; -} - static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) { _s390_regs_common32 regs32; @@ -380,10 +264,6 @@ asmlinkage long sys32_rt_sigreturn(void) struct pt_regs *regs = task_pt_regs(current); rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; sigset_t set; - stack_t st; - __u32 ss_sp; - int err; - mm_segment_t old_fs = get_fs(); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; @@ -394,15 +274,8 @@ asmlinkage long sys32_rt_sigreturn(void) goto badframe; if (restore_sigregs_gprs_high(regs, frame->gprs_high)) goto badframe; - err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp); - st.ss_sp = compat_ptr(ss_sp); - err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size); - err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags); - if (err) + if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; - set_fs (KERNEL_DS); - do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]); - set_fs (old_fs); return regs->gprs[2]; badframe: force_sig(SIGSEGV, current); @@ -530,10 +403,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, /* Create the ucontext. 
*/ err |= __put_user(UC_EXTENDED, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->gprs[15]), - &frame->uc.uc_stack.ss_flags); - err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]); err |= save_sigregs32(regs, &frame->uc.uc_mcontext); err |= save_sigregs_gprs_high(regs, frame->gprs_high); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 827e094..c14faf3 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -24,12 +24,6 @@ ENTRY(sys32_write_wrapper) llgfr %r4,%r4 # size_t jg sys32_write # branch to system call -ENTRY(sys32_open_wrapper) - llgtr %r2,%r2 # const char * - lgfr %r3,%r3 # int - lgfr %r4,%r4 # int - jg compat_sys_open # branch to system call - ENTRY(sys32_close_wrapper) llgfr %r2,%r2 # unsigned int jg sys_close # branch to system call @@ -226,12 +220,6 @@ ENTRY(sys32_dup2_wrapper) #sys32_setsid_wrapper # void -ENTRY(sys32_sigaction_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const struct old_sigaction * - llgtr %r4,%r4 # struct old_sigaction32 * - jg sys32_sigaction # branch to system call - ENTRY(sys32_setreuid16_wrapper) llgfr %r2,%r2 # __kernel_old_uid_emu31_t llgfr %r3,%r3 # __kernel_old_uid_emu31_t @@ -396,17 +384,6 @@ ENTRY(sys32_syslog_wrapper) lgfr %r4,%r4 # int jg sys_syslog # branch to system call -ENTRY(compat_sys_setitimer_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct itimerval_emu31 * - llgtr %r4,%r4 # struct itimerval_emu31 * - jg compat_sys_setitimer # branch to system call - -ENTRY(compat_sys_getitimer_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct itimerval_emu31 * - jg compat_sys_getitimer # branch to system call - ENTRY(compat_sys_newstat_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # struct stat_emu31 * @@ -424,13 +401,6 @@ ENTRY(compat_sys_newfstat_wrapper) #sys32_vhangup_wrapper # void -ENTRY(compat_sys_wait4_wrapper) - lgfr %r2,%r2 # pid_t - llgtr %r3,%r3 # unsigned int * - lgfr %r4,%r4 # int - llgtr %r5,%r5 # struct rusage * - jg compat_sys_wait4 # branch to system call - ENTRY(sys32_swapoff_wrapper) llgtr %r2,%r2 # const char * jg sys_swapoff # branch to system call @@ -474,12 +444,6 @@ ENTRY(sys32_mprotect_wrapper) llgfr %r4,%r4 # unsigned long jg sys_mprotect # branch to system call -ENTRY(compat_sys_sigprocmask_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # compat_old_sigset_t * - llgtr %r4,%r4 # compat_old_sigset_t * - jg compat_sys_sigprocmask # branch to system call - ENTRY(sys_init_module_wrapper) llgtr %r2,%r2 # void * llgfr %r3,%r3 # unsigned long @@ -628,11 +592,6 @@ ENTRY(sys32_sched_get_priority_min_wrapper) lgfr %r2,%r2 # int jg sys_sched_get_priority_min # branch to system call -ENTRY(sys32_sched_rr_get_interval_wrapper) - lgfr %r2,%r2 # pid_t - llgtr %r3,%r3 # struct compat_timespec * - jg sys32_sched_rr_get_interval # branch to system call - ENTRY(compat_sys_nanosleep_wrapper) llgtr %r2,%r2 # struct compat_timespec * llgtr %r3,%r3 # struct compat_timespec * @@ -686,43 +645,6 @@ ENTRY(sys32_prctl_wrapper) #sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue -ENTRY(sys32_rt_sigaction_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const struct sigaction_emu31 * - llgtr %r4,%r4 # const struct sigaction_emu31 * - llgfr %r5,%r5 # size_t - jg sys32_rt_sigaction # branch to 
system call - -ENTRY(sys32_rt_sigprocmask_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # old_sigset_emu31 * - llgtr %r4,%r4 # old_sigset_emu31 * - llgfr %r5,%r5 # size_t - jg sys32_rt_sigprocmask # branch to system call - -ENTRY(sys32_rt_sigpending_wrapper) - llgtr %r2,%r2 # sigset_emu31 * - llgfr %r3,%r3 # size_t - jg sys32_rt_sigpending # branch to system call - -ENTRY(compat_sys_rt_sigtimedwait_wrapper) - llgtr %r2,%r2 # const sigset_emu31_t * - llgtr %r3,%r3 # siginfo_emu31_t * - llgtr %r4,%r4 # const struct compat_timespec * - llgfr %r5,%r5 # size_t - jg compat_sys_rt_sigtimedwait # branch to system call - -ENTRY(sys32_rt_sigqueueinfo_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - llgtr %r4,%r4 # siginfo_emu31_t * - jg sys32_rt_sigqueueinfo # branch to system call - -ENTRY(compat_sys_rt_sigsuspend_wrapper) - llgtr %r2,%r2 # compat_sigset_t * - llgfr %r3,%r3 # compat_size_t - jg compat_sys_rt_sigsuspend - ENTRY(sys32_pread64_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # char * @@ -760,11 +682,6 @@ ENTRY(sys32_capset_wrapper) llgtr %r3,%r3 # const cap_user_data_t jg sys_capset # branch to system call -ENTRY(sys32_sigaltstack_wrapper) - llgtr %r2,%r2 # const stack_emu31_t * - llgtr %r3,%r3 # stack_emu31_t * - jg sys32_sigaltstack - ENTRY(sys32_sendfile_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int @@ -921,16 +838,6 @@ ENTRY(sys32_fstat64_wrapper) llgtr %r3,%r3 # struct stat64 * jg sys32_fstat64 # branch to system call -ENTRY(compat_sys_futex_wrapper) - llgtr %r2,%r2 # u32 * - lgfr %r3,%r3 # int - lgfr %r4,%r4 # int - llgtr %r5,%r5 # struct compat_timespec * - llgtr %r6,%r6 # u32 * - lgf %r0,164(%r15) # int - stg %r0,160(%r15) - jg compat_sys_futex # branch to system call - ENTRY(sys32_setxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * @@ -1216,14 +1123,6 @@ ENTRY(sys32_remap_file_pages_wrapper) llgfr %r6,%r6 # unsigned long jg sys_remap_file_pages -ENTRY(compat_sys_waitid_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # pid_t - llgtr %r4,%r4 # siginfo_emu31_t * - lgfr %r5,%r5 # int - llgtr %r6,%r6 # struct rusage_emu31 * - jg compat_sys_waitid - ENTRY(compat_sys_kexec_load_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # unsigned long @@ -1253,13 +1152,6 @@ ENTRY(sys_inotify_rm_watch_wrapper) llgfr %r3,%r3 # u32 jg sys_inotify_rm_watch -ENTRY(compat_sys_openat_wrapper) - llgfr %r2,%r2 # unsigned int - llgtr %r3,%r3 # const char * - lgfr %r4,%r4 # int - lgfr %r5,%r5 # int - jg compat_sys_openat - ENTRY(sys_mkdirat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * @@ -1362,17 +1254,6 @@ ENTRY(sys_unshare_wrapper) llgfr %r2,%r2 # unsigned long jg sys_unshare -ENTRY(compat_sys_set_robust_list_wrapper) - llgtr %r2,%r2 # struct compat_robust_list_head * - llgfr %r3,%r3 # size_t - jg compat_sys_set_robust_list - -ENTRY(compat_sys_get_robust_list_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # compat_uptr_t_t * - llgtr %r4,%r4 # compat_size_t * - jg compat_sys_get_robust_list - ENTRY(sys_splice_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # loff_t * @@ -1458,18 +1339,6 @@ ENTRY(sys_timerfd_create_wrapper) lgfr %r3,%r3 # int jg sys_timerfd_create -ENTRY(compat_sys_timerfd_settime_wrapper) - lgfr %r2,%r2 # int - lgfr %r3,%r3 # int - llgtr %r4,%r4 # struct compat_itimerspec * - llgtr %r5,%r5 # struct compat_itimerspec * - jg compat_sys_timerfd_settime - -ENTRY(compat_sys_timerfd_gettime_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct compat_itimerspec * - jg compat_sys_timerfd_gettime - ENTRY(compat_sys_signalfd4_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 
# compat_sigset_t * @@ -1550,13 +1419,6 @@ ENTRY(compat_sys_pwritev_wrapper) llgfr %r6,%r6 # u32 jg compat_sys_pwritev # branch to system call -ENTRY(compat_sys_rt_tgsigqueueinfo_wrapper) - lgfr %r2,%r2 # compat_pid_t - lgfr %r3,%r3 # compat_pid_t - lgfr %r4,%r4 # int - llgtr %r5,%r5 # struct compat_siginfo * - jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call - ENTRY(sys_perf_event_open_wrapper) llgtr %r2,%r2 # const struct perf_event_attr * lgfr %r3,%r3 # pid_t @@ -1607,12 +1469,6 @@ ENTRY(sys_name_to_handle_at_wrapper) lgfr %r6,%r6 # int jg sys_name_to_handle_at -ENTRY(compat_sys_open_by_handle_at_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # struct file_handle __user * - lgfr %r4,%r4 # int - jg compat_sys_open_by_handle_at - ENTRY(compat_sys_clock_adjtime_wrapper) lgfr %r2,%r2 # clockid_t (int) llgtr %r3,%r3 # struct compat_timex __user * @@ -1659,3 +1515,9 @@ ENTRY(sys_kcmp_wrapper) llgfr %r5,%r5 # unsigned long llgfr %r6,%r6 # unsigned long jg sys_kcmp + +ENTRY(sys_finit_module_wrapper) + lgfr %r2,%r2 # int + llgtr %r3,%r3 # const char __user * + lgfr %r4,%r4 # int + jg sys_finit_module diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index ba500d8..09a94cd 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -867,7 +867,7 @@ static inline void debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level, int exception) { - active->id.stck = get_clock(); + active->id.stck = get_tod_clock(); active->id.fields.cpuid = smp_processor_id(); active->caller = __builtin_return_address(0); active->id.fields.exception = exception; @@ -1127,13 +1127,14 @@ debug_register_view(debug_info_t * id, struct debug_view *view) if (i == DEBUG_MAX_VIEWS) { pr_err("Registering view %s/%s would exceed the maximum " "number of views %i\n", id->name, view->name, i); - debugfs_remove(pde); rc = -1; } else { id->views[i] = view; id->debugfs_entries[i] = pde; } spin_unlock_irqrestore(&id->lock, flags); + if (rc) + debugfs_remove(pde); out: return rc; } @@ -1146,9 +1147,9 @@ EXPORT_SYMBOL(debug_register_view); int debug_unregister_view(debug_info_t * id, struct debug_view *view) { - int rc = 0; - int i; + struct dentry *dentry = NULL; unsigned long flags; + int i, rc = 0; if (!id) goto out; @@ -1160,10 +1161,12 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view) if (i == DEBUG_MAX_VIEWS) rc = -1; else { - debugfs_remove(id->debugfs_entries[i]); + dentry = id->debugfs_entries[i]; id->views[i] = NULL; + id->debugfs_entries[i] = NULL; } spin_unlock_irqrestore(&id->lock, flags); + debugfs_remove(dentry); out: return rc; } diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index a7f9abd..c50665f 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -840,7 +840,6 @@ static struct insn opcode_b2[] = { { "stcke", 0x78, INSTR_S_RD }, { "sacf", 0x79, INSTR_S_RD }, { "stsi", 0x7d, INSTR_S_RD }, - { "spp", 0x80, INSTR_S_RD }, { "srnm", 0x99, INSTR_S_RD }, { "stfpc", 0x9c, INSTR_S_RD }, { "lfpc", 0x9d, INSTR_S_RD }, diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 1f0eee9..bda011e 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -47,10 +47,10 @@ static void __init reset_tod_clock(void) { u64 time; - if (store_clock(&time) == 0) + if (store_tod_clock(&time) == 0) return; /* TOD clock not running. Set the clock to Unix Epoch. 
*/ - if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0) + if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0) disabled_wait(0); sched_clock_base_cc = TOD_UNIX_EPOCH; @@ -173,7 +173,7 @@ static noinline __init void create_kernel_nss(void) } /* re-initialize cputime accounting. */ - sched_clock_base_cc = get_clock(); + sched_clock_base_cc = get_tod_clock(); S390_lowcore.last_update_clock = sched_clock_base_cc; S390_lowcore.last_update_timer = 0x7fffffffffffffffULL; S390_lowcore.user_timer = 0; @@ -381,7 +381,7 @@ static __init void detect_machine_facilities(void) if (test_facility(27)) S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; if (test_facility(40)) - S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; + S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; if (test_facility(50) && test_facility(73)) S390_lowcore.machine_flags |= MACHINE_FLAG_TE; if (test_facility(66)) diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 2711936..c3a736a 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -73,10 +73,6 @@ long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low, long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args); long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, u32 len_low); -long sys_sigsuspend(int history0, int history1, old_sigset_t mask); -long sys_sigaction(int sig, const struct old_sigaction __user *act, - struct old_sigaction __user *oact); -long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss); long sys_sigreturn(void); long sys_rt_sigreturn(void); long sys32_sigreturn(void); diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 6d34e0c..9c837c1 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -72,9 +72,9 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) #endif .endm - .macro SPP newpp + .macro LPP newpp #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP jz .+8 .insn s,0xb2800000,\newpp #endif @@ -96,7 +96,7 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) jhe .+22 .endif lg %r9,BASED(.Lsie_loop) - SPP BASED(.Lhost_id) # set host id + LPP BASED(.Lhost_id) # set host id #endif .endm @@ -967,10 +967,10 @@ sie_loop: lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce sie_gmap: lg %r14,__SF_EMPTY(%r15) # get control block pointer - SPP __SF_EMPTY(%r15) # set guest id + LPP __SF_EMPTY(%r15) # set guest id sie 0(%r14) sie_done: - SPP __SF_EMPTY+16(%r15) # set host id + LPP __SF_EMPTY+16(%r15) # set host id lg %r14,__LC_THREAD_INFO # pointer thread_info struct sie_exit: lctlg %c1,%c1,__LC_USER_ASCE # load primary asce diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 6ffcd320..d8a6a38 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -1414,6 +1414,16 @@ static struct kobj_attribute dump_type_attr = static struct kset *dump_kset; +static void diag308_dump(void *dump_block) +{ + diag308(DIAG308_SET, dump_block); + while (1) { + if (diag308(DIAG308_DUMP, NULL) != 0x302) + break; + udelay_simple(USEC_PER_SEC); + } +} + static void __dump_run(void *unused) { struct ccw_dev_id devid; @@ -1432,12 +1442,10 @@ static void __dump_run(void *unused) __cpcmd(buf, NULL, 0, NULL); break; case DUMP_METHOD_CCW_DIAG: - diag308(DIAG308_SET, dump_block_ccw); - diag308(DIAG308_DUMP, NULL); + diag308_dump(dump_block_ccw); break; case 
DUMP_METHOD_FCP_DIAG: - diag308(DIAG308_SET, dump_block_fcp); - diag308(DIAG308_DUMP, NULL); + diag308_dump(dump_block_fcp); break; default: break; diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index a9806ea..1630f43 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -24,44 +24,66 @@ #include <asm/irq.h> #include "entry.h" +DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); +EXPORT_PER_CPU_SYMBOL_GPL(irq_stat); + struct irq_class { char *name; char *desc; }; -static const struct irq_class intrclass_names[] = { +/* + * The list of "main" irq classes on s390. This is the list of interrrupts + * that appear both in /proc/stat ("intr" line) and /proc/interrupts. + * Historically only external and I/O interrupts have been part of /proc/stat. + * We can't add the split external and I/O sub classes since the first field + * in the "intr" line in /proc/stat is supposed to be the sum of all other + * fields. + * Since the external and I/O interrupt fields are already sums we would end + * up with having a sum which accounts each interrupt twice. + */ +static const struct irq_class irqclass_main_desc[NR_IRQS] = { [EXTERNAL_INTERRUPT] = {.name = "EXT"}, - [IO_INTERRUPT] = {.name = "I/O"}, - [EXTINT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"}, - [EXTINT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"}, - [EXTINT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"}, - [EXTINT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"}, - [EXTINT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"}, - [EXTINT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"}, - [EXTINT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"}, - [EXTINT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"}, - [EXTINT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"}, - [EXTINT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"}, - [EXTINT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, - [EXTINT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, - [EXTINT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"}, - [IOINT_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, - [IOINT_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"}, - [IOINT_DAS] = {.name = "DAS", .desc = "[I/O] DASD"}, - [IOINT_C15] = {.name = "C15", .desc = "[I/O] 3215"}, - [IOINT_C70] = {.name = "C70", .desc = "[I/O] 3270"}, - [IOINT_TAP] = {.name = "TAP", .desc = "[I/O] Tape"}, - [IOINT_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"}, - [IOINT_LCS] = {.name = "LCS", .desc = "[I/O] LCS"}, - [IOINT_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"}, - [IOINT_CTC] = {.name = "CTC", .desc = "[I/O] CTC"}, - [IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"}, - [IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"}, - [IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"}, - [IOINT_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" }, - [IOINT_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" }, - [IOINT_VIR] = {.name = "VIR", .desc = "[I/O] Virtual I/O Devices"}, + [IO_INTERRUPT] = {.name = "I/O"} +}; + +/* + * The list of split external and I/O interrupts that appear only in + * /proc/interrupts. + * In addition this list contains non external / I/O events like NMIs. 
+ */ +static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { + [IRQEXT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"}, + [IRQEXT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"}, + [IRQEXT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"}, + [IRQEXT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"}, + [IRQEXT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"}, + [IRQEXT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"}, + [IRQEXT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"}, + [IRQEXT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"}, + [IRQEXT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"}, + [IRQEXT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"}, + [IRQEXT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, + [IRQEXT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, + [IRQEXT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"}, + [IRQIO_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, + [IRQIO_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"}, + [IRQIO_DAS] = {.name = "DAS", .desc = "[I/O] DASD"}, + [IRQIO_C15] = {.name = "C15", .desc = "[I/O] 3215"}, + [IRQIO_C70] = {.name = "C70", .desc = "[I/O] 3270"}, + [IRQIO_TAP] = {.name = "TAP", .desc = "[I/O] Tape"}, + [IRQIO_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"}, + [IRQIO_LCS] = {.name = "LCS", .desc = "[I/O] LCS"}, + [IRQIO_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"}, + [IRQIO_CTC] = {.name = "CTC", .desc = "[I/O] CTC"}, + [IRQIO_APB] = {.name = "APB", .desc = "[I/O] AP Bus"}, + [IRQIO_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"}, + [IRQIO_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"}, + [IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" }, + [IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" }, + [IRQIO_VIR] = {.name = "VIR", .desc = "[I/O] Virtual I/O Devices"}, [NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"}, + [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"}, }; /* @@ -69,30 +91,34 @@ static const struct irq_class intrclass_names[] = { */ int show_interrupts(struct seq_file *p, void *v) { - int i = *(loff_t *) v, j; + int irq = *(loff_t *) v; + int cpu; get_online_cpus(); - if (i == 0) { + if (irq == 0) { seq_puts(p, " "); - for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); + for_each_online_cpu(cpu) + seq_printf(p, "CPU%d ", cpu); seq_putc(p, '\n'); } - - if (i < NR_IRQS) { - seq_printf(p, "%s: ", intrclass_names[i].name); -#ifndef CONFIG_SMP - seq_printf(p, "%10u ", kstat_irqs(i)); -#else - for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); -#endif - if (intrclass_names[i].desc) - seq_printf(p, " %s", intrclass_names[i].desc); - seq_putc(p, '\n'); - } + if (irq < NR_IRQS) { + seq_printf(p, "%s: ", irqclass_main_desc[irq].name); + for_each_online_cpu(cpu) + seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]); + seq_putc(p, '\n'); + goto skip_arch_irqs; + } + for (irq = 0; irq < NR_ARCH_IRQS; irq++) { + seq_printf(p, "%s: ", irqclass_sub_desc[irq].name); + for_each_online_cpu(cpu) + seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]); + if (irqclass_sub_desc[irq].desc) + seq_printf(p, " %s", irqclass_sub_desc[irq].desc); + seq_putc(p, '\n'); + } +skip_arch_irqs: put_online_cpus(); - return 0; + return 0; } /* @@ -223,7 +249,7 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code, /* Serve timer interrupts first. 
*/ clock_comparator_work(); } - kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; + kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL); if (ext_code.code != 0x1004) __get_cpu_var(s390_idle).nohz_delay = 1; diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 4610dea..f750bd7 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -65,8 +65,7 @@ void module_free(struct module *mod, void *module_region) vfree(module_region); } -static void -check_rela(Elf_Rela *rela, struct module *me) +static void check_rela(Elf_Rela *rela, struct module *me) { struct mod_arch_syminfo *info; @@ -115,9 +114,8 @@ check_rela(Elf_Rela *rela, struct module *me) * Account for GOT and PLT relocations. We can't add sections for * got and plt but we can increase the core module size. */ -int -module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, - char *secstrings, struct module *me) +int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *me) { Elf_Shdr *symtab; Elf_Sym *symbols; @@ -179,13 +177,52 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, return 0; } -static int -apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, - struct module *me) +static int apply_rela_bits(Elf_Addr loc, Elf_Addr val, + int sign, int bits, int shift) +{ + unsigned long umax; + long min, max; + + if (val & ((1UL << shift) - 1)) + return -ENOEXEC; + if (sign) { + val = (Elf_Addr)(((long) val) >> shift); + min = -(1L << (bits - 1)); + max = (1L << (bits - 1)) - 1; + if ((long) val < min || (long) val > max) + return -ENOEXEC; + } else { + val >>= shift; + umax = ((1UL << (bits - 1)) << 1) - 1; + if ((unsigned long) val > umax) + return -ENOEXEC; + } + + if (bits == 8) + *(unsigned char *) loc = val; + else if (bits == 12) + *(unsigned short *) loc = (val & 0xfff) | + (*(unsigned short *) loc & 0xf000); + else if (bits == 16) + *(unsigned short *) loc = val; + else if (bits == 20) + *(unsigned int *) loc = (val & 0xfff) << 16 | + (val & 0xff000) >> 4 | + (*(unsigned int *) loc & 0xf00000ff); + else if (bits == 32) + *(unsigned int *) loc = val; + else if (bits == 64) + *(unsigned long *) loc = val; + return 0; +} + +static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + const char *strtab, struct module *me) { struct mod_arch_syminfo *info; Elf_Addr loc, val; int r_type, r_sym; + int rc; /* This is where to make the change */ loc = base + rela->r_offset; @@ -197,6 +234,9 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val = symtab[r_sym].st_value; switch (r_type) { + case R_390_NONE: /* No relocation. */ + rc = 0; + break; case R_390_8: /* Direct 8 bit. */ case R_390_12: /* Direct 12 bit. */ case R_390_16: /* Direct 16 bit. */ @@ -205,20 +245,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_64: /* Direct 64 bit. 
*/ val += rela->r_addend; if (r_type == R_390_8) - *(unsigned char *) loc = val; + rc = apply_rela_bits(loc, val, 0, 8, 0); else if (r_type == R_390_12) - *(unsigned short *) loc = (val & 0xfff) | - (*(unsigned short *) loc & 0xf000); + rc = apply_rela_bits(loc, val, 0, 12, 0); else if (r_type == R_390_16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_20) - *(unsigned int *) loc = - (*(unsigned int *) loc & 0xf00000ff) | - (val & 0xfff) << 16 | (val & 0xff000) >> 4; + rc = apply_rela_bits(loc, val, 1, 20, 0); else if (r_type == R_390_32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); break; case R_390_PC16: /* PC relative 16 bit. */ case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ @@ -227,15 +264,15 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_PC64: /* PC relative 64 bit. */ val += rela->r_addend - loc; if (r_type == R_390_PC16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 1, 16, 0); else if (r_type == R_390_PC16DBL) - *(unsigned short *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 16, 1); else if (r_type == R_390_PC32DBL) - *(unsigned int *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 32, 1); else if (r_type == R_390_PC32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 1, 32, 0); else if (r_type == R_390_PC64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 1, 64, 0); break; case R_390_GOT12: /* 12 bit GOT offset. */ case R_390_GOT16: /* 16 bit GOT offset. */ @@ -260,26 +297,24 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val = info->got_offset + rela->r_addend; if (r_type == R_390_GOT12 || r_type == R_390_GOTPLT12) - *(unsigned short *) loc = (val & 0xfff) | - (*(unsigned short *) loc & 0xf000); + rc = apply_rela_bits(loc, val, 0, 12, 0); else if (r_type == R_390_GOT16 || r_type == R_390_GOTPLT16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_GOT20 || r_type == R_390_GOTPLT20) - *(unsigned int *) loc = - (*(unsigned int *) loc & 0xf00000ff) | - (val & 0xfff) << 16 | (val & 0xff000) >> 4; + rc = apply_rela_bits(loc, val, 1, 20, 0); else if (r_type == R_390_GOT32 || r_type == R_390_GOTPLT32) - *(unsigned int *) loc = val; - else if (r_type == R_390_GOTENT || - r_type == R_390_GOTPLTENT) - *(unsigned int *) loc = - (val + (Elf_Addr) me->module_core - loc) >> 1; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_GOT64 || r_type == R_390_GOTPLT64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); + else if (r_type == R_390_GOTENT || + r_type == R_390_GOTPLTENT) { + val += (Elf_Addr) me->module_core - loc; + rc = apply_rela_bits(loc, val, 1, 32, 1); + } break; case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. 
*/ @@ -321,17 +356,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val += rela->r_addend - loc; } if (r_type == R_390_PLT16DBL) - *(unsigned short *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 16, 1); else if (r_type == R_390_PLTOFF16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_PLT32DBL) - *(unsigned int *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 32, 1); else if (r_type == R_390_PLT32 || r_type == R_390_PLTOFF32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_PLT64 || r_type == R_390_PLTOFF64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); break; case R_390_GOTOFF16: /* 16 bit offset to GOT. */ case R_390_GOTOFF32: /* 32 bit offset to GOT. */ @@ -339,20 +374,20 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val = val + rela->r_addend - ((Elf_Addr) me->module_core + me->arch.got_offset); if (r_type == R_390_GOTOFF16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_GOTOFF32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_GOTOFF64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); break; case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ val = (Elf_Addr) me->module_core + me->arch.got_offset + rela->r_addend - loc; if (r_type == R_390_GOTPC) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 1, 32, 0); else if (r_type == R_390_GOTPCDBL) - *(unsigned int *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 32, 1); break; case R_390_COPY: case R_390_GLOB_DAT: /* Create GOT entry. */ @@ -360,19 +395,25 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_RELATIVE: /* Adjust by program base. */ /* Only needed if we want to support loading of modules linked with -shared. */ - break; + return -ENOEXEC; default: - printk(KERN_ERR "module %s: Unknown relocation: %u\n", + printk(KERN_ERR "module %s: unknown relocation: %u\n", me->name, r_type); return -ENOEXEC; } + if (rc) { + printk(KERN_ERR "module %s: relocation error for symbol %s " + "(r_type %i, value 0x%lx)\n", + me->name, strtab + symtab[r_sym].st_name, + r_type, (unsigned long) val); + return rc; + } return 0; } -int -apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, - unsigned int symindex, unsigned int relsec, - struct module *me) +int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) { Elf_Addr base; Elf_Sym *symtab; @@ -388,7 +429,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, n = sechdrs[relsec].sh_size / sizeof(Elf_Rela); for (i = 0; i < n; i++, rela++) { - rc = apply_rela(rela, base, symtab, me); + rc = apply_rela(rela, base, symtab, strtab, me); if (rc) return rc; } diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index a6daa5c..504175e 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -254,7 +254,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) int umode; nmi_enter(); - kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++; + inc_irq_stat(NMI_NMI); mci = (struct mci *) &S390_lowcore.mcck_interruption_code; mcck = &__get_cpu_var(cpu_mcck); umode = user_mode(regs); @@ -293,7 +293,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) * retry this instruction. 
*/ spin_lock(&ipd_lock); - tmp = get_clock(); + tmp = get_tod_clock(); if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME) ipd_count++; else diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index c4e7269..390d9ae 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -229,7 +229,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code, if (!(alert & CPU_MF_INT_CF_MASK)) return; - kstat_cpu(smp_processor_id()).irqs[EXTINT_CMC]++; + inc_irq_stat(IRQEXT_CMC); cpuhw = &__get_cpu_var(cpu_hw_events); /* Measurement alerts are shared and might happen when the PMU @@ -367,13 +367,6 @@ static int __hw_perf_event_init(struct perf_event *event) if (ev >= PERF_CPUM_CF_MAX_CTR) return -EINVAL; - /* The CPU measurement counter facility does not have any interrupts - * to do sampling. Sampling must be provided by external means, - * for example, by timers. - */ - if (hwc->sample_period) - return -EINVAL; - /* Use the hardware perf event structure to store the counter number * in 'config' member and the counter set to which the counter belongs * in the 'config_base'. The counter set (config_base) is then used @@ -418,6 +411,12 @@ static int cpumf_pmu_event_init(struct perf_event *event) case PERF_TYPE_HARDWARE: case PERF_TYPE_HW_CACHE: case PERF_TYPE_RAW: + /* The CPU measurement counter facility does not have overflow + * interrupts to do sampling. Sampling must be provided by + * external means, for example, by timers. + */ + if (is_sampling_event(event)) + return -ENOENT; err = __hw_perf_event_init(event); break; default: diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index 61066f6..077a993 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -71,7 +71,7 @@ static void runtime_instr_int_handler(struct ext_code ext_code, if (!(param32 & CPU_MF_INT_RI_MASK)) return; - kstat_cpu(smp_processor_id()).irqs[EXTINT_CMR]++; + inc_irq_stat(IRQEXT_CMR); if (!current->thread.ri_cb) return; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 2568590..a5360de 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -16,7 +16,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/errno.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/memblock.h> @@ -289,6 +289,7 @@ void machine_power_off(void) * Dummy power off function. */ void (*pm_power_off)(void) = machine_power_off; +EXPORT_SYMBOL_GPL(pm_power_off); static int __init early_parse_mem(char *p) { diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index c3ff70a..9c6e747 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -48,54 +48,6 @@ typedef struct struct ucontext uc; } rt_sigframe; -/* - * Atomically swap in the new signal mask, and wait for a signal. 
- */ -SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask) -{ - sigset_t blocked; - siginitset(&blocked, mask); - return sigsuspend(&blocked); -} - -SYSCALL_DEFINE3(sigaction, int, sig, const struct old_sigaction __user *, act, - struct old_sigaction __user *, oact) -{ - struct k_sigaction new_ka, old_ka; - int ret; - - if (act) { - old_sigset_t mask; - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || - __get_user(new_ka.sa.sa_handler, &act->sa_handler) || - __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || - __get_user(new_ka.sa.sa_flags, &act->sa_flags) || - __get_user(mask, &act->sa_mask)) - return -EFAULT; - siginitset(&new_ka.sa.sa_mask, mask); - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); - - if (!ret && oact) { - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || - __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || - __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) - return -EFAULT; - } - - return ret; -} - -SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, - stack_t __user *, uoss) -{ - struct pt_regs *regs = task_pt_regs(current); - return do_sigaltstack(uss, uoss, regs->gprs[15]); -} - /* Returns non-zero on fault. */ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) { @@ -190,8 +142,7 @@ SYSCALL_DEFINE0(rt_sigreturn) set_current_blocked(&set); if (restore_sigregs(regs, &frame->uc.uc_mcontext)) goto badframe; - if (do_sigaltstack(&frame->uc.uc_stack, NULL, - regs->gprs[15]) == -EFAULT) + if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return regs->gprs[2]; badframe: @@ -325,10 +276,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Create the ucontext. 
*/ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); - err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->gprs[15]), - &frame->uc.uc_stack.ss_flags); - err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= __save_altstack(&frame->uc.uc_stack, regs->gprs[15]); err |= save_sigregs(regs, &frame->uc.uc_mcontext); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index ea431e5..549c9d1 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -365,16 +365,16 @@ void smp_emergency_stop(cpumask_t *cpumask) u64 end; int cpu; - end = get_clock() + (1000000UL << 12); + end = get_tod_clock() + (1000000UL << 12); for_each_cpu(cpu, cpumask) { struct pcpu *pcpu = pcpu_devices + cpu; set_bit(ec_stop_cpu, &pcpu->ec_mask); while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, 0, NULL) == SIGP_CC_BUSY && - get_clock() < end) + get_tod_clock() < end) cpu_relax(); } - while (get_clock() < end) { + while (get_tod_clock() < end) { for_each_cpu(cpu, cpumask) if (pcpu_stopped(pcpu_devices + cpu)) cpumask_clear_cpu(cpu, cpumask); @@ -433,9 +433,9 @@ static void do_ext_call_interrupt(struct ext_code ext_code, cpu = smp_processor_id(); if (ext_code.code == 0x1202) - kstat_cpu(cpu).irqs[EXTINT_EXC]++; + inc_irq_stat(IRQEXT_EXC); else - kstat_cpu(cpu).irqs[EXTINT_EMS]++; + inc_irq_stat(IRQEXT_EMS); /* * handle bit signal external calls */ @@ -623,9 +623,9 @@ static struct sclp_cpu_info *smp_get_cpu_info(void) return info; } -static int __devinit smp_add_present_cpu(int cpu); +static int __cpuinit smp_add_present_cpu(int cpu); -static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info, +static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add) { struct pcpu *pcpu; @@ -694,7 +694,7 @@ static void __init smp_detect_cpus(void) */ static void __cpuinit smp_start_secondary(void *cpuvoid) { - S390_lowcore.last_update_clock = get_clock(); + S390_lowcore.last_update_clock = get_tod_clock(); S390_lowcore.restart_stack = (unsigned long) restart_stack; S390_lowcore.restart_fn = (unsigned long) do_restart; S390_lowcore.restart_data = 0; @@ -709,6 +709,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid) pfault_init(); notify_cpu_starting(smp_processor_id()); set_cpu_online(smp_processor_id(), true); + inc_irq_stat(CPU_RST); local_irq_enable(); /* cpu_idle will call schedule for us */ cpu_idle(); @@ -946,7 +947,7 @@ static ssize_t show_idle_time(struct device *dev, unsigned int sequence; do { - now = get_clock(); + now = get_tod_clock(); sequence = ACCESS_ONCE(idle->sequence); idle_time = ACCESS_ONCE(idle->idle_time); idle_enter = ACCESS_ONCE(idle->clock_idle_enter); @@ -986,7 +987,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self, return notifier_from_errno(err); } -static int __devinit smp_add_present_cpu(int cpu) +static int __cpuinit smp_add_present_cpu(int cpu) { struct cpu *c = &pcpu_devices[cpu].cpu; struct device *s = &c->dev; diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 4817485..aaac708 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -13,7 +13,7 @@ SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper) SYSCALL(sys_fork,sys_fork,sys_fork) SYSCALL(sys_read,sys_read,sys32_read_wrapper) SYSCALL(sys_write,sys_write,sys32_write_wrapper) -SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */ 
+SYSCALL(sys_open,sys_open,compat_sys_open) /* 5 */ SYSCALL(sys_close,sys_close,sys32_close_wrapper) SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall) SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper) @@ -75,7 +75,7 @@ SYSCALL(sys_dup2,sys_dup2,sys32_dup2_wrapper) SYSCALL(sys_getppid,sys_getppid,sys_getppid) SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */ SYSCALL(sys_setsid,sys_setsid,sys_setsid) -SYSCALL(sys_sigaction,sys_sigaction,sys32_sigaction_wrapper) +SYSCALL(sys_sigaction,sys_sigaction,compat_sys_sigaction) NI_SYSCALL /* old sgetmask syscall*/ NI_SYSCALL /* old ssetmask syscall*/ SYSCALL(sys_setreuid16,sys_ni_syscall,sys32_setreuid16_wrapper) /* old setreuid16 syscall */ @@ -112,8 +112,8 @@ SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs_wrapper) /* 100 */ NI_SYSCALL /* ioperm for i386 */ SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall_wrapper) SYSCALL(sys_syslog,sys_syslog,sys32_syslog_wrapper) -SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer_wrapper) -SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer_wrapper) /* 105 */ +SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer) +SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer) /* 105 */ SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat_wrapper) SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper) SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper) @@ -122,7 +122,7 @@ SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,sys32_lookup_dcookie_wrapper) /* 1 SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup) NI_SYSCALL /* old "idle" system call */ NI_SYSCALL /* vm86old for i386 */ -SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper) +SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4) SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */ SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper) SYSCALL(sys_s390_ipc,sys_s390_ipc,sys32_ipc_wrapper) @@ -134,7 +134,7 @@ SYSCALL(sys_newuname,sys_newuname,sys32_newuname_wrapper) NI_SYSCALL /* modify_ldt for i386 */ SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper) SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */ -SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper) +SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask) NI_SYSCALL /* old "create module" */ SYSCALL(sys_init_module,sys_init_module,sys_init_module_wrapper) SYSCALL(sys_delete_module,sys_delete_module,sys_delete_module_wrapper) @@ -169,7 +169,7 @@ SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,sys32_sched_getscheduler_w SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield) SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper) SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper) /* 160 */ -SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,sys32_sched_rr_get_interval_wrapper) +SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval) SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep_wrapper) SYSCALL(sys_mremap,sys_mremap,sys32_mremap_wrapper) SYSCALL(sys_setresuid16,sys_ni_syscall,sys32_setresuid16_wrapper) /* old setresuid16 syscall */ @@ -182,19 +182,19 @@ SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old set SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */ SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper) 
SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn) -SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper) -SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */ -SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper) -SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait_wrapper) -SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,sys32_rt_sigqueueinfo_wrapper) -SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend_wrapper) +SYSCALL(sys_rt_sigaction,sys_rt_sigaction,compat_sys_rt_sigaction) +SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */ +SYSCALL(sys_rt_sigpending,sys_rt_sigpending,compat_sys_rt_sigpending) +SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait) +SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo) +SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend) SYSCALL(sys_pread64,sys_pread64,sys32_pread64_wrapper) /* 180 */ SYSCALL(sys_pwrite64,sys_pwrite64,sys32_pwrite64_wrapper) SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall */ SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper) SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper) SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */ -SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack_wrapper) +SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack) SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper) NI_SYSCALL /* streams1 */ NI_SYSCALL /* streams2 */ @@ -246,7 +246,7 @@ SYSCALL(sys_lremovexattr,sys_lremovexattr,sys32_lremovexattr_wrapper) SYSCALL(sys_fremovexattr,sys_fremovexattr,sys32_fremovexattr_wrapper) /* 235 */ SYSCALL(sys_gettid,sys_gettid,sys_gettid) SYSCALL(sys_tkill,sys_tkill,sys_tkill_wrapper) -SYSCALL(sys_futex,sys_futex,compat_sys_futex_wrapper) +SYSCALL(sys_futex,sys_futex,compat_sys_futex) SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,sys32_sched_setaffinity_wrapper) SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,sys32_sched_getaffinity_wrapper) /* 240 */ SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill_wrapper) @@ -289,14 +289,14 @@ SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load_wrapper) SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key_wrapper) SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key_wrapper) SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl_wrapper) /* 280 */ -SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid_wrapper) +SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid) SYSCALL(sys_ioprio_set,sys_ioprio_set,sys_ioprio_set_wrapper) SYSCALL(sys_ioprio_get,sys_ioprio_get,sys_ioprio_get_wrapper) SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init) SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,sys_inotify_add_watch_wrapper) /* 285 */ SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,sys_inotify_rm_watch_wrapper) NI_SYSCALL /* 287 sys_migrate_pages */ -SYSCALL(sys_openat,sys_openat,compat_sys_openat_wrapper) +SYSCALL(sys_openat,sys_openat,compat_sys_openat) SYSCALL(sys_mkdirat,sys_mkdirat,sys_mkdirat_wrapper) SYSCALL(sys_mknodat,sys_mknodat,sys_mknodat_wrapper) /* 290 */ SYSCALL(sys_fchownat,sys_fchownat,sys_fchownat_wrapper) @@ -312,8 +312,8 @@ SYSCALL(sys_faccessat,sys_faccessat,sys_faccessat_wrapper) /* 300 */ SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6_wrapper) SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll_wrapper) SYSCALL(sys_unshare,sys_unshare,sys_unshare_wrapper) 
-SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list_wrapper) -SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list_wrapper) +SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list) +SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list) SYSCALL(sys_splice,sys_splice,sys_splice_wrapper) SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper) SYSCALL(sys_tee,sys_tee,sys_tee_wrapper) @@ -328,8 +328,8 @@ SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper) NI_SYSCALL /* 317 old sys_timer_fd */ SYSCALL(sys_eventfd,sys_eventfd,sys_eventfd_wrapper) SYSCALL(sys_timerfd_create,sys_timerfd_create,sys_timerfd_create_wrapper) -SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime_wrapper) /* 320 */ -SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime_wrapper) +SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ +SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime) SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4_wrapper) SYSCALL(sys_eventfd2,sys_eventfd2,sys_eventfd2_wrapper) SYSCALL(sys_inotify_init1,sys_inotify_init1,sys_inotify_init1_wrapper) @@ -338,13 +338,13 @@ SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper) SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) -SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ +SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper) SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper) SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper) SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper) SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */ -SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at_wrapper) +SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at) SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper) SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper) SYSCALL(sys_setns,sys_setns,sys_setns_wrapper) @@ -352,3 +352,4 @@ SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wr SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper) SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper) SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper) +SYSCALL(sys_finit_module,sys_finit_module,sys_finit_module_wrapper) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 7fcd690..876546b 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); */ unsigned long long notrace __kprobes sched_clock(void) { - return (get_clock_monotonic() * 125) >> 9; + return tod_to_ns(get_tod_clock_monotonic()); } /* @@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires, nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); do_div(nsecs, 125); S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); + /* Program the maximum value if we have an overflow (== year 2042) */ + if (unlikely(S390_lowcore.clock_comparator < 
sched_clock_base_cc)) + S390_lowcore.clock_comparator = -1ULL; set_clock_comparator(S390_lowcore.clock_comparator); return 0; } @@ -168,7 +171,7 @@ static void clock_comparator_interrupt(struct ext_code ext_code, unsigned int param32, unsigned long param64) { - kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++; + inc_irq_stat(IRQEXT_CLK); if (S390_lowcore.clock_comparator == -1ULL) set_clock_comparator(S390_lowcore.clock_comparator); } @@ -179,7 +182,7 @@ static void stp_timing_alert(struct stp_irq_parm *); static void timing_alert_interrupt(struct ext_code ext_code, unsigned int param32, unsigned long param64) { - kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++; + inc_irq_stat(IRQEXT_TLA); if (param32 & 0x00c40000) etr_timing_alert((struct etr_irq_parm *) &param32); if (param32 & 0x00038000) @@ -191,7 +194,7 @@ static void stp_reset(void); void read_persistent_clock(struct timespec *ts) { - tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts); + tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts); } void read_boot_clock(struct timespec *ts) @@ -201,7 +204,7 @@ void read_boot_clock(struct timespec *ts) static cycle_t read_tod_clock(struct clocksource *cs) { - return get_clock(); + return get_tod_clock(); } static struct clocksource clocksource_tod = { @@ -339,7 +342,7 @@ int get_sync_clock(unsigned long long *clock) sw_ptr = &get_cpu_var(clock_sync_word); sw0 = atomic_read(sw_ptr); - *clock = get_clock(); + *clock = get_tod_clock(); sw1 = atomic_read(sw_ptr); put_cpu_var(clock_sync_word); if (sw0 == sw1 && (sw0 & 0x80000000U)) @@ -483,7 +486,7 @@ static void etr_reset(void) .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, .es = 0, .sl = 0 }; if (etr_setr(&etr_eacr) == 0) { - etr_tolec = get_clock(); + etr_tolec = get_tod_clock(); set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); if (etr_port0_online && etr_port1_online) set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); @@ -765,8 +768,8 @@ static int etr_sync_clock(void *data) __ctl_set_bit(14, 21); __ctl_set_bit(0, 29); clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; - old_clock = get_clock(); - if (set_clock(clock) == 0) { + old_clock = get_tod_clock(); + if (set_tod_clock(clock) == 0) { __udelay(1); /* Wait for the clock to start. */ __ctl_clear_bit(0, 29); __ctl_clear_bit(14, 21); @@ -842,7 +845,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr) * assume that this can have caused an stepping * port switch. */ - etr_tolec = get_clock(); + etr_tolec = get_tod_clock(); eacr.p0 = etr_port0_online; if (!eacr.p0) eacr.e0 = 0; @@ -855,7 +858,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr) * assume that this can have caused an stepping * port switch. */ - etr_tolec = get_clock(); + etr_tolec = get_tod_clock(); eacr.p1 = etr_port1_online; if (!eacr.p1) eacr.e1 = 0; @@ -971,7 +974,7 @@ static void etr_update_eacr(struct etr_eacr eacr) etr_eacr = eacr; etr_setr(&etr_eacr); if (dp_changed) - etr_tolec = get_clock(); + etr_tolec = get_tod_clock(); } /* @@ -1009,7 +1012,7 @@ static void etr_work_fn(struct work_struct *work) /* Store aib to get the current ETR status word. */ BUG_ON(etr_stetr(&aib) != 0); etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. 
*/ - now = get_clock(); + now = get_tod_clock(); /* * Update the port information if the last stepping port change @@ -1534,10 +1537,10 @@ static int stp_sync_clock(void *data) if (stp_info.todoff[0] || stp_info.todoff[1] || stp_info.todoff[2] || stp_info.todoff[3] || stp_info.tmd != 2) { - old_clock = get_clock(); + old_clock = get_tod_clock(); rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0); if (rc == 0) { - delta = adjust_time(old_clock, get_clock(), 0); + delta = adjust_time(old_clock, get_tod_clock(), 0); fixup_clock_comparator(delta); rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index f1aba87..4b2e3e3 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -10,6 +10,7 @@ #include <linux/bootmem.h> #include <linux/cpuset.h> #include <linux/device.h> +#include <linux/export.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/init.h> @@ -42,6 +43,7 @@ static struct mask_info socket_info; static struct mask_info book_info; struct cpu_topology_s390 cpu_topology[NR_CPUS]; +EXPORT_SYMBOL_GPL(cpu_topology); static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) { diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 79cb51a..35b13ed 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -75,6 +75,10 @@ SECTIONS EXIT_TEXT } + .exit.data : { + EXIT_DATA + } + /* early.c uses stsi, which requires page aligned data. */ . = ALIGN(PAGE_SIZE); INIT_DATA_SECTION(0x100) diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index e84b8b6..a0042ac 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -127,7 +127,7 @@ void vtime_account_user(struct task_struct *tsk) * Update process times based on virtual cpu times stored by entry.S * to the lowcore fields user_timer, system_timer & steal_clock. 
*/ -void vtime_account(struct task_struct *tsk) +void vtime_account_irq_enter(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); u64 timer, system; @@ -145,10 +145,10 @@ void vtime_account(struct task_struct *tsk) virt_timer_forward(system); } -EXPORT_SYMBOL_GPL(vtime_account); +EXPORT_SYMBOL_GPL(vtime_account_irq_enter); void vtime_account_system(struct task_struct *tsk) -__attribute__((alias("vtime_account"))); +__attribute__((alias("vtime_account_irq_enter"))); EXPORT_SYMBOL_GPL(vtime_account_system); void __kprobes vtime_stop_cpu(void) @@ -191,7 +191,7 @@ cputime64_t s390_get_idle_time(int cpu) unsigned int sequence; do { - now = get_clock(); + now = get_tod_clock(); sequence = ACCESS_ONCE(idle->sequence); idle_enter = ACCESS_ONCE(idle->clock_idle_enter); idle_exit = ACCESS_ONCE(idle->clock_idle_exit); diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index b58dd86..60f9f8a 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -18,7 +18,7 @@ if VIRTUALIZATION config KVM def_tristate y prompt "Kernel-based Virtual Machine (KVM) support" - depends on HAVE_KVM && EXPERIMENTAL + depends on HAVE_KVM select PREEMPT_NOTIFIERS select ANON_INODES select HAVE_KVM_CPU_RELAX_INTERCEPT diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 2f6ccb0..37116a7 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -484,7 +484,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) } if ((!rc) && (vcpu->arch.sie_block->ckc < - get_clock() + vcpu->arch.sie_block->epoch)) { + get_tod_clock() + vcpu->arch.sie_block->epoch)) { if ((!psw_extint_disabled(vcpu)) && (vcpu->arch.sie_block->gcr[0] & 0x800ul)) rc = 1; @@ -524,13 +524,13 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) goto no_timer; } - now = get_clock() + vcpu->arch.sie_block->epoch; + now = get_tod_clock() + vcpu->arch.sie_block->epoch; if (vcpu->arch.sie_block->ckc < now) { __unset_cpu_idle(vcpu); return 0; } - sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9; + sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); @@ -614,7 +614,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) } if ((vcpu->arch.sie_block->ckc < - get_clock() + vcpu->arch.sie_block->epoch)) + get_tod_clock() + vcpu->arch.sie_block->epoch)) __try_deliver_ckc_interrupt(vcpu); if (atomic_read(&fi->active)) { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 4377d18..4cf35a0 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -149,7 +149,7 @@ int kvm_dev_ioctl_check_extension(long ext) r = KVM_MAX_VCPUS; break; case KVM_CAP_S390_COW: - r = sclp_get_fac85() & 0x2; + r = MACHINE_HAS_ESOP; break; default: r = 0; @@ -618,7 +618,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) kvm_s390_deliver_pending_interrupts(vcpu); vcpu->arch.sie_block->icptcode = 0; + preempt_disable(); kvm_guest_enter(); + preempt_enable(); VCPU_EVENT(vcpu, 6, "entering sie flags %x", atomic_read(&vcpu->arch.sie_block->cpuflags)); trace_kvm_s390_sie_enter(vcpu, diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 42d0cf8..c61b9fa 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c @@ -32,7 +32,7 @@ static void __udelay_disabled(unsigned long long usecs) unsigned long cr0, cr6, new; u64 clock_saved, end; - end = get_clock() + (usecs << 12); + end = get_tod_clock() + (usecs << 12); 
clock_saved = local_tick_disable(); __ctl_store(cr0, 0, 0); __ctl_store(cr6, 6, 6); @@ -45,7 +45,7 @@ static void __udelay_disabled(unsigned long long usecs) set_clock_comparator(end); vtime_stop_cpu(); local_irq_disable(); - } while (get_clock() < end); + } while (get_tod_clock() < end); lockdep_on(); __ctl_load(cr0, 0, 0); __ctl_load(cr6, 6, 6); @@ -56,7 +56,7 @@ static void __udelay_enabled(unsigned long long usecs) { u64 clock_saved, end; - end = get_clock() + (usecs << 12); + end = get_tod_clock() + (usecs << 12); do { clock_saved = 0; if (end < S390_lowcore.clock_comparator) { @@ -67,7 +67,7 @@ static void __udelay_enabled(unsigned long long usecs) local_irq_disable(); if (clock_saved) local_tick_enable(clock_saved); - } while (get_clock() < end); + } while (get_tod_clock() < end); } /* @@ -111,8 +111,8 @@ void udelay_simple(unsigned long long usecs) { u64 end; - end = get_clock() + (usecs << 12); - while (get_clock() < end) + end = get_tod_clock() + (usecs << 12); + while (get_tod_clock() < end) cpu_relax(); } @@ -122,10 +122,10 @@ void __ndelay(unsigned long long nsecs) nsecs <<= 9; do_div(nsecs, 125); - end = get_clock() + nsecs; + end = get_tod_clock() + nsecs; if (nsecs & ~0xfffUL) __udelay(nsecs >> 12); - while (get_clock() < end) + while (get_tod_clock() < end) barrier(); } EXPORT_SYMBOL(__ndelay); diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 9017a63..a70ee84 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -50,7 +50,7 @@ static __always_inline unsigned long follow_table(struct mm_struct *mm, ptep = pte_offset_map(pmd, addr); if (!pte_present(*ptep)) return -0x11UL; - if (write && !pte_write(*ptep)) + if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) return -0x04UL; return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 42601d6..2fb9e63 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -569,7 +569,7 @@ static void pfault_interrupt(struct ext_code ext_code, subcode = ext_code.subcode; if ((subcode & 0xff00) != __SUBCODE_MASK) return; - kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; + inc_irq_stat(IRQEXT_PFL); /* Get the token (= pid of the affected task). */ pid = sizeof(void *) == 4 ? param32 : param64; rcu_read_lock(); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index ae672f4..49ce6bb2 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -228,4 +228,16 @@ int arch_add_memory(int nid, u64 start, u64 size) vmem_remove_mapping(start, size); return rc; } + +#ifdef CONFIG_MEMORY_HOTREMOVE +int arch_remove_memory(u64 start, u64 size) +{ + /* + * There is no hardware or firmware interface which could trigger a + * hot memory remove on s390. So there is nothing that needs to be + * implemented. 
+ */ + return -EBUSY; +} +#endif #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index c59a5ef..06bafec 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -101,12 +101,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm) #else -int s390_mmap_check(unsigned long addr, unsigned long len) +int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) { int rc; - if (!is_compat_task() && - len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) { + if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) + return 0; + if (!(flags & MAP_FIXED)) + addr = 0; + if ((addr + len) >= TASK_SIZE) { rc = crst_table_upgrade(current->mm, 1UL << 53); if (rc) return rc; diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 29ccee3..d21040e 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -127,7 +127,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable) pte_val(*pte) = _PAGE_TYPE_EMPTY; continue; } - *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW)); + pte_val(*pte) = __pa(address); } } diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 6ed1426..e21aaf4 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -85,11 +85,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) pud_t *pu_dir; pmd_t *pm_dir; pte_t *pt_dir; - pte_t pte; int ret = -ENOMEM; while (address < end) { - pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0)); pg_dir = pgd_offset_k(address); if (pgd_none(*pg_dir)) { pu_dir = vmem_pud_alloc(); @@ -101,9 +99,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { - pte_val(pte) |= _REGION3_ENTRY_LARGE; - pte_val(pte) |= _REGION_ENTRY_TYPE_R3; - pud_val(*pu_dir) = pte_val(pte); + pud_val(*pu_dir) = __pa(address) | + _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE | + (ro ? _REGION_ENTRY_RO : 0); address += PUD_SIZE; continue; } @@ -118,8 +116,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address && !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { - pte_val(pte) |= _SEGMENT_ENTRY_LARGE; - pmd_val(*pm_dir) = pte_val(pte); + pmd_val(*pm_dir) = __pa(address) | + _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | + (ro ? _SEGMENT_ENTRY_RO : 0); address += PMD_SIZE; continue; } @@ -132,7 +131,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) } pt_dir = pte_offset_kernel(pm_dir, address); - *pt_dir = pte; + pte_val(*pt_dir) = __pa(address) | (ro ? 
_PAGE_RO : 0); address += PAGE_SIZE; } ret = 0; @@ -199,7 +198,6 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) pud_t *pu_dir; pmd_t *pm_dir; pte_t *pt_dir; - pte_t pte; int ret = -ENOMEM; start_addr = (unsigned long) start; @@ -237,9 +235,8 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) new_page = vmemmap_alloc_block(PMD_SIZE, node); if (!new_page) goto out; - pte = mk_pte_phys(__pa(new_page), PAGE_RW); - pte_val(pte) |= _SEGMENT_ENTRY_LARGE; - pmd_val(*pm_dir) = pte_val(pte); + pmd_val(*pm_dir) = __pa(new_page) | + _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE; address = (address + PMD_SIZE) & PMD_MASK; continue; } @@ -260,8 +257,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) new_page =__pa(vmem_alloc_pages(0)); if (!new_page) goto out; - pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); - *pt_dir = pte; + pte_val(*pt_dir) = __pa(new_page); } address += PAGE_SIZE; } @@ -272,6 +268,10 @@ out: return ret; } +void vmemmap_free(struct page *memmap, unsigned long nr_pages) +{ +} + /* * Add memory segment to the segment list if it doesn't overlap with * an already present segment. diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index bb28441..0972e91 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -7,6 +7,7 @@ */ #include <linux/moduleloader.h> #include <linux/netdevice.h> +#include <linux/if_vlan.h> #include <linux/filter.h> #include <asm/cacheflush.h> #include <asm/processor.h> @@ -254,6 +255,8 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter) case BPF_S_ANC_HATYPE: case BPF_S_ANC_RXHASH: case BPF_S_ANC_CPU: + case BPF_S_ANC_VLAN_TAG: + case BPF_S_ANC_VLAN_TAG_PRESENT: case BPF_S_RET_K: /* first instruction sets A register */ break; @@ -699,6 +702,24 @@ call_fn: /* lg %r1,<d(function)>(%r13) */ /* l %r5,<d(rxhash)>(%r2) */ EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash)); break; + case BPF_S_ANC_VLAN_TAG: + case BPF_S_ANC_VLAN_TAG_PRESENT: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); + BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); + /* lhi %r5,0 */ + EMIT4(0xa7580000); + /* icm %r5,3,<d(vlan_tci)>(%r2) */ + EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci)); + if (filter->code == BPF_S_ANC_VLAN_TAG) { + /* nill %r5,0xefff */ + EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT); + } else { + /* nill %r5,0x1000 */ + EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT); + /* srl %r5,12 */ + EMIT4_DISP(0x88500000, 12); + } + break; case BPF_S_ANC_CPU: /* A = smp_processor_id() */ #ifdef CONFIG_SMP /* l %r5,<d(cpu_nr)> */ diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 0cb385d..b5b2916 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c @@ -233,7 +233,7 @@ static void hws_ext_handler(struct ext_code ext_code, if (!(param32 & CPU_MF_INT_SF_MASK)) return; - kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++; + inc_irq_stat(IRQEXT_CMS); atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); if (hws_wq) diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile index ab0827b..f0f426a 100644 --- a/arch/s390/pci/Makefile +++ b/arch/s390/pci/Makefile @@ -3,4 +3,4 @@ # obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \ - pci_sysfs.o pci_event.o + pci_sysfs.o pci_event.o pci_debug.o diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 7ed38e5..27b4c17 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -51,8 
+51,7 @@ EXPORT_SYMBOL_GPL(zpci_list);
 DEFINE_MUTEX(zpci_list_lock);
 EXPORT_SYMBOL_GPL(zpci_list_lock);
 
-struct pci_hp_callback_ops hotplug_ops;
-EXPORT_SYMBOL_GPL(hotplug_ops);
+static struct pci_hp_callback_ops *hotplug_ops;
 
 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
 static DEFINE_SPINLOCK(zpci_domain_lock);
@@ -98,6 +97,10 @@ EXPORT_SYMBOL_GPL(zpci_iomap_start);
 static int __read_mostly aisb_max;
 
 static struct kmem_cache *zdev_irq_cache;
+static struct kmem_cache *zdev_fmb_cache;
+
+debug_info_t *pci_debug_msg_id;
+debug_info_t *pci_debug_err_id;
 
 static inline int irq_to_msi_nr(unsigned int irq)
 {
@@ -156,35 +159,6 @@ int pci_proc_domain(struct pci_bus *bus)
 }
 EXPORT_SYMBOL_GPL(pci_proc_domain);
 
-/* Store PCI function information block */
-static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
-{
-	struct zpci_fib *fib;
-	u8 status, cc;
-
-	fib = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!fib)
-		return -ENOMEM;
-
-	do {
-		cc = __stpcifc(zdev->fh, 0, fib, &status);
-		if (cc == 2) {
-			msleep(ZPCI_INSN_BUSY_DELAY);
-			memset(fib, 0, PAGE_SIZE);
-		}
-	} while (cc == 2);
-
-	if (cc)
-		pr_err_once("%s: cc: %u status: %u\n",
-			    __func__, cc, status);
-
-	/* Return PCI function controls */
-	*fc = fib->fc;
-
-	free_page((unsigned long) fib);
-	return (cc) ? -EIO : 0;
-}
-
 /* Modify PCI: Register adapter interruptions */
 static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
 			      u64 aibv)
@@ -216,6 +190,7 @@ struct mod_pci_args {
 	u64 base;
 	u64 limit;
 	u64 iota;
+	u64 fmb_addr;
 };
 
 static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
@@ -232,6 +207,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
 	fib->pba = args->base;
 	fib->pal = args->limit;
 	fib->iota = args->iota;
+	fib->fmb_addr = args->fmb_addr;
 
 	rc = mpcifc_instr(req, fib);
 	free_page((unsigned long) fib);
@@ -242,7 +218,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
 int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
 		       u64 base, u64 limit, u64 iota)
 {
-	struct mod_pci_args args = { base, limit, iota };
+	struct mod_pci_args args = { base, limit, iota, 0 };
 
 	WARN_ON_ONCE(iota & 0x3fff);
 	args.iota |= ZPCI_IOTA_RTTO_FLAG;
@@ -252,7 +228,7 @@ int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
 /* Modify PCI: Unregister I/O address translation parameters */
 int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
 {
-	struct mod_pci_args args = { 0, 0, 0 };
+	struct mod_pci_args args = { 0, 0, 0, 0 };
 
 	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
 }
@@ -260,11 +236,46 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
 /* Modify PCI: Unregister adapter interruptions */
 static int zpci_unregister_airq(struct zpci_dev *zdev)
 {
-	struct mod_pci_args args = { 0, 0, 0 };
+	struct mod_pci_args args = { 0, 0, 0, 0 };
 
 	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
 }
 
+/* Modify PCI: Set PCI function measurement parameters */
+int zpci_fmb_enable_device(struct zpci_dev *zdev)
+{
+	struct mod_pci_args args = { 0, 0, 0, 0 };
+
+	if (zdev->fmb)
+		return -EINVAL;
+
+	zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL);
+	if (!zdev->fmb)
+		return -ENOMEM;
+	memset(zdev->fmb, 0, sizeof(*zdev->fmb));
+	WARN_ON((u64) zdev->fmb & 0xf);
+
+	args.fmb_addr = virt_to_phys(zdev->fmb);
+	return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
+}
+
+/* Modify PCI: Disable PCI function measurement */
+int zpci_fmb_disable_device(struct zpci_dev *zdev)
+{
+	struct mod_pci_args args = { 0, 0, 0, 0 };
+	int rc;
+
+	if (!zdev->fmb)
+		return -EINVAL;
+
+	/* Function measurement is disabled if fmb address is zero */
+	rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
+
+	kmem_cache_free(zdev_fmb_cache, zdev->fmb);
+	zdev->fmb = NULL;
+	return rc;
+}
+
 #define ZPCI_PCIAS_CFGSPC	15
 
 static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
@@ -344,7 +355,7 @@ unsigned int probe_irq_mask(unsigned long val)
 }
 EXPORT_SYMBOL_GPL(probe_irq_mask);
 
-void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+void pcibios_fixup_bus(struct pci_bus *bus)
 {
 }
 
@@ -428,7 +439,7 @@ static void zpci_irq_handler(void *dont, void *need)
 	int rescan = 0, max = aisb_max;
 	struct zdev_irq_map *imap;
 
-	kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
+	inc_irq_stat(IRQIO_PCI);
 	sbit = start;
 
 scan:
@@ -440,7 +451,7 @@ scan:
 		/* find vector bit */
 		imap = bucket->imap[sbit];
 		for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
-			kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
+			inc_irq_stat(IRQIO_MSI);
 			clear_bit(63 - mbit, &imap->aibv);
 
 			spin_lock(&imap->lock);
@@ -633,6 +644,7 @@ static void zpci_remove_device(struct pci_dev *pdev)
 	dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
 	zdev->state = ZPCI_FN_STATE_CONFIGURED;
 	zpci_dma_exit_device(zdev);
+	zpci_fmb_disable_device(zdev);
 	zpci_sysfs_remove_device(&pdev->dev);
 	zpci_unmap_resources(pdev);
 	list_del(&zdev->entry);		/* can be called from init */
@@ -799,6 +811,16 @@ static void zpci_irq_exit(void)
 	kfree(bucket);
 }
 
+void zpci_debug_info(struct zpci_dev *zdev, struct seq_file *m)
+{
+	if (!zdev)
+		return;
+
+	seq_printf(m, "global irq retries: %u\n", atomic_read(&irq_retries));
+	seq_printf(m, "aibv[0]:%016lx aibv[1]:%016lx aisb:%016lx\n",
+		   get_imap(0)->aibv, get_imap(1)->aibv, *bucket->aisb);
+}
+
 static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
 						unsigned long flags, int domain)
@@ -951,8 +973,8 @@ int zpci_create_device(struct zpci_dev *zdev)
 
 	mutex_lock(&zpci_list_lock);
 	list_add_tail(&zdev->entry, &zpci_list);
-	if (hotplug_ops.create_slot)
-		hotplug_ops.create_slot(zdev);
+	if (hotplug_ops)
+		hotplug_ops->create_slot(zdev);
 	mutex_unlock(&zpci_list_lock);
 
 	if (zdev->state == ZPCI_FN_STATE_STANDBY)
@@ -966,8 +988,8 @@ int zpci_create_device(struct zpci_dev *zdev)
 out_start:
 	mutex_lock(&zpci_list_lock);
 	list_del(&zdev->entry);
-	if (hotplug_ops.remove_slot)
-		hotplug_ops.remove_slot(zdev);
+	if (hotplug_ops)
+		hotplug_ops->remove_slot(zdev);
 	mutex_unlock(&zpci_list_lock);
 out_bus:
 	zpci_free_domain(zdev);
@@ -994,6 +1016,8 @@ int zpci_scan_device(struct zpci_dev *zdev)
 		goto out;
 	}
 
+	zpci_debug_init_device(zdev);
+	zpci_fmb_enable_device(zdev);
 	zpci_map_resources(zdev);
 	pci_bus_add_devices(zdev->bus);
 
@@ -1020,6 +1044,11 @@ static int zpci_mem_init(void)
 	if (!zdev_irq_cache)
 		goto error_zdev;
 
+	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
+					   16, 0, NULL);
+	if (!zdev_fmb_cache)
+		goto error_fmb;
+
 	/* TODO: use realloc */
 	zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
 				   GFP_KERNEL);
@@ -1028,6 +1057,8 @@ static int zpci_mem_init(void)
 	return 0;
 
 error_iomap:
+	kmem_cache_destroy(zdev_fmb_cache);
+error_fmb:
 	kmem_cache_destroy(zdev_irq_cache);
 error_zdev:
 	return -ENOMEM;
@@ -1037,15 +1068,32 @@ static void zpci_mem_exit(void)
 {
 	kfree(zpci_iomap_start);
 	kmem_cache_destroy(zdev_irq_cache);
+	kmem_cache_destroy(zdev_fmb_cache);
 }
 
-unsigned int pci_probe = 1;
-EXPORT_SYMBOL_GPL(pci_probe);
+void zpci_register_hp_ops(struct pci_hp_callback_ops *ops)
+{
+	mutex_lock(&zpci_list_lock);
+	hotplug_ops = ops;
+	mutex_unlock(&zpci_list_lock);
+}
+EXPORT_SYMBOL_GPL(zpci_register_hp_ops);
+
+void zpci_deregister_hp_ops(void)
+{
+	mutex_lock(&zpci_list_lock);
+	hotplug_ops = NULL;
+	mutex_unlock(&zpci_list_lock);
+}
+EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
+
+unsigned int s390_pci_probe = 1;
+EXPORT_SYMBOL_GPL(s390_pci_probe);
 
 char * __init pcibios_setup(char *str)
 {
 	if (!strcmp(str, "off")) {
-		pci_probe = 0;
+		s390_pci_probe = 0;
 		return NULL;
 	}
 	return str;
@@ -1055,7 +1103,7 @@ static int __init pci_base_init(void)
 {
 	int rc;
 
-	if (!pci_probe)
+	if (!s390_pci_probe)
 		return 0;
 
 	if (!test_facility(2) || !test_facility(69)
@@ -1066,6 +1114,10 @@ static int __init pci_base_init(void)
 		       test_facility(69), test_facility(70),
 		       test_facility(71));
 
+	rc = zpci_debug_init();
+	if (rc)
+		return rc;
+
 	rc = zpci_mem_init();
 	if (rc)
 		goto out_mem;
@@ -1098,6 +1150,7 @@ out_irq:
 out_hash:
 	zpci_mem_exit();
 out_mem:
+	zpci_debug_exit();
 	return rc;
 }
 subsys_initcall(pci_base_init);
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 7f4ce8d..f339fe2f 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -19,25 +19,25 @@
  * Call Logical Processor
  * Retry logic is handled by the caller.
  */
-static inline u8 clp_instr(void *req)
+static inline u8 clp_instr(void *data)
 {
-	u64 ilpm;
+	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
+	u64 ignored;
 	u8 cc;
 
 	asm volatile (
-		"	.insn	rrf,0xb9a00000,%[ilpm],%[req],0x0,0x2\n"
+		"	.insn	rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
 		"	ipm	%[cc]\n"
 		"	srl	%[cc],28\n"
-		: [cc] "=d" (cc), [ilpm] "=d" (ilpm)
+		: [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
 		: [req] "a" (req)
-		: "cc", "memory");
+		: "cc");
 	return cc;
 }
 
 static void *clp_alloc_block(void)
 {
-	struct page *page = alloc_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
-	return (page) ? page_address(page) : NULL;
+	return (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
 }
 
 static void clp_free_block(void *ptr)
@@ -51,6 +51,7 @@ static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
 	zdev->tlb_refresh = response->refresh;
 	zdev->dma_mask = response->dasm;
 	zdev->msi_addr = response->msia;
+	zdev->fmb_update = response->mui;
 
 	pr_debug("Supported number of MSI vectors: %u\n", response->noi);
 	switch (response->version) {
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
new file mode 100644
index 0000000..a303c95
--- /dev/null
+++ b/arch/s390/pci/pci_debug.c
@@ -0,0 +1,193 @@
+/*
+ *  Copyright IBM Corp. 2012
+ *
+ *  Author(s):
+ *    Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <asm/debug.h>
+
+#include <asm/pci_dma.h>
+
+static struct dentry *debugfs_root;
+
+static char *pci_perf_names[] = {
+	/* hardware counters */
+	"Load operations",
+	"Store operations",
+	"Store block operations",
+	"Refresh operations",
+	"DMA read bytes",
+	"DMA write bytes",
+	/* software counters */
+	"Allocated pages",
+	"Mapped pages",
+	"Unmapped pages",
+};
+
+static int pci_perf_show(struct seq_file *m, void *v)
+{
+	struct zpci_dev *zdev = m->private;
+	u64 *stat;
+	int i;
+
+	if (!zdev)
+		return 0;
+	if (!zdev->fmb)
+		return seq_printf(m, "FMB statistics disabled\n");
+
+	/* header */
+	seq_printf(m, "FMB @ %p\n", zdev->fmb);
+	seq_printf(m, "Update interval: %u ms\n", zdev->fmb_update);
+	seq_printf(m, "Samples: %u\n", zdev->fmb->samples);
+	seq_printf(m, "Last update TOD: %Lx\n", zdev->fmb->last_update);
+
+	/* hardware counters */
+	stat = (u64 *) &zdev->fmb->ld_ops;
+	for (i = 0; i < 4; i++)
+		seq_printf(m, "%26s:\t%llu\n",
+			   pci_perf_names[i], *(stat + i));
+	if (zdev->fmb->dma_valid)
+		for (i = 4; i < 6; i++)
+			seq_printf(m, "%26s:\t%llu\n",
+				   pci_perf_names[i], *(stat + i));
+	/* software counters */
+	for (i = 6; i < ARRAY_SIZE(pci_perf_names); i++)
+		seq_printf(m, "%26s:\t%llu\n",
+			   pci_perf_names[i],
+			   atomic64_read((atomic64_t *) (stat + i)));
+
+	return 0;
+}
+
+static ssize_t pci_perf_seq_write(struct file *file, const char __user *ubuf,
+				  size_t count, loff_t *off)
+{
+	struct zpci_dev *zdev = ((struct seq_file *) file->private_data)->private;
+	unsigned long val;
+	int rc;
+
+	if (!zdev)
+		return 0;
+
+	rc = kstrtoul_from_user(ubuf, count, 10, &val);
+	if (rc)
+		return rc;
+
+	switch (val) {
+	case 0:
+		rc = zpci_fmb_disable_device(zdev);
+		if (rc)
+			return rc;
+		break;
+	case 1:
+		rc = zpci_fmb_enable_device(zdev);
+		if (rc)
+			return rc;
+		break;
+	}
+	return count;
+}
+
+static int pci_perf_seq_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, pci_perf_show,
+			   filp->f_path.dentry->d_inode->i_private);
+}
+
+static const struct file_operations debugfs_pci_perf_fops = {
+	.open	 = pci_perf_seq_open,
+	.read	 = seq_read,
+	.write	 = pci_perf_seq_write,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+static int pci_debug_show(struct seq_file *m, void *v)
+{
+	struct zpci_dev *zdev = m->private;
+
+	zpci_debug_info(zdev, m);
+	return 0;
+}
+
+static int pci_debug_seq_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, pci_debug_show,
+			   filp->f_path.dentry->d_inode->i_private);
+}
+
+static const struct file_operations debugfs_pci_debug_fops = {
+	.open	 = pci_debug_seq_open,
+	.read	 = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+void zpci_debug_init_device(struct zpci_dev *zdev)
+{
+	zdev->debugfs_dev = debugfs_create_dir(dev_name(&zdev->pdev->dev),
+					       debugfs_root);
+	if (IS_ERR(zdev->debugfs_dev))
+		zdev->debugfs_dev = NULL;
+
+	zdev->debugfs_perf = debugfs_create_file("statistics",
+				S_IFREG | S_IRUGO | S_IWUSR,
+				zdev->debugfs_dev, zdev,
+				&debugfs_pci_perf_fops);
+	if (IS_ERR(zdev->debugfs_perf))
+		zdev->debugfs_perf = NULL;
+
+	zdev->debugfs_debug = debugfs_create_file("debug",
+				S_IFREG | S_IRUGO | S_IWUSR,
+				zdev->debugfs_dev, zdev,
+				&debugfs_pci_debug_fops);
+	if (IS_ERR(zdev->debugfs_debug))
+		zdev->debugfs_debug = NULL;
+}
+
+void zpci_debug_exit_device(struct zpci_dev *zdev)
+{
+	debugfs_remove(zdev->debugfs_perf);
+	debugfs_remove(zdev->debugfs_debug);
+	debugfs_remove(zdev->debugfs_dev);
+}
+
+int __init zpci_debug_init(void)
+{
+	/* event trace buffer */
+	pci_debug_msg_id = debug_register("pci_msg", 16, 1, 16 * sizeof(long));
+	if (!pci_debug_msg_id)
+		return -EINVAL;
+	debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
+	debug_set_level(pci_debug_msg_id, 3);
+	zpci_dbg("Debug view initialized\n");
+
+	/* error log */
+	pci_debug_err_id = debug_register("pci_error", 2, 1, 16);
+	if (!pci_debug_err_id)
+		return -EINVAL;
+	debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
+	debug_set_level(pci_debug_err_id, 6);
+	zpci_err("Debug view initialized\n");
+
+	debugfs_root = debugfs_create_dir("pci", NULL);
+	return 0;
+}
+
+void zpci_debug_exit(void)
+{
+	if (pci_debug_msg_id)
+		debug_unregister(pci_debug_msg_id);
+	if (pci_debug_err_id)
+		debug_unregister(pci_debug_err_id);
+
+	debugfs_remove(debugfs_root);
+}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index c64b4b2..a547419 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -13,8 +13,6 @@
 #include <linux/pci.h>
 #include <asm/pci_dma.h>
 
-static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;
-
 static struct kmem_cache *dma_region_table_cache;
 static struct kmem_cache *dma_page_table_cache;
@@ -291,8 +289,10 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
 		flags |= ZPCI_TABLE_PROTECTED;
 
-	if (!dma_update_trans(zdev, pa, dma_addr, size, flags))
+	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
+		atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
 		return dma_addr + offset;
+	}
 
 out_free:
 	dma_free_iommu(zdev, iommu_page_index, nr_pages);
@@ -315,6 +315,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
 			 ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
 		dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);
 
+	atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
 	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
 	dma_free_iommu(zdev, iommu_page_index, npages);
 }
@@ -323,6 +324,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
 			    dma_addr_t *dma_handle, gfp_t flag,
 			    struct dma_attrs *attrs)
 {
+	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
 	struct page *page;
 	unsigned long pa;
 	dma_addr_t map;
@@ -331,6 +333,8 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
 	page = alloc_pages(flag, get_order(size));
 	if (!page)
 		return NULL;
+
+	atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
 	pa = page_to_phys(page);
 	memset((void *) pa, 0, size);
 
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index dbed8cd..ec62e3a 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -45,6 +45,8 @@ static void zpci_event_log_err(struct zpci_ccdf_err *ccdf)
 {
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
 
+	zpci_err("SEI error CCD:\n");
+	zpci_err_hex(ccdf, sizeof(*ccdf));
 	dev_err(&zdev->pdev->dev, "event code: 0x%x\n", ccdf->pec);
 }
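Usage note (not part of the patch): the pci.c hunks above replace the exported hotplug_ops structure with the zpci_register_hp_ops()/zpci_deregister_hp_ops() registration interface. A minimal sketch of how a hotplug driver might consume it follows; the callback prototypes and the <asm/pci.h> header location are assumptions inferred from the call sites in zpci_create_device(), not taken from this diff.

/*
 * Illustrative sketch only, under the assumptions stated above.
 */
#include <linux/module.h>
#include <asm/pci.h>

static int example_create_slot(struct zpci_dev *zdev)
{
	/* allocate and register a hotplug slot for this PCI function */
	return 0;
}

static void example_remove_slot(struct zpci_dev *zdev)
{
	/* tear down the slot created above */
}

static struct pci_hp_callback_ops example_hp_ops = {
	.create_slot = example_create_slot,
	.remove_slot = example_remove_slot,
};

static int __init example_hp_init(void)
{
	/* publish the callbacks; zpci_create_device() will invoke create_slot */
	zpci_register_hp_ops(&example_hp_ops);
	return 0;
}

static void __exit example_hp_exit(void)
{
	/* clear the pointer so newly scanned devices no longer get a slot */
	zpci_deregister_hp_ops();
}

module_init(example_hp_init);
module_exit(example_hp_exit);
MODULE_LICENSE("GPL");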