From dd17c8f72993f9461e9c19250e3f155d6d99df22 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 29 Oct 2009 22:34:15 +0900 Subject: percpu: remove per_cpu__ prefix. Now that the return from alloc_percpu is compatible with the address of per-cpu vars, it makes sense to hand around the address of per-cpu variables. To make this sane, we remove the per_cpu__ prefix we created to stop people accidentally using these vars directly. Now that we have sparse, we can use that (next patch). tj: * Updated to convert stuff which was missed by or added after the original patch. * Kill per_cpu_var() macro. Signed-off-by: Rusty Russell Signed-off-by: Tejun Heo Reviewed-by: Christoph Lameter --- arch/ia64/include/asm/percpu.h | 4 ++-- arch/ia64/kernel/ia64_ksyms.c | 4 ++-- arch/ia64/mm/discontig.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h index 30cf465..f7c00a5 100644 --- a/arch/ia64/include/asm/percpu.h +++ b/arch/ia64/include/asm/percpu.h @@ -9,7 +9,7 @@ #define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE #ifdef __ASSEMBLY__ -# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */ +# define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... */ #else /* !__ASSEMBLY__ */ @@ -39,7 +39,7 @@ extern void *per_cpu_init(void); * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly * more efficient. */ -#define __ia64_per_cpu_var(var) per_cpu__##var +#define __ia64_per_cpu_var(var) var #include diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c index 461b999..7f4a0ed 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c @@ -30,9 +30,9 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic #endif #include -EXPORT_SYMBOL(per_cpu__ia64_cpu_info); +EXPORT_SYMBOL(ia64_cpu_info); #ifdef CONFIG_SMP -EXPORT_SYMBOL(per_cpu__local_per_cpu_offset); +EXPORT_SYMBOL(local_per_cpu_offset); #endif #include diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 19c4b21..8d586d1 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -459,7 +459,7 @@ static void __init initialize_pernode_data(void) cpu = 0; node = node_cpuid[cpu].nid; cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start + - ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start)); + ((char *)&ia64_cpu_info - __per_cpu_start)); cpu0_cpu_info->node_data = mem_data[node].node_data; } #endif /* CONFIG_SMP */ -- cgit v1.1 From 3a4d5c94e959359ece6d6b55045c3f046677f55c Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 14 Jan 2010 06:17:27 +0000 Subject: vhost_net: a kernel-level virtio server What it is: vhost net is a character device that can be used to reduce the number of system calls involved in virtio networking. Existing virtio net code is used in the guest without modification. There's similarity with vringfd, with some differences and reduced scope - uses eventfd for signalling - structures can be moved around in memory at any time (good for migration, bug work-arounds in userspace) - write logging is supported (good for migration) - support memory table and not just an offset (needed for kvm) common virtio related code has been put in a separate file vhost.c and can be made into a separate module if/when more backends appear.
I used Rusty's lguest.c as the source for developing this part: this supplied me with witty comments I wouldn't be able to write myself. What it is not: vhost net is not a bus, and not a generic new system call. No assumptions are made on how the guest performs hypercalls. Userspace hypervisors are supported as well as kvm. How it works: Basically, we connect virtio frontend (configured by userspace) to a backend. The backend could be a network device, or a tap device. Backend is also configured by userspace, including vlan/mac etc. Status: This works for me, and I haven't seen any crashes. Compared to userspace, people reported improved latency (as I save up to 4 system calls per packet), as well as better bandwidth and CPU utilization. Features that I plan to look at in the future: - mergeable buffers - zero copy - scalability tuning: figure out the best threading model to use Note on RCU usage (this is also documented in vhost.h, near private_pointer which is the value protected by this variant of RCU): what is happening is that the rcu_dereference() is being used in a workqueue item. The role of rcu_read_lock() is taken on by the start of execution of the workqueue item, of rcu_read_unlock() by the end of execution of the workqueue item, and of synchronize_rcu() by flush_workqueue()/flush_work(). In the future we might need to apply some gcc attribute or sparse annotation to the function passed to INIT_WORK(). Paul's ack below is for this RCU usage. (Includes fixes by Alan Cox, David L Stevens, Chris Wright) Acked-by: Rusty Russell Acked-by: Arnd Bergmann Acked-by: "Paul E. McKenney" Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- arch/ia64/kvm/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig index ef3e7be..01c7579 100644 --- a/arch/ia64/kvm/Kconfig +++ b/arch/ia64/kvm/Kconfig @@ -47,6 +47,7 @@ config KVM_INTEL Provides support for KVM on Itanium 2 processors equipped with the VT extensions. +source drivers/vhost/Kconfig source drivers/virtio/Kconfig endif # VIRTUALIZATION -- cgit v1.1 From 439913fffd39374c3737186b22d2d56c3a0ae526 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Thu, 28 Jan 2010 10:53:19 +0800 Subject: ACPI: replace acpi_integer by u64 acpi_integer is now obsolete and removed from the ACPICA code base, replaced by u64.
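For illustration, a minimal sketch of what an ACPI operation-region handler looks like after this conversion (the handler name and body here are invented; the point is that the value parameter is now a plain u64 * rather than acpi_integer *):

#include <linux/acpi.h>

static acpi_status example_opregion_handler(u32 function,
		acpi_physical_address address, u32 bit_width,
		u64 *value,	/* was: acpi_integer *value */
		void *handler_context, void *region_context)
{
	if (function == ACPI_READ)
		*value = 0;	/* fill in from the device here */
	/* for ACPI_WRITE, *value carries the 64-bit data to store */
	return AE_OK;
}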
Signed-off-by: Lin Ming Signed-off-by: Len Brown --- arch/ia64/hp/common/aml_nfw.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/hp/common/aml_nfw.c b/arch/ia64/hp/common/aml_nfw.c index 4abd2c79..2207848 100644 --- a/arch/ia64/hp/common/aml_nfw.c +++ b/arch/ia64/hp/common/aml_nfw.c @@ -77,7 +77,7 @@ static void aml_nfw_execute(struct ia64_nfw_context *c) c->arg[4], c->arg[5], c->arg[6], c->arg[7]); } -static void aml_nfw_read_arg(u8 *offset, u32 bit_width, acpi_integer *value) +static void aml_nfw_read_arg(u8 *offset, u32 bit_width, u64 *value) { switch (bit_width) { case 8: @@ -95,7 +95,7 @@ static void aml_nfw_read_arg(u8 *offset, u32 bit_width, acpi_integer *value) } } -static void aml_nfw_write_arg(u8 *offset, u32 bit_width, acpi_integer *value) +static void aml_nfw_write_arg(u8 *offset, u32 bit_width, u64 *value) { switch (bit_width) { case 8: @@ -114,7 +114,7 @@ static void aml_nfw_write_arg(u8 *offset, u32 bit_width, acpi_integer *value) } static acpi_status aml_nfw_handler(u32 function, acpi_physical_address address, - u32 bit_width, acpi_integer *value, void *handler_context, + u32 bit_width, u64 *value, void *handler_context, void *region_context) { struct ia64_nfw_context *context = handler_context; -- cgit v1.1 From 615d0ebbc782b67296e3226c293f520f93f93515 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 2 Feb 2010 16:49:04 -0500 Subject: kprobes: Disable booster when CONFIG_PREEMPT=y Disable kprobe booster when CONFIG_PREEMPT=y at this time, because it can't ensure that all kernel threads preempted on kprobe's boosted slot run out from the slot even using freeze_processes(). The booster on preemptive kernel will be resumed if synchronize_tasks() or something like that is introduced. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Ananth N Mavinakayanahalli Cc: Frederic Weisbecker Cc: Jim Keniston Cc: Mathieu Desnoyers Cc: Steven Rostedt LKML-Reference: <20100202214904.4694.24330.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- arch/ia64/kernel/kprobes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 9adac44..7026b29 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -870,7 +870,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) return 1; ss_probe: -#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) +#if !defined(CONFIG_PREEMPT) if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ ia64_psr(regs)->ri = p->ainsn.slot; -- cgit v1.1 From 17622339af2536b32cf29699ddd4ba0fe79a61d5 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 2 Feb 2010 14:41:39 -0800 Subject: clocksource: add argument to resume callback Pass the clocksource as an argument to the clocksource resume callback. Needed so we can point out which CMT channel the sh_cmt.c driver shall resume. 
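As an illustrative sketch only (the driver name and private structure are invented), a clocksource's resume hook can now recover per-instance state from the pointer it is handed instead of relying on a global:

#include <linux/kernel.h>
#include <linux/clocksource.h>

struct example_priv {
	struct clocksource cs;
	/* per-channel state would live here */
};

static void example_cs_resume(struct clocksource *cs)
{
	struct example_priv *p = container_of(cs, struct example_priv, cs);
	/* resume just this channel using p */
	(void)p;
}

static struct example_priv example = {
	.cs = {
		.name	= "example",
		.rating	= 200,
		.resume	= example_cs_resume,	/* now takes struct clocksource * */
		/* .read etc. omitted from this sketch */
	},
};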
Signed-off-by: Magnus Damm Cc: john stultz Cc: Paul Mundt Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner --- arch/ia64/kernel/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index a35c661..47a1927 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -61,7 +61,7 @@ unsigned long long sched_clock(void) #ifdef CONFIG_PARAVIRT static void -paravirt_clocksource_resume(void) +paravirt_clocksource_resume(struct clocksource *cs) { if (pv_time_ops.clocksource_resume) pv_time_ops.clocksource_resume(); -- cgit v1.1 From 32974ad4907cdde6c9de612cd1b2ee0568fb9409 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Mon, 8 Feb 2010 10:42:17 -0800 Subject: [IA64] Remove COMPAT_IA32 support This has been broken since May 2008 when Al Viro killed altroot support. Since nobody has complained, it would appear that there are no users of this code (A plausible theory since the main OSVs that support ia64 prefer to use the IA32-EL software emulation). Signed-off-by: Tony Luck --- arch/ia64/Kconfig | 17 - arch/ia64/Makefile | 1 - arch/ia64/configs/bigsur_defconfig | 2 - arch/ia64/configs/generic_defconfig | 2 - arch/ia64/configs/gensparse_defconfig | 2 - arch/ia64/configs/sim_defconfig | 2 - arch/ia64/configs/tiger_defconfig | 1 - arch/ia64/configs/xen_domu_defconfig | 2 - arch/ia64/configs/zx1_defconfig | 2 - arch/ia64/ia32/Makefile | 11 - arch/ia64/ia32/audit.c | 42 - arch/ia64/ia32/binfmt_elf32.c | 245 --- arch/ia64/ia32/elfcore32.h | 148 -- arch/ia64/ia32/ia32_entry.S | 468 ------ arch/ia64/ia32/ia32_ldt.c | 146 -- arch/ia64/ia32/ia32_signal.c | 1010 ------------ arch/ia64/ia32/ia32_support.c | 253 --- arch/ia64/ia32/ia32_traps.c | 156 -- arch/ia64/ia32/ia32priv.h | 532 ------- arch/ia64/ia32/sys_ia32.c | 2765 --------------------------------- arch/ia64/include/asm/ia32.h | 40 - arch/ia64/include/asm/processor.h | 46 - arch/ia64/include/asm/syscall.h | 81 - arch/ia64/include/asm/system.h | 11 +- arch/ia64/include/asm/unistd.h | 14 - arch/ia64/kernel/audit.c | 21 - arch/ia64/kernel/entry.S | 39 - arch/ia64/kernel/ivt.S | 114 -- arch/ia64/kernel/process.c | 55 +- arch/ia64/kernel/ptrace.c | 14 +- arch/ia64/kernel/setup.c | 5 - arch/ia64/kernel/signal.c | 54 +- arch/ia64/kernel/smpboot.c | 5 - arch/ia64/kernel/traps.c | 9 - arch/ia64/mm/init.c | 5 - arch/ia64/xen/hypercall.S | 5 - arch/ia64/xen/xen_pv_ops.c | 16 - 37 files changed, 20 insertions(+), 6321 deletions(-) delete mode 100644 arch/ia64/ia32/Makefile delete mode 100644 arch/ia64/ia32/audit.c delete mode 100644 arch/ia64/ia32/binfmt_elf32.c delete mode 100644 arch/ia64/ia32/elfcore32.h delete mode 100644 arch/ia64/ia32/ia32_entry.S delete mode 100644 arch/ia64/ia32/ia32_ldt.c delete mode 100644 arch/ia64/ia32/ia32_signal.c delete mode 100644 arch/ia64/ia32/ia32_support.c delete mode 100644 arch/ia64/ia32/ia32_traps.c delete mode 100644 arch/ia64/ia32/ia32priv.h delete mode 100644 arch/ia64/ia32/sys_ia32.c delete mode 100644 arch/ia64/include/asm/ia32.h (limited to 'arch/ia64') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 2d7f56a..9a50d7d 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -499,23 +499,6 @@ config ARCH_PROC_KCORE_TEXT def_bool y depends on PROC_KCORE -config IA32_SUPPORT - bool "Support for Linux/x86 binaries" - help - IA-64 processors can execute IA-32 (X86) instructions. 
By - saying Y here, the kernel will include IA-32 system call - emulation support which makes it possible to transparently - run IA-32 Linux binaries on an IA-64 Linux system. - If in doubt, say Y. - -config COMPAT - bool - depends on IA32_SUPPORT - default y - -config COMPAT_FOR_U64_ALIGNMENT - def_bool COMPAT - config IA64_MCA_RECOVERY tristate "MCA recovery from errors other than TLB." diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 475e272..8ae0d26 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -46,7 +46,6 @@ head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o libs-y += arch/ia64/lib/ core-y += arch/ia64/kernel/ arch/ia64/mm/ -core-$(CONFIG_IA32_SUPPORT) += arch/ia64/ia32/ core-$(CONFIG_IA64_DIG) += arch/ia64/dig/ core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig index ace4109..312b120 100644 --- a/arch/ia64/configs/bigsur_defconfig +++ b/arch/ia64/configs/bigsur_defconfig @@ -131,8 +131,6 @@ CONFIG_ARCH_DISCONTIGMEM_ENABLE=y CONFIG_ARCH_FLATMEM_ENABLE=y CONFIG_ARCH_SPARSEMEM_ENABLE=y # CONFIG_VIRTUAL_MEM_MAP is not set -CONFIG_IA32_SUPPORT=y -CONFIG_COMPAT=y # CONFIG_IA64_MCA_RECOVERY is not set CONFIG_PERFMON=y CONFIG_IA64_PALINFO=y diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index 7564549..6a4cc50 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig @@ -205,8 +205,6 @@ CONFIG_VIRTUAL_MEM_MAP=y CONFIG_HOLES_IN_ZONE=y CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y -CONFIG_IA32_SUPPORT=y -CONFIG_COMPAT=y CONFIG_COMPAT_FOR_U64_ALIGNMENT=y CONFIG_IA64_MCA_RECOVERY=y CONFIG_PERFMON=y diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig index e86fbd3..2dc185b 100644 --- a/arch/ia64/configs/gensparse_defconfig +++ b/arch/ia64/configs/gensparse_defconfig @@ -139,8 +139,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y CONFIG_NUMA=y CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y -CONFIG_IA32_SUPPORT=y -CONFIG_COMPAT=y CONFIG_IA64_MCA_RECOVERY=y CONFIG_PERFMON=y CONFIG_IA64_PALINFO=y diff --git a/arch/ia64/configs/sim_defconfig b/arch/ia64/configs/sim_defconfig index 546a772..21a23cd 100644 --- a/arch/ia64/configs/sim_defconfig +++ b/arch/ia64/configs/sim_defconfig @@ -130,8 +130,6 @@ CONFIG_ARCH_DISCONTIGMEM_ENABLE=y CONFIG_ARCH_FLATMEM_ENABLE=y CONFIG_ARCH_SPARSEMEM_ENABLE=y # CONFIG_VIRTUAL_MEM_MAP is not set -CONFIG_IA32_SUPPORT=y -CONFIG_COMPAT=y # CONFIG_IA64_MCA_RECOVERY is not set # CONFIG_PERFMON is not set CONFIG_IA64_PALINFO=m diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig index c522edf..c5a5ea9 100644 --- a/arch/ia64/configs/tiger_defconfig +++ b/arch/ia64/configs/tiger_defconfig @@ -154,7 +154,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_ARCH_POPULATES_NODE_MAP=y CONFIG_VIRTUAL_MEM_MAP=y CONFIG_HOLES_IN_ZONE=y -# CONFIG_IA32_SUPPORT is not set CONFIG_IA64_MCA_RECOVERY=y CONFIG_PERFMON=y CONFIG_IA64_PALINFO=y diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig index 0bb0714..c67eafc 100644 --- a/arch/ia64/configs/xen_domu_defconfig +++ b/arch/ia64/configs/xen_domu_defconfig @@ -200,8 +200,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_ARCH_POPULATES_NODE_MAP=y CONFIG_VIRTUAL_MEM_MAP=y CONFIG_HOLES_IN_ZONE=y -# CONFIG_IA32_SUPPORT is not set -# CONFIG_COMPAT_FOR_U64_ALIGNMENT is not set 
CONFIG_IA64_MCA_RECOVERY=y CONFIG_PERFMON=y CONFIG_IA64_PALINFO=y diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig index 514f063..3cec65b 100644 --- a/arch/ia64/configs/zx1_defconfig +++ b/arch/ia64/configs/zx1_defconfig @@ -150,8 +150,6 @@ CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y CONFIG_ARCH_POPULATES_NODE_MAP=y CONFIG_VIRTUAL_MEM_MAP=y CONFIG_HOLES_IN_ZONE=y -CONFIG_IA32_SUPPORT=y -CONFIG_COMPAT=y CONFIG_IA64_MCA_RECOVERY=y CONFIG_PERFMON=y CONFIG_IA64_PALINFO=y diff --git a/arch/ia64/ia32/Makefile b/arch/ia64/ia32/Makefile deleted file mode 100644 index baad8c7..0000000 --- a/arch/ia64/ia32/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# -# Makefile for the ia32 kernel emulation subsystem. -# - -obj-y := ia32_entry.o sys_ia32.o ia32_signal.o \ - ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o -obj-$(CONFIG_AUDIT) += audit.o - -# Don't let GCC uses f16-f31 so that save_ia32_fpstate_live() and -# restore_ia32_fpstate_live() can be sure the live register contain user-level state. -CFLAGS_ia32_signal.o += -mfixed-range=f16-f31 diff --git a/arch/ia64/ia32/audit.c b/arch/ia64/ia32/audit.c deleted file mode 100644 index 5c93ddd..0000000 --- a/arch/ia64/ia32/audit.c +++ /dev/null @@ -1,42 +0,0 @@ -#include "../../x86/include/asm/unistd_32.h" - -unsigned ia32_dir_class[] = { -#include -~0U -}; - -unsigned ia32_chattr_class[] = { -#include -~0U -}; - -unsigned ia32_write_class[] = { -#include -~0U -}; - -unsigned ia32_read_class[] = { -#include -~0U -}; - -unsigned ia32_signal_class[] = { -#include -~0U -}; - -int ia32_classify_syscall(unsigned syscall) -{ - switch(syscall) { - case __NR_open: - return 2; - case __NR_openat: - return 3; - case __NR_socketcall: - return 4; - case __NR_execve: - return 5; - default: - return 1; - } -} diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c deleted file mode 100644 index c69552b..0000000 --- a/arch/ia64/ia32/binfmt_elf32.c +++ /dev/null @@ -1,245 +0,0 @@ -/* - * IA-32 ELF support. - * - * Copyright (C) 1999 Arun Sharma - * Copyright (C) 2001 Hewlett-Packard Co - * David Mosberger-Tang - * - * 06/16/00 A. Mallick initialize csd/ssd/tssd/cflg for ia32_load_state - * 04/13/01 D. Mosberger dropped saving tssd in ar.k1---it's not needed - * 09/14/01 D. 
Mosberger fixed memory management for gdt/tss page - */ - -#include -#include -#include - -#include -#include - -#include "ia32priv.h" -#include "elfcore32.h" - -/* Override some function names */ -#undef start_thread -#define start_thread ia32_start_thread -#define elf_format elf32_format -#define init_elf_binfmt init_elf32_binfmt -#define exit_elf_binfmt exit_elf32_binfmt - -#undef CLOCKS_PER_SEC -#define CLOCKS_PER_SEC IA32_CLOCKS_PER_SEC - -extern void ia64_elf32_init (struct pt_regs *regs); - -static void elf32_set_personality (void); - -static unsigned long __attribute ((unused)) -randomize_stack_top(unsigned long stack_top); - -#define setup_arg_pages(bprm,tos,exec) ia32_setup_arg_pages(bprm,exec) -#define elf_map elf32_map - -#undef SET_PERSONALITY -#define SET_PERSONALITY(ex) elf32_set_personality() - -#define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack)) - -/* Ugly but avoids duplication */ -#include "../../../fs/binfmt_elf.c" - -extern struct page *ia32_shared_page[]; -extern unsigned long *ia32_gdt; -extern struct page *ia32_gate_page; - -int -ia32_install_shared_page (struct vm_area_struct *vma, struct vm_fault *vmf) -{ - vmf->page = ia32_shared_page[smp_processor_id()]; - get_page(vmf->page); - return 0; -} - -int -ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf) -{ - vmf->page = ia32_gate_page; - get_page(vmf->page); - return 0; -} - - -static const struct vm_operations_struct ia32_shared_page_vm_ops = { - .fault = ia32_install_shared_page -}; - -static const struct vm_operations_struct ia32_gate_page_vm_ops = { - .fault = ia32_install_gate_page -}; - -void -ia64_elf32_init (struct pt_regs *regs) -{ - struct vm_area_struct *vma; - - /* - * Map GDT below 4GB, where the processor can find it. We need to map - * it with privilege level 3 because the IVE uses non-privileged accesses to these - * tables. IA-32 segmentation is used to protect against IA-32 accesses to them. - */ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); - if (vma) { - vma->vm_mm = current->mm; - vma->vm_start = IA32_GDT_OFFSET; - vma->vm_end = vma->vm_start + PAGE_SIZE; - vma->vm_page_prot = PAGE_SHARED; - vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED; - vma->vm_ops = &ia32_shared_page_vm_ops; - down_write(¤t->mm->mmap_sem); - { - if (insert_vm_struct(current->mm, vma)) { - kmem_cache_free(vm_area_cachep, vma); - up_write(¤t->mm->mmap_sem); - BUG(); - } - } - up_write(¤t->mm->mmap_sem); - } - - /* - * When user stack is not executable, push sigreturn code to stack makes - * segmentation fault raised when returning to kernel. So now sigreturn - * code is locked in specific gate page, which is pointed by pretcode - * when setup_frame_ia32 - */ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); - if (vma) { - vma->vm_mm = current->mm; - vma->vm_start = IA32_GATE_OFFSET; - vma->vm_end = vma->vm_start + PAGE_SIZE; - vma->vm_page_prot = PAGE_COPY_EXEC; - vma->vm_flags = VM_READ | VM_MAYREAD | VM_EXEC - | VM_MAYEXEC | VM_RESERVED; - vma->vm_ops = &ia32_gate_page_vm_ops; - down_write(¤t->mm->mmap_sem); - { - if (insert_vm_struct(current->mm, vma)) { - kmem_cache_free(vm_area_cachep, vma); - up_write(¤t->mm->mmap_sem); - BUG(); - } - } - up_write(¤t->mm->mmap_sem); - } - - /* - * Install LDT as anonymous memory. This gives us all-zero segment descriptors - * until a task modifies them via modify_ldt(). 
- */ - vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); - if (vma) { - vma->vm_mm = current->mm; - vma->vm_start = IA32_LDT_OFFSET; - vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE); - vma->vm_page_prot = PAGE_SHARED; - vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE; - down_write(¤t->mm->mmap_sem); - { - if (insert_vm_struct(current->mm, vma)) { - kmem_cache_free(vm_area_cachep, vma); - up_write(¤t->mm->mmap_sem); - BUG(); - } - } - up_write(¤t->mm->mmap_sem); - } - - ia64_psr(regs)->ac = 0; /* turn off alignment checking */ - regs->loadrs = 0; - /* - * According to the ABI %edx points to an `atexit' handler. Since we don't have - * one we'll set it to 0 and initialize all the other registers just to make - * things more deterministic, ala the i386 implementation. - */ - regs->r8 = 0; /* %eax */ - regs->r11 = 0; /* %ebx */ - regs->r9 = 0; /* %ecx */ - regs->r10 = 0; /* %edx */ - regs->r13 = 0; /* %ebp */ - regs->r14 = 0; /* %esi */ - regs->r15 = 0; /* %edi */ - - current->thread.eflag = IA32_EFLAG; - current->thread.fsr = IA32_FSR_DEFAULT; - current->thread.fcr = IA32_FCR_DEFAULT; - current->thread.fir = 0; - current->thread.fdr = 0; - - /* - * Setup GDTD. Note: GDTD is the descrambled version of the pseudo-descriptor - * format defined by Figure 3-11 "Pseudo-Descriptor Format" in the IA-32 - * architecture manual. Also note that the only fields that are not ignored are - * `base', `limit', 'G', `P' (must be 1) and `S' (must be 0). - */ - regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1, - 0, 0, 0, 1, 0, 0, 0)); - /* Setup the segment selectors */ - regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */ - regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */ - - ia32_load_segment_descriptors(current); - ia32_load_state(current); -} - -/* - * Undo the override of setup_arg_pages() without this ia32_setup_arg_pages() - * will suffer infinite self recursion. - */ -#undef setup_arg_pages - -int -ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack) -{ - int ret; - - ret = setup_arg_pages(bprm, IA32_STACK_TOP, executable_stack); - if (!ret) { - /* - * Can't do it in ia64_elf32_init(). Needs to be done before - * calls to elf32_map() - */ - current->thread.ppl = ia32_init_pp_list(); - } - - return ret; -} - -static void -elf32_set_personality (void) -{ - set_personality(PER_LINUX32); - current->thread.map_base = IA32_PAGE_OFFSET/3; -} - -static unsigned long -elf32_map(struct file *filep, unsigned long addr, struct elf_phdr *eppnt, - int prot, int type, unsigned long unused) -{ - unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK; - - return ia32_do_mmap(filep, (addr & IA32_PAGE_MASK), eppnt->p_filesz + pgoff, prot, type, - eppnt->p_offset - pgoff); -} - -#define cpu_uses_ia32el() (local_cpu_data->family > 0x1f) - -static int __init check_elf32_binfmt(void) -{ - if (cpu_uses_ia32el()) { - printk("Please use IA-32 EL for executing IA-32 binaries\n"); - unregister_binfmt(&elf_format); - } - return 0; -} - -module_init(check_elf32_binfmt) diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h deleted file mode 100644 index 6577257..0000000 --- a/arch/ia64/ia32/elfcore32.h +++ /dev/null @@ -1,148 +0,0 @@ -/* - * IA-32 ELF core dump support. 
- * - * Copyright (C) 2003 Arun Sharma - * - * Derived from the x86_64 version - */ -#ifndef _ELFCORE32_H_ -#define _ELFCORE32_H_ - -#include -#include - -/* Override elfcore.h */ -#define _LINUX_ELFCORE_H 1 -typedef unsigned int elf_greg_t; - -#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t)) -typedef elf_greg_t elf_gregset_t[ELF_NGREG]; - -typedef struct ia32_user_i387_struct elf_fpregset_t; -typedef struct ia32_user_fxsr_struct elf_fpxregset_t; - -struct elf_siginfo -{ - int si_signo; /* signal number */ - int si_code; /* extra code */ - int si_errno; /* errno */ -}; - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING -/* - * Hacks are here since types between compat_timeval (= pair of s32) and - * ia64-native timeval (= pair of s64) are not compatible, at least a file - * arch/ia64/ia32/../../../fs/binfmt_elf.c will get warnings from compiler on - * use of cputime_to_timeval(), which usually an alias of jiffies_to_timeval(). - */ -#define cputime_to_timeval(a,b) \ - do { (b)->tv_usec = 0; (b)->tv_sec = (a)/NSEC_PER_SEC; } while(0) -#else -#define jiffies_to_timeval(a,b) \ - do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; } while(0) -#endif - -struct elf_prstatus -{ - struct elf_siginfo pr_info; /* Info associated with signal */ - short pr_cursig; /* Current signal */ - unsigned int pr_sigpend; /* Set of pending signals */ - unsigned int pr_sighold; /* Set of held signals */ - pid_t pr_pid; - pid_t pr_ppid; - pid_t pr_pgrp; - pid_t pr_sid; - struct compat_timeval pr_utime; /* User time */ - struct compat_timeval pr_stime; /* System time */ - struct compat_timeval pr_cutime; /* Cumulative user time */ - struct compat_timeval pr_cstime; /* Cumulative system time */ - elf_gregset_t pr_reg; /* GP registers */ - int pr_fpvalid; /* True if math co-processor being used. 
*/ -}; - -#define ELF_PRARGSZ (80) /* Number of chars for args */ - -struct elf_prpsinfo -{ - char pr_state; /* numeric process state */ - char pr_sname; /* char for pr_state */ - char pr_zomb; /* zombie */ - char pr_nice; /* nice val */ - unsigned int pr_flag; /* flags */ - __u16 pr_uid; - __u16 pr_gid; - pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; - /* Lots missing */ - char pr_fname[16]; /* filename of executable */ - char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ -}; - -#define ELF_CORE_COPY_REGS(pr_reg, regs) \ - pr_reg[0] = regs->r11; \ - pr_reg[1] = regs->r9; \ - pr_reg[2] = regs->r10; \ - pr_reg[3] = regs->r14; \ - pr_reg[4] = regs->r15; \ - pr_reg[5] = regs->r13; \ - pr_reg[6] = regs->r8; \ - pr_reg[7] = regs->r16 & 0xffff; \ - pr_reg[8] = (regs->r16 >> 16) & 0xffff; \ - pr_reg[9] = (regs->r16 >> 32) & 0xffff; \ - pr_reg[10] = (regs->r16 >> 48) & 0xffff; \ - pr_reg[11] = regs->r1; \ - pr_reg[12] = regs->cr_iip; \ - pr_reg[13] = regs->r17 & 0xffff; \ - pr_reg[14] = ia64_getreg(_IA64_REG_AR_EFLAG); \ - pr_reg[15] = regs->r12; \ - pr_reg[16] = (regs->r17 >> 16) & 0xffff; - -static inline void elf_core_copy_regs(elf_gregset_t *elfregs, - struct pt_regs *regs) -{ - ELF_CORE_COPY_REGS((*elfregs), regs) -} - -static inline int elf_core_copy_task_regs(struct task_struct *t, - elf_gregset_t* elfregs) -{ - ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t)); - return 1; -} - -static inline int -elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu) -{ - struct ia32_user_i387_struct *fpstate = (void*)fpu; - mm_segment_t old_fs; - - if (!tsk_used_math(tsk)) - return 0; - - old_fs = get_fs(); - set_fs(KERNEL_DS); - save_ia32_fpstate(tsk, (struct ia32_user_i387_struct __user *) fpstate); - set_fs(old_fs); - - return 1; -} - -#define ELF_CORE_COPY_XFPREGS 1 -#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG -static inline int -elf_core_copy_task_xfpregs(struct task_struct *tsk, elf_fpxregset_t *xfpu) -{ - struct ia32_user_fxsr_struct *fpxstate = (void*) xfpu; - mm_segment_t old_fs; - - if (!tsk_used_math(tsk)) - return 0; - - old_fs = get_fs(); - set_fs(KERNEL_DS); - save_ia32_fpxstate(tsk, (struct ia32_user_fxsr_struct __user *) fpxstate); - set_fs(old_fs); - - return 1; -} - -#endif /* _ELFCORE32_H_ */ diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S deleted file mode 100644 index 2fd7479..0000000 --- a/arch/ia64/ia32/ia32_entry.S +++ /dev/null @@ -1,468 +0,0 @@ -#include -#include -#include -#include -#include - -#include "../kernel/minstate.h" - - /* - * execve() is special because in case of success, we need to - * setup a null register window frame (in case an IA-32 process - * is exec'ing an IA-64 program). 
- */ -ENTRY(ia32_execve) - .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3) - alloc loc1=ar.pfs,3,2,4,0 - mov loc0=rp - .body - zxt4 out0=in0 // filename - ;; // stop bit between alloc and call - zxt4 out1=in1 // argv - zxt4 out2=in2 // envp - add out3=16,sp // regs - br.call.sptk.few rp=sys32_execve -1: cmp.ge p6,p0=r8,r0 - mov ar.pfs=loc1 // restore ar.pfs - ;; -(p6) mov ar.pfs=r0 // clear ar.pfs in case of success - sxt4 r8=r8 // return 64-bit result - mov rp=loc0 - br.ret.sptk.few rp -END(ia32_execve) - -ENTRY(ia32_clone) - .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5) - alloc r16=ar.pfs,5,2,6,0 - DO_SAVE_SWITCH_STACK - mov loc0=rp - mov loc1=r16 // save ar.pfs across do_fork - .body - zxt4 out1=in1 // newsp - mov out3=16 // stacksize (compensates for 16-byte scratch area) - adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = ®s - mov out0=in0 // out0 = clone_flags - zxt4 out4=in2 // out4 = parent_tidptr - zxt4 out5=in4 // out5 = child_tidptr - br.call.sptk.many rp=do_fork -.ret0: .restore sp - adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack - mov ar.pfs=loc1 - mov rp=loc0 - br.ret.sptk.many rp -END(ia32_clone) - -GLOBAL_ENTRY(ia32_ret_from_clone) - PT_REGS_UNWIND_INFO(0) -{ /* - * Some versions of gas generate bad unwind info if the first instruction of a - * procedure doesn't go into the first slot of a bundle. This is a workaround. - */ - nop.m 0 - nop.i 0 - /* - * We need to call schedule_tail() to complete the scheduling process. - * Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the - * address of the previously executing task. - */ - br.call.sptk.many rp=ia64_invoke_schedule_tail -} -.ret1: - adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 - ;; - ld4 r2=[r2] - ;; - mov r8=0 - and r2=_TIF_SYSCALL_TRACEAUDIT,r2 - ;; - cmp.ne p6,p0=r2,r0 -(p6) br.cond.spnt .ia32_strace_check_retval - ;; // prevent RAW on r8 -END(ia32_ret_from_clone) - // fall through -GLOBAL_ENTRY(ia32_ret_from_syscall) - PT_REGS_UNWIND_INFO(0) - - cmp.ge p6,p7=r8,r0 // syscall executed successfully? - adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8 - ;; - alloc r3=ar.pfs,0,0,0,0 // drop the syscall argument frame - st8 [r2]=r8 // store return value in slot for r8 - br.cond.sptk.many ia64_leave_kernel -END(ia32_ret_from_syscall) - - // - // Invoke a system call, but do some tracing before and after the call. - // We MUST preserve the current register frame throughout this routine - // because some system calls (such as ia64_execve) directly - // manipulate ar.pfs. 
- // - // Input: - // r8 = syscall number - // b6 = syscall entry point - // -GLOBAL_ENTRY(ia32_trace_syscall) - PT_REGS_UNWIND_INFO(0) - mov r3=-38 - adds r2=IA64_PT_REGS_R8_OFFSET+16,sp - ;; - st8 [r2]=r3 // initialize return code to -ENOSYS - br.call.sptk.few rp=syscall_trace_enter // give parent a chance to catch syscall args - cmp.lt p6,p0=r8,r0 // check tracehook - adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8 - ;; -(p6) st8.spill [r2]=r8 // store return value in slot for r8 -(p6) br.spnt.few .ret4 -.ret2: // Need to reload arguments (they may be changed by the tracing process) - adds r2=IA64_PT_REGS_R1_OFFSET+16,sp // r2 = &pt_regs.r1 - adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13 - mov r15=IA32_NR_syscalls - ;; - ld4 r8=[r2],IA64_PT_REGS_R9_OFFSET-IA64_PT_REGS_R1_OFFSET - movl r16=ia32_syscall_table - ;; - ld4 r33=[r2],8 // r9 == ecx - ld4 r37=[r3],16 // r13 == ebp - cmp.ltu.unc p6,p7=r8,r15 - ;; - ld4 r34=[r2],8 // r10 == edx - ld4 r36=[r3],8 // r15 == edi -(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number - ;; - ld8 r16=[r16] - ;; - ld4 r32=[r2],8 // r11 == ebx - mov b6=r16 - ld4 r35=[r3],8 // r14 == esi - br.call.sptk.few rp=b6 // do the syscall -.ia32_strace_check_retval: - cmp.lt p6,p0=r8,r0 // syscall failed? - adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8 - ;; - st8.spill [r2]=r8 // store return value in slot for r8 - br.call.sptk.few rp=syscall_trace_leave // give parent a chance to catch return value -.ret4: alloc r2=ar.pfs,0,0,0,0 // drop the syscall argument frame - br.cond.sptk.many ia64_leave_kernel -END(ia32_trace_syscall) - -GLOBAL_ENTRY(sys32_vfork) - alloc r16=ar.pfs,2,2,4,0;; - mov out0=IA64_CLONE_VFORK|IA64_CLONE_VM|SIGCHLD // out0 = clone_flags - br.cond.sptk.few .fork1 // do the work -END(sys32_vfork) - -GLOBAL_ENTRY(sys32_fork) - .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) - alloc r16=ar.pfs,2,2,4,0 - mov out0=SIGCHLD // out0 = clone_flags - ;; -.fork1: - mov loc0=rp - mov loc1=r16 // save ar.pfs across do_fork - DO_SAVE_SWITCH_STACK - - .body - - mov out1=0 - mov out3=0 - adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = ®s - br.call.sptk.few rp=do_fork -.ret5: .restore sp - adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack - mov ar.pfs=loc1 - mov rp=loc0 - br.ret.sptk.many rp -END(sys32_fork) - - .rodata - .align 8 - .globl ia32_syscall_table -ia32_syscall_table: - data8 sys_ni_syscall /* 0 - old "setup(" system call*/ - data8 sys_exit - data8 sys32_fork - data8 sys_read - data8 sys_write - data8 compat_sys_open /* 5 */ - data8 sys_close - data8 sys32_waitpid - data8 sys_creat - data8 sys_link - data8 sys_unlink /* 10 */ - data8 ia32_execve - data8 sys_chdir - data8 compat_sys_time - data8 sys_mknod - data8 sys_chmod /* 15 */ - data8 sys_lchown /* 16-bit version */ - data8 sys_ni_syscall /* old break syscall holder */ - data8 sys_ni_syscall - data8 sys32_lseek - data8 sys_getpid /* 20 */ - data8 compat_sys_mount - data8 sys_oldumount - data8 sys_setuid /* 16-bit version */ - data8 sys_getuid /* 16-bit version */ - data8 compat_sys_stime /* 25 */ - data8 compat_sys_ptrace - data8 sys32_alarm - data8 sys_ni_syscall - data8 sys_pause - data8 compat_sys_utime /* 30 */ - data8 sys_ni_syscall /* old stty syscall holder */ - data8 sys_ni_syscall /* old gtty syscall holder */ - data8 sys_access - data8 sys_nice - data8 sys_ni_syscall /* 35 */ /* old ftime syscall holder */ - data8 sys_sync - data8 sys_kill - data8 sys_rename - data8 sys_mkdir - data8 sys_rmdir /* 40 */ - data8 
sys_dup - data8 sys_ia64_pipe - data8 compat_sys_times - data8 sys_ni_syscall /* old prof syscall holder */ - data8 sys32_brk /* 45 */ - data8 sys_setgid /* 16-bit version */ - data8 sys_getgid /* 16-bit version */ - data8 sys32_signal - data8 sys_geteuid /* 16-bit version */ - data8 sys_getegid /* 16-bit version */ /* 50 */ - data8 sys_acct - data8 sys_umount /* recycled never used phys( */ - data8 sys_ni_syscall /* old lock syscall holder */ - data8 compat_sys_ioctl - data8 compat_sys_fcntl /* 55 */ - data8 sys_ni_syscall /* old mpx syscall holder */ - data8 sys_setpgid - data8 sys_ni_syscall /* old ulimit syscall holder */ - data8 sys_ni_syscall - data8 sys_umask /* 60 */ - data8 sys_chroot - data8 compat_sys_ustat - data8 sys_dup2 - data8 sys_getppid - data8 sys_getpgrp /* 65 */ - data8 sys_setsid - data8 sys32_sigaction - data8 sys_ni_syscall - data8 sys_ni_syscall - data8 sys_setreuid /* 16-bit version */ /* 70 */ - data8 sys_setregid /* 16-bit version */ - data8 sys32_sigsuspend - data8 compat_sys_sigpending - data8 sys_sethostname - data8 compat_sys_setrlimit /* 75 */ - data8 compat_sys_old_getrlimit - data8 compat_sys_getrusage - data8 compat_sys_gettimeofday - data8 compat_sys_settimeofday - data8 sys32_getgroups16 /* 80 */ - data8 sys32_setgroups16 - data8 sys32_old_select - data8 sys_symlink - data8 sys_ni_syscall - data8 sys_readlink /* 85 */ - data8 sys_uselib - data8 sys_swapon - data8 sys_reboot - data8 compat_sys_old_readdir - data8 sys32_mmap /* 90 */ - data8 sys32_munmap - data8 sys_truncate - data8 sys_ftruncate - data8 sys_fchmod - data8 sys_fchown /* 16-bit version */ /* 95 */ - data8 sys_getpriority - data8 sys_setpriority - data8 sys_ni_syscall /* old profil syscall holder */ - data8 compat_sys_statfs - data8 compat_sys_fstatfs /* 100 */ - data8 sys_ni_syscall /* ioperm */ - data8 compat_sys_socketcall - data8 sys_syslog - data8 compat_sys_setitimer - data8 compat_sys_getitimer /* 105 */ - data8 compat_sys_newstat - data8 compat_sys_newlstat - data8 compat_sys_newfstat - data8 sys_ni_syscall - data8 sys_ni_syscall /* iopl */ /* 110 */ - data8 sys_vhangup - data8 sys_ni_syscall /* used to be sys_idle */ - data8 sys_ni_syscall - data8 compat_sys_wait4 - data8 sys_swapoff /* 115 */ - data8 compat_sys_sysinfo - data8 sys32_ipc - data8 sys_fsync - data8 sys32_sigreturn - data8 ia32_clone /* 120 */ - data8 sys_setdomainname - data8 sys32_newuname - data8 sys32_modify_ldt - data8 compat_sys_adjtimex - data8 sys32_mprotect /* 125 */ - data8 compat_sys_sigprocmask - data8 sys_ni_syscall /* create_module */ - data8 sys_ni_syscall /* init_module */ - data8 sys_ni_syscall /* delete_module */ - data8 sys_ni_syscall /* get_kernel_syms */ /* 130 */ - data8 sys32_quotactl - data8 sys_getpgid - data8 sys_fchdir - data8 sys_ni_syscall /* sys_bdflush */ - data8 sys_sysfs /* 135 */ - data8 sys32_personality - data8 sys_ni_syscall /* for afs_syscall */ - data8 sys_setfsuid /* 16-bit version */ - data8 sys_setfsgid /* 16-bit version */ - data8 sys_llseek /* 140 */ - data8 compat_sys_getdents - data8 compat_sys_select - data8 sys_flock - data8 sys32_msync - data8 compat_sys_readv /* 145 */ - data8 compat_sys_writev - data8 sys_getsid - data8 sys_fdatasync - data8 compat_sys_sysctl - data8 sys_mlock /* 150 */ - data8 sys_munlock - data8 sys_mlockall - data8 sys_munlockall - data8 sys_sched_setparam - data8 sys_sched_getparam /* 155 */ - data8 sys_sched_setscheduler - data8 sys_sched_getscheduler - data8 sys_sched_yield - data8 sys_sched_get_priority_max - data8 sys_sched_get_priority_min 
/* 160 */ - data8 sys32_sched_rr_get_interval - data8 compat_sys_nanosleep - data8 sys32_mremap - data8 sys_setresuid /* 16-bit version */ - data8 sys32_getresuid16 /* 16-bit version */ /* 165 */ - data8 sys_ni_syscall /* vm86 */ - data8 sys_ni_syscall /* sys_query_module */ - data8 sys_poll - data8 sys_ni_syscall /* nfsservctl */ - data8 sys_setresgid /* 170 */ - data8 sys32_getresgid16 - data8 sys_prctl - data8 sys32_rt_sigreturn - data8 sys32_rt_sigaction - data8 sys32_rt_sigprocmask /* 175 */ - data8 sys_rt_sigpending - data8 compat_sys_rt_sigtimedwait - data8 sys32_rt_sigqueueinfo - data8 compat_sys_rt_sigsuspend - data8 sys32_pread /* 180 */ - data8 sys32_pwrite - data8 sys_chown /* 16-bit version */ - data8 sys_getcwd - data8 sys_capget - data8 sys_capset /* 185 */ - data8 sys32_sigaltstack - data8 sys32_sendfile - data8 sys_ni_syscall /* streams1 */ - data8 sys_ni_syscall /* streams2 */ - data8 sys32_vfork /* 190 */ - data8 compat_sys_getrlimit - data8 sys32_mmap2 - data8 sys32_truncate64 - data8 sys32_ftruncate64 - data8 sys32_stat64 /* 195 */ - data8 sys32_lstat64 - data8 sys32_fstat64 - data8 sys_lchown - data8 sys_getuid - data8 sys_getgid /* 200 */ - data8 sys_geteuid - data8 sys_getegid - data8 sys_setreuid - data8 sys_setregid - data8 sys_getgroups /* 205 */ - data8 sys_setgroups - data8 sys_fchown - data8 sys_setresuid - data8 sys_getresuid - data8 sys_setresgid /* 210 */ - data8 sys_getresgid - data8 sys_chown - data8 sys_setuid - data8 sys_setgid - data8 sys_setfsuid /* 215 */ - data8 sys_setfsgid - data8 sys_pivot_root - data8 sys_mincore - data8 sys_madvise - data8 compat_sys_getdents64 /* 220 */ - data8 compat_sys_fcntl64 - data8 sys_ni_syscall /* reserved for TUX */ - data8 sys_ni_syscall /* reserved for Security */ - data8 sys_gettid - data8 sys_readahead /* 225 */ - data8 sys_setxattr - data8 sys_lsetxattr - data8 sys_fsetxattr - data8 sys_getxattr - data8 sys_lgetxattr /* 230 */ - data8 sys_fgetxattr - data8 sys_listxattr - data8 sys_llistxattr - data8 sys_flistxattr - data8 sys_removexattr /* 235 */ - data8 sys_lremovexattr - data8 sys_fremovexattr - data8 sys_tkill - data8 sys_sendfile64 - data8 compat_sys_futex /* 240 */ - data8 compat_sys_sched_setaffinity - data8 compat_sys_sched_getaffinity - data8 sys32_set_thread_area - data8 sys32_get_thread_area - data8 compat_sys_io_setup /* 245 */ - data8 sys_io_destroy - data8 compat_sys_io_getevents - data8 compat_sys_io_submit - data8 sys_io_cancel - data8 sys_fadvise64 /* 250 */ - data8 sys_ni_syscall - data8 sys_exit_group - data8 sys_lookup_dcookie - data8 sys_epoll_create - data8 sys32_epoll_ctl /* 255 */ - data8 sys32_epoll_wait - data8 sys_remap_file_pages - data8 sys_set_tid_address - data8 compat_sys_timer_create - data8 compat_sys_timer_settime /* 260 */ - data8 compat_sys_timer_gettime - data8 sys_timer_getoverrun - data8 sys_timer_delete - data8 compat_sys_clock_settime - data8 compat_sys_clock_gettime /* 265 */ - data8 compat_sys_clock_getres - data8 compat_sys_clock_nanosleep - data8 compat_sys_statfs64 - data8 compat_sys_fstatfs64 - data8 sys_tgkill /* 270 */ - data8 compat_sys_utimes - data8 sys32_fadvise64_64 - data8 sys_ni_syscall - data8 sys_ni_syscall - data8 sys_ni_syscall /* 275 */ - data8 sys_ni_syscall - data8 compat_sys_mq_open - data8 sys_mq_unlink - data8 compat_sys_mq_timedsend - data8 compat_sys_mq_timedreceive /* 280 */ - data8 compat_sys_mq_notify - data8 compat_sys_mq_getsetattr - data8 sys_ni_syscall /* reserved for kexec */ - data8 compat_sys_waitid - - // guard against failures to 
increase IA32_NR_syscalls - .org ia32_syscall_table + 8*IA32_NR_syscalls diff --git a/arch/ia64/ia32/ia32_ldt.c b/arch/ia64/ia32/ia32_ldt.c deleted file mode 100644 index 16d51c1..0000000 --- a/arch/ia64/ia32/ia32_ldt.c +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (C) 2001, 2004 Hewlett-Packard Co - * David Mosberger-Tang - * - * Adapted from arch/i386/kernel/ldt.c - */ - -#include -#include -#include -#include -#include -#include - -#include - -#include "ia32priv.h" - -/* - * read_ldt() is not really atomic - this is not a problem since synchronization of reads - * and writes done to the LDT has to be assured by user-space anyway. Writes are atomic, - * to protect the security checks done on new descriptors. - */ -static int -read_ldt (void __user *ptr, unsigned long bytecount) -{ - unsigned long bytes_left, n; - char __user *src, *dst; - char buf[256]; /* temporary buffer (don't overflow kernel stack!) */ - - if (bytecount > IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE) - bytecount = IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE; - - bytes_left = bytecount; - - src = (void __user *) IA32_LDT_OFFSET; - dst = ptr; - - while (bytes_left) { - n = sizeof(buf); - if (n > bytes_left) - n = bytes_left; - - /* - * We know we're reading valid memory, but we still must guard against - * running out of memory. - */ - if (__copy_from_user(buf, src, n)) - return -EFAULT; - - if (copy_to_user(dst, buf, n)) - return -EFAULT; - - src += n; - dst += n; - bytes_left -= n; - } - return bytecount; -} - -static int -read_default_ldt (void __user * ptr, unsigned long bytecount) -{ - unsigned long size; - int err; - - /* XXX fix me: should return equivalent of default_ldt[0] */ - err = 0; - size = 8; - if (size > bytecount) - size = bytecount; - - err = size; - if (clear_user(ptr, size)) - err = -EFAULT; - - return err; -} - -static int -write_ldt (void __user * ptr, unsigned long bytecount, int oldmode) -{ - struct ia32_user_desc ldt_info; - __u64 entry; - int ret; - - if (bytecount != sizeof(ldt_info)) - return -EINVAL; - if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info))) - return -EFAULT; - - if (ldt_info.entry_number >= IA32_LDT_ENTRIES) - return -EINVAL; - if (ldt_info.contents == 3) { - if (oldmode) - return -EINVAL; - if (ldt_info.seg_not_present == 0) - return -EINVAL; - } - - if (ldt_info.base_addr == 0 && ldt_info.limit == 0 - && (oldmode || (ldt_info.contents == 0 && ldt_info.read_exec_only == 1 - && ldt_info.seg_32bit == 0 && ldt_info.limit_in_pages == 0 - && ldt_info.seg_not_present == 1 && ldt_info.useable == 0))) - /* allow LDTs to be cleared by the user */ - entry = 0; - else - /* we must set the "Accessed" bit as IVE doesn't emulate it */ - entry = IA32_SEG_DESCRIPTOR(ldt_info.base_addr, ldt_info.limit, - (((ldt_info.read_exec_only ^ 1) << 1) - | (ldt_info.contents << 2)) | 1, - 1, 3, ldt_info.seg_not_present ^ 1, - (oldmode ? 0 : ldt_info.useable), - ldt_info.seg_32bit, - ldt_info.limit_in_pages); - /* - * Install the new entry. We know we're accessing valid (mapped) user-level - * memory, but we still need to guard against out-of-memory, hence we must use - * put_user(). 
- */ - ret = __put_user(entry, (__u64 __user *) IA32_LDT_OFFSET + ldt_info.entry_number); - ia32_load_segment_descriptors(current); - return ret; -} - -asmlinkage int -sys32_modify_ldt (int func, unsigned int ptr, unsigned int bytecount) -{ - int ret = -ENOSYS; - - switch (func) { - case 0: - ret = read_ldt(compat_ptr(ptr), bytecount); - break; - case 1: - ret = write_ldt(compat_ptr(ptr), bytecount, 1); - break; - case 2: - ret = read_default_ldt(compat_ptr(ptr), bytecount); - break; - case 0x11: - ret = write_ldt(compat_ptr(ptr), bytecount, 0); - break; - } - return ret; -} diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c deleted file mode 100644 index b763ca1..0000000 --- a/arch/ia64/ia32/ia32_signal.c +++ /dev/null @@ -1,1010 +0,0 @@ -/* - * IA32 Architecture-specific signal handling support. - * - * Copyright (C) 1999, 2001-2002, 2005 Hewlett-Packard Co - * David Mosberger-Tang - * Copyright (C) 1999 Arun Sharma - * Copyright (C) 2000 VA Linux Co - * Copyright (C) 2000 Don Dugger - * - * Derived from i386 and Alpha versions. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "ia32priv.h" - -#include "../kernel/sigframe.h" - -#define A(__x) ((unsigned long)(__x)) - -#define DEBUG_SIG 0 -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - -#define __IA32_NR_sigreturn 119 -#define __IA32_NR_rt_sigreturn 173 - -struct sigframe_ia32 -{ - int pretcode; - int sig; - struct sigcontext_ia32 sc; - struct _fpstate_ia32 fpstate; - unsigned int extramask[_COMPAT_NSIG_WORDS-1]; - char retcode[8]; -}; - -struct rt_sigframe_ia32 -{ - int pretcode; - int sig; - int pinfo; - int puc; - compat_siginfo_t info; - struct ucontext_ia32 uc; - struct _fpstate_ia32 fpstate; - char retcode[8]; -}; - -int -copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from) -{ - unsigned long tmp; - int err; - - if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) - return -EFAULT; - - err = __get_user(to->si_signo, &from->si_signo); - err |= __get_user(to->si_errno, &from->si_errno); - err |= __get_user(to->si_code, &from->si_code); - - if (to->si_code < 0) - err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); - else { - switch (to->si_code >> 16) { - case __SI_CHLD >> 16: - err |= __get_user(to->si_utime, &from->si_utime); - err |= __get_user(to->si_stime, &from->si_stime); - err |= __get_user(to->si_status, &from->si_status); - default: - err |= __get_user(to->si_pid, &from->si_pid); - err |= __get_user(to->si_uid, &from->si_uid); - break; - case __SI_FAULT >> 16: - err |= __get_user(tmp, &from->si_addr); - to->si_addr = (void __user *) tmp; - break; - case __SI_POLL >> 16: - err |= __get_user(to->si_band, &from->si_band); - err |= __get_user(to->si_fd, &from->si_fd); - break; - case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ - case __SI_MESGQ >> 16: - err |= __get_user(to->si_pid, &from->si_pid); - err |= __get_user(to->si_uid, &from->si_uid); - err |= __get_user(to->si_int, &from->si_int); - break; - } - } - return err; -} - -int -copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from) -{ - unsigned int addr; - int err; - - if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) - return -EFAULT; - - /* If you change siginfo_t structure, please be sure - this code is fixed accordingly. 
- It should never copy any pad contained in the structure - to avoid security leaks, but must copy the generic - 3 ints plus the relevant union member. - This routine must convert siginfo from 64bit to 32bit as well - at the same time. */ - err = __put_user(from->si_signo, &to->si_signo); - err |= __put_user(from->si_errno, &to->si_errno); - err |= __put_user((short)from->si_code, &to->si_code); - if (from->si_code < 0) - err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); - else { - switch (from->si_code >> 16) { - case __SI_CHLD >> 16: - err |= __put_user(from->si_utime, &to->si_utime); - err |= __put_user(from->si_stime, &to->si_stime); - err |= __put_user(from->si_status, &to->si_status); - default: - err |= __put_user(from->si_pid, &to->si_pid); - err |= __put_user(from->si_uid, &to->si_uid); - break; - case __SI_FAULT >> 16: - /* avoid type-checking warnings by copying _pad[0] in lieu of si_addr... */ - err |= __put_user(from->_sifields._pad[0], &to->si_addr); - break; - case __SI_POLL >> 16: - err |= __put_user(from->si_band, &to->si_band); - err |= __put_user(from->si_fd, &to->si_fd); - break; - case __SI_TIMER >> 16: - err |= __put_user(from->si_tid, &to->si_tid); - err |= __put_user(from->si_overrun, &to->si_overrun); - addr = (unsigned long) from->si_ptr; - err |= __put_user(addr, &to->si_ptr); - break; - case __SI_RT >> 16: /* Not generated by the kernel as of now. */ - case __SI_MESGQ >> 16: - err |= __put_user(from->si_uid, &to->si_uid); - err |= __put_user(from->si_pid, &to->si_pid); - addr = (unsigned long) from->si_ptr; - err |= __put_user(addr, &to->si_ptr); - break; - } - } - return err; -} - - -/* - * SAVE and RESTORE of ia32 fpstate info, from ia64 current state - * Used in exception handler to pass the fpstate to the user, and restore - * the fpstate while returning from the exception handler. - * - * fpstate info and their mapping to IA64 regs: - * fpstate REG(BITS) Attribute Comments - * cw ar.fcr(0:12) with bits 7 and 6 not used - * sw ar.fsr(0:15) - * tag ar.fsr(16:31) with odd numbered bits not used - * (read returns 0, writes ignored) - * ipoff ar.fir(0:31) - * cssel ar.fir(32:47) - * dataoff ar.fdr(0:31) - * datasel ar.fdr(32:47) - * - * _st[(0+TOS)%8] f8 - * _st[(1+TOS)%8] f9 - * _st[(2+TOS)%8] f10 - * _st[(3+TOS)%8] f11 (f8..f11 from ptregs) - * : : : (f12..f15 from live reg) - * : : : - * _st[(7+TOS)%8] f15 TOS=sw.top(bits11:13) - * - * status Same as sw RO - * magic 0 as X86_FXSR_MAGIC in ia32 - * mxcsr Bits(7:15)=ar.fcr(39:47) - * Bits(0:5) =ar.fsr(32:37) with bit 6 reserved - * _xmm[0..7] f16..f31 (live registers) - * with _xmm[0] - * Bit(64:127)=f17(0:63) - * Bit(0:63)=f16(0:63) - * All other fields unused... - */ - -static int -save_ia32_fpstate_live (struct _fpstate_ia32 __user *save) -{ - struct task_struct *tsk = current; - struct pt_regs *ptp; - struct _fpreg_ia32 *fpregp; - char buf[32]; - unsigned long fsr, fcr, fir, fdr; - unsigned long new_fsr; - unsigned long num128[2]; - unsigned long mxcsr=0; - int fp_tos, fr8_st_map; - - if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) - return -EFAULT; - - /* Read in fsr, fcr, fir, fdr and copy onto fpstate */ - fsr = ia64_getreg(_IA64_REG_AR_FSR); - fcr = ia64_getreg(_IA64_REG_AR_FCR); - fir = ia64_getreg(_IA64_REG_AR_FIR); - fdr = ia64_getreg(_IA64_REG_AR_FDR); - - /* - * We need to clear the exception state before calling the signal handler. Clear - * the bits 15, bits 0-7 in fp status word. Similar to the functionality of fnclex - * instruction. 
- */ - new_fsr = fsr & ~0x80ff; - ia64_setreg(_IA64_REG_AR_FSR, new_fsr); - - __put_user(fcr & 0xffff, &save->cw); - __put_user(fsr & 0xffff, &save->sw); - __put_user((fsr>>16) & 0xffff, &save->tag); - __put_user(fir, &save->ipoff); - __put_user((fir>>32) & 0xffff, &save->cssel); - __put_user(fdr, &save->dataoff); - __put_user((fdr>>32) & 0xffff, &save->datasel); - __put_user(fsr & 0xffff, &save->status); - - mxcsr = ((fcr>>32) & 0xff80) | ((fsr>>32) & 0x3f); - __put_user(mxcsr & 0xffff, &save->mxcsr); - __put_user( 0, &save->magic); //#define X86_FXSR_MAGIC 0x0000 - - /* - * save f8..f11 from pt_regs - * save f12..f15 from live register set - */ - /* - * Find the location where f8 has to go in fp reg stack. This depends on - * TOP(11:13) field of sw. Other f reg continue sequentially from where f8 maps - * to. - */ - fp_tos = (fsr>>11)&0x7; - fr8_st_map = (8-fp_tos)&0x7; - ptp = task_pt_regs(tsk); - fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15); - ia64f2ia32f(fpregp, &ptp->f8); - copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - ia64f2ia32f(fpregp, &ptp->f9); - copy_to_user(&save->_st[(1+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - ia64f2ia32f(fpregp, &ptp->f10); - copy_to_user(&save->_st[(2+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - ia64f2ia32f(fpregp, &ptp->f11); - copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - - ia64_stfe(fpregp, 12); - copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - ia64_stfe(fpregp, 13); - copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - ia64_stfe(fpregp, 14); - copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - ia64_stfe(fpregp, 15); - copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); - - ia64_stf8(&num128[0], 16); - ia64_stf8(&num128[1], 17); - copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32)); - - ia64_stf8(&num128[0], 18); - ia64_stf8(&num128[1], 19); - copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32)); - - ia64_stf8(&num128[0], 20); - ia64_stf8(&num128[1], 21); - copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32)); - - ia64_stf8(&num128[0], 22); - ia64_stf8(&num128[1], 23); - copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32)); - - ia64_stf8(&num128[0], 24); - ia64_stf8(&num128[1], 25); - copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32)); - - ia64_stf8(&num128[0], 26); - ia64_stf8(&num128[1], 27); - copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32)); - - ia64_stf8(&num128[0], 28); - ia64_stf8(&num128[1], 29); - copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32)); - - ia64_stf8(&num128[0], 30); - ia64_stf8(&num128[1], 31); - copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32)); - return 0; -} - -static int -restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save) -{ - struct task_struct *tsk = current; - struct pt_regs *ptp; - unsigned int lo, hi; - unsigned long num128[2]; - unsigned long num64, mxcsr; - struct _fpreg_ia32 *fpregp; - char buf[32]; - unsigned long fsr, fcr, fir, fdr; - int fp_tos, fr8_st_map; - - if (!access_ok(VERIFY_READ, save, sizeof(*save))) - return(-EFAULT); - - /* - * Updating fsr, fcr, fir, fdr. - * Just a bit more complicated than save. 
- * - Need to make sure that we don't write any value other than the - * specific fpstate info - * - Need to make sure that the untouched part of frs, fdr, fir, fcr - * should remain same while writing. - * So, we do a read, change specific fields and write. - */ - fsr = ia64_getreg(_IA64_REG_AR_FSR); - fcr = ia64_getreg(_IA64_REG_AR_FCR); - fir = ia64_getreg(_IA64_REG_AR_FIR); - fdr = ia64_getreg(_IA64_REG_AR_FDR); - - __get_user(mxcsr, (unsigned int __user *)&save->mxcsr); - /* setting bits 0..5 8..12 with cw and 39..47 from mxcsr */ - __get_user(lo, (unsigned int __user *)&save->cw); - num64 = mxcsr & 0xff10; - num64 = (num64 << 32) | (lo & 0x1f3f); - fcr = (fcr & (~0xff1000001f3fUL)) | num64; - - /* setting bits 0..31 with sw and tag and 32..37 from mxcsr */ - __get_user(lo, (unsigned int __user *)&save->sw); - /* set bits 15,7 (fsw.b, fsw.es) to reflect the current error status */ - if ( !(lo & 0x7f) ) - lo &= (~0x8080); - __get_user(hi, (unsigned int __user *)&save->tag); - num64 = mxcsr & 0x3f; - num64 = (num64 << 16) | (hi & 0xffff); - num64 = (num64 << 16) | (lo & 0xffff); - fsr = (fsr & (~0x3fffffffffUL)) | num64; - - /* setting bits 0..47 with cssel and ipoff */ - __get_user(lo, (unsigned int __user *)&save->ipoff); - __get_user(hi, (unsigned int __user *)&save->cssel); - num64 = hi & 0xffff; - num64 = (num64 << 32) | lo; - fir = (fir & (~0xffffffffffffUL)) | num64; - - /* setting bits 0..47 with datasel and dataoff */ - __get_user(lo, (unsigned int __user *)&save->dataoff); - __get_user(hi, (unsigned int __user *)&save->datasel); - num64 = hi & 0xffff; - num64 = (num64 << 32) | lo; - fdr = (fdr & (~0xffffffffffffUL)) | num64; - - ia64_setreg(_IA64_REG_AR_FSR, fsr); - ia64_setreg(_IA64_REG_AR_FCR, fcr); - ia64_setreg(_IA64_REG_AR_FIR, fir); - ia64_setreg(_IA64_REG_AR_FDR, fdr); - - /* - * restore f8..f11 onto pt_regs - * restore f12..f15 onto live registers - */ - /* - * Find the location where f8 has to go in fp reg stack. This depends on - * TOP(11:13) field of sw. Other f reg continue sequentially from where f8 maps - * to. 
- */ - fp_tos = (fsr>>11)&0x7; - fr8_st_map = (8-fp_tos)&0x7; - fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15); - - ptp = task_pt_regs(tsk); - copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - ia32f2ia64f(&ptp->f8, fpregp); - copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - ia32f2ia64f(&ptp->f9, fpregp); - copy_from_user(fpregp, &save->_st[(2+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - ia32f2ia64f(&ptp->f10, fpregp); - copy_from_user(fpregp, &save->_st[(3+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - ia32f2ia64f(&ptp->f11, fpregp); - - copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - ia64_ldfe(12, fpregp); - copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - ia64_ldfe(13, fpregp); - copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - ia64_ldfe(14, fpregp); - copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); - ia64_ldfe(15, fpregp); - - copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32)); - ia64_ldf8(16, &num128[0]); - ia64_ldf8(17, &num128[1]); - - copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32)); - ia64_ldf8(18, &num128[0]); - ia64_ldf8(19, &num128[1]); - - copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32)); - ia64_ldf8(20, &num128[0]); - ia64_ldf8(21, &num128[1]); - - copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32)); - ia64_ldf8(22, &num128[0]); - ia64_ldf8(23, &num128[1]); - - copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32)); - ia64_ldf8(24, &num128[0]); - ia64_ldf8(25, &num128[1]); - - copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32)); - ia64_ldf8(26, &num128[0]); - ia64_ldf8(27, &num128[1]); - - copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32)); - ia64_ldf8(28, &num128[0]); - ia64_ldf8(29, &num128[1]); - - copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32)); - ia64_ldf8(30, &num128[0]); - ia64_ldf8(31, &num128[1]); - return 0; -} - -static inline void -sigact_set_handler (struct k_sigaction *sa, unsigned int handler, unsigned int restorer) -{ - if (handler + 1 <= 2) - /* SIG_DFL, SIG_IGN, or SIG_ERR: must sign-extend to 64-bits */ - sa->sa.sa_handler = (__sighandler_t) A((int) handler); - else - sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler); -} - -asmlinkage long -sys32_sigsuspend (int history0, int history1, old_sigset_t mask) -{ - mask &= _BLOCKABLE; - spin_lock_irq(¤t->sighand->siglock); - current->saved_sigmask = current->blocked; - siginitset(¤t->blocked, mask); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - current->state = TASK_INTERRUPTIBLE; - schedule(); - set_restore_sigmask(); - return -ERESTARTNOHAND; -} - -asmlinkage long -sys32_signal (int sig, unsigned int handler) -{ - struct k_sigaction new_sa, old_sa; - int ret; - - sigact_set_handler(&new_sa, handler, 0); - new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; - sigemptyset(&new_sa.sa.sa_mask); - - ret = do_sigaction(sig, &new_sa, &old_sa); - - return ret ? ret : IA32_SA_HANDLER(&old_sa); -} - -asmlinkage long -sys32_rt_sigaction (int sig, struct sigaction32 __user *act, - struct sigaction32 __user *oact, unsigned int sigsetsize) -{ - struct k_sigaction new_ka, old_ka; - unsigned int handler, restorer; - int ret; - - /* XXX: Don't preclude handling different sized sigset_t's. 
*/ - if (sigsetsize != sizeof(compat_sigset_t)) - return -EINVAL; - - if (act) { - ret = get_user(handler, &act->sa_handler); - ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); - ret |= get_user(restorer, &act->sa_restorer); - ret |= copy_from_user(&new_ka.sa.sa_mask, &act->sa_mask, sizeof(compat_sigset_t)); - if (ret) - return -EFAULT; - - sigact_set_handler(&new_ka, handler, restorer); - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); - - if (!ret && oact) { - ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler); - ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); - ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer); - ret |= copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask, sizeof(compat_sigset_t)); - } - return ret; -} - - -asmlinkage long -sys32_rt_sigprocmask (int how, compat_sigset_t __user *set, compat_sigset_t __user *oset, - unsigned int sigsetsize) -{ - mm_segment_t old_fs = get_fs(); - sigset_t s; - long ret; - - if (sigsetsize > sizeof(s)) - return -EINVAL; - - if (set) { - memset(&s, 0, sizeof(s)); - if (copy_from_user(&s.sig, set, sigsetsize)) - return -EFAULT; - } - set_fs(KERNEL_DS); - ret = sys_rt_sigprocmask(how, - set ? (sigset_t __user *) &s : NULL, - oset ? (sigset_t __user *) &s : NULL, sizeof(s)); - set_fs(old_fs); - if (ret) - return ret; - if (oset) { - if (copy_to_user(oset, &s.sig, sigsetsize)) - return -EFAULT; - } - return 0; -} - -asmlinkage long -sys32_rt_sigqueueinfo (int pid, int sig, compat_siginfo_t __user *uinfo) -{ - mm_segment_t old_fs = get_fs(); - siginfo_t info; - int ret; - - if (copy_siginfo_from_user32(&info, uinfo)) - return -EFAULT; - set_fs(KERNEL_DS); - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); - set_fs(old_fs); - return ret; -} - -asmlinkage long -sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact) -{ - struct k_sigaction new_ka, old_ka; - unsigned int handler, restorer; - int ret; - - if (act) { - compat_old_sigset_t mask; - - ret = get_user(handler, &act->sa_handler); - ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); - ret |= get_user(restorer, &act->sa_restorer); - ret |= get_user(mask, &act->sa_mask); - if (ret) - return ret; - - sigact_set_handler(&new_ka, handler, restorer); - siginitset(&new_ka.sa.sa_mask, mask); - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); - - if (!ret && oact) { - ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler); - ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); - ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer); - ret |= put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); - } - - return ret; -} - -static int -setup_sigcontext_ia32 (struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __user *fpstate, - struct pt_regs *regs, unsigned long mask) -{ - int err = 0; - unsigned long flag; - - if (!access_ok(VERIFY_WRITE, sc, sizeof(*sc))) - return -EFAULT; - - err |= __put_user((regs->r16 >> 32) & 0xffff, (unsigned int __user *)&sc->fs); - err |= __put_user((regs->r16 >> 48) & 0xffff, (unsigned int __user *)&sc->gs); - err |= __put_user((regs->r16 >> 16) & 0xffff, (unsigned int __user *)&sc->es); - err |= __put_user(regs->r16 & 0xffff, (unsigned int __user *)&sc->ds); - err |= __put_user(regs->r15, &sc->edi); - err |= __put_user(regs->r14, &sc->esi); - err |= __put_user(regs->r13, &sc->ebp); - err |= __put_user(regs->r12, &sc->esp); - err |= __put_user(regs->r11, &sc->ebx); - err |= __put_user(regs->r10, &sc->edx); - err |= __put_user(regs->r9, &sc->ecx); - err |= __put_user(regs->r8, &sc->eax); -#if 0 - err |= __put_user(current->tss.trap_no, &sc->trapno); - err |= __put_user(current->tss.error_code, &sc->err); -#endif - err |= __put_user(regs->cr_iip, &sc->eip); - err |= __put_user(regs->r17 & 0xffff, (unsigned int __user *)&sc->cs); - /* - * `eflags' is in an ar register for this context - */ - flag = ia64_getreg(_IA64_REG_AR_EFLAG); - err |= __put_user((unsigned int)flag, &sc->eflags); - err |= __put_user(regs->r12, &sc->esp_at_signal); - err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int __user *)&sc->ss); - - if ( save_ia32_fpstate_live(fpstate) < 0 ) - err = -EFAULT; - else - err |= __put_user((u32)(u64)fpstate, &sc->fpstate); - -#if 0 - tmp = save_i387(fpstate); - if (tmp < 0) - err = 1; - else - err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate); - - /* non-iBCS2 extensions.. 
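setup_sigcontext_ia32() above relies on the convention that the four IA-32 data selectors live packed in one 64-bit register image (ds in bits 0..15, es in 16..31, fs in 32..47, gs in 48..63), which restore_sigcontext_ia32() later unpacks with the copyseg_* helpers. A small sketch of that packing, with arbitrary selector values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned short ds = 0x2b, es = 0x2b, fs = 0x33, gs = 0x3b;	/* example selectors */
	uint64_t r16 = 0;

	r16 |= (uint64_t) ds;			/* copyseg_ds */
	r16 |= (uint64_t) es << 16;		/* copyseg_es */
	r16 |= (uint64_t) fs << 32;		/* copyseg_fs */
	r16 |= (uint64_t) gs << 48;		/* copyseg_gs */

	assert((r16 & 0xffff) == ds);		/* what the save side stores in sc->ds */
	assert(((r16 >> 16) & 0xffff) == es);	/* sc->es */
	assert(((r16 >> 32) & 0xffff) == fs);	/* sc->fs */
	assert(((r16 >> 48) & 0xffff) == gs);	/* sc->gs */
	printf("r16 image = 0x%016llx\n", (unsigned long long) r16);
	return 0;
}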
*/ -#endif - err |= __put_user(mask, &sc->oldmask); -#if 0 - err |= __put_user(current->tss.cr2, &sc->cr2); -#endif - return err; -} - -static int -restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 __user *sc, int *peax) -{ - unsigned int err = 0; - - /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; - - if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) - return(-EFAULT); - -#define COPY(ia64x, ia32x) err |= __get_user(regs->ia64x, &sc->ia32x) - -#define copyseg_gs(tmp) (regs->r16 |= (unsigned long) (tmp) << 48) -#define copyseg_fs(tmp) (regs->r16 |= (unsigned long) (tmp) << 32) -#define copyseg_cs(tmp) (regs->r17 |= tmp) -#define copyseg_ss(tmp) (regs->r17 |= (unsigned long) (tmp) << 16) -#define copyseg_es(tmp) (regs->r16 |= (unsigned long) (tmp) << 16) -#define copyseg_ds(tmp) (regs->r16 |= tmp) - -#define COPY_SEG(seg) \ - { \ - unsigned short tmp; \ - err |= __get_user(tmp, &sc->seg); \ - copyseg_##seg(tmp); \ - } -#define COPY_SEG_STRICT(seg) \ - { \ - unsigned short tmp; \ - err |= __get_user(tmp, &sc->seg); \ - copyseg_##seg(tmp|3); \ - } - - /* To make COPY_SEGs easier, we zero r16, r17 */ - regs->r16 = 0; - regs->r17 = 0; - - COPY_SEG(gs); - COPY_SEG(fs); - COPY_SEG(es); - COPY_SEG(ds); - COPY(r15, edi); - COPY(r14, esi); - COPY(r13, ebp); - COPY(r12, esp); - COPY(r11, ebx); - COPY(r10, edx); - COPY(r9, ecx); - COPY(cr_iip, eip); - COPY_SEG_STRICT(cs); - COPY_SEG_STRICT(ss); - ia32_load_segment_descriptors(current); - { - unsigned int tmpflags; - unsigned long flag; - - /* - * IA32 `eflags' is not part of `pt_regs', it's in an ar register which - * is part of the thread context. Fortunately, we are executing in the - * IA32 process's context. - */ - err |= __get_user(tmpflags, &sc->eflags); - flag = ia64_getreg(_IA64_REG_AR_EFLAG); - flag &= ~0x40DD5; - flag |= (tmpflags & 0x40DD5); - ia64_setreg(_IA64_REG_AR_EFLAG, flag); - - regs->r1 = -1; /* disable syscall checks, r1 is orig_eax */ - } - - { - struct _fpstate_ia32 __user *buf = NULL; - u32 fpstate_ptr; - err |= get_user(fpstate_ptr, &(sc->fpstate)); - buf = compat_ptr(fpstate_ptr); - if (buf) { - err |= restore_ia32_fpstate_live(buf); - } - } - -#if 0 - { - struct _fpstate * buf; - err |= __get_user(buf, &sc->fpstate); - if (buf) { - if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) - goto badframe; - err |= restore_i387(buf); - } - } -#endif - - err |= __get_user(*peax, &sc->eax); - return err; - -#if 0 - badframe: - return 1; -#endif -} - -/* - * Determine which stack to use.. - */ -static inline void __user * -get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) -{ - unsigned long esp; - - /* Default to using normal stack (truncate off sign-extension of bit 31: */ - esp = (unsigned int) regs->r12; - - /* This is the X/Open sanctioned signal stack switching. */ - if (ka->sa.sa_flags & SA_ONSTACK) { - int onstack = sas_ss_flags(esp); - - if (onstack == 0) - esp = current->sas_ss_sp + current->sas_ss_size; - else if (onstack == SS_ONSTACK) { - /* - * If we are on the alternate signal stack and would - * overflow it, don't. Return an always-bogus address - * instead so we will die with SIGSEGV. - */ - if (!likely(on_sig_stack(esp - frame_size))) - return (void __user *) -1L; - } - } - /* Legacy stack switching not supported */ - - esp -= frame_size; - /* Align the stack pointer according to the i386 ABI, - * i.e. so that on function entry ((sp + 4) & 15) == 0. 
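The alignment rule quoted above can be checked in isolation: the expression used by get_sigframe() leaves (sp + 4) on a 16-byte boundary, which is what an i386 callee expects at function entry. A throwaway example with an arbitrary starting stack pointer:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long esp = 0xbfff1237;		/* arbitrary example value */

	esp = ((esp + 4) & -16ul) - 4;		/* same expression as get_sigframe() */
	assert((esp + 4) % 16 == 0);
	printf("aligned esp = 0x%lx\n", esp);
	return 0;
}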
*/ - esp = ((esp + 4) & -16ul) - 4; - return (void __user *) esp; -} - -static int -setup_frame_ia32 (int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs) -{ - struct exec_domain *ed = current_thread_info()->exec_domain; - struct sigframe_ia32 __user *frame; - int err = 0; - - frame = get_sigframe(ka, regs, sizeof(*frame)); - - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) - goto give_sigsegv; - - err |= __put_user((ed && ed->signal_invmap && sig < 32 - ? (int)(ed->signal_invmap[sig]) : sig), &frame->sig); - - err |= setup_sigcontext_ia32(&frame->sc, &frame->fpstate, regs, set->sig[0]); - - if (_COMPAT_NSIG_WORDS > 1) - err |= __copy_to_user(frame->extramask, (char *) &set->sig + 4, - sizeof(frame->extramask)); - - /* Set up to return from userspace. If provided, use a stub - already in userspace. */ - if (ka->sa.sa_flags & SA_RESTORER) { - unsigned int restorer = IA32_SA_RESTORER(ka); - err |= __put_user(restorer, &frame->pretcode); - } else { - /* Pointing to restorer in ia32 gate page */ - err |= __put_user(IA32_GATE_OFFSET, &frame->pretcode); - } - - /* This is popl %eax ; movl $,%eax ; int $0x80 - * and there for historical reasons only. - * See arch/i386/kernel/signal.c - */ - - err |= __put_user(0xb858, (short __user *)(frame->retcode+0)); - err |= __put_user(__IA32_NR_sigreturn, (int __user *)(frame->retcode+2)); - err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); - - if (err) - goto give_sigsegv; - - /* Set up registers for signal handler */ - regs->r12 = (unsigned long) frame; - regs->cr_iip = IA32_SA_HANDLER(ka); - - set_fs(USER_DS); - -#if 0 - regs->eflags &= ~TF_MASK; -#endif - -#if 0 - printk("SIG deliver (%s:%d): sig=%d sp=%p pc=%lx ra=%x\n", - current->comm, current->pid, sig, (void *) frame, regs->cr_iip, frame->pretcode); -#endif - - return 1; - - give_sigsegv: - force_sigsegv(sig, current); - return 0; -} - -static int -setup_rt_frame_ia32 (int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs * regs) -{ - struct exec_domain *ed = current_thread_info()->exec_domain; - compat_uptr_t pinfo, puc; - struct rt_sigframe_ia32 __user *frame; - int err = 0; - - frame = get_sigframe(ka, regs, sizeof(*frame)); - - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) - goto give_sigsegv; - - err |= __put_user((ed && ed->signal_invmap - && sig < 32 ? ed->signal_invmap[sig] : sig), &frame->sig); - - pinfo = (long __user) &frame->info; - puc = (long __user) &frame->uc; - err |= __put_user(pinfo, &frame->pinfo); - err |= __put_user(puc, &frame->puc); - err |= copy_siginfo_to_user32(&frame->info, info); - - /* Create the ucontext. */ - err |= __put_user(0, &frame->uc.uc_flags); - err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->r12), &frame->uc.uc_stack.ss_flags); - err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); - err |= setup_sigcontext_ia32(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]); - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); - if (err) - goto give_sigsegv; - - /* Set up to return from userspace. If provided, use a stub - already in userspace. 
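The retcode bytes stored above encode the classic i386 sigreturn trampoline: popl %eax (0x58), movl $__IA32_NR_sigreturn,%eax (0xb8 followed by a 32-bit immediate), and int $0x80 (0xcd 0x80). A user-space sketch that lays out the same eight bytes, assuming a little-endian host as on ia64:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char retcode[8];
	unsigned short popl_movl = 0xb858;	/* little-endian bytes: 0x58 popl %eax, 0xb8 movl imm32,%eax */
	unsigned int nr_sigreturn = 119;	/* __IA32_NR_sigreturn */
	unsigned short int80 = 0x80cd;		/* bytes 0xcd 0x80 = int $0x80 */

	memcpy(retcode + 0, &popl_movl, 2);
	memcpy(retcode + 2, &nr_sigreturn, 4);
	memcpy(retcode + 6, &int80, 2);

	for (int i = 0; i < 8; i++)
		printf("%02x ", retcode[i]);
	printf("\n");
	return 0;
}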
*/ - if (ka->sa.sa_flags & SA_RESTORER) { - unsigned int restorer = IA32_SA_RESTORER(ka); - err |= __put_user(restorer, &frame->pretcode); - } else { - /* Pointing to rt_restorer in ia32 gate page */ - err |= __put_user(IA32_GATE_OFFSET + 8, &frame->pretcode); - } - - /* This is movl $,%eax ; int $0x80 - * and there for historical reasons only. - * See arch/i386/kernel/signal.c - */ - - err |= __put_user(0xb8, (char __user *)(frame->retcode+0)); - err |= __put_user(__IA32_NR_rt_sigreturn, (int __user *)(frame->retcode+1)); - err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); - - if (err) - goto give_sigsegv; - - /* Set up registers for signal handler */ - regs->r12 = (unsigned long) frame; - regs->cr_iip = IA32_SA_HANDLER(ka); - - set_fs(USER_DS); - -#if 0 - regs->eflags &= ~TF_MASK; -#endif - -#if 0 - printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%x\n", - current->comm, current->pid, (void *) frame, regs->cr_iip, frame->pretcode); -#endif - - return 1; - -give_sigsegv: - force_sigsegv(sig, current); - return 0; -} - -int -ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs *regs) -{ - /* Set up the stack frame */ - if (ka->sa.sa_flags & SA_SIGINFO) - return setup_rt_frame_ia32(sig, ka, info, set, regs); - else - return setup_frame_ia32(sig, ka, set, regs); -} - -asmlinkage long -sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, - int arg6, int arg7, struct pt_regs regs) -{ - unsigned long esp = (unsigned int) regs.r12; - struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(esp - 8); - sigset_t set; - int eax; - - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) - goto badframe; - - if (__get_user(set.sig[0], &frame->sc.oldmask) - || (_COMPAT_NSIG_WORDS > 1 && __copy_from_user((char *) &set.sig + 4, &frame->extramask, - sizeof(frame->extramask)))) - goto badframe; - - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - if (restore_sigcontext_ia32(®s, &frame->sc, &eax)) - goto badframe; - return eax; - - badframe: - force_sig(SIGSEGV, current); - return 0; -} - -asmlinkage long -sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, - int arg5, int arg6, int arg7, struct pt_regs regs) -{ - unsigned long esp = (unsigned int) regs.r12; - struct rt_sigframe_ia32 __user *frame = (struct rt_sigframe_ia32 __user *)(esp - 4); - sigset_t set; - int eax; - - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) - goto badframe; - if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) - goto badframe; - - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - if (restore_sigcontext_ia32(®s, &frame->uc.uc_mcontext, &eax)) - goto badframe; - - /* It is more difficult to avoid calling this function than to - call it and ignore errors. */ - do_sigaltstack((stack_t __user *) &frame->uc.uc_stack, NULL, esp); - - return eax; - - badframe: - force_sig(SIGSEGV, current); - return 0; -} diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c deleted file mode 100644 index a6965dd..0000000 --- a/arch/ia64/ia32/ia32_support.c +++ /dev/null @@ -1,253 +0,0 @@ -/* - * IA32 helper functions - * - * Copyright (C) 1999 Arun Sharma - * Copyright (C) 2000 Asit K. 
Mallick - * Copyright (C) 2001-2002 Hewlett-Packard Co - * David Mosberger-Tang - * - * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 thread context - * 02/19/01 D. Mosberger dropped tssd; it's not needed - * 09/14/01 D. Mosberger fixed memory management for gdt/tss page - * 09/29/01 D. Mosberger added ia32_load_segment_descriptors() - */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "ia32priv.h" - -extern int die_if_kernel (char *str, struct pt_regs *regs, long err); - -struct page *ia32_shared_page[NR_CPUS]; -unsigned long *ia32_boot_gdt; -unsigned long *cpu_gdt_table[NR_CPUS]; -struct page *ia32_gate_page; - -static unsigned long -load_desc (u16 selector) -{ - unsigned long *table, limit, index; - - if (!selector) - return 0; - if (selector & IA32_SEGSEL_TI) { - table = (unsigned long *) IA32_LDT_OFFSET; - limit = IA32_LDT_ENTRIES; - } else { - table = cpu_gdt_table[smp_processor_id()]; - limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]); - } - index = selector >> IA32_SEGSEL_INDEX_SHIFT; - if (index >= limit) - return 0; - return IA32_SEG_UNSCRAMBLE(table[index]); -} - -void -ia32_load_segment_descriptors (struct task_struct *task) -{ - struct pt_regs *regs = task_pt_regs(task); - - /* Setup the segment descriptors */ - regs->r24 = load_desc(regs->r16 >> 16); /* ESD */ - regs->r27 = load_desc(regs->r16 >> 0); /* DSD */ - regs->r28 = load_desc(regs->r16 >> 32); /* FSD */ - regs->r29 = load_desc(regs->r16 >> 48); /* GSD */ - regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */ - regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */ -} - -int -ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs) -{ - struct desc_struct *desc; - struct ia32_user_desc info; - int idx; - - if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info))) - return -EFAULT; - if (LDT_empty(&info)) - return -EINVAL; - - idx = info.entry_number; - if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) - return -EINVAL; - - desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; - desc->a = LDT_entry_a(&info); - desc->b = LDT_entry_b(&info); - - /* XXX: can this be done in a cleaner way ? 
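load_desc() above first decodes the IA-32 selector: bits 0..1 are the RPL, bit 2 (TI) picks the LDT over the GDT, and bits 3..15 index into the chosen table. A stand-alone sketch of that decode, using __USER_DS as the example selector:

#include <stdio.h>

int main(void)
{
	unsigned short selector = 0x2b;			/* __USER_DS */
	unsigned int rpl   = selector & 0x3;		/* IA32_SEGSEL_RPL */
	unsigned int ti    = (selector >> 2) & 0x1;	/* IA32_SEGSEL_TI */
	unsigned int index = selector >> 3;		/* IA32_SEGSEL_INDEX_SHIFT */

	printf("index=%u table=%s rpl=%u\n", index, ti ? "LDT" : "GDT", rpl);
	return 0;
}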
*/ - load_TLS(&child->thread, smp_processor_id()); - ia32_load_segment_descriptors(child); - load_TLS(¤t->thread, smp_processor_id()); - - return 0; -} - -void -ia32_save_state (struct task_struct *t) -{ - t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG); - t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR); - t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR); - t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR); - t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR); - ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob); - ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1); -} - -void -ia32_load_state (struct task_struct *t) -{ - unsigned long eflag, fsr, fcr, fir, fdr, tssd; - struct pt_regs *regs = task_pt_regs(t); - - eflag = t->thread.eflag; - fsr = t->thread.fsr; - fcr = t->thread.fcr; - fir = t->thread.fir; - fdr = t->thread.fdr; - tssd = load_desc(_TSS); /* TSSD */ - - ia64_setreg(_IA64_REG_AR_EFLAG, eflag); - ia64_setreg(_IA64_REG_AR_FSR, fsr); - ia64_setreg(_IA64_REG_AR_FCR, fcr); - ia64_setreg(_IA64_REG_AR_FIR, fir); - ia64_setreg(_IA64_REG_AR_FDR, fdr); - current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE); - current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD); - ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE); - ia64_set_kr(IA64_KR_TSSD, tssd); - - regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17; - regs->r30 = load_desc(_LDT); /* LDTD */ - load_TLS(&t->thread, smp_processor_id()); -} - -/* - * Setup IA32 GDT and TSS - */ -void -ia32_gdt_init (void) -{ - int cpu = smp_processor_id(); - - ia32_shared_page[cpu] = alloc_page(GFP_KERNEL); - if (!ia32_shared_page[cpu]) - panic("failed to allocate ia32_shared_page[%d]\n", cpu); - - cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]); - - /* Copy from the boot cpu's GDT */ - memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE); -} - - -/* - * Setup IA32 GDT and TSS - */ -static void -ia32_boot_gdt_init (void) -{ - unsigned long ldt_size; - - ia32_shared_page[0] = alloc_page(GFP_KERNEL); - if (!ia32_shared_page[0]) - panic("failed to allocate ia32_shared_page[0]\n"); - - ia32_boot_gdt = page_address(ia32_shared_page[0]); - cpu_gdt_table[0] = ia32_boot_gdt; - - /* CS descriptor in IA-32 (scrambled) format */ - ia32_boot_gdt[__USER_CS >> 3] - = IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT, - 0xb, 1, 3, 1, 1, 1, 1); - - /* DS descriptor in IA-32 (scrambled) format */ - ia32_boot_gdt[__USER_DS >> 3] - = IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT, - 0x3, 1, 3, 1, 1, 1, 1); - - ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE); - ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235, - 0xb, 0, 3, 1, 1, 1, 0); - ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1, - 0x2, 0, 3, 1, 1, 1, 0); -} - -static void -ia32_gate_page_init(void) -{ - unsigned long *sr; - - ia32_gate_page = alloc_page(GFP_KERNEL); - sr = page_address(ia32_gate_page); - /* This is popl %eax ; movl $,%eax ; int $0x80 */ - *sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48); - - /* This is movl $,%eax ; int $0x80 */ - *sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40); -} - -void -ia32_mem_init(void) -{ - ia32_boot_gdt_init(); - ia32_gate_page_init(); -} - -/* - * Handle bad IA32 interrupt via syscall - */ -void -ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs) -{ - siginfo_t siginfo; - - if (die_if_kernel("Bad IA-32 interrupt", regs, int_num)) - return; - - siginfo.si_signo = SIGTRAP; - siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? 
*/ - siginfo.si_flags = 0; - siginfo.si_isr = 0; - siginfo.si_addr = NULL; - siginfo.si_imm = 0; - siginfo.si_code = TRAP_BRKPT; - force_sig_info(SIGTRAP, &siginfo, current); -} - -void -ia32_cpu_init (void) -{ - /* initialize global ia32 state - CR0 and CR4 */ - ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0)); -} - -static int __init -ia32_init (void) -{ -#if PAGE_SHIFT > IA32_PAGE_SHIFT - { - extern struct kmem_cache *ia64_partial_page_cachep; - - ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache", - sizeof(struct ia64_partial_page), - 0, SLAB_PANIC, NULL); - } -#endif - return 0; -} - -__initcall(ia32_init); diff --git a/arch/ia64/ia32/ia32_traps.c b/arch/ia64/ia32/ia32_traps.c deleted file mode 100644 index e486042..0000000 --- a/arch/ia64/ia32/ia32_traps.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * IA-32 exception handlers - * - * Copyright (C) 2000 Asit K. Mallick - * Copyright (C) 2001-2002 Hewlett-Packard Co - * David Mosberger-Tang - * - * 06/16/00 A. Mallick added siginfo for most cases (close to IA32) - * 09/29/00 D. Mosberger added ia32_intercept() - */ - -#include -#include - -#include "ia32priv.h" - -#include -#include - -int -ia32_intercept (struct pt_regs *regs, unsigned long isr) -{ - switch ((isr >> 16) & 0xff) { - case 0: /* Instruction intercept fault */ - case 4: /* Locked Data reference fault */ - case 1: /* Gate intercept trap */ - return -1; - - case 2: /* System flag trap */ - if (((isr >> 14) & 0x3) >= 2) { - /* MOV SS, POP SS instructions */ - ia64_psr(regs)->id = 1; - return 0; - } else - return -1; - } - return -1; -} - -int -ia32_exception (struct pt_regs *regs, unsigned long isr) -{ - struct siginfo siginfo; - - /* initialize these fields to avoid leaking kernel bits to user space: */ - siginfo.si_errno = 0; - siginfo.si_flags = 0; - siginfo.si_isr = 0; - siginfo.si_imm = 0; - switch ((isr >> 16) & 0xff) { - case 1: - case 2: - siginfo.si_signo = SIGTRAP; - if (isr == 0) - siginfo.si_code = TRAP_TRACE; - else if (isr & 0x4) - siginfo.si_code = TRAP_BRANCH; - else - siginfo.si_code = TRAP_BRKPT; - break; - - case 3: - siginfo.si_signo = SIGTRAP; - siginfo.si_code = TRAP_BRKPT; - break; - - case 0: /* Divide fault */ - siginfo.si_signo = SIGFPE; - siginfo.si_code = FPE_INTDIV; - break; - - case 4: /* Overflow */ - case 5: /* Bounds fault */ - siginfo.si_signo = SIGFPE; - siginfo.si_code = 0; - break; - - case 6: /* Invalid Op-code */ - siginfo.si_signo = SIGILL; - siginfo.si_code = ILL_ILLOPN; - break; - - case 7: /* FP DNA */ - case 8: /* Double Fault */ - case 9: /* Invalid TSS */ - case 11: /* Segment not present */ - case 12: /* Stack fault */ - case 13: /* General Protection Fault */ - siginfo.si_signo = SIGSEGV; - siginfo.si_code = 0; - break; - - case 16: /* Pending FP error */ - { - unsigned long fsr, fcr; - - fsr = ia64_getreg(_IA64_REG_AR_FSR); - fcr = ia64_getreg(_IA64_REG_AR_FCR); - - siginfo.si_signo = SIGFPE; - /* - * (~cwd & swd) will mask out exceptions that are not set to unmasked - * status. 0x3f is the exception bits in these regs, 0x200 is the - * C1 reg you need in case of a stack fault, 0x040 is the stack - * fault bit. 
We should only be taking one exception at a time, - * so if this combination doesn't produce any single exception, - * then we have a bad program that isn't synchronizing its FPU usage - * and it will suffer the consequences since we won't be able to - * fully reproduce the context of the exception - */ - siginfo.si_isr = isr; - siginfo.si_flags = __ISR_VALID; - switch(((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) { - case 0x000: - default: - siginfo.si_code = 0; - break; - case 0x001: /* Invalid Op */ - case 0x040: /* Stack Fault */ - case 0x240: /* Stack Fault | Direction */ - siginfo.si_code = FPE_FLTINV; - break; - case 0x002: /* Denormalize */ - case 0x010: /* Underflow */ - siginfo.si_code = FPE_FLTUND; - break; - case 0x004: /* Zero Divide */ - siginfo.si_code = FPE_FLTDIV; - break; - case 0x008: /* Overflow */ - siginfo.si_code = FPE_FLTOVF; - break; - case 0x020: /* Precision */ - siginfo.si_code = FPE_FLTRES; - break; - } - - break; - } - - case 17: /* Alignment check */ - siginfo.si_signo = SIGSEGV; - siginfo.si_code = BUS_ADRALN; - break; - - case 19: /* SSE Numeric error */ - siginfo.si_signo = SIGFPE; - siginfo.si_code = 0; - break; - - default: - return -1; - } - force_sig_info(siginfo.si_signo, &siginfo, current); - return 0; -} diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h deleted file mode 100644 index 0f15349..0000000 --- a/arch/ia64/ia32/ia32priv.h +++ /dev/null @@ -1,532 +0,0 @@ -#ifndef _ASM_IA64_IA32_PRIV_H -#define _ASM_IA64_IA32_PRIV_H - - -#include - -#ifdef CONFIG_IA32_SUPPORT - -#include -#include -#include - -#include - -/* - * 32 bit structures for IA32 support. - */ - -#define IA32_PAGE_SIZE (1UL << IA32_PAGE_SHIFT) -#define IA32_PAGE_MASK (~(IA32_PAGE_SIZE - 1)) -#define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK) -#define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */ - -/* - * partially mapped pages provide precise accounting of which 4k sub pages - * are mapped and which ones are not, thereby improving IA-32 compatibility. - */ -struct ia64_partial_page { - struct ia64_partial_page *next; /* linked list, sorted by address */ - struct rb_node pp_rb; - /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64 - * should suffice.*/ - unsigned long bitmap; - unsigned int base; -}; - -struct ia64_partial_page_list { - struct ia64_partial_page *pp_head; /* list head, points to the lowest - * addressed partial page */ - struct rb_root ppl_rb; - struct ia64_partial_page *pp_hint; /* pp_hint->next is the last - * accessed partial page */ - atomic_t pp_count; /* reference count */ -}; - -#if PAGE_SHIFT > IA32_PAGE_SHIFT -struct ia64_partial_page_list* ia32_init_pp_list (void); -#else -# define ia32_init_pp_list() 0 -#endif - -/* sigcontext.h */ -/* - * As documented in the iBCS2 standard.. - * - * The first part of "struct _fpstate" is just the - * normal i387 hardware setup, the extra "status" - * word is used to save the coprocessor status word - * before entering the handler. 
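The pending-FP-error case in ia32_exception() above masks the status word against the unmasked-exception bits of the control word, keeps the stack-fault/C1 bits, and maps the result onto an FPE_* code. A user-space rendering of just that decode, with invented fcr/fsr values:

#include <stdio.h>

static const char *fp_error_code(unsigned long fcr, unsigned long fsr)
{
	switch (((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) {
	case 0x001: case 0x040: case 0x240:
		return "FPE_FLTINV";	/* invalid op / stack fault */
	case 0x002: case 0x010:
		return "FPE_FLTUND";	/* denormal / underflow */
	case 0x004:
		return "FPE_FLTDIV";	/* zero divide */
	case 0x008:
		return "FPE_FLTOVF";	/* overflow */
	case 0x020:
		return "FPE_FLTRES";	/* precision */
	default:
		return "none";
	}
}

int main(void)
{
	printf("%s\n", fp_error_code(~0x4UL, 0x4));	/* unmasked zero-divide */
	printf("%s\n", fp_error_code(~0UL, 0x240));	/* stack fault */
	return 0;
}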
- */ -struct _fpreg_ia32 { - unsigned short significand[4]; - unsigned short exponent; -}; - -struct _fpxreg_ia32 { - unsigned short significand[4]; - unsigned short exponent; - unsigned short padding[3]; -}; - -struct _xmmreg_ia32 { - unsigned int element[4]; -}; - - -struct _fpstate_ia32 { - unsigned int cw, - sw, - tag, - ipoff, - cssel, - dataoff, - datasel; - struct _fpreg_ia32 _st[8]; - unsigned short status; - unsigned short magic; /* 0xffff = regular FPU data only */ - - /* FXSR FPU environment */ - unsigned int _fxsr_env[6]; /* FXSR FPU env is ignored */ - unsigned int mxcsr; - unsigned int reserved; - struct _fpxreg_ia32 _fxsr_st[8]; /* FXSR FPU reg data is ignored */ - struct _xmmreg_ia32 _xmm[8]; - unsigned int padding[56]; -}; - -struct sigcontext_ia32 { - unsigned short gs, __gsh; - unsigned short fs, __fsh; - unsigned short es, __esh; - unsigned short ds, __dsh; - unsigned int edi; - unsigned int esi; - unsigned int ebp; - unsigned int esp; - unsigned int ebx; - unsigned int edx; - unsigned int ecx; - unsigned int eax; - unsigned int trapno; - unsigned int err; - unsigned int eip; - unsigned short cs, __csh; - unsigned int eflags; - unsigned int esp_at_signal; - unsigned short ss, __ssh; - unsigned int fpstate; /* really (struct _fpstate_ia32 *) */ - unsigned int oldmask; - unsigned int cr2; -}; - -/* user.h */ -/* - * IA32 (Pentium III/4) FXSR, SSE support - * - * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for - * interacting with the FXSR-format floating point environment. Floating - * point data can be accessed in the regular format in the usual manner, - * and both the standard and SIMD floating point data can be accessed via - * the new ptrace requests. In either case, changes to the FPU environment - * will be reflected in the task's state as expected. 
- */ -struct ia32_user_i387_struct { - int cwd; - int swd; - int twd; - int fip; - int fcs; - int foo; - int fos; - /* 8*10 bytes for each FP-reg = 80 bytes */ - struct _fpreg_ia32 st_space[8]; -}; - -struct ia32_user_fxsr_struct { - unsigned short cwd; - unsigned short swd; - unsigned short twd; - unsigned short fop; - int fip; - int fcs; - int foo; - int fos; - int mxcsr; - int reserved; - int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ - int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ - int padding[56]; -}; - -/* signal.h */ -#define IA32_SET_SA_HANDLER(ka,handler,restorer) \ - ((ka)->sa.sa_handler = (__sighandler_t) \ - (((unsigned long)(restorer) << 32) \ - | ((handler) & 0xffffffff))) -#define IA32_SA_HANDLER(ka) ((unsigned long) (ka)->sa.sa_handler & 0xffffffff) -#define IA32_SA_RESTORER(ka) ((unsigned long) (ka)->sa.sa_handler >> 32) - -#define __IA32_NR_sigreturn 119 -#define __IA32_NR_rt_sigreturn 173 - -struct sigaction32 { - unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */ - unsigned int sa_flags; - unsigned int sa_restorer; /* Another 32 bit pointer */ - compat_sigset_t sa_mask; /* A 32 bit mask */ -}; - -struct old_sigaction32 { - unsigned int sa_handler; /* Really a pointer, but need to deal - with 32 bits */ - compat_old_sigset_t sa_mask; /* A 32 bit mask */ - unsigned int sa_flags; - unsigned int sa_restorer; /* Another 32 bit pointer */ -}; - -typedef struct sigaltstack_ia32 { - unsigned int ss_sp; - int ss_flags; - unsigned int ss_size; -} stack_ia32_t; - -struct ucontext_ia32 { - unsigned int uc_flags; - unsigned int uc_link; - stack_ia32_t uc_stack; - struct sigcontext_ia32 uc_mcontext; - sigset_t uc_sigmask; /* mask last for extensibility */ -}; - -struct stat64 { - unsigned long long st_dev; - unsigned char __pad0[4]; - unsigned int __st_ino; - unsigned int st_mode; - unsigned int st_nlink; - unsigned int st_uid; - unsigned int st_gid; - unsigned long long st_rdev; - unsigned char __pad3[4]; - unsigned int st_size_lo; - unsigned int st_size_hi; - unsigned int st_blksize; - unsigned int st_blocks; /* Number 512-byte blocks allocated. */ - unsigned int __pad4; /* future possible st_blocks high bits */ - unsigned int st_atime; - unsigned int st_atime_nsec; - unsigned int st_mtime; - unsigned int st_mtime_nsec; - unsigned int st_ctime; - unsigned int st_ctime_nsec; - unsigned int st_ino_lo; - unsigned int st_ino_hi; -}; - -typedef struct compat_siginfo { - int si_signo; - int si_errno; - int si_code; - - union { - int _pad[((128/sizeof(int)) - 3)]; - - /* kill() */ - struct { - unsigned int _pid; /* sender's pid */ - unsigned int _uid; /* sender's uid */ - } _kill; - - /* POSIX.1b timers */ - struct { - compat_timer_t _tid; /* timer id */ - int _overrun; /* overrun count */ - char _pad[sizeof(unsigned int) - sizeof(int)]; - compat_sigval_t _sigval; /* same as below */ - int _sys_private; /* not to be passed to user */ - } _timer; - - /* POSIX.1b signals */ - struct { - unsigned int _pid; /* sender's pid */ - unsigned int _uid; /* sender's uid */ - compat_sigval_t _sigval; - } _rt; - - /* SIGCHLD */ - struct { - unsigned int _pid; /* which child */ - unsigned int _uid; /* sender's uid */ - int _status; /* exit code */ - compat_clock_t _utime; - compat_clock_t _stime; - } _sigchld; - - /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ - struct { - unsigned int _addr; /* faulting insn/memory ref. 
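IA32_SA_HANDLER()/IA32_SA_RESTORER() above work because the signal code stores the 32-bit handler in the low word of the 64-bit sa_handler and the 32-bit restorer in the high word. A quick sketch of that round trip, with hypothetical addresses:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t handler = 0x08048abc;		/* hypothetical 32-bit handler address */
	uint32_t restorer = 0x08049def;		/* hypothetical 32-bit restorer address */
	uint64_t packed = ((uint64_t) restorer << 32) | handler;

	assert((uint32_t) (packed & 0xffffffff) == handler);	/* IA32_SA_HANDLER */
	assert((uint32_t) (packed >> 32) == restorer);		/* IA32_SA_RESTORER */
	printf("packed sa_handler = 0x%llx\n", (unsigned long long) packed);
	return 0;
}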
*/ - } _sigfault; - - /* SIGPOLL */ - struct { - int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ - int _fd; - } _sigpoll; - } _sifields; -} compat_siginfo_t; - -/* - * IA-32 ELF specific definitions for IA-64. - */ - -#define _ASM_IA64_ELF_H /* Don't include elf.h */ - -#include - -/* - * This is used to ensure we don't load something for the wrong architecture. - */ -#define elf_check_arch(x) ((x)->e_machine == EM_386) - -/* - * These are used to set parameters in the core dumps. - */ -#define ELF_CLASS ELFCLASS32 -#define ELF_DATA ELFDATA2LSB -#define ELF_ARCH EM_386 - -#define IA32_STACK_TOP IA32_PAGE_OFFSET -#define IA32_GATE_OFFSET IA32_PAGE_OFFSET -#define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE - -/* - * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can - * access them. - */ -#define IA32_GDT_OFFSET (IA32_PAGE_OFFSET + PAGE_SIZE) -#define IA32_TSS_OFFSET (IA32_PAGE_OFFSET + 2*PAGE_SIZE) -#define IA32_LDT_OFFSET (IA32_PAGE_OFFSET + 3*PAGE_SIZE) - -#define ELF_EXEC_PAGESIZE IA32_PAGE_SIZE - -/* - * This is the location that an ET_DYN program is loaded if exec'ed. - * Typical use of this is to invoke "./ld.so someprog" to test out a - * new version of the loader. We need to make sure that it is out of - * the way of the program that it will "exec", and that there is - * sufficient room for the brk. - */ -#define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000) - -void ia64_elf32_init(struct pt_regs *regs); -#define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r) - -/* This macro yields a bitmask that programs can use to figure out - what instruction set this CPU supports. */ -#define ELF_HWCAP 0 - -/* This macro yields a string that ld.so will use to load - implementation specific libraries for optimization. Not terribly - relevant until we have real hardware to play with... */ -#define ELF_PLATFORM NULL - -#ifdef __KERNEL__ -# define SET_PERSONALITY(EX) \ - (current->personality = PER_LINUX) -#endif - -#define IA32_EFLAG 0x200 - -/* - * IA-32 ELF specific definitions for IA-64. 
- */ - -#define __USER_CS 0x23 -#define __USER_DS 0x2B - -/* - * The per-cpu GDT has 32 entries: see - */ -#define GDT_ENTRIES 32 - -#define GDT_SIZE (GDT_ENTRIES * 8) - -#define TSS_ENTRY 14 -#define LDT_ENTRY (TSS_ENTRY + 1) - -#define IA32_SEGSEL_RPL (0x3 << 0) -#define IA32_SEGSEL_TI (0x1 << 2) -#define IA32_SEGSEL_INDEX_SHIFT 3 - -#define _TSS ((unsigned long) TSS_ENTRY << IA32_SEGSEL_INDEX_SHIFT) -#define _LDT ((unsigned long) LDT_ENTRY << IA32_SEGSEL_INDEX_SHIFT) - -#define IA32_SEG_BASE 16 -#define IA32_SEG_TYPE 40 -#define IA32_SEG_SYS 44 -#define IA32_SEG_DPL 45 -#define IA32_SEG_P 47 -#define IA32_SEG_HIGH_LIMIT 48 -#define IA32_SEG_AVL 52 -#define IA32_SEG_DB 54 -#define IA32_SEG_G 55 -#define IA32_SEG_HIGH_BASE 56 - -#define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran) \ - (((limit) & 0xffff) \ - | (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE) \ - | ((unsigned long) (segtype) << IA32_SEG_TYPE) \ - | ((unsigned long) (nonsysseg) << IA32_SEG_SYS) \ - | ((unsigned long) (dpl) << IA32_SEG_DPL) \ - | ((unsigned long) (segpresent) << IA32_SEG_P) \ - | ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT) \ - | ((unsigned long) (avl) << IA32_SEG_AVL) \ - | ((unsigned long) (segdb) << IA32_SEG_DB) \ - | ((unsigned long) (gran) << IA32_SEG_G) \ - | ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE)) - -#define SEG_LIM 32 -#define SEG_TYPE 52 -#define SEG_SYS 56 -#define SEG_DPL 57 -#define SEG_P 59 -#define SEG_AVL 60 -#define SEG_DB 62 -#define SEG_G 63 - -/* Unscramble an IA-32 segment descriptor into the IA-64 format. */ -#define IA32_SEG_UNSCRAMBLE(sd) \ - ( (((sd) >> IA32_SEG_BASE) & 0xffffff) | ((((sd) >> IA32_SEG_HIGH_BASE) & 0xff) << 24) \ - | ((((sd) & 0xffff) | ((((sd) >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16)) << SEG_LIM) \ - | ((((sd) >> IA32_SEG_TYPE) & 0xf) << SEG_TYPE) \ - | ((((sd) >> IA32_SEG_SYS) & 0x1) << SEG_SYS) \ - | ((((sd) >> IA32_SEG_DPL) & 0x3) << SEG_DPL) \ - | ((((sd) >> IA32_SEG_P) & 0x1) << SEG_P) \ - | ((((sd) >> IA32_SEG_AVL) & 0x1) << SEG_AVL) \ - | ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB) \ - | ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G)) - -#define IA32_IOBASE 0x2000000000000000UL /* Virtual address for I/O space */ - -#define IA32_CR0 0x80000001 /* Enable PG and PE bits */ -#define IA32_CR4 0x600 /* MMXEX and FXSR on */ - -/* - * IA32 floating point control registers starting values - */ - -#define IA32_FSR_DEFAULT 0x55550000 /* set all tag bits */ -#define IA32_FCR_DEFAULT 0x17800000037fUL /* extended precision, all masks */ - -#define IA32_PTRACE_GETREGS 12 -#define IA32_PTRACE_SETREGS 13 -#define IA32_PTRACE_GETFPREGS 14 -#define IA32_PTRACE_SETFPREGS 15 -#define IA32_PTRACE_GETFPXREGS 18 -#define IA32_PTRACE_SETFPXREGS 19 - -#define ia32_start_thread(regs,new_ip,new_sp) do { \ - set_fs(USER_DS); \ - ia64_psr(regs)->cpl = 3; /* set user mode */ \ - ia64_psr(regs)->ri = 0; /* clear return slot number */ \ - ia64_psr(regs)->is = 1; /* IA-32 instruction set */ \ - regs->cr_iip = new_ip; \ - regs->ar_rsc = 0xc; /* enforced lazy mode, priv. level 3 */ \ - regs->ar_rnat = 0; \ - regs->loadrs = 0; \ - regs->r12 = new_sp; \ -} while (0) - -/* - * Local Descriptor Table (LDT) related declarations. - */ - -#define IA32_LDT_ENTRIES 8192 /* Maximum number of LDT entries supported. */ -#define IA32_LDT_ENTRY_SIZE 8 /* The size of each LDT entry. 
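IA32_SEG_DESCRIPTOR()/IA32_SEG_UNSCRAMBLE() above deal with the hardware ("scrambled") descriptor layout, in which base and limit are split across the 64-bit word. A reduced sketch of the base/limit split and its inverse (the type, DPL and other attribute bits are left out; the field values are examples):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0x00c0ffee, limit = 0xfffff;

	/* scrambled layout: limit[15:0], base[23:0] at bit 16,
	 * limit[19:16] at bit 48, base[31:24] at bit 56 */
	uint64_t sd = (limit & 0xffff)
		    | ((base & 0xffffff) << 16)
		    | (((limit >> 16) & 0xf) << 48)
		    | (((base >> 24) & 0xff) << 56);

	uint64_t ubase  = ((sd >> 16) & 0xffffff) | (((sd >> 56) & 0xff) << 24);
	uint64_t ulimit = (sd & 0xffff) | (((sd >> 48) & 0xf) << 16);

	assert(ubase == base && ulimit == limit);
	printf("base=0x%llx limit=0x%llx\n",
	       (unsigned long long) ubase, (unsigned long long) ulimit);
	return 0;
}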
*/ - -#define LDT_entry_a(info) \ - ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) - -#define LDT_entry_b(info) \ - (((info)->base_addr & 0xff000000) | \ - (((info)->base_addr & 0x00ff0000) >> 16) | \ - ((info)->limit & 0xf0000) | \ - (((info)->read_exec_only ^ 1) << 9) | \ - ((info)->contents << 10) | \ - (((info)->seg_not_present ^ 1) << 15) | \ - ((info)->seg_32bit << 22) | \ - ((info)->limit_in_pages << 23) | \ - ((info)->useable << 20) | \ - 0x7100) - -#define LDT_empty(info) ( \ - (info)->base_addr == 0 && \ - (info)->limit == 0 && \ - (info)->contents == 0 && \ - (info)->read_exec_only == 1 && \ - (info)->seg_32bit == 0 && \ - (info)->limit_in_pages == 0 && \ - (info)->seg_not_present == 1 && \ - (info)->useable == 0 ) - -static inline void -load_TLS (struct thread_struct *t, unsigned int cpu) -{ - extern unsigned long *cpu_gdt_table[NR_CPUS]; - - memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0], sizeof(long)); - memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1], sizeof(long)); - memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2], sizeof(long)); -} - -struct ia32_user_desc { - unsigned int entry_number; - unsigned int base_addr; - unsigned int limit; - unsigned int seg_32bit:1; - unsigned int contents:2; - unsigned int read_exec_only:1; - unsigned int limit_in_pages:1; - unsigned int seg_not_present:1; - unsigned int useable:1; -}; - -struct linux_binprm; - -extern void ia32_init_addr_space (struct pt_regs *regs); -extern int ia32_setup_arg_pages (struct linux_binprm *bprm, int exec_stack); -extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t); -extern void ia32_load_segment_descriptors (struct task_struct *task); - -#define ia32f2ia64f(dst,src) \ -do { \ - ia64_ldfe(6,src); \ - ia64_stop(); \ - ia64_stf_spill(dst, 6); \ -} while(0) - -#define ia64f2ia32f(dst,src) \ -do { \ - ia64_ldf_fill(6, src); \ - ia64_stop(); \ - ia64_stfe(dst, 6); \ -} while(0) - -struct user_regs_struct32 { - __u32 ebx, ecx, edx, esi, edi, ebp, eax; - unsigned short ds, __ds, es, __es; - unsigned short fs, __fs, gs, __gs; - __u32 orig_eax, eip; - unsigned short cs, __cs; - __u32 eflags, esp; - unsigned short ss, __ss; -}; - -/* Prototypes for use in elfcore32.h */ -extern int save_ia32_fpstate (struct task_struct *, struct ia32_user_i387_struct __user *); -extern int save_ia32_fpxstate (struct task_struct *, struct ia32_user_fxsr_struct __user *); - -#endif /* !CONFIG_IA32_SUPPORT */ - -#endif /* _ASM_IA64_IA32_PRIV_H */ diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c deleted file mode 100644 index 045b746b..0000000 --- a/arch/ia64/ia32/sys_ia32.c +++ /dev/null @@ -1,2765 +0,0 @@ -/* - * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c. - * - * Copyright (C) 2000 VA Linux Co - * Copyright (C) 2000 Don Dugger - * Copyright (C) 1999 Arun Sharma - * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) - * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co - * David Mosberger-Tang - * Copyright (C) 2004 Gordon Jin - * - * These routines maintain argument size conversion between 32bit and 64bit - * environment. 
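LDT_entry_a()/LDT_entry_b() above fold an ia32_user_desc into the two 32-bit halves of an x86 descriptor; the 0x7100 constant supplies the fixed attribute bits this variant uses. A sketch of the same packing with example field values:

#include <stdio.h>

struct user_desc_32 {
	unsigned int entry_number, base_addr, limit;
	unsigned int seg_32bit:1, contents:2, read_exec_only:1,
		     limit_in_pages:1, seg_not_present:1, useable:1;
};

static unsigned int entry_a(const struct user_desc_32 *i)
{
	return ((i->base_addr & 0x0000ffff) << 16) | (i->limit & 0x0ffff);
}

static unsigned int entry_b(const struct user_desc_32 *i)
{
	return (i->base_addr & 0xff000000)
	     | ((i->base_addr & 0x00ff0000) >> 16)
	     | (i->limit & 0xf0000)
	     | ((i->read_exec_only ^ 1) << 9)
	     | (i->contents << 10)
	     | ((i->seg_not_present ^ 1) << 15)
	     | (i->seg_32bit << 22)
	     | (i->limit_in_pages << 23)
	     | (i->useable << 20)
	     | 0x7100;
}

int main(void)
{
	struct user_desc_32 d = {
		.entry_number = 6, .base_addr = 0x12345678, .limit = 0xfffff,
		.seg_32bit = 1, .contents = 0, .read_exec_only = 0,
		.limit_in_pages = 1, .seg_not_present = 0, .useable = 1,
	};

	printf("a=0x%08x b=0x%08x\n", entry_a(&d), entry_b(&d));
	return 0;
}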
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "ia32priv.h" - -#include -#include - -#define DEBUG 0 - -#if DEBUG -# define DBG(fmt...) printk(KERN_DEBUG fmt) -#else -# define DBG(fmt...) -#endif - -#define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1))) - -#define OFFSET4K(a) ((a) & 0xfff) -#define PAGE_START(addr) ((addr) & PAGE_MASK) -#define MINSIGSTKSZ_IA32 2048 - -#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid)) -#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid)) - -/* - * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore - * while doing so. - */ -/* XXX make per-mm: */ -static DEFINE_MUTEX(ia32_mmap_mutex); - -asmlinkage long -sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp, - struct pt_regs *regs) -{ - long error; - char *filename; - unsigned long old_map_base, old_task_size, tssd; - - filename = getname(name); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - return error; - - old_map_base = current->thread.map_base; - old_task_size = current->thread.task_size; - tssd = ia64_get_kr(IA64_KR_TSSD); - - /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */ - current->thread.map_base = DEFAULT_MAP_BASE; - current->thread.task_size = DEFAULT_TASK_SIZE; - ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob); - ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1); - - error = compat_do_execve(filename, argv, envp, regs); - putname(filename); - - if (error < 0) { - /* oops, execve failed, switch back to old values... */ - ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE); - ia64_set_kr(IA64_KR_TSSD, tssd); - current->thread.map_base = old_map_base; - current->thread.task_size = old_task_size; - } - - return error; -} - - -#if PAGE_SHIFT > IA32_PAGE_SHIFT - - -static int -get_page_prot (struct vm_area_struct *vma, unsigned long addr) -{ - int prot = 0; - - if (!vma || vma->vm_start > addr) - return 0; - - if (vma->vm_flags & VM_READ) - prot |= PROT_READ; - if (vma->vm_flags & VM_WRITE) - prot |= PROT_WRITE; - if (vma->vm_flags & VM_EXEC) - prot |= PROT_EXEC; - return prot; -} - -/* - * Map a subpage by creating an anonymous page that contains the union of the old page and - * the subpage. 
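When the ia64 page size is bigger than the 4K IA-32 page, a mapping request that is only 4K-aligned has to be decomposed: partial head and tail pages go through the subpage path described above, and whole ia64 pages go straight to do_mmap(). A toy illustration of that split, assuming 16K ia64 pages and invented addresses:

#include <stdio.h>

#define PAGE_SIZE_64	16384UL				/* assumed ia64 page size */
#define PAGE_START(a)	((a) & ~(PAGE_SIZE_64 - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE_64 - 1) & ~(PAGE_SIZE_64 - 1))

int main(void)
{
	unsigned long start = 0x10001000UL, len = 0x9000UL, end = start + len;
	unsigned long head_end = PAGE_ALIGN(start) < end ? PAGE_ALIGN(start) : end;

	if (start > PAGE_START(start))
		printf("partial head page: [0x%lx, 0x%lx)\n", start, head_end);
	if (PAGE_ALIGN(start) < PAGE_START(end))
		printf("whole ia64 pages:  [0x%lx, 0x%lx)\n", PAGE_ALIGN(start), PAGE_START(end));
	if (end > PAGE_START(end))
		printf("partial tail page: [0x%lx, 0x%lx)\n", PAGE_START(end), end);
	return 0;
}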
- */ -static unsigned long -mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags, - loff_t off) -{ - void *page = NULL; - struct inode *inode; - unsigned long ret = 0; - struct vm_area_struct *vma = find_vma(current->mm, start); - int old_prot = get_page_prot(vma, start); - - DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n", - file, start, end, prot, flags, off); - - - /* Optimize the case where the old mmap and the new mmap are both anonymous */ - if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) { - if (clear_user((void __user *) start, end - start)) { - ret = -EFAULT; - goto out; - } - goto skip_mmap; - } - - page = (void *) get_zeroed_page(GFP_KERNEL); - if (!page) - return -ENOMEM; - - if (old_prot) - copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE); - - down_write(¤t->mm->mmap_sem); - { - ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE, - flags | MAP_FIXED | MAP_ANONYMOUS, 0); - } - up_write(¤t->mm->mmap_sem); - - if (IS_ERR((void *) ret)) - goto out; - - if (old_prot) { - /* copy back the old page contents. */ - if (offset_in_page(start)) - copy_to_user((void __user *) PAGE_START(start), page, - offset_in_page(start)); - if (offset_in_page(end)) - copy_to_user((void __user *) end, page + offset_in_page(end), - PAGE_SIZE - offset_in_page(end)); - } - - if (!(flags & MAP_ANONYMOUS)) { - /* read the file contents */ - inode = file->f_path.dentry->d_inode; - if (!inode->i_fop || !file->f_op->read - || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0)) - { - ret = -EINVAL; - goto out; - } - } - - skip_mmap: - if (!(prot & PROT_WRITE)) - ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot); - out: - if (page) - free_page((unsigned long) page); - return ret; -} - -/* SLAB cache for ia64_partial_page structures */ -struct kmem_cache *ia64_partial_page_cachep; - -/* - * init ia64_partial_page_list. - * return 0 means kmalloc fail. - */ -struct ia64_partial_page_list* -ia32_init_pp_list(void) -{ - struct ia64_partial_page_list *p; - - if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL) - return p; - p->pp_head = NULL; - p->ppl_rb = RB_ROOT; - p->pp_hint = NULL; - atomic_set(&p->pp_count, 1); - return p; -} - -/* - * Search for the partial page with @start in partial page list @ppl. - * If finds the partial page, return the found partial page. - * Else, return 0 and provide @pprev, @rb_link, @rb_parent to - * be used by later __ia32_insert_pp(). - */ -static struct ia64_partial_page * -__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start, - struct ia64_partial_page **pprev, struct rb_node ***rb_link, - struct rb_node **rb_parent) -{ - struct ia64_partial_page *pp; - struct rb_node **__rb_link, *__rb_parent, *rb_prev; - - pp = ppl->pp_hint; - if (pp && pp->base == start) - return pp; - - __rb_link = &ppl->ppl_rb.rb_node; - rb_prev = __rb_parent = NULL; - - while (*__rb_link) { - __rb_parent = *__rb_link; - pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb); - - if (pp->base == start) { - ppl->pp_hint = pp; - return pp; - } else if (pp->base < start) { - rb_prev = __rb_parent; - __rb_link = &__rb_parent->rb_right; - } else { - __rb_link = &__rb_parent->rb_left; - } - } - - *rb_link = __rb_link; - *rb_parent = __rb_parent; - *pprev = NULL; - if (rb_prev) - *pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb); - return NULL; -} - -/* - * insert @pp into @ppl. 
- */ -static void -__ia32_insert_pp(struct ia64_partial_page_list *ppl, - struct ia64_partial_page *pp, struct ia64_partial_page *prev, - struct rb_node **rb_link, struct rb_node *rb_parent) -{ - /* link list */ - if (prev) { - pp->next = prev->next; - prev->next = pp; - } else { - ppl->pp_head = pp; - if (rb_parent) - pp->next = rb_entry(rb_parent, - struct ia64_partial_page, pp_rb); - else - pp->next = NULL; - } - - /* link rb */ - rb_link_node(&pp->pp_rb, rb_parent, rb_link); - rb_insert_color(&pp->pp_rb, &ppl->ppl_rb); - - ppl->pp_hint = pp; -} - -/* - * delete @pp from partial page list @ppl. - */ -static void -__ia32_delete_pp(struct ia64_partial_page_list *ppl, - struct ia64_partial_page *pp, struct ia64_partial_page *prev) -{ - if (prev) { - prev->next = pp->next; - if (ppl->pp_hint == pp) - ppl->pp_hint = prev; - } else { - ppl->pp_head = pp->next; - if (ppl->pp_hint == pp) - ppl->pp_hint = pp->next; - } - rb_erase(&pp->pp_rb, &ppl->ppl_rb); - kmem_cache_free(ia64_partial_page_cachep, pp); -} - -static struct ia64_partial_page * -__pp_prev(struct ia64_partial_page *pp) -{ - struct rb_node *prev = rb_prev(&pp->pp_rb); - if (prev) - return rb_entry(prev, struct ia64_partial_page, pp_rb); - else - return NULL; -} - -/* - * Delete partial pages with address between @start and @end. - * @start and @end are page aligned. - */ -static void -__ia32_delete_pp_range(unsigned int start, unsigned int end) -{ - struct ia64_partial_page *pp, *prev; - struct rb_node **rb_link, *rb_parent; - - if (start >= end) - return; - - pp = __ia32_find_pp(current->thread.ppl, start, &prev, - &rb_link, &rb_parent); - if (pp) - prev = __pp_prev(pp); - else { - if (prev) - pp = prev->next; - else - pp = current->thread.ppl->pp_head; - } - - while (pp && pp->base < end) { - struct ia64_partial_page *tmp = pp->next; - __ia32_delete_pp(current->thread.ppl, pp, prev); - pp = tmp; - } -} - -/* - * Set the range between @start and @end in bitmap. - * @start and @end should be IA32 page aligned and in the same IA64 page. - */ -static int -__ia32_set_pp(unsigned int start, unsigned int end, int flags) -{ - struct ia64_partial_page *pp, *prev; - struct rb_node ** rb_link, *rb_parent; - unsigned int pstart, start_bit, end_bit, i; - - pstart = PAGE_START(start); - start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; - end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; - if (end_bit == 0) - end_bit = PAGE_SIZE / IA32_PAGE_SIZE; - pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, - &rb_link, &rb_parent); - if (pp) { - for (i = start_bit; i < end_bit; i++) - set_bit(i, &pp->bitmap); - /* - * Check: if this partial page has been set to a full page, - * then delete it. - */ - if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >= - PAGE_SIZE/IA32_PAGE_SIZE) { - __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp)); - } - return 0; - } - - /* - * MAP_FIXED may lead to overlapping mmap. - * In this case, the requested mmap area may already mmaped as a full - * page. So check vma before adding a new partial page. 
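__ia32_set_pp() above tracks which 4K IA-32 subpages of one larger ia64 page are mapped by setting bits in pp->bitmap, and drops the partial page once every bit is set. A toy model with an assumed 16K ia64 page (so four subpages per bitmap):

#include <stdio.h>

#define IA64_PAGE_SIZE	16384UL		/* assumed; the real value depends on config */
#define IA32_PAGE_SIZE	4096UL
#define SUBPAGES	(IA64_PAGE_SIZE / IA32_PAGE_SIZE)

int main(void)
{
	unsigned long bitmap = 0;
	unsigned int start = 0x1000, end = 0x3000;	/* map subpages 1 and 2 */
	unsigned int start_bit = (start % IA64_PAGE_SIZE) / IA32_PAGE_SIZE;
	unsigned int end_bit   = (end % IA64_PAGE_SIZE) / IA32_PAGE_SIZE;

	for (unsigned int i = start_bit; i < end_bit; i++)
		bitmap |= 1UL << i;

	printf("bitmap=0x%lx, fully mapped: %s\n", bitmap,
	       bitmap == (1UL << SUBPAGES) - 1 ? "yes" : "no");
	return 0;
}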
- */ - if (flags & MAP_FIXED) { - struct vm_area_struct *vma = find_vma(current->mm, pstart); - if (vma && vma->vm_start <= pstart) - return 0; - } - - /* new a ia64_partial_page */ - pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL); - if (!pp) - return -ENOMEM; - pp->base = pstart; - pp->bitmap = 0; - for (i=start_bit; ibitmap)); - pp->next = NULL; - __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent); - return 0; -} - -/* - * @start and @end should be IA32 page aligned, but don't need to be in the - * same IA64 page. Split @start and @end to make sure they're in the same IA64 - * page, then call __ia32_set_pp(). - */ -static void -ia32_set_pp(unsigned int start, unsigned int end, int flags) -{ - down_write(¤t->mm->mmap_sem); - if (flags & MAP_FIXED) { - /* - * MAP_FIXED may lead to overlapping mmap. When this happens, - * a series of complete IA64 pages results in deletion of - * old partial pages in that range. - */ - __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end)); - } - - if (end < PAGE_ALIGN(start)) { - __ia32_set_pp(start, end, flags); - } else { - if (offset_in_page(start)) - __ia32_set_pp(start, PAGE_ALIGN(start), flags); - if (offset_in_page(end)) - __ia32_set_pp(PAGE_START(end), end, flags); - } - up_write(¤t->mm->mmap_sem); -} - -/* - * Unset the range between @start and @end in bitmap. - * @start and @end should be IA32 page aligned and in the same IA64 page. - * After doing that, if the bitmap is 0, then free the page and return 1, - * else return 0; - * If not find the partial page in the list, then - * If the vma exists, then the full page is set to a partial page; - * Else return -ENOMEM. - */ -static int -__ia32_unset_pp(unsigned int start, unsigned int end) -{ - struct ia64_partial_page *pp, *prev; - struct rb_node ** rb_link, *rb_parent; - unsigned int pstart, start_bit, end_bit, i; - struct vm_area_struct *vma; - - pstart = PAGE_START(start); - start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; - end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; - if (end_bit == 0) - end_bit = PAGE_SIZE / IA32_PAGE_SIZE; - - pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, - &rb_link, &rb_parent); - if (pp) { - for (i = start_bit; i < end_bit; i++) - clear_bit(i, &pp->bitmap); - if (pp->bitmap == 0) { - __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp)); - return 1; - } - return 0; - } - - vma = find_vma(current->mm, pstart); - if (!vma || vma->vm_start > pstart) { - return -ENOMEM; - } - - /* new a ia64_partial_page */ - pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL); - if (!pp) - return -ENOMEM; - pp->base = pstart; - pp->bitmap = 0; - for (i = 0; i < start_bit; i++) - set_bit(i, &(pp->bitmap)); - for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++) - set_bit(i, &(pp->bitmap)); - pp->next = NULL; - __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent); - return 0; -} - -/* - * Delete pp between PAGE_ALIGN(start) and PAGE_START(end) by calling - * __ia32_delete_pp_range(). Unset possible partial pages by calling - * __ia32_unset_pp(). - * The returned value see __ia32_unset_pp(). 
- */ -static int -ia32_unset_pp(unsigned int *startp, unsigned int *endp) -{ - unsigned int start = *startp, end = *endp; - int ret = 0; - - down_write(¤t->mm->mmap_sem); - - __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end)); - - if (end < PAGE_ALIGN(start)) { - ret = __ia32_unset_pp(start, end); - if (ret == 1) { - *startp = PAGE_START(start); - *endp = PAGE_ALIGN(end); - } - if (ret == 0) { - /* to shortcut sys_munmap() in sys32_munmap() */ - *startp = PAGE_START(start); - *endp = PAGE_START(end); - } - } else { - if (offset_in_page(start)) { - ret = __ia32_unset_pp(start, PAGE_ALIGN(start)); - if (ret == 1) - *startp = PAGE_START(start); - if (ret == 0) - *startp = PAGE_ALIGN(start); - if (ret < 0) - goto out; - } - if (offset_in_page(end)) { - ret = __ia32_unset_pp(PAGE_START(end), end); - if (ret == 1) - *endp = PAGE_ALIGN(end); - if (ret == 0) - *endp = PAGE_START(end); - } - } - - out: - up_write(¤t->mm->mmap_sem); - return ret; -} - -/* - * Compare the range between @start and @end with bitmap in partial page. - * @start and @end should be IA32 page aligned and in the same IA64 page. - */ -static int -__ia32_compare_pp(unsigned int start, unsigned int end) -{ - struct ia64_partial_page *pp, *prev; - struct rb_node ** rb_link, *rb_parent; - unsigned int pstart, start_bit, end_bit, size; - unsigned int first_bit, next_zero_bit; /* the first range in bitmap */ - - pstart = PAGE_START(start); - - pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, - &rb_link, &rb_parent); - if (!pp) - return 1; - - start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; - end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; - size = sizeof(pp->bitmap) * 8; - first_bit = find_first_bit(&pp->bitmap, size); - next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit); - if ((start_bit < first_bit) || (end_bit > next_zero_bit)) { - /* exceeds the first range in bitmap */ - return -ENOMEM; - } else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) { - first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit); - if ((next_zero_bit < first_bit) && (first_bit < size)) - return 1; /* has next range */ - else - return 0; /* no next range */ - } else - return 1; -} - -/* - * @start and @end should be IA32 page aligned, but don't need to be in the - * same IA64 page. Split @start and @end to make sure they're in the same IA64 - * page, then call __ia32_compare_pp(). - * - * Take this as example: the range is the 1st and 2nd 4K page. - * Return 0 if they fit bitmap exactly, i.e. bitmap = 00000011; - * Return 1 if the range doesn't cover whole bitmap, e.g. bitmap = 00001111; - * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or - * bitmap = 00000101. 
- */ -static int -ia32_compare_pp(unsigned int *startp, unsigned int *endp) -{ - unsigned int start = *startp, end = *endp; - int retval = 0; - - down_write(¤t->mm->mmap_sem); - - if (end < PAGE_ALIGN(start)) { - retval = __ia32_compare_pp(start, end); - if (retval == 0) { - *startp = PAGE_START(start); - *endp = PAGE_ALIGN(end); - } - } else { - if (offset_in_page(start)) { - retval = __ia32_compare_pp(start, - PAGE_ALIGN(start)); - if (retval == 0) - *startp = PAGE_START(start); - if (retval < 0) - goto out; - } - if (offset_in_page(end)) { - retval = __ia32_compare_pp(PAGE_START(end), end); - if (retval == 0) - *endp = PAGE_ALIGN(end); - } - } - - out: - up_write(¤t->mm->mmap_sem); - return retval; -} - -static void -__ia32_drop_pp_list(struct ia64_partial_page_list *ppl) -{ - struct ia64_partial_page *pp = ppl->pp_head; - - while (pp) { - struct ia64_partial_page *next = pp->next; - kmem_cache_free(ia64_partial_page_cachep, pp); - pp = next; - } - - kfree(ppl); -} - -void -ia32_drop_ia64_partial_page_list(struct task_struct *task) -{ - struct ia64_partial_page_list* ppl = task->thread.ppl; - - if (ppl && atomic_dec_and_test(&ppl->pp_count)) - __ia32_drop_pp_list(ppl); -} - -/* - * Copy current->thread.ppl to ppl (already initialized). - */ -static int -__ia32_copy_pp_list(struct ia64_partial_page_list *ppl) -{ - struct ia64_partial_page *pp, *tmp, *prev; - struct rb_node **rb_link, *rb_parent; - - ppl->pp_head = NULL; - ppl->pp_hint = NULL; - ppl->ppl_rb = RB_ROOT; - rb_link = &ppl->ppl_rb.rb_node; - rb_parent = NULL; - prev = NULL; - - for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) { - tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL); - if (!tmp) - return -ENOMEM; - *tmp = *pp; - __ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent); - prev = tmp; - rb_link = &tmp->pp_rb.rb_right; - rb_parent = &tmp->pp_rb; - } - return 0; -} - -int -ia32_copy_ia64_partial_page_list(struct task_struct *p, - unsigned long clone_flags) -{ - int retval = 0; - - if (clone_flags & CLONE_VM) { - atomic_inc(¤t->thread.ppl->pp_count); - p->thread.ppl = current->thread.ppl; - } else { - p->thread.ppl = ia32_init_pp_list(); - if (!p->thread.ppl) - return -ENOMEM; - down_write(¤t->mm->mmap_sem); - { - retval = __ia32_copy_pp_list(p->thread.ppl); - } - up_write(¤t->mm->mmap_sem); - } - - return retval; -} - -static unsigned long -emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags, - loff_t off) -{ - unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0; - struct inode *inode; - loff_t poff; - - end = start + len; - pstart = PAGE_START(start); - pend = PAGE_ALIGN(end); - - if (flags & MAP_FIXED) { - ia32_set_pp((unsigned int)start, (unsigned int)end, flags); - if (start > pstart) { - if (flags & MAP_SHARED) - printk(KERN_INFO - "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n", - current->comm, task_pid_nr(current), start); - ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags, - off); - if (IS_ERR((void *) ret)) - return ret; - pstart += PAGE_SIZE; - if (pstart >= pend) - goto out; /* done */ - } - if (end < pend) { - if (flags & MAP_SHARED) - printk(KERN_INFO - "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n", - current->comm, task_pid_nr(current), end); - ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags, - (off + len) - offset_in_page(end)); - if (IS_ERR((void *) ret)) - return ret; - pend -= PAGE_SIZE; - if (pstart >= pend) - goto out; /* done */ - } - } else { - /* - * If a 
start address was specified, use it if the entire rounded out area
- * is available.
- */
- if (start && !pstart)
- fudge = 1; /* handle case of mapping to range (0,PAGE_SIZE) */
- tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
- if (tmp != pstart) {
- pstart = tmp;
- start = pstart + offset_in_page(off); /* make start congruent with off */
- end = start + len;
- pend = PAGE_ALIGN(end);
- }
- }
-
- poff = off + (pstart - start); /* note: (pstart - start) may be negative */
- is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);
-
- if ((flags & MAP_SHARED) && !is_congruent)
- printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
- "(addr=0x%lx,off=0x%llx)\n", current->comm, task_pid_nr(current), start, off);
-
- DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
- is_congruent ? "congruent" : "not congruent", poff);
-
- down_write(&current->mm->mmap_sem);
- {
- if (!(flags & MAP_ANONYMOUS) && is_congruent)
- ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
- else
- ret = do_mmap(NULL, pstart, pend - pstart,
- prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
- flags | MAP_FIXED | MAP_ANONYMOUS, 0);
- }
- up_write(&current->mm->mmap_sem);
-
- if (IS_ERR((void *) ret))
- return ret;
-
- if (!is_congruent) {
- /* read the file contents */
- inode = file->f_path.dentry->d_inode;
- if (!inode->i_fop || !file->f_op->read
- || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
- < 0))
- {
- sys_munmap(pstart, pend - pstart);
- return -EINVAL;
- }
- if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
- return -EINVAL;
- }
-
- if (!(flags & MAP_FIXED))
- ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
-out:
- return start;
-}
-
-#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
-
-static inline unsigned int
-get_prot32 (unsigned int prot)
-{
- if (prot & PROT_WRITE)
- /* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
- prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
- else if (prot & (PROT_READ | PROT_EXEC))
- /* on x86, there is no distinction between PROT_READ and PROT_EXEC */
- prot |= (PROT_READ | PROT_EXEC);
-
- return prot;
-}
-
-unsigned long
-ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
- loff_t offset)
-{
- DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
- file, addr, len, prot, flags, offset);
-
- if (file && (!file->f_op || !file->f_op->mmap))
- return -ENODEV;
-
- len = IA32_PAGE_ALIGN(len);
- if (len == 0)
- return addr;
-
- if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
- {
- if (flags & MAP_FIXED)
- return -ENOMEM;
- else
- return -EINVAL;
- }
-
- if (OFFSET4K(offset))
- return -EINVAL;
-
- prot = get_prot32(prot);
-
- if (flags & MAP_HUGETLB)
- return -ENOMEM;
-
-#if PAGE_SHIFT > IA32_PAGE_SHIFT
- mutex_lock(&ia32_mmap_mutex);
- {
- addr = emulate_mmap(file, addr, len, prot, flags, offset);
- }
- mutex_unlock(&ia32_mmap_mutex);
-#else
- down_write(&current->mm->mmap_sem);
- {
- addr = do_mmap(file, addr, len, prot, flags, offset);
- }
- up_write(&current->mm->mmap_sem);
-#endif
- DBG("ia32_do_mmap: returning 0x%lx\n", addr);
- return addr;
-}
-
-/*
- * Linux/i386 didn't use to be able to handle more than 4 system call parameters, so these
- * system calls used a memory block for parameter passing..
- */ - -struct mmap_arg_struct { - unsigned int addr; - unsigned int len; - unsigned int prot; - unsigned int flags; - unsigned int fd; - unsigned int offset; -}; - -asmlinkage long -sys32_mmap (struct mmap_arg_struct __user *arg) -{ - struct mmap_arg_struct a; - struct file *file = NULL; - unsigned long addr; - int flags; - - if (copy_from_user(&a, arg, sizeof(a))) - return -EFAULT; - - if (OFFSET4K(a.offset)) - return -EINVAL; - - flags = a.flags; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(a.fd); - if (!file) - return -EBADF; - } - - addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset); - - if (file) - fput(file); - return addr; -} - -asmlinkage long -sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags, - unsigned int fd, unsigned int pgoff) -{ - struct file *file = NULL; - unsigned long retval; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - return -EBADF; - } - - retval = ia32_do_mmap(file, addr, len, prot, flags, - (unsigned long) pgoff << IA32_PAGE_SHIFT); - - if (file) - fput(file); - return retval; -} - -asmlinkage long -sys32_munmap (unsigned int start, unsigned int len) -{ - unsigned int end = start + len; - long ret; - -#if PAGE_SHIFT <= IA32_PAGE_SHIFT - ret = sys_munmap(start, end - start); -#else - if (OFFSET4K(start)) - return -EINVAL; - - end = IA32_PAGE_ALIGN(end); - if (start >= end) - return -EINVAL; - - ret = ia32_unset_pp(&start, &end); - if (ret < 0) - return ret; - - if (start >= end) - return 0; - - mutex_lock(&ia32_mmap_mutex); - ret = sys_munmap(start, end - start); - mutex_unlock(&ia32_mmap_mutex); -#endif - return ret; -} - -#if PAGE_SHIFT > IA32_PAGE_SHIFT - -/* - * When mprotect()ing a partial page, we set the permission to the union of the old - * settings and the new settings. In other words, it's only possible to make access to a - * partial page less restrictive. - */ -static long -mprotect_subpage (unsigned long address, int new_prot) -{ - int old_prot; - struct vm_area_struct *vma; - - if (new_prot == PROT_NONE) - return 0; /* optimize case where nothing changes... */ - vma = find_vma(current->mm, address); - old_prot = get_page_prot(vma, address); - return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot); -} - -#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */ - -asmlinkage long -sys32_mprotect (unsigned int start, unsigned int len, int prot) -{ - unsigned int end = start + len; -#if PAGE_SHIFT > IA32_PAGE_SHIFT - long retval = 0; -#endif - - prot = get_prot32(prot); - -#if PAGE_SHIFT <= IA32_PAGE_SHIFT - return sys_mprotect(start, end - start, prot); -#else - if (OFFSET4K(start)) - return -EINVAL; - - end = IA32_PAGE_ALIGN(end); - if (end < start) - return -EINVAL; - - retval = ia32_compare_pp(&start, &end); - - if (retval < 0) - return retval; - - mutex_lock(&ia32_mmap_mutex); - { - if (offset_in_page(start)) { - /* start address is 4KB aligned but not page aligned. */ - retval = mprotect_subpage(PAGE_START(start), prot); - if (retval < 0) - goto out; - - start = PAGE_ALIGN(start); - if (start >= end) - goto out; /* retval is already zero... */ - } - - if (offset_in_page(end)) { - /* end address is 4KB aligned but not page aligned. 
*/ - retval = mprotect_subpage(PAGE_START(end), prot); - if (retval < 0) - goto out; - - end = PAGE_START(end); - } - retval = sys_mprotect(start, end - start, prot); - } - out: - mutex_unlock(&ia32_mmap_mutex); - return retval; -#endif -} - -asmlinkage long -sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len, - unsigned int flags, unsigned int new_addr) -{ - long ret; - -#if PAGE_SHIFT <= IA32_PAGE_SHIFT - ret = sys_mremap(addr, old_len, new_len, flags, new_addr); -#else - unsigned int old_end, new_end; - - if (OFFSET4K(addr)) - return -EINVAL; - - old_len = IA32_PAGE_ALIGN(old_len); - new_len = IA32_PAGE_ALIGN(new_len); - old_end = addr + old_len; - new_end = addr + new_len; - - if (!new_len) - return -EINVAL; - - if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr))) - return -EINVAL; - - if (old_len >= new_len) { - ret = sys32_munmap(addr + new_len, old_len - new_len); - if (ret && old_len != new_len) - return ret; - ret = addr; - if (!(flags & MREMAP_FIXED) || (new_addr == addr)) - return ret; - old_len = new_len; - } - - addr = PAGE_START(addr); - old_len = PAGE_ALIGN(old_end) - addr; - new_len = PAGE_ALIGN(new_end) - addr; - - mutex_lock(&ia32_mmap_mutex); - ret = sys_mremap(addr, old_len, new_len, flags, new_addr); - mutex_unlock(&ia32_mmap_mutex); - - if ((ret >= 0) && (old_len < new_len)) { - /* mremap expanded successfully */ - ia32_set_pp(old_end, new_end, flags); - } -#endif - return ret; -} - -asmlinkage unsigned long -sys32_alarm (unsigned int seconds) -{ - return alarm_setitimer(seconds); -} - -struct sel_arg_struct { - unsigned int n; - unsigned int inp; - unsigned int outp; - unsigned int exp; - unsigned int tvp; -}; - -asmlinkage long -sys32_old_select (struct sel_arg_struct __user *arg) -{ - struct sel_arg_struct a; - - if (copy_from_user(&a, arg, sizeof(a))) - return -EFAULT; - return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp), - compat_ptr(a.exp), compat_ptr(a.tvp)); -} - -#define SEMOP 1 -#define SEMGET 2 -#define SEMCTL 3 -#define SEMTIMEDOP 4 -#define MSGSND 11 -#define MSGRCV 12 -#define MSGGET 13 -#define MSGCTL 14 -#define SHMAT 21 -#define SHMDT 22 -#define SHMGET 23 -#define SHMCTL 24 - -asmlinkage long -sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth) -{ - int version; - - version = call >> 16; /* hack for backward compatibility */ - call &= 0xffff; - - switch (call) { - case SEMTIMEDOP: - if (fifth) - return compat_sys_semtimedop(first, compat_ptr(ptr), - second, compat_ptr(fifth)); - /* else fall through for normal semop() */ - case SEMOP: - /* struct sembuf is the same on 32 and 64bit :)) */ - return sys_semtimedop(first, compat_ptr(ptr), second, - NULL); - case SEMGET: - return sys_semget(first, second, third); - case SEMCTL: - return compat_sys_semctl(first, second, third, compat_ptr(ptr)); - - case MSGSND: - return compat_sys_msgsnd(first, second, third, compat_ptr(ptr)); - case MSGRCV: - return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr)); - case MSGGET: - return sys_msgget((key_t) first, second); - case MSGCTL: - return compat_sys_msgctl(first, second, compat_ptr(ptr)); - - case SHMAT: - return compat_sys_shmat(first, second, third, version, compat_ptr(ptr)); - break; - case SHMDT: - return sys_shmdt(compat_ptr(ptr)); - case SHMGET: - return sys_shmget(first, (unsigned)second, third); - case SHMCTL: - return compat_sys_shmctl(first, second, compat_ptr(ptr)); - - default: - return -ENOSYS; - } - return -EINVAL; -} - -asmlinkage long -compat_sys_wait4 
(compat_pid_t pid, compat_uint_t * stat_addr, int options, - struct compat_rusage *ru); - -asmlinkage long -sys32_waitpid (int pid, unsigned int *stat_addr, int options) -{ - return compat_sys_wait4(pid, stat_addr, options, NULL); -} - -/* - * The order in which registers are stored in the ptrace regs structure - */ -#define PT_EBX 0 -#define PT_ECX 1 -#define PT_EDX 2 -#define PT_ESI 3 -#define PT_EDI 4 -#define PT_EBP 5 -#define PT_EAX 6 -#define PT_DS 7 -#define PT_ES 8 -#define PT_FS 9 -#define PT_GS 10 -#define PT_ORIG_EAX 11 -#define PT_EIP 12 -#define PT_CS 13 -#define PT_EFL 14 -#define PT_UESP 15 -#define PT_SS 16 - -static unsigned int -getreg (struct task_struct *child, int regno) -{ - struct pt_regs *child_regs; - - child_regs = task_pt_regs(child); - switch (regno / sizeof(int)) { - case PT_EBX: return child_regs->r11; - case PT_ECX: return child_regs->r9; - case PT_EDX: return child_regs->r10; - case PT_ESI: return child_regs->r14; - case PT_EDI: return child_regs->r15; - case PT_EBP: return child_regs->r13; - case PT_EAX: return child_regs->r8; - case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */ - case PT_EIP: return child_regs->cr_iip; - case PT_UESP: return child_regs->r12; - case PT_EFL: return child->thread.eflag; - case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS: - return __USER_DS; - case PT_CS: return __USER_CS; - default: - printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno); - break; - } - return 0; -} - -static void -putreg (struct task_struct *child, int regno, unsigned int value) -{ - struct pt_regs *child_regs; - - child_regs = task_pt_regs(child); - switch (regno / sizeof(int)) { - case PT_EBX: child_regs->r11 = value; break; - case PT_ECX: child_regs->r9 = value; break; - case PT_EDX: child_regs->r10 = value; break; - case PT_ESI: child_regs->r14 = value; break; - case PT_EDI: child_regs->r15 = value; break; - case PT_EBP: child_regs->r13 = value; break; - case PT_EAX: child_regs->r8 = value; break; - case PT_ORIG_EAX: child_regs->r1 = value; break; - case PT_EIP: child_regs->cr_iip = value; break; - case PT_UESP: child_regs->r12 = value; break; - case PT_EFL: child->thread.eflag = value; break; - case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS: - if (value != __USER_DS) - printk(KERN_ERR - "ia32.putreg: attempt to set invalid segment register %d = %x\n", - regno, value); - break; - case PT_CS: - if (value != __USER_CS) - printk(KERN_ERR - "ia32.putreg: attempt to set invalid segment register %d = %x\n", - regno, value); - break; - default: - printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno); - break; - } -} - -static void -put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp, - struct switch_stack *swp, int tos) -{ - struct _fpreg_ia32 *f; - char buf[32]; - - f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15); - if ((regno += tos) >= 8) - regno -= 8; - switch (regno) { - case 0: - ia64f2ia32f(f, &ptp->f8); - break; - case 1: - ia64f2ia32f(f, &ptp->f9); - break; - case 2: - ia64f2ia32f(f, &ptp->f10); - break; - case 3: - ia64f2ia32f(f, &ptp->f11); - break; - case 4: - case 5: - case 6: - case 7: - ia64f2ia32f(f, &swp->f12 + (regno - 4)); - break; - } - copy_to_user(reg, f, sizeof(*reg)); -} - -static void -get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp, - struct switch_stack *swp, int tos) -{ - - if ((regno += tos) >= 8) - regno -= 8; - switch (regno) { - case 0: - copy_from_user(&ptp->f8, reg, sizeof(*reg)); - break; - case 1: - 
copy_from_user(&ptp->f9, reg, sizeof(*reg)); - break; - case 2: - copy_from_user(&ptp->f10, reg, sizeof(*reg)); - break; - case 3: - copy_from_user(&ptp->f11, reg, sizeof(*reg)); - break; - case 4: - case 5: - case 6: - case 7: - copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg)); - break; - } - return; -} - -int -save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save) -{ - struct switch_stack *swp; - struct pt_regs *ptp; - int i, tos; - - if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) - return -EFAULT; - - __put_user(tsk->thread.fcr & 0xffff, &save->cwd); - __put_user(tsk->thread.fsr & 0xffff, &save->swd); - __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd); - __put_user(tsk->thread.fir, &save->fip); - __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs); - __put_user(tsk->thread.fdr, &save->foo); - __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos); - - /* - * Stack frames start with 16-bytes of temp space - */ - swp = (struct switch_stack *)(tsk->thread.ksp + 16); - ptp = task_pt_regs(tsk); - tos = (tsk->thread.fsr >> 11) & 7; - for (i = 0; i < 8; i++) - put_fpreg(i, &save->st_space[i], ptp, swp, tos); - return 0; -} - -static int -restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save) -{ - struct switch_stack *swp; - struct pt_regs *ptp; - int i, tos; - unsigned int fsrlo, fsrhi, num32; - - if (!access_ok(VERIFY_READ, save, sizeof(*save))) - return(-EFAULT); - - __get_user(num32, (unsigned int __user *)&save->cwd); - tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f); - __get_user(fsrlo, (unsigned int __user *)&save->swd); - __get_user(fsrhi, (unsigned int __user *)&save->twd); - num32 = (fsrhi << 16) | fsrlo; - tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32; - __get_user(num32, (unsigned int __user *)&save->fip); - tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32; - __get_user(num32, (unsigned int __user *)&save->foo); - tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32; - - /* - * Stack frames start with 16-bytes of temp space - */ - swp = (struct switch_stack *)(tsk->thread.ksp + 16); - ptp = task_pt_regs(tsk); - tos = (tsk->thread.fsr >> 11) & 7; - for (i = 0; i < 8; i++) - get_fpreg(i, &save->st_space[i], ptp, swp, tos); - return 0; -} - -int -save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save) -{ - struct switch_stack *swp; - struct pt_regs *ptp; - int i, tos; - unsigned long mxcsr=0; - unsigned long num128[2]; - - if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) - return -EFAULT; - - __put_user(tsk->thread.fcr & 0xffff, &save->cwd); - __put_user(tsk->thread.fsr & 0xffff, &save->swd); - __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd); - __put_user(tsk->thread.fir, &save->fip); - __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs); - __put_user(tsk->thread.fdr, &save->foo); - __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos); - - /* - * Stack frames start with 16-bytes of temp space - */ - swp = (struct switch_stack *)(tsk->thread.ksp + 16); - ptp = task_pt_regs(tsk); - tos = (tsk->thread.fsr >> 11) & 7; - for (i = 0; i < 8; i++) - put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos); - - mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f); - __put_user(mxcsr & 0xffff, &save->mxcsr); - for (i = 0; i < 8; i++) { - memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long)); - memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long)); - 
copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32)); - } - return 0; -} - -static int -restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save) -{ - struct switch_stack *swp; - struct pt_regs *ptp; - int i, tos; - unsigned int fsrlo, fsrhi, num32; - int mxcsr; - unsigned long num64; - unsigned long num128[2]; - - if (!access_ok(VERIFY_READ, save, sizeof(*save))) - return(-EFAULT); - - __get_user(num32, (unsigned int __user *)&save->cwd); - tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f); - __get_user(fsrlo, (unsigned int __user *)&save->swd); - __get_user(fsrhi, (unsigned int __user *)&save->twd); - num32 = (fsrhi << 16) | fsrlo; - tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32; - __get_user(num32, (unsigned int __user *)&save->fip); - tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32; - __get_user(num32, (unsigned int __user *)&save->foo); - tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32; - - /* - * Stack frames start with 16-bytes of temp space - */ - swp = (struct switch_stack *)(tsk->thread.ksp + 16); - ptp = task_pt_regs(tsk); - tos = (tsk->thread.fsr >> 11) & 7; - for (i = 0; i < 8; i++) - get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos); - - __get_user(mxcsr, (unsigned int __user *)&save->mxcsr); - num64 = mxcsr & 0xff10; - tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32); - num64 = mxcsr & 0x3f; - tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32); - - for (i = 0; i < 8; i++) { - copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32)); - memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long)); - memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long)); - } - return 0; -} - -long compat_arch_ptrace(struct task_struct *child, compat_long_t request, - compat_ulong_t caddr, compat_ulong_t cdata) -{ - unsigned long addr = caddr; - unsigned long data = cdata; - unsigned int tmp; - long i, ret; - - switch (request) { - case PTRACE_PEEKUSR: /* read word at addr in USER area */ - ret = -EIO; - if ((addr & 3) || addr > 17*sizeof(int)) - break; - - tmp = getreg(child, addr); - if (!put_user(tmp, (unsigned int __user *) compat_ptr(data))) - ret = 0; - break; - - case PTRACE_POKEUSR: /* write word at addr in USER area */ - ret = -EIO; - if ((addr & 3) || addr > 17*sizeof(int)) - break; - - putreg(child, addr, data); - ret = 0; - break; - - case IA32_PTRACE_GETREGS: - if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) { - ret = -EIO; - break; - } - for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) { - put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data)); - data += sizeof(int); - } - ret = 0; - break; - - case IA32_PTRACE_SETREGS: - if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) { - ret = -EIO; - break; - } - for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) { - get_user(tmp, (unsigned int __user *) compat_ptr(data)); - putreg(child, i, tmp); - data += sizeof(int); - } - ret = 0; - break; - - case IA32_PTRACE_GETFPREGS: - ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *) - compat_ptr(data)); - break; - - case IA32_PTRACE_GETFPXREGS: - ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *) - compat_ptr(data)); - break; - - case IA32_PTRACE_SETFPREGS: - ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *) - compat_ptr(data)); - break; - - case 
IA32_PTRACE_SETFPXREGS: - ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *) - compat_ptr(data)); - break; - - default: - return compat_ptrace_request(child, request, caddr, cdata); - } - return ret; -} - -typedef struct { - unsigned int ss_sp; - unsigned int ss_flags; - unsigned int ss_size; -} ia32_stack_t; - -asmlinkage long -sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32, - long arg2, long arg3, long arg4, long arg5, long arg6, - long arg7, struct pt_regs pt) -{ - stack_t uss, uoss; - ia32_stack_t buf32; - int ret; - mm_segment_t old_fs = get_fs(); - - if (uss32) { - if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t))) - return -EFAULT; - uss.ss_sp = (void __user *) (long) buf32.ss_sp; - uss.ss_flags = buf32.ss_flags; - /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the - check and set it to the user requested value later */ - if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) { - ret = -ENOMEM; - goto out; - } - uss.ss_size = MINSIGSTKSZ; - } - set_fs(KERNEL_DS); - ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL, - (stack_t __user *) &uoss, pt.r12); - current->sas_ss_size = buf32.ss_size; - set_fs(old_fs); -out: - if (ret < 0) - return(ret); - if (uoss32) { - buf32.ss_sp = (long __user) uoss.ss_sp; - buf32.ss_flags = uoss.ss_flags; - buf32.ss_size = uoss.ss_size; - if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t))) - return -EFAULT; - } - return ret; -} - -asmlinkage int -sys32_msync (unsigned int start, unsigned int len, int flags) -{ - unsigned int addr; - - if (OFFSET4K(start)) - return -EINVAL; - addr = PAGE_START(start); - return sys_msync(addr, len + (start - addr), flags); -} - -asmlinkage long -sys32_newuname (struct new_utsname __user *name) -{ - int ret = sys_newuname(name); - - if (!ret) - if (copy_to_user(name->machine, "i686\0\0\0", 8)) - ret = -EFAULT; - return ret; -} - -asmlinkage long -sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid) -{ - uid_t a, b, c; - int ret; - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c); - set_fs(old_fs); - - if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid)) - return -EFAULT; - return ret; -} - -asmlinkage long -sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid) -{ - gid_t a, b, c; - int ret; - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c); - set_fs(old_fs); - - if (ret) - return ret; - - return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid); -} - -asmlinkage long -sys32_lseek (unsigned int fd, int offset, unsigned int whence) -{ - /* Sign-extension of "offset" is important here... 
*/ - return sys_lseek(fd, offset, whence); -} - -static int -groups16_to_user(short __user *grouplist, struct group_info *group_info) -{ - int i; - short group; - - for (i = 0; i < group_info->ngroups; i++) { - group = (short)GROUP_AT(group_info, i); - if (put_user(group, grouplist+i)) - return -EFAULT; - } - - return 0; -} - -static int -groups16_from_user(struct group_info *group_info, short __user *grouplist) -{ - int i; - short group; - - for (i = 0; i < group_info->ngroups; i++) { - if (get_user(group, grouplist+i)) - return -EFAULT; - GROUP_AT(group_info, i) = (gid_t)group; - } - - return 0; -} - -asmlinkage long -sys32_getgroups16 (int gidsetsize, short __user *grouplist) -{ - const struct cred *cred = current_cred(); - int i; - - if (gidsetsize < 0) - return -EINVAL; - - i = cred->group_info->ngroups; - if (gidsetsize) { - if (i > gidsetsize) { - i = -EINVAL; - goto out; - } - if (groups16_to_user(grouplist, cred->group_info)) { - i = -EFAULT; - goto out; - } - } -out: - return i; -} - -asmlinkage long -sys32_setgroups16 (int gidsetsize, short __user *grouplist) -{ - struct group_info *group_info; - int retval; - - if (!capable(CAP_SETGID)) - return -EPERM; - if ((unsigned)gidsetsize > NGROUPS_MAX) - return -EINVAL; - - group_info = groups_alloc(gidsetsize); - if (!group_info) - return -ENOMEM; - retval = groups16_from_user(group_info, grouplist); - if (retval) { - put_group_info(group_info); - return retval; - } - - retval = set_current_groups(group_info); - put_group_info(group_info); - - return retval; -} - -asmlinkage long -sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi) -{ - return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo); -} - -asmlinkage long -sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi) -{ - return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo); -} - -static int -putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf) -{ - int err; - u64 hdev; - - if (clear_user(ubuf, sizeof(*ubuf))) - return -EFAULT; - - hdev = huge_encode_dev(kbuf->dev); - err = __put_user(hdev, (u32 __user*)&ubuf->st_dev); - err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1); - err |= __put_user(kbuf->ino, &ubuf->__st_ino); - err |= __put_user(kbuf->ino, &ubuf->st_ino_lo); - err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi); - err |= __put_user(kbuf->mode, &ubuf->st_mode); - err |= __put_user(kbuf->nlink, &ubuf->st_nlink); - err |= __put_user(kbuf->uid, &ubuf->st_uid); - err |= __put_user(kbuf->gid, &ubuf->st_gid); - hdev = huge_encode_dev(kbuf->rdev); - err = __put_user(hdev, (u32 __user*)&ubuf->st_rdev); - err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1); - err |= __put_user(kbuf->size, &ubuf->st_size_lo); - err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi); - err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime); - err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec); - err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime); - err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec); - err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime); - err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec); - err |= __put_user(kbuf->blksize, &ubuf->st_blksize); - err |= __put_user(kbuf->blocks, &ubuf->st_blocks); - return err; -} - -asmlinkage long -sys32_stat64 (char __user *filename, struct stat64 __user *statbuf) -{ - struct kstat s; - long ret = vfs_stat(filename, &s); - if (!ret) - ret = putstat64(statbuf, &s); - return ret; -} - -asmlinkage long 
-sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf) -{ - struct kstat s; - long ret = vfs_lstat(filename, &s); - if (!ret) - ret = putstat64(statbuf, &s); - return ret; -} - -asmlinkage long -sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf) -{ - struct kstat s; - long ret = vfs_fstat(fd, &s); - if (!ret) - ret = putstat64(statbuf, &s); - return ret; -} - -asmlinkage long -sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval) -{ - mm_segment_t old_fs = get_fs(); - struct timespec t; - long ret; - - set_fs(KERNEL_DS); - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); - set_fs(old_fs); - if (put_compat_timespec(&t, interval)) - return -EFAULT; - return ret; -} - -asmlinkage long -sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi) -{ - return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo); -} - -asmlinkage long -sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi) -{ - return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo); -} - -asmlinkage long -sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count) -{ - mm_segment_t old_fs = get_fs(); - long ret; - off_t of; - - if (offset && get_user(of, offset)) - return -EFAULT; - - set_fs(KERNEL_DS); - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count); - set_fs(old_fs); - - if (offset && put_user(of, offset)) - return -EFAULT; - - return ret; -} - -asmlinkage long -sys32_personality (unsigned int personality) -{ - long ret; - - if (current->personality == PER_LINUX32 && personality == PER_LINUX) - personality = PER_LINUX32; - ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; - return ret; -} - -asmlinkage unsigned long -sys32_brk (unsigned int brk) -{ - unsigned long ret, obrk; - struct mm_struct *mm = current->mm; - - obrk = mm->brk; - ret = sys_brk(brk); - if (ret < obrk) - clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret); - return ret; -} - -/* Structure for ia32 emulation on ia64 */ -struct epoll_event32 -{ - u32 events; - u32 data[2]; -}; - -asmlinkage long -sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event) -{ - mm_segment_t old_fs = get_fs(); - struct epoll_event event64; - int error; - u32 data_halfword; - - if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32))) - return -EFAULT; - - __get_user(event64.events, &event->events); - __get_user(data_halfword, &event->data[0]); - event64.data = data_halfword; - __get_user(data_halfword, &event->data[1]); - event64.data |= (u64)data_halfword << 32; - - set_fs(KERNEL_DS); - error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64); - set_fs(old_fs); - - return error; -} - -asmlinkage long -sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents, - int timeout) -{ - struct epoll_event *events64 = NULL; - mm_segment_t old_fs = get_fs(); - int numevents, size; - int evt_idx; - int do_free_pages = 0; - - if (maxevents <= 0) { - return -EINVAL; - } - - /* Verify that the area passed by the user is writeable */ - if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32))) - return -EFAULT; - - /* - * Allocate space for the intermediate copy. If the space needed - * is large enough to cause kmalloc to fail, then try again with - * __get_free_pages. 
- */ - size = maxevents * sizeof(struct epoll_event); - events64 = kmalloc(size, GFP_KERNEL); - if (events64 == NULL) { - events64 = (struct epoll_event *) - __get_free_pages(GFP_KERNEL, get_order(size)); - if (events64 == NULL) - return -ENOMEM; - do_free_pages = 1; - } - - /* Do the system call */ - set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem*/ - numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64, - maxevents, timeout); - set_fs(old_fs); - - /* Don't modify userspace memory if we're returning an error */ - if (numevents > 0) { - /* Translate the 64-bit structures back into the 32-bit - structures */ - for (evt_idx = 0; evt_idx < numevents; evt_idx++) { - __put_user(events64[evt_idx].events, - &events[evt_idx].events); - __put_user((u32)events64[evt_idx].data, - &events[evt_idx].data[0]); - __put_user((u32)(events64[evt_idx].data >> 32), - &events[evt_idx].data[1]); - } - } - - if (do_free_pages) - free_pages((unsigned long) events64, get_order(size)); - else - kfree(events64); - return numevents; -} - -/* - * Get a yet unused TLS descriptor index. - */ -static int -get_free_idx (void) -{ - struct thread_struct *t = ¤t->thread; - int idx; - - for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) - if (desc_empty(t->tls_array + idx)) - return idx + GDT_ENTRY_TLS_MIN; - return -ESRCH; -} - -static void set_tls_desc(struct task_struct *p, int idx, - const struct ia32_user_desc *info, int n) -{ - struct thread_struct *t = &p->thread; - struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN]; - int cpu; - - /* - * We must not get preempted while modifying the TLS. - */ - cpu = get_cpu(); - - while (n-- > 0) { - if (LDT_empty(info)) { - desc->a = 0; - desc->b = 0; - } else { - desc->a = LDT_entry_a(info); - desc->b = LDT_entry_b(info); - } - - ++info; - ++desc; - } - - if (t == ¤t->thread) - load_TLS(t, cpu); - - put_cpu(); -} - -/* - * Set a given TLS descriptor: - */ -asmlinkage int -sys32_set_thread_area (struct ia32_user_desc __user *u_info) -{ - struct ia32_user_desc info; - int idx; - - if (copy_from_user(&info, u_info, sizeof(info))) - return -EFAULT; - idx = info.entry_number; - - /* - * index -1 means the kernel should try to find and allocate an empty descriptor: - */ - if (idx == -1) { - idx = get_free_idx(); - if (idx < 0) - return idx; - if (put_user(idx, &u_info->entry_number)) - return -EFAULT; - } - - if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) - return -EINVAL; - - set_tls_desc(current, idx, &info, 1); - return 0; -} - -/* - * Get the current Thread-Local Storage area: - */ - -#define GET_BASE(desc) ( \ - (((desc)->a >> 16) & 0x0000ffff) | \ - (((desc)->b << 16) & 0x00ff0000) | \ - ( (desc)->b & 0xff000000) ) - -#define GET_LIMIT(desc) ( \ - ((desc)->a & 0x0ffff) | \ - ((desc)->b & 0xf0000) ) - -#define GET_32BIT(desc) (((desc)->b >> 22) & 1) -#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3) -#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1) -#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1) -#define GET_PRESENT(desc) (((desc)->b >> 15) & 1) -#define GET_USEABLE(desc) (((desc)->b >> 20) & 1) - -static void fill_user_desc(struct ia32_user_desc *info, int idx, - const struct desc_struct *desc) -{ - info->entry_number = idx; - info->base_addr = GET_BASE(desc); - info->limit = GET_LIMIT(desc); - info->seg_32bit = GET_32BIT(desc); - info->contents = GET_CONTENTS(desc); - info->read_exec_only = !GET_WRITABLE(desc); - info->limit_in_pages = GET_LIMIT_PAGES(desc); - info->seg_not_present = !GET_PRESENT(desc); - 
info->useable = GET_USEABLE(desc); -} - -asmlinkage int -sys32_get_thread_area (struct ia32_user_desc __user *u_info) -{ - struct ia32_user_desc info; - struct desc_struct *desc; - int idx; - - if (get_user(idx, &u_info->entry_number)) - return -EFAULT; - if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) - return -EINVAL; - - desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; - fill_user_desc(&info, idx, desc); - - if (copy_to_user(u_info, &info, sizeof(info))) - return -EFAULT; - return 0; -} - -struct regset_get { - void *kbuf; - void __user *ubuf; -}; - -struct regset_set { - const void *kbuf; - const void __user *ubuf; -}; - -struct regset_getset { - struct task_struct *target; - const struct user_regset *regset; - union { - struct regset_get get; - struct regset_set set; - } u; - unsigned int pos; - unsigned int count; - int ret; -}; - -static void getfpreg(struct task_struct *task, int regno, int *val) -{ - switch (regno / sizeof(int)) { - case 0: - *val = task->thread.fcr & 0xffff; - break; - case 1: - *val = task->thread.fsr & 0xffff; - break; - case 2: - *val = (task->thread.fsr>>16) & 0xffff; - break; - case 3: - *val = task->thread.fir; - break; - case 4: - *val = (task->thread.fir>>32) & 0xffff; - break; - case 5: - *val = task->thread.fdr; - break; - case 6: - *val = (task->thread.fdr >> 32) & 0xffff; - break; - } -} - -static void setfpreg(struct task_struct *task, int regno, int val) -{ - switch (regno / sizeof(int)) { - case 0: - task->thread.fcr = (task->thread.fcr & (~0x1f3f)) - | (val & 0x1f3f); - break; - case 1: - task->thread.fsr = (task->thread.fsr & (~0xffff)) | val; - break; - case 2: - task->thread.fsr = (task->thread.fsr & (~0xffff0000)) - | (val << 16); - break; - case 3: - task->thread.fir = (task->thread.fir & (~0xffffffff)) | val; - break; - case 5: - task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | val; - break; - } -} - -static void access_fpreg_ia32(int regno, void *reg, - struct pt_regs *pt, struct switch_stack *sw, - int tos, int write) -{ - void *f; - - if ((regno += tos) >= 8) - regno -= 8; - if (regno < 4) - f = &pt->f8 + regno; - else if (regno <= 7) - f = &sw->f12 + (regno - 4); - else { - printk(KERN_ERR "regno must be less than 7 \n"); - return; - } - - if (write) - memcpy(f, reg, sizeof(struct _fpreg_ia32)); - else - memcpy(reg, f, sizeof(struct _fpreg_ia32)); -} - -static void do_fpregs_get(struct unw_frame_info *info, void *arg) -{ - struct regset_getset *dst = arg; - struct task_struct *task = dst->target; - struct pt_regs *pt; - int start, end, tos; - char buf[80]; - - if (dst->count == 0 || unw_unwind_to_user(info) < 0) - return; - if (dst->pos < 7 * sizeof(int)) { - end = min((dst->pos + dst->count), - (unsigned int)(7 * sizeof(int))); - for (start = dst->pos; start < end; start += sizeof(int)) - getfpreg(task, start, (int *)(buf + start)); - dst->ret = user_regset_copyout(&dst->pos, &dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, buf, - 0, 7 * sizeof(int)); - if (dst->ret || dst->count == 0) - return; - } - if (dst->pos < sizeof(struct ia32_user_i387_struct)) { - pt = task_pt_regs(task); - tos = (task->thread.fsr >> 11) & 7; - end = min(dst->pos + dst->count, - (unsigned int)(sizeof(struct ia32_user_i387_struct))); - start = (dst->pos - 7 * sizeof(int)) / - sizeof(struct _fpreg_ia32); - end = (end - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32); - for (; start < end; start++) - access_fpreg_ia32(start, - (struct _fpreg_ia32 *)buf + start, - pt, info->sw, tos, 0); - dst->ret = user_regset_copyout(&dst->pos, 
&dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, - buf, 7 * sizeof(int), - sizeof(struct ia32_user_i387_struct)); - if (dst->ret || dst->count == 0) - return; - } -} - -static void do_fpregs_set(struct unw_frame_info *info, void *arg) -{ - struct regset_getset *dst = arg; - struct task_struct *task = dst->target; - struct pt_regs *pt; - char buf[80]; - int end, start, tos; - - if (dst->count == 0 || unw_unwind_to_user(info) < 0) - return; - - if (dst->pos < 7 * sizeof(int)) { - start = dst->pos; - dst->ret = user_regset_copyin(&dst->pos, &dst->count, - &dst->u.set.kbuf, &dst->u.set.ubuf, buf, - 0, 7 * sizeof(int)); - if (dst->ret) - return; - for (; start < dst->pos; start += sizeof(int)) - setfpreg(task, start, *((int *)(buf + start))); - if (dst->count == 0) - return; - } - if (dst->pos < sizeof(struct ia32_user_i387_struct)) { - start = (dst->pos - 7 * sizeof(int)) / - sizeof(struct _fpreg_ia32); - dst->ret = user_regset_copyin(&dst->pos, &dst->count, - &dst->u.set.kbuf, &dst->u.set.ubuf, - buf, 7 * sizeof(int), - sizeof(struct ia32_user_i387_struct)); - if (dst->ret) - return; - pt = task_pt_regs(task); - tos = (task->thread.fsr >> 11) & 7; - end = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32); - for (; start < end; start++) - access_fpreg_ia32(start, - (struct _fpreg_ia32 *)buf + start, - pt, info->sw, tos, 1); - if (dst->count == 0) - return; - } -} - -#define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member))) -static void getfpxreg(struct task_struct *task, int start, int end, char *buf) -{ - int min_val; - - min_val = min(end, OFFSET(fop)); - while (start < min_val) { - if (start == OFFSET(cwd)) - *((short *)buf) = task->thread.fcr & 0xffff; - else if (start == OFFSET(swd)) - *((short *)buf) = task->thread.fsr & 0xffff; - else if (start == OFFSET(twd)) - *((short *)buf) = (task->thread.fsr>>16) & 0xffff; - buf += 2; - start += 2; - } - /* skip fop element */ - if (start == OFFSET(fop)) { - start += 2; - buf += 2; - } - while (start < end) { - if (start == OFFSET(fip)) - *((int *)buf) = task->thread.fir; - else if (start == OFFSET(fcs)) - *((int *)buf) = (task->thread.fir>>32) & 0xffff; - else if (start == OFFSET(foo)) - *((int *)buf) = task->thread.fdr; - else if (start == OFFSET(fos)) - *((int *)buf) = (task->thread.fdr>>32) & 0xffff; - else if (start == OFFSET(mxcsr)) - *((int *)buf) = ((task->thread.fcr>>32) & 0xff80) - | ((task->thread.fsr>>32) & 0x3f); - buf += 4; - start += 4; - } -} - -static void setfpxreg(struct task_struct *task, int start, int end, char *buf) -{ - int min_val, num32; - short num; - unsigned long num64; - - min_val = min(end, OFFSET(fop)); - while (start < min_val) { - num = *((short *)buf); - if (start == OFFSET(cwd)) { - task->thread.fcr = (task->thread.fcr & (~0x1f3f)) - | (num & 0x1f3f); - } else if (start == OFFSET(swd)) { - task->thread.fsr = (task->thread.fsr & (~0xffff)) | num; - } else if (start == OFFSET(twd)) { - task->thread.fsr = (task->thread.fsr & (~0xffff0000)) - | (((int)num) << 16); - } - buf += 2; - start += 2; - } - /* skip fop element */ - if (start == OFFSET(fop)) { - start += 2; - buf += 2; - } - while (start < end) { - num32 = *((int *)buf); - if (start == OFFSET(fip)) - task->thread.fir = (task->thread.fir & (~0xffffffff)) - | num32; - else if (start == OFFSET(foo)) - task->thread.fdr = (task->thread.fdr & (~0xffffffff)) - | num32; - else if (start == OFFSET(mxcsr)) { - num64 = num32 & 0xff10; - task->thread.fcr = (task->thread.fcr & - (~0xff1000000000UL)) | (num64<<32); - num64 = num32 & 0x3f; - 
task->thread.fsr = (task->thread.fsr & - (~0x3f00000000UL)) | (num64<<32); - } - buf += 4; - start += 4; - } -} - -static void do_fpxregs_get(struct unw_frame_info *info, void *arg) -{ - struct regset_getset *dst = arg; - struct task_struct *task = dst->target; - struct pt_regs *pt; - char buf[128]; - int start, end, tos; - - if (dst->count == 0 || unw_unwind_to_user(info) < 0) - return; - if (dst->pos < OFFSET(st_space[0])) { - end = min(dst->pos + dst->count, (unsigned int)32); - getfpxreg(task, dst->pos, end, buf); - dst->ret = user_regset_copyout(&dst->pos, &dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, buf, - 0, OFFSET(st_space[0])); - if (dst->ret || dst->count == 0) - return; - } - if (dst->pos < OFFSET(xmm_space[0])) { - pt = task_pt_regs(task); - tos = (task->thread.fsr >> 11) & 7; - end = min(dst->pos + dst->count, - (unsigned int)OFFSET(xmm_space[0])); - start = (dst->pos - OFFSET(st_space[0])) / 16; - end = (end - OFFSET(st_space[0])) / 16; - for (; start < end; start++) - access_fpreg_ia32(start, buf + 16 * start, pt, - info->sw, tos, 0); - dst->ret = user_regset_copyout(&dst->pos, &dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, - buf, OFFSET(st_space[0]), OFFSET(xmm_space[0])); - if (dst->ret || dst->count == 0) - return; - } - if (dst->pos < OFFSET(padding[0])) - dst->ret = user_regset_copyout(&dst->pos, &dst->count, - &dst->u.get.kbuf, &dst->u.get.ubuf, - &info->sw->f16, OFFSET(xmm_space[0]), - OFFSET(padding[0])); -} - -static void do_fpxregs_set(struct unw_frame_info *info, void *arg) -{ - struct regset_getset *dst = arg; - struct task_struct *task = dst->target; - char buf[128]; - int start, end; - - if (dst->count == 0 || unw_unwind_to_user(info) < 0) - return; - - if (dst->pos < OFFSET(st_space[0])) { - start = dst->pos; - dst->ret = user_regset_copyin(&dst->pos, &dst->count, - &dst->u.set.kbuf, &dst->u.set.ubuf, - buf, 0, OFFSET(st_space[0])); - if (dst->ret) - return; - setfpxreg(task, start, dst->pos, buf); - if (dst->count == 0) - return; - } - if (dst->pos < OFFSET(xmm_space[0])) { - struct pt_regs *pt; - int tos; - pt = task_pt_regs(task); - tos = (task->thread.fsr >> 11) & 7; - start = (dst->pos - OFFSET(st_space[0])) / 16; - dst->ret = user_regset_copyin(&dst->pos, &dst->count, - &dst->u.set.kbuf, &dst->u.set.ubuf, - buf, OFFSET(st_space[0]), OFFSET(xmm_space[0])); - if (dst->ret) - return; - end = (dst->pos - OFFSET(st_space[0])) / 16; - for (; start < end; start++) - access_fpreg_ia32(start, buf + 16 * start, pt, info->sw, - tos, 1); - if (dst->count == 0) - return; - } - if (dst->pos < OFFSET(padding[0])) - dst->ret = user_regset_copyin(&dst->pos, &dst->count, - &dst->u.set.kbuf, &dst->u.set.ubuf, - &info->sw->f16, OFFSET(xmm_space[0]), - OFFSET(padding[0])); -} -#undef OFFSET - -static int do_regset_call(void (*call)(struct unw_frame_info *, void *), - struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) -{ - struct regset_getset info = { .target = target, .regset = regset, - .pos = pos, .count = count, - .u.set = { .kbuf = kbuf, .ubuf = ubuf }, - .ret = 0 }; - - if (target == current) - unw_init_running(call, &info); - else { - struct unw_frame_info ufi; - memset(&ufi, 0, sizeof(ufi)); - unw_init_from_blocked_task(&ufi, target); - (*call)(&ufi, &info); - } - - return info.ret; -} - -static int ia32_fpregs_get(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) -{ - return 
do_regset_call(do_fpregs_get, target, regset, pos, count, - kbuf, ubuf); -} - -static int ia32_fpregs_set(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) -{ - return do_regset_call(do_fpregs_set, target, regset, pos, count, - kbuf, ubuf); -} - -static int ia32_fpxregs_get(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) -{ - return do_regset_call(do_fpxregs_get, target, regset, pos, count, - kbuf, ubuf); -} - -static int ia32_fpxregs_set(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) -{ - return do_regset_call(do_fpxregs_set, target, regset, pos, count, - kbuf, ubuf); -} - -static int ia32_genregs_get(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) -{ - if (kbuf) { - u32 *kp = kbuf; - while (count > 0) { - *kp++ = getreg(target, pos); - pos += 4; - count -= 4; - } - } else { - u32 __user *up = ubuf; - while (count > 0) { - if (__put_user(getreg(target, pos), up++)) - return -EFAULT; - pos += 4; - count -= 4; - } - } - return 0; -} - -static int ia32_genregs_set(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) -{ - int ret = 0; - - if (kbuf) { - const u32 *kp = kbuf; - while (!ret && count > 0) { - putreg(target, pos, *kp++); - pos += 4; - count -= 4; - } - } else { - const u32 __user *up = ubuf; - u32 val; - while (!ret && count > 0) { - ret = __get_user(val, up++); - if (!ret) - putreg(target, pos, val); - pos += 4; - count -= 4; - } - } - return ret; -} - -static int ia32_tls_active(struct task_struct *target, - const struct user_regset *regset) -{ - struct thread_struct *t = &target->thread; - int n = GDT_ENTRY_TLS_ENTRIES; - while (n > 0 && desc_empty(&t->tls_array[n -1])) - --n; - return n; -} - -static int ia32_tls_get(struct task_struct *target, - const struct user_regset *regset, unsigned int pos, - unsigned int count, void *kbuf, void __user *ubuf) -{ - const struct desc_struct *tls; - - if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) || - (pos % sizeof(struct ia32_user_desc)) != 0 || - (count % sizeof(struct ia32_user_desc)) != 0) - return -EINVAL; - - pos /= sizeof(struct ia32_user_desc); - count /= sizeof(struct ia32_user_desc); - - tls = &target->thread.tls_array[pos]; - - if (kbuf) { - struct ia32_user_desc *info = kbuf; - while (count-- > 0) - fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++, - tls++); - } else { - struct ia32_user_desc __user *u_info = ubuf; - while (count-- > 0) { - struct ia32_user_desc info; - fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++); - if (__copy_to_user(u_info++, &info, sizeof(info))) - return -EFAULT; - } - } - - return 0; -} - -static int ia32_tls_set(struct task_struct *target, - const struct user_regset *regset, unsigned int pos, - unsigned int count, const void *kbuf, const void __user *ubuf) -{ - struct ia32_user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; - const struct ia32_user_desc *info; - - if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) || - (pos % sizeof(struct ia32_user_desc)) != 0 || - (count % sizeof(struct ia32_user_desc)) != 0) - return -EINVAL; - - if (kbuf) - info = kbuf; - else if (__copy_from_user(infobuf, ubuf, count)) 
- return -EFAULT; - else - info = infobuf; - - set_tls_desc(target, - GDT_ENTRY_TLS_MIN + (pos / sizeof(struct ia32_user_desc)), - info, count / sizeof(struct ia32_user_desc)); - - return 0; -} - -/* - * This should match arch/i386/kernel/ptrace.c:native_regsets. - * XXX ioperm? vm86? - */ -static const struct user_regset ia32_regsets[] = { - { - .core_note_type = NT_PRSTATUS, - .n = sizeof(struct user_regs_struct32)/4, - .size = 4, .align = 4, - .get = ia32_genregs_get, .set = ia32_genregs_set - }, - { - .core_note_type = NT_PRFPREG, - .n = sizeof(struct ia32_user_i387_struct) / 4, - .size = 4, .align = 4, - .get = ia32_fpregs_get, .set = ia32_fpregs_set - }, - { - .core_note_type = NT_PRXFPREG, - .n = sizeof(struct ia32_user_fxsr_struct) / 4, - .size = 4, .align = 4, - .get = ia32_fpxregs_get, .set = ia32_fpxregs_set - }, - { - .core_note_type = NT_386_TLS, - .n = GDT_ENTRY_TLS_ENTRIES, - .bias = GDT_ENTRY_TLS_MIN, - .size = sizeof(struct ia32_user_desc), - .align = sizeof(struct ia32_user_desc), - .active = ia32_tls_active, - .get = ia32_tls_get, .set = ia32_tls_set, - }, -}; - -const struct user_regset_view user_ia32_view = { - .name = "i386", .e_machine = EM_386, - .regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets) -}; - -long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, - __u32 len_low, __u32 len_high, int advice) -{ - return sys_fadvise64_64(fd, - (((u64)offset_high)<<32) | offset_low, - (((u64)len_high)<<32) | len_low, - advice); -} - -#ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */ - -asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid) -{ - uid_t sruid, seuid; - - sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid); - seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid); - return sys_setreuid(sruid, seuid); -} - -asmlinkage long -sys32_setresuid(compat_uid_t ruid, compat_uid_t euid, - compat_uid_t suid) -{ - uid_t sruid, seuid, ssuid; - - sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid); - seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid); - ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid); - return sys_setresuid(sruid, seuid, ssuid); -} - -asmlinkage long -sys32_setregid(compat_gid_t rgid, compat_gid_t egid) -{ - gid_t srgid, segid; - - srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid); - segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid); - return sys_setregid(srgid, segid); -} - -asmlinkage long -sys32_setresgid(compat_gid_t rgid, compat_gid_t egid, - compat_gid_t sgid) -{ - gid_t srgid, segid, ssgid; - - srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid); - segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid); - ssgid = (sgid == (compat_gid_t)-1) ? 
((gid_t)-1) : ((gid_t)sgid); - return sys_setresgid(srgid, segid, ssgid); -} -#endif /* NOTYET */ diff --git a/arch/ia64/include/asm/ia32.h b/arch/ia64/include/asm/ia32.h deleted file mode 100644 index 2390ee1..0000000 --- a/arch/ia64/include/asm/ia32.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef _ASM_IA64_IA32_H -#define _ASM_IA64_IA32_H - - -#include -#include - -#define IA32_NR_syscalls 285 /* length of syscall table */ -#define IA32_PAGE_SHIFT 12 /* 4KB pages */ - -#ifndef __ASSEMBLY__ - -# ifdef CONFIG_IA32_SUPPORT - -#define IA32_PAGE_OFFSET 0xc0000000 - -extern void ia32_cpu_init (void); -extern void ia32_mem_init (void); -extern void ia32_gdt_init (void); -extern int ia32_exception (struct pt_regs *regs, unsigned long isr); -extern int ia32_intercept (struct pt_regs *regs, unsigned long isr); -extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs); - -# endif /* !CONFIG_IA32_SUPPORT */ - -/* Declare this unconditionally, so we don't get warnings for unreachable code. */ -extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs *regs); -#if PAGE_SHIFT > IA32_PAGE_SHIFT -extern int ia32_copy_ia64_partial_page_list(struct task_struct *, - unsigned long); -extern void ia32_drop_ia64_partial_page_list(struct task_struct *); -#else -# define ia32_copy_ia64_partial_page_list(a1, a2) 0 -# define ia32_drop_ia64_partial_page_list(a1) do { ; } while (0) -#endif - -#endif /* !__ASSEMBLY__ */ - -#endif /* _ASM_IA64_IA32_H */ diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 7fa90f7..348e44d 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -270,23 +270,6 @@ typedef struct { (int __user *) (addr)); \ }) -#ifdef CONFIG_IA32_SUPPORT -struct desc_struct { - unsigned int a, b; -}; - -#define desc_empty(desc) (!((desc)->a | (desc)->b)) -#define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) - -#define GDT_ENTRY_TLS_ENTRIES 3 -#define GDT_ENTRY_TLS_MIN 6 -#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) - -#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) - -struct ia64_partial_page_list; -#endif - struct thread_struct { __u32 flags; /* various thread flags (see IA64_THREAD_*) */ /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */ @@ -298,29 +281,6 @@ struct thread_struct { __u64 rbs_bot; /* the base address for the RBS */ int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */ -#ifdef CONFIG_IA32_SUPPORT - __u64 eflag; /* IA32 EFLAGS reg */ - __u64 fsr; /* IA32 floating pt status reg */ - __u64 fcr; /* IA32 floating pt control reg */ - __u64 fir; /* IA32 fp except. instr. reg */ - __u64 fdr; /* IA32 fp except. data reg */ - __u64 old_k1; /* old value of ar.k1 */ - __u64 old_iob; /* old IOBase value */ - struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */ - /* cached TLS descriptors. 
*/ - struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; - -# define INIT_THREAD_IA32 .eflag = 0, \ - .fsr = 0, \ - .fcr = 0x17800000037fULL, \ - .fir = 0, \ - .fdr = 0, \ - .old_k1 = 0, \ - .old_iob = 0, \ - .ppl = NULL, -#else -# define INIT_THREAD_IA32 -#endif /* CONFIG_IA32_SUPPORT */ #ifdef CONFIG_PERFMON void *pfm_context; /* pointer to detailed PMU context */ unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */ @@ -342,7 +302,6 @@ struct thread_struct { .rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \ .task_size = DEFAULT_TASK_SIZE, \ .last_fph_cpu = -1, \ - INIT_THREAD_IA32 \ INIT_THREAD_PM \ .dbr = {0, }, \ .ibr = {0, }, \ @@ -485,11 +444,6 @@ extern void __ia64_load_fpu (struct ia64_fpreg *fph); extern void ia64_save_debug_regs (unsigned long *save_area); extern void ia64_load_debug_regs (unsigned long *save_area); -#ifdef CONFIG_IA32_SUPPORT -extern void ia32_save_state (struct task_struct *task); -extern void ia32_load_state (struct task_struct *task); -#endif - #define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) #define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h index 2f758a4..a7ff1c6 100644 --- a/arch/ia64/include/asm/syscall.h +++ b/arch/ia64/include/asm/syscall.h @@ -22,33 +22,18 @@ static inline long syscall_get_nr(struct task_struct *task, if ((long)regs->cr_ifs < 0) /* Not a syscall */ return -1; -#ifdef CONFIG_IA32_SUPPORT - if (IS_IA32_PROCESS(regs)) - return regs->r1; -#endif - return regs->r15; } static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { -#ifdef CONFIG_IA32_SUPPORT - if (IS_IA32_PROCESS(regs)) - regs->r8 = regs->r1; -#endif - /* do nothing */ } static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { -#ifdef CONFIG_IA32_SUPPORT - if (IS_IA32_PROCESS(regs)) - return regs->r8; -#endif - return regs->r10 == -1 ? regs->r8:0; } @@ -62,13 +47,6 @@ static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { -#ifdef CONFIG_IA32_SUPPORT - if (IS_IA32_PROCESS(regs)) { - regs->r8 = (long) error ? 
error : val; - return; - } -#endif - if (error) { /* error < 0, but ia64 uses > 0 return value */ regs->r8 = -error; @@ -89,37 +67,6 @@ static inline void syscall_get_arguments(struct task_struct *task, { BUG_ON(i + n > 6); -#ifdef CONFIG_IA32_SUPPORT - if (IS_IA32_PROCESS(regs)) { - switch (i + n) { - case 6: - if (!n--) break; - *args++ = regs->r13; - case 5: - if (!n--) break; - *args++ = regs->r15; - case 4: - if (!n--) break; - *args++ = regs->r14; - case 3: - if (!n--) break; - *args++ = regs->r10; - case 2: - if (!n--) break; - *args++ = regs->r9; - case 1: - if (!n--) break; - *args++ = regs->r11; - case 0: - if (!n--) break; - default: - BUG(); - break; - } - - return; - } -#endif ia64_syscall_get_set_arguments(task, regs, i, n, args, 0); } @@ -130,34 +77,6 @@ static inline void syscall_set_arguments(struct task_struct *task, { BUG_ON(i + n > 6); -#ifdef CONFIG_IA32_SUPPORT - if (IS_IA32_PROCESS(regs)) { - switch (i + n) { - case 6: - if (!n--) break; - regs->r13 = *args++; - case 5: - if (!n--) break; - regs->r15 = *args++; - case 4: - if (!n--) break; - regs->r14 = *args++; - case 3: - if (!n--) break; - regs->r10 = *args++; - case 2: - if (!n--) break; - regs->r9 = *args++; - case 1: - if (!n--) break; - regs->r11 = *args++; - case 0: - if (!n--) break; - } - - return; - } -#endif ia64_syscall_get_set_arguments(task, regs, i, n, args, 1); } #endif /* _ASM_SYSCALL_H */ diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h index 927a381..9f342a5 100644 --- a/arch/ia64/include/asm/system.h +++ b/arch/ia64/include/asm/system.h @@ -191,15 +191,6 @@ do { \ #ifdef __KERNEL__ -#ifdef CONFIG_IA32_SUPPORT -# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0) -#else -# define IS_IA32_PROCESS(regs) 0 -struct task_struct; -static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){} -static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){} -#endif - /* * Context switch from one thread to another. 
If the two threads have * different address spaces, schedule() has already taken care of @@ -233,7 +224,7 @@ extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct #define IA64_HAS_EXTRA_STATE(t) \ ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ - || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE()) + || PERFMON_IS_SYSWIDE()) #define __switch_to(prev,next,last) do { \ IA64_ACCOUNT_ON_SWITCH(prev, next); \ diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 10a8f21..bb8b0ff 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -335,20 +335,6 @@ #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#ifdef CONFIG_IA32_SUPPORT -# define __ARCH_WANT_SYS_FADVISE64 -# define __ARCH_WANT_SYS_GETPGRP -# define __ARCH_WANT_SYS_LLSEEK -# define __ARCH_WANT_SYS_NICE -# define __ARCH_WANT_SYS_OLD_GETRLIMIT -# define __ARCH_WANT_SYS_OLDUMOUNT -# define __ARCH_WANT_SYS_PAUSE -# define __ARCH_WANT_SYS_SIGPENDING -# define __ARCH_WANT_SYS_SIGPROCMASK -# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND -# define __ARCH_WANT_COMPAT_SYS_TIME -#endif - #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER) #include diff --git a/arch/ia64/kernel/audit.c b/arch/ia64/kernel/audit.c index f3802ae..96a9d18 100644 --- a/arch/ia64/kernel/audit.c +++ b/arch/ia64/kernel/audit.c @@ -30,20 +30,11 @@ static unsigned signal_class[] = { int audit_classify_arch(int arch) { -#ifdef CONFIG_IA32_SUPPORT - if (arch == AUDIT_ARCH_I386) - return 1; -#endif return 0; } int audit_classify_syscall(int abi, unsigned syscall) { -#ifdef CONFIG_IA32_SUPPORT - extern int ia32_classify_syscall(unsigned); - if (abi == AUDIT_ARCH_I386) - return ia32_classify_syscall(syscall); -#endif switch(syscall) { case __NR_open: return 2; @@ -58,18 +49,6 @@ int audit_classify_syscall(int abi, unsigned syscall) static int __init audit_classes_init(void) { -#ifdef CONFIG_IA32_SUPPORT - extern __u32 ia32_dir_class[]; - extern __u32 ia32_write_class[]; - extern __u32 ia32_read_class[]; - extern __u32 ia32_chattr_class[]; - extern __u32 ia32_signal_class[]; - audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class); - audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class); - audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class); - audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class); - audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class); -#endif audit_register_class(AUDIT_CLASS_WRITE, write_class); audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index d75b872..9a260b3 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -71,15 +71,6 @@ ENTRY(ia64_execve) add out3=16,sp // regs br.call.sptk.many rp=sys_execve .ret0: -#ifdef CONFIG_IA32_SUPPORT - /* - * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers - * from pt_regs. 
- */ - adds r16=PT(CR_IPSR)+16,sp - ;; - ld8 r16=[r16] -#endif cmp4.ge p6,p7=r8,r0 mov ar.pfs=loc1 // restore ar.pfs sxt4 r8=r8 // return 64-bit result @@ -108,12 +99,6 @@ ENTRY(ia64_execve) ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0 ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0 ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0 -#ifdef CONFIG_IA32_SUPPORT - tbit.nz p6,p0=r16, IA64_PSR_IS_BIT - movl loc0=ia64_ret_from_ia32_execve - ;; -(p6) mov rp=loc0 -#endif br.ret.sptk.many rp END(ia64_execve) @@ -848,30 +833,6 @@ __paravirt_work_processed_syscall: br.cond.sptk.many rbs_switch // B END(__paravirt_leave_syscall) -#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE -#ifdef CONFIG_IA32_SUPPORT -GLOBAL_ENTRY(ia64_ret_from_ia32_execve) - PT_REGS_UNWIND_INFO(0) - adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 - adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10 - ;; - .mem.offset 0,0 - st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit - .mem.offset 8,0 - st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit -#ifdef CONFIG_PARAVIRT - ;; - // don't fall through, ia64_leave_kernel may be #define'd - br.cond.sptk.few ia64_leave_kernel - ;; -#endif /* CONFIG_PARAVIRT */ -END(ia64_ret_from_ia32_execve) -#ifndef CONFIG_PARAVIRT - // fall through -#endif -#endif /* CONFIG_IA32_SUPPORT */ -#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ - GLOBAL_ENTRY(__paravirt_leave_kernel) PT_REGS_UNWIND_INFO(0) /* diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index ec9a5fd..179fd12 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S @@ -49,7 +49,6 @@ #include #include -#include #include #include #include @@ -1386,28 +1385,6 @@ END(ia32_exception) // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) ENTRY(ia32_intercept) DBG_FAULT(46) -#ifdef CONFIG_IA32_SUPPORT - mov r31=pr - MOV_FROM_ISR(r16) - ;; - extr.u r17=r16,16,8 // get ISR.code - mov r18=ar.eflag - MOV_FROM_IIM(r19) // old eflag value - ;; - cmp.ne p6,p0=2,r17 -(p6) br.cond.spnt 1f // not a system flag fault - xor r16=r18,r19 - ;; - extr.u r17=r16,18,1 // get the eflags.ac bit - ;; - cmp.eq p6,p0=0,r17 -(p6) br.cond.spnt 1f // eflags.ac bit didn't change - ;; - mov pr=r31,-1 // restore predicate registers - RFI - -1: -#endif // CONFIG_IA32_SUPPORT FAULT(46) END(ia32_intercept) @@ -1416,12 +1393,7 @@ END(ia32_intercept) // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) ENTRY(ia32_interrupt) DBG_FAULT(47) -#ifdef CONFIG_IA32_SUPPORT - mov r31=pr - br.sptk.many dispatch_to_ia32_handler -#else FAULT(47) -#endif END(ia32_interrupt) .org ia64_ivt+0x6c00 @@ -1715,89 +1687,3 @@ ENTRY(dispatch_illegal_op_fault) (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel br.sptk.many ia64_leave_kernel END(dispatch_illegal_op_fault) - -#ifdef CONFIG_IA32_SUPPORT - - /* - * There is no particular reason for this code to be here, other than that - * there happens to be space here that would go unused otherwise. If this - * fault ever gets "unreserved", simply moved the following code to a more - * suitable spot... 
- */ - - // IA32 interrupt entry point - -ENTRY(dispatch_to_ia32_handler) - SAVE_MIN - ;; - MOV_FROM_ISR(r14) - SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) - // guarantee that interruption collection is on - ;; - SSM_PSR_I(p15, p15, r3) - adds r3=8,r2 // Base pointer for SAVE_REST - ;; - SAVE_REST - ;; - mov r15=0x80 - shr r14=r14,16 // Get interrupt number - ;; - cmp.ne p6,p0=r14,r15 -(p6) br.call.dpnt.many b6=non_ia32_syscall - - adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions - adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp - ;; - cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 - ld8 r8=[r14] // get r8 - ;; - st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP) - ;; - alloc r15=ar.pfs,0,0,6,0 // must first in an insn group - ;; - ld4 r8=[r14],8 // r8 == eax (syscall number) - mov r15=IA32_NR_syscalls - ;; - cmp.ltu.unc p6,p7=r8,r15 - ld4 out1=[r14],8 // r9 == ecx - ;; - ld4 out2=[r14],8 // r10 == edx - ;; - ld4 out0=[r14] // r11 == ebx - adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp - ;; - ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp - ;; - ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi - adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 - ;; - ld4 out4=[r14] // r15 == edi - movl r16=ia32_syscall_table - ;; -(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number - ld4 r2=[r2] // r2 = current_thread_info()->flags - ;; - ld8 r16=[r16] - and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit - ;; - mov b6=r16 - movl r15=ia32_ret_from_syscall - cmp.eq p8,p0=r2,r0 - ;; - mov rp=r15 -(p8) br.call.sptk.many b6=b6 - br.cond.sptk ia32_trace_syscall - -non_ia32_syscall: - alloc r15=ar.pfs,0,0,2,0 - mov out0=r14 // interrupt # - add out1=16,sp // pointer to pt_regs - ;; // avoid WAW on CFM - br.call.sptk.many rp=ia32_bad_interrupt -.ret1: movl r15=ia64_leave_kernel - ;; - mov rp=r15 - br.ret.sptk.many rp -END(dispatch_to_ia32_handler) - -#endif /* CONFIG_IA32_SUPPORT */ diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 9bcec99..883ecc9 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include @@ -358,11 +357,6 @@ ia64_save_extra (struct task_struct *task) if (info & PFM_CPUINFO_SYST_WIDE) pfm_syst_wide_update_task(task, info, 0); #endif - -#ifdef CONFIG_IA32_SUPPORT - if (IS_IA32_PROCESS(task_pt_regs(task))) - ia32_save_state(task); -#endif } void @@ -383,11 +377,6 @@ ia64_load_extra (struct task_struct *task) if (info & PFM_CPUINFO_SYST_WIDE) pfm_syst_wide_update_task(task, info, 1); #endif - -#ifdef CONFIG_IA32_SUPPORT - if (IS_IA32_PROCESS(task_pt_regs(task))) - ia32_load_state(task); -#endif } /* @@ -426,7 +415,7 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base, unsigned long user_stack_size, struct task_struct *p, struct pt_regs *regs) { - extern char ia64_ret_from_clone, ia32_ret_from_clone; + extern char ia64_ret_from_clone; struct switch_stack *child_stack, *stack; unsigned long rbs, child_rbs, rbs_size; struct pt_regs *child_ptregs; @@ -457,7 +446,7 @@ copy_thread(unsigned long clone_flags, memcpy((void *) child_rbs, (void *) rbs, rbs_size); if (likely(user_mode(child_ptregs))) { - if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs)) + if (clone_flags & CLONE_SETTLS) child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */ if (user_stack_base) { child_ptregs->r12 = user_stack_base + user_stack_size - 16; @@ -477,10 +466,7 @@ copy_thread(unsigned long clone_flags, child_ptregs->r13 = 
(unsigned long) p; /* set `current' pointer */ } child_stack->ar_bspstore = child_rbs + rbs_size; - if (IS_IA32_PROCESS(regs)) - child_stack->b0 = (unsigned long) &ia32_ret_from_clone; - else - child_stack->b0 = (unsigned long) &ia64_ret_from_clone; + child_stack->b0 = (unsigned long) &ia64_ret_from_clone; /* copy parts of thread_struct: */ p->thread.ksp = (unsigned long) child_stack - 16; @@ -515,22 +501,6 @@ copy_thread(unsigned long clone_flags, p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) | THREAD_FLAGS_TO_SET); ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ -#ifdef CONFIG_IA32_SUPPORT - /* - * If we're cloning an IA32 task then save the IA32 extra - * state from the current task to the new task - */ - if (IS_IA32_PROCESS(task_pt_regs(current))) { - ia32_save_state(p); - if (clone_flags & CLONE_SETTLS) - retval = ia32_clone_tls(p, child_ptregs); - - /* Copy partially mapped page list */ - if (!retval) - retval = ia32_copy_ia64_partial_page_list(p, - clone_flags); - } -#endif #ifdef CONFIG_PERFMON if (current->thread.pfm_context) @@ -704,15 +674,6 @@ EXPORT_SYMBOL(kernel_thread); int kernel_thread_helper (int (*fn)(void *), void *arg) { -#ifdef CONFIG_IA32_SUPPORT - if (IS_IA32_PROCESS(task_pt_regs(current))) { - /* A kernel thread is always a 64-bit process. */ - current->thread.map_base = DEFAULT_MAP_BASE; - current->thread.task_size = DEFAULT_TASK_SIZE; - ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob); - ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1); - } -#endif return (*fn)(arg); } @@ -725,14 +686,6 @@ flush_thread (void) /* drop floating-point and debug-register state if it exists: */ current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); ia64_drop_fpu(current); -#ifdef CONFIG_IA32_SUPPORT - if (IS_IA32_PROCESS(task_pt_regs(current))) { - ia32_drop_ia64_partial_page_list(current); - current->thread.task_size = IA32_PAGE_OFFSET; - set_fs(USER_DS); - memset(current->thread.tls_array, 0, sizeof(current->thread.tls_array)); - } -#endif } /* @@ -753,8 +706,6 @@ exit_thread (void) if (current->thread.flags & IA64_THREAD_DBG_VALID) pfm_release_debug_registers(current); #endif - if (IS_IA32_PROCESS(task_pt_regs(current))) - ia32_drop_ia64_partial_page_list(current); } unsigned long diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 9daa87f..b61afbb 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -1250,13 +1250,8 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, long syscall; int arch; - if (IS_IA32_PROCESS(®s)) { - syscall = regs.r1; - arch = AUDIT_ARCH_I386; - } else { - syscall = regs.r15; - arch = AUDIT_ARCH_IA64; - } + syscall = regs.r15; + arch = AUDIT_ARCH_IA64; audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); } @@ -2172,11 +2167,6 @@ static const struct user_regset_view user_ia64_view = { const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) { -#ifdef CONFIG_IA32_SUPPORT - extern const struct user_regset_view user_ia32_view; - if (IS_IA32_PROCESS(task_pt_regs(tsk))) - return &user_ia32_view; -#endif return &user_ia64_view; } diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index a1ea879..41ae6a5 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -46,7 +46,6 @@ #include #include -#include #include #include #include @@ -1016,10 +1015,6 @@ cpu_init (void) ia64_mmu_init(ia64_imva(cpu_data)); ia64_mca_cpu_init(ia64_imva(cpu_data)); -#ifdef CONFIG_IA32_SUPPORT - ia32_cpu_init(); 
-#endif - /* Clear ITC to eliminate sched_clock() overflows in human time. */ ia64_set_itc(0); diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index e1821ca..7bdafc8 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -21,7 +21,6 @@ #include #include -#include #include #include #include @@ -425,14 +424,8 @@ static long handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct sigscratch *scr) { - if (IS_IA32_PROCESS(&scr->pt)) { - /* send signal to IA-32 process */ - if (!ia32_setup_frame1(sig, ka, info, oldset, &scr->pt)) - return 0; - } else - /* send signal to IA-64 process */ - if (!setup_frame(sig, ka, info, oldset, scr)) - return 0; + if (!setup_frame(sig, ka, info, oldset, scr)) + return 0; spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); @@ -462,7 +455,6 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) siginfo_t info; long restart = in_syscall; long errno = scr->pt.r8; -# define ERR_CODE(c) (IS_IA32_PROCESS(&scr->pt) ? -(c) : (c)) /* * In the ia64_leave_kernel code path, we want the common case to go fast, which @@ -490,14 +482,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) * inferior call), thus it's important to check for restarting _after_ * get_signal_to_deliver(). */ - if (IS_IA32_PROCESS(&scr->pt)) { - if (in_syscall) { - if (errno >= 0) - restart = 0; - else - errno = -errno; - } - } else if ((long) scr->pt.r10 != -1) + if ((long) scr->pt.r10 != -1) /* * A system calls has to be restarted only if one of the error codes * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 @@ -513,22 +498,18 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) switch (errno) { case ERESTART_RESTARTBLOCK: case ERESTARTNOHAND: - scr->pt.r8 = ERR_CODE(EINTR); + scr->pt.r8 = EINTR; /* note: scr->pt.r10 is already -1 */ break; case ERESTARTSYS: if ((ka.sa.sa_flags & SA_RESTART) == 0) { - scr->pt.r8 = ERR_CODE(EINTR); + scr->pt.r8 = EINTR; /* note: scr->pt.r10 is already -1 */ break; } case ERESTARTNOINTR: - if (IS_IA32_PROCESS(&scr->pt)) { - scr->pt.r8 = scr->pt.r1; - scr->pt.cr_iip -= 2; - } else - ia64_decrement_ip(&scr->pt); + ia64_decrement_ip(&scr->pt); restart = 0; /* don't restart twice if handle_signal() fails... */ } } @@ -555,21 +536,14 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR || errno == ERESTART_RESTARTBLOCK) { - if (IS_IA32_PROCESS(&scr->pt)) { - scr->pt.r8 = scr->pt.r1; - scr->pt.cr_iip -= 2; - if (errno == ERESTART_RESTARTBLOCK) - scr->pt.r8 = 0; /* x86 version of __NR_restart_syscall */ - } else { - /* - * Note: the syscall number is in r15 which is saved in - * pt_regs so all we need to do here is adjust ip so that - * the "break" instruction gets re-executed. - */ - ia64_decrement_ip(&scr->pt); - if (errno == ERESTART_RESTARTBLOCK) - scr->pt.r15 = __NR_restart_syscall; - } + /* + * Note: the syscall number is in r15 which is saved in + * pt_regs so all we need to do here is adjust ip so that + * the "break" instruction gets re-executed. 
+ */ + ia64_decrement_ip(&scr->pt); + if (errno == ERESTART_RESTARTBLOCK) + scr->pt.r15 = __NR_restart_syscall; } } diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index de100aa..e5230b2 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -44,7 +44,6 @@ #include #include #include -#include #include #include #include @@ -443,10 +442,6 @@ smp_callin (void) calibrate_delay(); local_cpu_data->loops_per_jiffy = loops_per_jiffy; -#ifdef CONFIG_IA32_SUPPORT - ia32_gdt_init(); -#endif - /* * Allow the master to continue. */ diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index f0cda76..fd80e70 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -626,10 +625,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, break; case 45: -#ifdef CONFIG_IA32_SUPPORT - if (ia32_exception(®s, isr) == 0) - return; -#endif printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n"); printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", iip, ifa, isr); @@ -637,10 +632,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, break; case 46: -#ifdef CONFIG_IA32_SUPPORT - if (ia32_intercept(®s, isr) == 0) - return; -#endif printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n"); printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n", iip, ifa, isr, iim); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 7c0d481..ca3335e 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -22,7 +22,6 @@ #include #include -#include #include #include #include @@ -668,10 +667,6 @@ mem_init (void) fsyscall_table[i] = sys_call_table[i] | 1; } setup_gate(); - -#ifdef CONFIG_IA32_SUPPORT - ia32_mem_init(); -#endif } #ifdef CONFIG_MEMORY_HOTPLUG diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S index e32dae4..08847aa 100644 --- a/arch/ia64/xen/hypercall.S +++ b/arch/ia64/xen/hypercall.S @@ -58,11 +58,6 @@ __HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA) __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR) __HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) -#ifdef CONFIG_IA32_SUPPORT -__HCALL0(xen_get_eflag, HYPERPRIVOP_GET_EFLAG) -__HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8 -#endif /* CONFIG_IA32_SUPPORT */ - GLOBAL_ENTRY(xen_set_rr0_to_rr4) mov r8=r32 mov r9=r33 diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c index 5e2270a..8adc6a1 100644 --- a/arch/ia64/xen/xen_pv_ops.c +++ b/arch/ia64/xen/xen_pv_ops.c @@ -301,11 +301,6 @@ static void xen_setreg(int regnum, unsigned long val) case _IA64_REG_AR_KR0 ... 
_IA64_REG_AR_KR7: xen_set_kr(regnum - _IA64_REG_AR_KR0, val); break; -#ifdef CONFIG_IA32_SUPPORT - case _IA64_REG_AR_EFLAG: - xen_set_eflag(val); - break; -#endif case _IA64_REG_AR_ITC: xen_set_itc(val); break; @@ -332,11 +327,6 @@ static unsigned long xen_getreg(int regnum) case _IA64_REG_PSR: res = xen_get_psr(); break; -#ifdef CONFIG_IA32_SUPPORT - case _IA64_REG_AR_EFLAG: - res = xen_get_eflag(); - break; -#endif case _IA64_REG_AR_ITC: res = xen_get_itc(); break; @@ -710,9 +700,6 @@ extern unsigned long xen_getreg(int regnum); __DEFINE_FUNC(getreg, __DEFINE_GET_REG(PSR, PSR) -#ifdef CONFIG_IA32_SUPPORT - __DEFINE_GET_REG(AR_EFLAG, EFLAG) -#endif /* get_itc */ "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" @@ -789,9 +776,6 @@ __DEFINE_FUNC(setreg, ";;\n" "(p6) br.cond.spnt xen_set_itc\n" -#ifdef CONFIG_IA32_SUPPORT - __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG) -#endif __DEFINE_SET_REG(CR_TPR, SET_TPR) __DEFINE_SET_REG(CR_EOI, EOI) -- cgit v1.1 From 3ad2f3fbb961429d2aa627465ae4829758bc7e07 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 3 Feb 2010 08:01:28 +0800 Subject: tree-wide: Assorted spelling fixes In particular, several occurances of funny versions of 'success', 'unknown', 'therefore', 'acknowledge', 'argument', 'achieve', 'address', 'beginning', 'desirable', 'separate' and 'necessary' are fixed. Signed-off-by: Daniel Mack Cc: Joe Perches Cc: Junio C Hamano Signed-off-by: Jiri Kosina --- arch/ia64/sn/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index ece1bf9..c6c6d93 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -241,7 +241,7 @@ static void __cpuinit sn_check_for_wars(void) * Note: This stuff is duped here because Altix requires the PCDP to * locate a usable VGA device due to lack of proper ACPI support. Structures * could be used from drivers/firmware/pcdp.h, but it was decided that moving - * this file to a more public location just for Altix use was undesireable. + * this file to a more public location just for Altix use was undesirable. */ struct hcdp_uart_desc { -- cgit v1.1 From 22208ac586f2e456c49e927b90ded50e923b6aee Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Fri, 12 Feb 2010 08:17:58 -0800 Subject: [IA64] preserve personality flag bits across exec In its ia64 defines SET_PERSONALITY in a way that unconditionally sets the personality of the current process to PER_LINUX, losing any flag bits from the upper 3 bytes of current->personality. This is wrong. Those bits are intended to be inherited across exec (other code takes care of ensuring that security sensitive bits like ADDR_NO_RANDOMIZE are not passed to unsuspecting setuid/setgid applications). Signed-off-by: Tony Luck --- arch/ia64/include/asm/elf.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h index e14108b..4c41656 100644 --- a/arch/ia64/include/asm/elf.h +++ b/arch/ia64/include/asm/elf.h @@ -201,7 +201,9 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst); relevant until we have real hardware to play with... 
*/ #define ELF_PLATFORM NULL -#define SET_PERSONALITY(ex) set_personality(PER_LINUX) +#define SET_PERSONALITY(ex) \ + set_personality((current->personality & ~PER_MASK) | PER_LINUX) + #define elf_read_implies_exec(ex, executable_stack) \ ((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0) -- cgit v1.1 From 2b633e3fac5efada088b57d31e65401f22bcc18f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 10 Feb 2010 01:20:37 -0800 Subject: smp: Use nr_cpus= to set nr_cpu_ids early On x86, before prefill_possible_map(), nr_cpu_ids will be NR_CPUS aka CONFIG_NR_CPUS. Add nr_cpus= to set nr_cpu_ids. so we can simulate cpus <=8 are installed on normal config. -v2: accordging to Christoph, acpi_numa_init should use nr_cpu_ids in stead of NR_CPUS. -v3: add doc in kernel-parameters.txt according to Andrew. Signed-off-by: Yinghai Lu LKML-Reference: <1265793639-15071-34-git-send-email-yinghai@kernel.org> Acked-by: Linus Torvalds Signed-off-by: H. Peter Anvin Cc: Tony Luck --- arch/ia64/kernel/acpi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 40574ae..605a08b 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -881,8 +881,8 @@ __init void prefill_possible_map(void) possible = available_cpus + additional_cpus; - if (possible > NR_CPUS) - possible = NR_CPUS; + if (possible > nr_cpu_ids) + possible = nr_cpu_ids; printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", possible, max((possible - available_cpus), 0)); -- cgit v1.1 From 49bf83a45fc677db1ed44d0e072e6aaeabe4e124 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Tue, 16 Feb 2010 03:45:45 -0500 Subject: ACPI: fix "acpi=ht" boot option We broke "acpi=ht" in 2.6.32 by disabling MADT parsing for acpi=disabled. e5b8fc6ac158f65598f58dba2c0d52ba3b412f52 This also broke systems which invoked acpi=ht via DMI blacklist. acpi=ht is a really ugly hack, but restore it for those that still use it. http://bugzilla.kernel.org/show_bug.cgi?id=14886 Signed-off-by: Len Brown --- arch/ia64/include/asm/acpi.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index 7ae5889..e97b255 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock) #define acpi_noirq 0 /* ACPI always enabled on IA64 */ #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */ #define acpi_strict 1 /* no ACPI spec workarounds on IA64 */ +#define acpi_ht 0 /* no HT-only mode on IA64 */ #endif #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ static inline void disable_acpi(void) { } -- cgit v1.1 From ca4dbc668412d5fe039be3e26e8e717a616d1ca5 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 17 Feb 2010 18:49:54 -0800 Subject: xen: Remove unnecessary arch specific xen irq functions. Right now xen's use of the x86 and ia64 handle_irq is just bizarre and very fragile as it is very non-obvious the function exists and is is used by code out in drivers/.... Luckily using handle_irq is completely unnecessary, and we can just use the generic irq apis instead. This still leaves drivers/xen/events.c as a problematic user of the generic irq apis it has "static struct irq_info irq_info[NR_IRQS]" but that can be fixed some other time. Signed-off-by: Eric W. 
Biederman LKML-Reference: <4B7CAAD2.10803@kernel.org> Acked-by: Jeremy Fitzhardinge Cc: Ian Campbell Signed-off-by: H. Peter Anvin --- arch/ia64/include/asm/xen/events.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h index b8370c8..baa74c8 100644 --- a/arch/ia64/include/asm/xen/events.h +++ b/arch/ia64/include/asm/xen/events.h @@ -36,10 +36,6 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) return !(ia64_psr(regs)->i); } -static inline void handle_irq(int irq, struct pt_regs *regs) -{ - __do_IRQ(irq); -} #define irq_ctx_init(cpu) do { } while (0) #endif /* _ASM_IA64_XEN_EVENTS_H */ -- cgit v1.1 From 4b3073e1c53a256275f1079c0fbfbe85883d9275 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:40:18 +0000 Subject: MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself On VIVT ARM, when we have multiple shared mappings of the same file in the same MM, we need to ensure that we have coherency across all copies. We do this via make_coherent() by making the pages uncacheable. This used to work fine, until we allowed highmem with highpte - we now have a page table which is mapped as required, and is not available for modification via update_mmu_cache(). Ralf Beache suggested getting rid of the PTE value passed to update_mmu_cache(): On MIPS update_mmu_cache() calls __update_tlb() which walks pagetables to construct a pointer to the pte again. Passing a pte_t * is much more elegant. Maybe we might even replace the pte argument with the pte_t? Ben Herrenschmidt would also like the pte pointer for PowerPC: Passing the ptep in there is exactly what I want. I want that -instead- of the PTE value, because I have issue on some ppc cases, for I$/D$ coherency, where set_pte_at() may decide to mask out the _PAGE_EXEC. So, pass in the mapped page table pointer into update_mmu_cache(), and remove the PTE value, updating all implementations and call sites to suit. Includes a fix from Stephen Rothwell: sparc: fix fallout from update_mmu_cache API change Signed-off-by: Stephen Rothwell Acked-by: Benjamin Herrenschmidt Signed-off-by: Russell King --- arch/ia64/include/asm/pgtable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 69bf138..c3286f4 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -462,7 +462,7 @@ pte_same (pte_t a, pte_t b) return pte_val(a) == pte_val(b); } -#define update_mmu_cache(vma, address, pte) do { } while (0) +#define update_mmu_cache(vma, address, ptep) do { } while (0) extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern void paging_init (void); -- cgit v1.1 From b26b2d494b659f988b4d75eb394dfa0ddac415c9 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Fri, 1 Jan 2010 17:40:49 +0100 Subject: resource/PCI: align functions now return start of resource As suggested by Linus, align functions should return the start of a resource, not void. An update of "res->start" is no longer necessary. 
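
To make the new contract concrete, here is a minimal, purely illustrative align callback under the changed signature. The function name and the I/O-range tweak are assumptions for the example, not taken from the patch; note also that the follow-on patch in this series additionally marks the resource argument const.

#include <linux/ioport.h>

/* Hypothetical example: after this change, an align hook returns the
 * (possibly adjusted) start instead of writing it back into *res.
 */
static resource_size_t
example_align_resource(void *data, struct resource *res,
		       resource_size_t size, resource_size_t align)
{
	resource_size_t start = res->start;

	/* illustrative constraint: keep small I/O allocations above 0x400 */
	if ((res->flags & IORESOURCE_IO) && start < 0x400)
		start = 0x400;

	return start;	/* the resource core applies the returned start */
}
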
Cc: Bjorn Helgaas Cc: Yinghai Lu Signed-off-by: Dominik Brodowski Signed-off-by: Jesse Barnes --- arch/ia64/pci/pci.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index df639db7..ef574cd 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -547,10 +547,11 @@ pcibios_disable_device (struct pci_dev *dev) acpi_pci_irq_disable(dev); } -void +resource_size_t pcibios_align_resource (void *data, struct resource *res, resource_size_t size, resource_size_t align) { + return res->start; } /* -- cgit v1.1 From 3b7a17fcdae532d29dffab9d564a28be08960988 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Fri, 1 Jan 2010 17:40:50 +0100 Subject: resource/PCI: mark struct resource as const Now that we return the new resource start position, there is no need to update "struct resource" inside the align function. Therefore, mark the struct resource as const. Cc: Bjorn Helgaas Cc: Yinghai Lu Signed-off-by: Dominik Brodowski Signed-off-by: Jesse Barnes --- arch/ia64/pci/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index ef574cd..783c83b 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -548,7 +548,7 @@ pcibios_disable_device (struct pci_dev *dev) } resource_size_t -pcibios_align_resource (void *data, struct resource *res, +pcibios_align_resource (void *data, const struct resource *res, resource_size_t size, resource_size_t align) { return res->start; -- cgit v1.1 From 89a74ecccd1f78e51faf6287e5c0e93a92ac096e Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 23 Feb 2010 10:24:31 -0700 Subject: PCI: add pci_bus_for_each_resource(), remove direct bus->resource[] refs No functional change; this converts loops that iterate from 0 to PCI_BUS_NUM_RESOURCES through pci_bus resource[] table to use the pci_bus_for_each_resource() iterator instead. This doesn't change the way resources are stored; it merely removes dependencies on the fact that they're in a table. Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/ia64/pci/pci.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 783c83b..89f957c 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -452,13 +452,12 @@ EXPORT_SYMBOL(pcibios_bus_to_resource); static int __devinit is_valid_resource(struct pci_dev *dev, int idx) { unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM; - struct resource *devr = &dev->resource[idx]; + struct resource *devr = &dev->resource[idx], *busr; if (!dev->bus) return 0; - for (i=0; ibus->resource[i]; + pci_bus_for_each_resource(dev->bus, busr, i) { if (!busr || ((busr->flags ^ devr->flags) & type_mask)) continue; if ((devr->start) && (devr->start >= busr->start) && -- cgit v1.1 From 2fe2abf896c1e7a0ee65faaf3ef0ce654848abbd Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 23 Feb 2010 10:24:36 -0700 Subject: PCI: augment bus resource table with a list Previously we used a table of size PCI_BUS_NUM_RESOURCES (16) for resources forwarded to a bus by its upstream bridge. We've increased this size several times when the table overflowed. But there's no good limit on the number of resources because host bridges and subtractive decode bridges can forward any number of ranges to their secondary buses. 
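
As context for these bus-resource changes, a minimal sketch of how callers are expected to walk a bus's resources with the pci_bus_for_each_resource() iterator introduced in the preceding patch, independent of whether the entries live in the fixed table or the new list. The helper name and the message are illustrative assumptions.

#include <linux/pci.h>

/* Illustrative only: iterate bus resources without touching
 * bus->resource[] directly; unused slots show up as NULL.
 */
static void example_dump_bus_windows(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res)
			continue;	/* empty slot, skip it */
		dev_info(&bus->dev, "window %d: %pR\n", i, res);
	}
}
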
This patch reduces the table to only PCI_BRIDGE_RESOURCE_NUM (4) entries, which corresponds to the number of windows a PCI-to-PCI (3) or CardBus (4) bridge can positively decode. Any additional resources, e.g., PCI host bridge windows or subtractively-decoded regions, are kept in a list. I'd prefer a single list rather than this split table/list approach, but that requires simultaneous changes to every architecture. This approach only requires immediate changes where we set up (a) host bridges with more than four windows and (b) subtractive-decode P2P bridges, and we can incrementally change other architectures to use the list. Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/ia64/pci/pci.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 89f957c..64aff52 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -320,9 +320,9 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data) static void __devinit pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) { - int i, j; + int i; - j = 0; + pci_bus_remove_resources(bus); for (i = 0; i < ctrl->windows; i++) { struct resource *res = &ctrl->window[i].resource; /* HP's firmware has a hack to work around a Windows bug. @@ -330,13 +330,7 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) if ((res->flags & IORESOURCE_MEM) && (res->end - res->start < 16)) continue; - if (j >= PCI_BUS_NUM_RESOURCES) { - dev_warn(&bus->dev, - "ignoring host bridge window %pR (no space)\n", - res); - continue; - } - bus->resource[j++] = res; + pci_bus_add_resource(bus, res, 0); } } -- cgit v1.1 From 7bc5e3f2be32ae6fb0c74cd0f707f986b3a01a26 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 23 Feb 2010 10:24:41 -0700 Subject: x86/PCI: use host bridge _CRS info by default on 2008 and newer machines The main benefit of using ACPI host bridge window information is that we can do better resource allocation in systems with multiple host bridges, e.g., http://bugzilla.kernel.org/show_bug.cgi?id=14183 Sometimes we need _CRS information even if we only have one host bridge, e.g., https://bugs.launchpad.net/ubuntu/+source/linux/+bug/341681 Most of these systems are relatively new, so this patch turns on "pci=use_crs" only on machines with a BIOS date of 2008 or newer. Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/ia64/include/asm/acpi.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index e97b255..93997bd 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -98,6 +98,7 @@ ia64_acpi_release_global_lock (unsigned int *lock) #endif #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ static inline void disable_acpi(void) { } +static inline void pci_acpi_crs_quirks(void) { } const char *acpi_get_sysname (void); int acpi_request_vector (u32 int_type); -- cgit v1.1 From f7624c97b8e5bca49be7854309550bff8ce98c47 Mon Sep 17 00:00:00 2001 From: Hedi Berriche Date: Tue, 23 Feb 2010 23:58:49 +0000 Subject: [IA64] Fix broken sn2 build Revert the change made to arch/ia64/sn/kernel/setup.c by commit 204fba4aa303ea4a7bb726a539bf4a5b9e3203d0 as it breaks the build. 
Fixing the build the b94b08081fcecf83fa690d6c5664f6316fe72208 way breaks xpc because genksyms then fails to generate an CRC for per_cpu____sn_cnodeid_to_nasid because of limitations in the generic genksyms code. Signed-off-by: Hedi Berriche Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index ece1bf9..e456f06 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); EXPORT_PER_CPU_SYMBOL(__sn_hub_info); -DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid); +DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); -- cgit v1.1 From d868080d2a1c95526cb01e3d0c14096721cbb87a Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Thu, 25 Feb 2010 14:09:52 -0700 Subject: [IA64] Only build arch/ia64/kernel/acpi.o when CONFIG_ACPI The following commit broke the ia64 sim_defconfig build: 3b2b84c0b81108a9a869a88bf2beeb5a95d81dd1 ACPI: processor: driver doesn't need to evaluate _PDC This is because it added: +#include To arch/ia64/kernel/acpi.c. Unfortunately, the ia64_simdefconfig does not turn on CONFIG_ACPI, and we get build errors. The fix described in $subject seems to be the most sensible way to untangle the mess. The other issue is that acpi_get_sysname() is required for all configs, most of which define CONFIG_ACPI, but are not CONFIG_IA64_GENERIC. Turn it into an inline to cover the "non generic" ia64 configs; to prevent a duplicate definition build error, we need to wrap the definition in acpi.o inside an #ifdef. Finally, move the pm_idle and pm_power_off exports into process.c (which is always built), similar to other architectures, and allow the sim defconfig to link. Signed-off-by: Alex Chiang Signed-off-by: Tony Luck --- arch/ia64/include/asm/acpi.h | 25 +++++++++++++++++++++++++ arch/ia64/kernel/Makefile | 3 ++- arch/ia64/kernel/acpi.c | 33 +-------------------------------- arch/ia64/kernel/process.c | 4 ++++ 4 files changed, 32 insertions(+), 33 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index e97b255..d3b4830 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -99,7 +99,32 @@ ia64_acpi_release_global_lock (unsigned int *lock) #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ static inline void disable_acpi(void) { } +#ifdef CONFIG_IA64_GENERIC const char *acpi_get_sysname (void); +#else +static inline const char *acpi_get_sysname (void) +{ +# if defined (CONFIG_IA64_HP_SIM) + return "hpsim"; +# elif defined (CONFIG_IA64_HP_ZX1) + return "hpzx1"; +# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB) + return "hpzx1_swiotlb"; +# elif defined (CONFIG_IA64_SGI_SN2) + return "sn2"; +# elif defined (CONFIG_IA64_SGI_UV) + return "uv"; +# elif defined (CONFIG_IA64_DIG) + return "dig"; +# elif defined (CONFIG_IA64_XEN_GUEST) + return "xen"; +# elif defined(CONFIG_IA64_DIG_VTD) + return "dig_vtd"; +# else +# error Unknown platform. Fix acpi.c. 
+# endif +} +#endif int acpi_request_vector (u32 int_type); int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index e123634..78ad418 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -8,11 +8,12 @@ endif extra-y := head.o init_task.o vmlinux.lds -obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ +obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \ salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ unwind.o mca.o mca_asm.o topology.o dma-mapping.o +obj-$(CONFIG_ACPI) += acpi.o obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 40574ae..c16fb03 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -60,11 +60,6 @@ #define PREFIX "ACPI: " -void (*pm_idle) (void); -EXPORT_SYMBOL(pm_idle); -void (*pm_power_off) (void); -EXPORT_SYMBOL(pm_power_off); - u32 acpi_rsdt_forced; unsigned int acpi_cpei_override; unsigned int acpi_cpei_phys_cpuid; @@ -83,12 +78,10 @@ static unsigned long __init acpi_find_rsdp(void) "v1.0/r0.71 tables no longer supported\n"); return rsdp_phys; } -#endif const char __init * acpi_get_sysname(void) { -#ifdef CONFIG_IA64_GENERIC unsigned long rsdp_phys; struct acpi_table_rsdp *rsdp; struct acpi_table_xsdt *xsdt; @@ -143,30 +136,8 @@ acpi_get_sysname(void) #endif return "dig"; -#else -# if defined (CONFIG_IA64_HP_SIM) - return "hpsim"; -# elif defined (CONFIG_IA64_HP_ZX1) - return "hpzx1"; -# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB) - return "hpzx1_swiotlb"; -# elif defined (CONFIG_IA64_SGI_SN2) - return "sn2"; -# elif defined (CONFIG_IA64_SGI_UV) - return "uv"; -# elif defined (CONFIG_IA64_DIG) - return "dig"; -# elif defined (CONFIG_IA64_XEN_GUEST) - return "xen"; -# elif defined(CONFIG_IA64_DIG_VTD) - return "dig_vtd"; -# else -# error Unknown platform. Fix acpi.c. -# endif -#endif } - -#ifdef CONFIG_ACPI +#endif /* CONFIG_IA64_GENERIC */ #define ACPI_MAX_PLATFORM_INTERRUPTS 256 @@ -1060,5 +1031,3 @@ void acpi_restore_state_mem(void) {} * do_suspend_lowlevel() */ void do_suspend_lowlevel(void) {} - -#endif /* CONFIG_ACPI */ diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 9bcec99..454a6a4 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -60,6 +60,10 @@ unsigned long idle_halt; EXPORT_SYMBOL(idle_halt); unsigned long idle_nomwait; EXPORT_SYMBOL(idle_nomwait); +void (*pm_idle) (void); +EXPORT_SYMBOL(pm_idle); +void (*pm_power_off) (void); +EXPORT_SYMBOL(pm_power_off); void ia64_do_show_stack (struct unw_frame_info *info, void *arg) -- cgit v1.1 From e72aca30837d9c0ee287d18a092fc76a38a3b81d Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Thu, 25 Feb 2010 14:09:57 -0700 Subject: [IA64] build arch/ia64/kernel/acpi-ext.o when CONFIG_ACPI Simplify the makefile slightly by always building acpi-ext.o when CONFIG_ACPI is turned on. 
Yes, this adds a little bloat to the other configs, but not much: text data bss dec hex filename 839 41 0 880 370 arch/ia64/kernel/acpi-ext.o Before: text data bss dec hex filename 10952753 1299212 1334241 13586206 cf4f1e vmlinux After: text data bss dec hex filename 10953739 1299084 1334241 13587064 cf5278 vmlinux (gdb) p 13587064 - 13586206 $2 = 858 Seems like a small price to pay for the benefit of not having to think so hard about the multitude of ia64 configs when reading code/Makefiles. Signed-off-by: Alex Chiang Signed-off-by: Tony Luck --- arch/ia64/kernel/Makefile | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 78ad418..4138282 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -13,11 +13,8 @@ obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ unwind.o mca.o mca_asm.o topology.o dma-mapping.o -obj-$(CONFIG_ACPI) += acpi.o +obj-$(CONFIG_ACPI) += acpi.o acpi-ext.o obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o -obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o -obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o -obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o obj-$(CONFIG_IA64_PALINFO) += palinfo.o obj-$(CONFIG_IOSAPIC) += iosapic.o -- cgit v1.1 From 3a095a97e17897e8a8d7c3c995cb206f801c2477 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Fri, 26 Feb 2010 09:43:51 +0900 Subject: [IA64] use asm-generic/scatterlist.h IA64's scatterlist structure is identical to the generic one. Signed-off-by: FUJITA Tomonori Signed-off-by: Tony Luck --- arch/ia64/include/asm/scatterlist.h | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h index d6f5787..d8e9896 100644 --- a/arch/ia64/include/asm/scatterlist.h +++ b/arch/ia64/include/asm/scatterlist.h @@ -2,25 +2,6 @@ #define _ASM_IA64_SCATTERLIST_H /* - * Modified 1998-1999, 2001-2002, 2004 - * David Mosberger-Tang , Hewlett-Packard Co - */ - -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - unsigned int length; /* buffer length */ - - dma_addr_t dma_address; - unsigned int dma_length; -}; - -/* * It used to be that ISA_DMA_THRESHOLD had something to do with the * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart * from the aha1542.c driver, which isn't 64-bit clean anyhow) is to @@ -30,9 +11,6 @@ struct scatterlist { */ #define ISA_DMA_THRESHOLD 0xffffffff -#define sg_dma_len(sg) ((sg)->dma_length) -#define sg_dma_address(sg) ((sg)->dma_address) - -#define ARCH_HAS_SG_CHAIN +#include #endif /* _ASM_IA64_SCATTERLIST_H */ -- cgit v1.1 From 04157e4c0612fb820bbef221f6222c402e17af3b Mon Sep 17 00:00:00 2001 From: Frans Pop Date: Sat, 6 Feb 2010 18:47:10 +0100 Subject: [IA64] remove trailing space in messages ia64 parts of system wide cleanup to drop trailing whitespace from lines in message strings. 
Signed-off-by: Frans Pop Signed-off-by: Tony Luck --- arch/ia64/kernel/perfmon.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 6bcbe21..b81e46b 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2713,7 +2713,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg goto buffer_error; } - DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n", + DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n", ctx, ctx_flags, ctx->ctx_fl_system, @@ -3677,7 +3677,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) * "self-monitoring". */ if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { - DPRINT(("unblocking [%d] \n", task_pid_nr(task))); + DPRINT(("unblocking [%d]\n", task_pid_nr(task))); complete(&ctx->ctx_restart_done); } else { DPRINT(("[%d] armed exit trap\n", task_pid_nr(task))); -- cgit v1.1 From b6dcefdef49000fc871a851cba4b48454343cde5 Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Tue, 2 Feb 2010 14:42:01 -0800 Subject: [IA64] wrong attribute of HUB chip written in uv_setup() n_val should be assigned to n_val attribute of HUB chip. Signed-off-by: Roel Kluin Signed-off-by: Andrew Morton Signed-off-by: Tony Luck --- arch/ia64/uv/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/uv/kernel/setup.c b/arch/ia64/uv/kernel/setup.c index 7a5ae63..f149065 100644 --- a/arch/ia64/uv/kernel/setup.c +++ b/arch/ia64/uv/kernel/setup.c @@ -104,7 +104,7 @@ void __init uv_setup(char **cmdline_p) uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_base + lowmem_redir_size; uv_cpu_hub_info(cpu)->m_val = m_val; - uv_cpu_hub_info(cpu)->n_val = m_val; + uv_cpu_hub_info(cpu)->n_val = n_val; uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) -1; uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; -- cgit v1.1 From 50eb2a3cd0f50d912b26d0b79b7f443344608390 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 20 Dec 2009 15:00:10 +0200 Subject: KVM: Add KVM_MMIO kconfig item s390 doesn't have mmio, this will simplify ifdefing it out. Signed-off-by: Avi Kivity --- arch/ia64/kvm/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig index ef3e7be..bf82e47 100644 --- a/arch/ia64/kvm/Kconfig +++ b/arch/ia64/kvm/Kconfig @@ -26,6 +26,7 @@ config KVM select ANON_INODES select HAVE_KVM_IRQCHIP select KVM_APIC_ARCHITECTURE + select KVM_MMIO ---help--- Support hosting fully virtualized guest machines using hardware virtualization extensions. You will need a fairly recent -- cgit v1.1 From 46a26bf55714c1e2f17e34683292a389acb8e601 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:16 -0200 Subject: KVM: modify memslots layout in struct kvm Have a pointer to an allocated region inside struct kvm. 
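
A brief sketch of what the new layout means for users of struct kvm, assuming the field names used elsewhere in this series (nmemslots, memslots[], npages); the helper itself is hypothetical. Slots are now reached through a single pointer rather than embedded directly in struct kvm.

/* Illustrative only: walking the slot array behind kvm->memslots. */
static unsigned long example_count_guest_pages(struct kvm *kvm)
{
	struct kvm_memslots *slots = kvm->memslots;
	unsigned long pages = 0;
	int i;

	for (i = 0; i < slots->nmemslots; i++)
		pages += slots->memslots[i].npages;

	return pages;
}
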
[alex: fix ppc book 3s] Signed-off-by: Alexander Graf Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 5fdeec5..1ca1dbf 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1377,12 +1377,14 @@ static void free_kvm(struct kvm *kvm) static void kvm_release_vm_pages(struct kvm *kvm) { + struct kvm_memslots *slots; struct kvm_memory_slot *memslot; int i, j; unsigned long base_gfn; - for (i = 0; i < kvm->nmemslots; i++) { - memslot = &kvm->memslots[i]; + slots = kvm->memslots; + for (i = 0; i < slots->nmemslots; i++) { + memslot = &slots->memslots[i]; base_gfn = memslot->base_gfn; for (j = 0; j < memslot->npages; j++) { @@ -1802,7 +1804,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm, if (log->slot >= KVM_MEMORY_SLOTS) goto out; - memslot = &kvm->memslots[log->slot]; + memslot = &kvm->memslots->memslots[log->slot]; r = -ENOENT; if (!memslot->dirty_bitmap) goto out; @@ -1840,7 +1842,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, /* If nothing is dirty, don't bother messing with page tables. */ if (is_dirty) { kvm_flush_remote_tlbs(kvm); - memslot = &kvm->memslots[log->slot]; + memslot = &kvm->memslots->memslots[log->slot]; n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; memset(memslot->dirty_bitmap, 0, n); } -- cgit v1.1 From f7784b8ec9b6a041fa828cfbe9012fe51933f5ac Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:18 -0200 Subject: KVM: split kvm_arch_set_memory_region into prepare and commit Required for SRCU convertion later. Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 1ca1dbf..0757c70 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1578,15 +1578,15 @@ out: return r; } -int kvm_arch_set_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, struct kvm_memory_slot old, + struct kvm_userspace_memory_region *mem, int user_alloc) { unsigned long i; unsigned long pfn; - int npages = mem->memory_size >> PAGE_SHIFT; - struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; + int npages = memslot->npages; unsigned long base_gfn = memslot->base_gfn; if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) @@ -1610,6 +1610,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm, return 0; } +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + struct kvm_memory_slot old, + int user_alloc) +{ + return; +} + void kvm_arch_flush_shadow(struct kvm *kvm) { kvm_flush_remote_tlbs(kvm); -- cgit v1.1 From bc6678a33d9b952981a8e44a4f876c3ad64ca4d8 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:21 -0200 Subject: KVM: introduce kvm->srcu and convert kvm_set_memory_region to SRCU update Use two steps for memslot deletion: mark the slot invalid (which stops instantiation of new shadow pages for that slot, but allows destruction), then instantiate the new empty slot. Also simplifies kvm_handle_hva locking. 
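
To illustrate the reader side of this conversion, a minimal hypothetical helper: readers enter the SRCU read-side section on kvm->srcu, fetch the slots pointer with rcu_dereference(), and drop the section when done, while writers publish a new array and wait for readers before freeing the old one. The function name is an assumption; the locking pattern mirrors the one used in the patch.

/* Illustrative sketch: is a guest frame backed by some memslot? */
static int example_gfn_is_mapped(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memslots *slots;
	int i, idx, ret = 0;

	idx = srcu_read_lock(&kvm->srcu);	/* enter SRCU read side */
	slots = rcu_dereference(kvm->memslots);
	for (i = 0; i < slots->nmemslots; i++) {
		struct kvm_memory_slot *s = &slots->memslots[i];

		if (gfn >= s->base_gfn && gfn < s->base_gfn + s->npages) {
			ret = 1;
			break;
		}
	}
	srcu_read_unlock(&kvm->srcu, idx);	/* leave SRCU read side */

	return ret;
}
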
Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 0757c70..b2e4d16 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1382,7 +1382,7 @@ static void kvm_release_vm_pages(struct kvm *kvm) int i, j; unsigned long base_gfn; - slots = kvm->memslots; + slots = rcu_dereference(kvm->memslots); for (i = 0; i < slots->nmemslots; i++) { memslot = &slots->memslots[i]; base_gfn = memslot->base_gfn; @@ -1837,6 +1837,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot; int is_dirty = 0; + down_write(&kvm->slots_lock); spin_lock(&kvm->arch.dirty_log_lock); r = kvm_ia64_sync_dirty_log(kvm, log); @@ -1856,6 +1857,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, } r = 0; out: + up_write(&kvm->slots_lock); spin_unlock(&kvm->arch.dirty_log_lock); return r; } -- cgit v1.1 From e93f8a0f821e290ac5149830110a5f704db7a1fc Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:24 -0200 Subject: KVM: convert io_bus to SRCU Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index b2e4d16..d0ad538 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -241,10 +241,10 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return 0; mmio: if (p->dir) - r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr, + r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr, p->size, &p->data); else - r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr, + r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr, p->size, &p->data); if (r) printk(KERN_ERR"kvm: No iodevice found! 
addr:%lx\n", p->addr); -- cgit v1.1 From f656ce0185cabbbb0cf96877306879661297c7ad Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:25 -0200 Subject: KVM: switch vcpu context to use SRCU Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index d0ad538..d5e3846 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -636,12 +636,9 @@ static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { union context *host_ctx, *guest_ctx; - int r; + int r, idx; - /* - * down_read() may sleep and return with interrupts enabled - */ - down_read(&vcpu->kvm->slots_lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); again: if (signal_pending(current)) { @@ -663,7 +660,7 @@ again: if (r < 0) goto vcpu_run_fail; - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, idx); kvm_guest_enter(); /* @@ -687,7 +684,7 @@ again: kvm_guest_exit(); preempt_enable(); - down_read(&vcpu->kvm->slots_lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_handle_exit(kvm_run, vcpu); @@ -697,10 +694,10 @@ again: } out: - up_read(&vcpu->kvm->slots_lock); + srcu_read_unlock(&vcpu->kvm->srcu, idx); if (r > 0) { kvm_resched(vcpu); - down_read(&vcpu->kvm->slots_lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); goto again; } -- cgit v1.1 From 79fac95ecfa3969aab8119d37ccd7226165f933a Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 23 Dec 2009 14:35:26 -0200 Subject: KVM: convert slots_lock to a mutex Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index d5e3846..e6ac549 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1834,7 +1834,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot; int is_dirty = 0; - down_write(&kvm->slots_lock); + mutex_lock(&kvm->slots_lock); spin_lock(&kvm->arch.dirty_log_lock); r = kvm_ia64_sync_dirty_log(kvm, log); @@ -1854,7 +1854,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, } r = 0; out: - up_write(&kvm->slots_lock); + mutex_unlock(&kvm->slots_lock); spin_unlock(&kvm->arch.dirty_log_lock); return r; } -- cgit v1.1 From 0f0412c1a734700fb8ccccb1c09371642e029e2e Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Thu, 14 Jan 2010 18:05:58 +0100 Subject: KVM: ia64: remove redundant kvm_get_exit_data() NULL tests kvm_get_exit_data() cannot return a NULL pointer. 
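[editor's sketch] Why the NULL tests are redundant, in miniature; the field name below is an assumption, not the real ia64 definition:

/* A getter that returns the address of a member embedded in the vcpu
 * structure can never yield NULL, so callers need not check. */
static struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.exit_data;
}
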
Signed-off-by: Roel Kluin Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm_fw.c | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c index e4b8231..cb548ee 100644 --- a/arch/ia64/kvm/kvm_fw.c +++ b/arch/ia64/kvm/kvm_fw.c @@ -75,7 +75,7 @@ static void set_pal_result(struct kvm_vcpu *vcpu, struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); - if (p && p->exit_reason == EXIT_REASON_PAL_CALL) { + if (p->exit_reason == EXIT_REASON_PAL_CALL) { p->u.pal_data.ret = result; return ; } @@ -87,7 +87,7 @@ static void set_sal_result(struct kvm_vcpu *vcpu, struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); - if (p && p->exit_reason == EXIT_REASON_SAL_CALL) { + if (p->exit_reason == EXIT_REASON_SAL_CALL) { p->u.sal_data.ret = result; return ; } @@ -322,7 +322,7 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu) struct exit_ctl_data *p; p = kvm_get_exit_data(vcpu); - if (p && (p->exit_reason == EXIT_REASON_PAL_CALL)) + if (p->exit_reason == EXIT_REASON_PAL_CALL) index = p->u.pal_data.gr28; return index; @@ -646,18 +646,16 @@ static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1, p = kvm_get_exit_data(vcpu); - if (p) { - if (p->exit_reason == EXIT_REASON_SAL_CALL) { - *in0 = p->u.sal_data.in0; - *in1 = p->u.sal_data.in1; - *in2 = p->u.sal_data.in2; - *in3 = p->u.sal_data.in3; - *in4 = p->u.sal_data.in4; - *in5 = p->u.sal_data.in5; - *in6 = p->u.sal_data.in6; - *in7 = p->u.sal_data.in7; - return ; - } + if (p->exit_reason == EXIT_REASON_SAL_CALL) { + *in0 = p->u.sal_data.in0; + *in1 = p->u.sal_data.in1; + *in2 = p->u.sal_data.in2; + *in3 = p->u.sal_data.in3; + *in4 = p->u.sal_data.in4; + *in5 = p->u.sal_data.in5; + *in6 = p->u.sal_data.in6; + *in7 = p->u.sal_data.in7; + return ; } *in0 = 0; } -- cgit v1.1 From 647492047763c3ee8fe51ecf9a04f39040aa495b Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 19 Jan 2010 12:45:23 -0200 Subject: KVM: fix cleanup_srcu_struct on vm destruction cleanup_srcu_struct on VM destruction remains broken: BUG: unable to handle kernel paging request at ffffffffffffffff IP: [] srcu_read_lock+0x16/0x21 RIP: 0010:[] [] srcu_read_lock+0x16/0x21 Call Trace: [] kvm_arch_vcpu_uninit+0x1b/0x48 [kvm] [] kvm_vcpu_uninit+0x9/0x15 [kvm] [] vmx_free_vcpu+0x7f/0x8f [kvm_intel] [] kvm_arch_destroy_vm+0x78/0x111 [kvm] [] kvm_put_kvm+0xd4/0xfe [kvm] Move it to kvm_arch_destroy_vm. Signed-off-by: Marcelo Tosatti Reported-by: Jan Kiszka --- arch/ia64/kvm/kvm-ia64.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index e6ac549..0618898 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1404,6 +1404,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kfree(kvm->arch.vioapic); kvm_release_vm_pages(kvm); kvm_free_physmem(kvm); + cleanup_srcu_struct(&kvm->srcu); free_kvm(kvm); } -- cgit v1.1 From 16fbb5eecc4ab8d4eb4d2a683d7fd74ccacc890b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 1 Feb 2010 23:22:07 -0800 Subject: KVM: ia64: Fix string literal continuation lines String constants that are continued on subsequent lines with \ are not good. 
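[editor's sketch] A small illustration of the problem, with made-up printk arguments: a backslash continuation splices the next line, embedding its leading whitespace in the format string, whereas adjacent string literals concatenate cleanly.

/* Embeds the second line's leading whitespace in the string: */
printk(KERN_ERR "Unsupported access! \
	bundle[0]=0x%lx\n", b0);

/* Adjacent literals concatenate with no stray whitespace: */
printk(KERN_ERR "Unsupported access! "
	"bundle[0]=0x%lx\n", b0);
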
Signed-off-by: Joe Perches Signed-off-by: Avi Kivity --- arch/ia64/kvm/mmio.c | 4 ++-- arch/ia64/kvm/vcpu.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c index 9bf55af..fb8f9f5 100644 --- a/arch/ia64/kvm/mmio.c +++ b/arch/ia64/kvm/mmio.c @@ -316,8 +316,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) return; } else { inst_type = -1; - panic_vm(vcpu, "Unsupported MMIO access instruction! \ - Bunld[0]=0x%lx, Bundle[1]=0x%lx\n", + panic_vm(vcpu, "Unsupported MMIO access instruction! " + "Bunld[0]=0x%lx, Bundle[1]=0x%lx\n", bundle.i64[0], bundle.i64[1]); } diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index dce75b70..958815c 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c @@ -1639,8 +1639,8 @@ void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) * Otherwise panic */ if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) - panic_vm(vcpu, "Only support guests with vpsr.pk =0 \ - & vpsr.is=0\n"); + panic_vm(vcpu, "Only support guests with vpsr.pk =0 " + "& vpsr.is=0\n"); /* * For those IA64_PSR bits: id/da/dd/ss/ed/ia -- cgit v1.1 From 4b7bb9210047fe880bb71e6273c3a4526799dbd7 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 9 Feb 2010 10:41:56 +0800 Subject: KVM: ia64: destroy ioapic device if fail to setup default irq routing If KVM_CREATE_IRQCHIP fail due to kvm_setup_default_irq_routing(), ioapic device is not destroyed and kvm->arch.vioapic is not set to NULL, this may cause KVM_GET_IRQCHIP and KVM_SET_IRQCHIP access to unexcepted memory. Signed-off-by: Wei Yongjun Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 0618898..26e0e08 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -968,7 +968,7 @@ long kvm_arch_vm_ioctl(struct file *filp, goto out; r = kvm_setup_default_irq_routing(kvm); if (r) { - kfree(kvm->arch.vioapic); + kvm_ioapic_destroy(kvm); goto out; } break; -- cgit v1.1 From 5beb49305251e5669852ed541e8e2f2f7696c53e Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Fri, 5 Mar 2010 13:42:07 -0800 Subject: mm: change anon_vma linking to fix multi-process server scalability issue The old anon_vma code can lead to scalability issues with heavily forking workloads. Specifically, each anon_vma will be shared between the parent process and all its child processes. In a workload with 1000 child processes and a VMA with 1000 anonymous pages per process that get COWed, this leads to a system with a million anonymous pages in the same anon_vma, each of which is mapped in just one of the 1000 processes. However, the current rmap code needs to walk them all, leading to O(N) scanning complexity for each page. This can result in systems where one CPU is walking the page tables of 1000 processes in page_referenced_one, while all other CPUs are stuck on the anon_vma lock. This leads to catastrophic failure for a benchmark like AIM7, where the total number of processes can reach in the tens of thousands. Real workloads are still a factor 10 less process intensive than AIM7, but they are catching up. This patch changes the way anon_vmas and VMAs are linked, which allows us to associate multiple anon_vmas with a VMA. At fork time, each child process gets its own anon_vmas, in which its COWed pages will be instantiated. 
The parents' anon_vma is also linked to the VMA, because non-COWed pages could be present in any of the children. This reduces rmap scanning complexity to O(1) for the pages of the 1000 child processes, with O(N) complexity for at most 1/N pages in the system. This reduces the average scanning cost in heavily forking workloads from O(N) to 2. The only real complexity in this patch stems from the fact that linking a VMA to anon_vmas now involves memory allocations. This means vma_adjust can fail, if it needs to attach a VMA to anon_vma structures. This in turn means error handling needs to be added to the calling functions. A second source of complexity is that, because there can be multiple anon_vmas, the anon_vma linking in vma_adjust can no longer be done under "the" anon_vma lock. To prevent the rmap code from walking up an incomplete VMA, this patch introduces the VM_LOCK_RMAP VMA flag. This bit flag uses the same slot as the NOMMU VM_MAPPED_COPY, with an ifdef in mm.h to make sure it is impossible to compile a kernel that needs both symbolic values for the same bitflag. Some test results: Without the anon_vma changes, when AIM7 hits around 9.7k users (on a test box with 16GB RAM and not quite enough IO), the system ends up running >99% in system time, with every CPU on the same anon_vma lock in the pageout code. With these changes, AIM7 hits the cross-over point around 29.7k users. This happens with ~99% IO wait time, there never seems to be any spike in system time. The anon_vma lock contention appears to be resolved. [akpm@linux-foundation.org: cleanups] Signed-off-by: Rik van Riel Cc: KOSAKI Motohiro Cc: Larry Woodman Cc: Lee Schermerhorn Cc: Minchan Kim Cc: Andrea Arcangeli Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/perfmon.c | 1 + arch/ia64/mm/init.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index b81e46b..703062c 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2315,6 +2315,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t DPRINT(("Cannot allocate vma\n")); goto error_kmem; } + INIT_LIST_HEAD(&vma->anon_vma_chain); /* * partially initialize the vma for the sampling buffer diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index ca3335e..ed41759 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -117,6 +117,7 @@ ia64_init_addr_space (void) */ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (vma) { + INIT_LIST_HEAD(&vma->anon_vma_chain); vma->vm_mm = current->mm; vma->vm_start = current->thread.rbs_bot & PAGE_MASK; vma->vm_end = vma->vm_start + PAGE_SIZE; @@ -135,6 +136,7 @@ ia64_init_addr_space (void) if (!(current->personality & MMAP_PAGE_ZERO)) { vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (vma) { + INIT_LIST_HEAD(&vma->anon_vma_chain); vma->vm_mm = current->mm; vma->vm_end = PAGE_SIZE; vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); -- cgit v1.1 From 1fcccbac89f5bbc5e41aa72086960059fce372da Mon Sep 17 00:00:00 2001 From: Daisuke HATAYAMA Date: Fri, 5 Mar 2010 13:44:07 -0800 Subject: elf coredump: replace ELF_CORE_EXTRA_* macros by functions elf_core_dump() and elf_fdpic_core_dump() use #ifdef and the corresponding macro for hiding _multiline_ logics in functions. This patch removes #ifdef and replaces ELF_CORE_EXTRA_* by corresponding functions. 
For architectures not implemeonting ELF_CORE_EXTRA_*, we use weak functions in order to reduce a range of modification. This cleanup is for my next patches, but I think this cleanup itself is worth doing regardless of my firnal purpose. Signed-off-by: Daisuke HATAYAMA Cc: "Luck, Tony" Cc: Jeff Dike Cc: David Howells Cc: Greg Ungerer Cc: Roland McGrath Cc: Oleg Nesterov Cc: Ingo Molnar Cc: Alexander Viro Cc: Andi Kleen Cc: Alan Cox Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/elf.h | 48 ---------------------------------- arch/ia64/kernel/Makefile | 2 ++ arch/ia64/kernel/elfcore.c | 64 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 66 insertions(+), 48 deletions(-) create mode 100644 arch/ia64/kernel/elfcore.c (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h index 4c41656..b5298eb 100644 --- a/arch/ia64/include/asm/elf.h +++ b/arch/ia64/include/asm/elf.h @@ -219,54 +219,6 @@ do { \ NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR); \ } while (0) - -/* - * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out - * extra segments containing the gate DSO contents. Dumping its - * contents makes post-mortem fully interpretable later without matching up - * the same kernel and hardware config to see what PC values meant. - * Dumping its extra ELF program headers includes all the other information - * a debugger needs to easily find how the gate DSO was being used. - */ -#define ELF_CORE_EXTRA_PHDRS (GATE_EHDR->e_phnum) -#define ELF_CORE_WRITE_EXTRA_PHDRS \ -do { \ - const struct elf_phdr *const gate_phdrs = \ - (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \ - int i; \ - Elf64_Off ofs = 0; \ - for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \ - struct elf_phdr phdr = gate_phdrs[i]; \ - if (phdr.p_type == PT_LOAD) { \ - phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \ - phdr.p_filesz = phdr.p_memsz; \ - if (ofs == 0) { \ - ofs = phdr.p_offset = offset; \ - offset += phdr.p_filesz; \ - } \ - else \ - phdr.p_offset = ofs; \ - } \ - else \ - phdr.p_offset += ofs; \ - phdr.p_paddr = 0; /* match other core phdrs */ \ - DUMP_WRITE(&phdr, sizeof(phdr)); \ - } \ -} while (0) -#define ELF_CORE_WRITE_EXTRA_DATA \ -do { \ - const struct elf_phdr *const gate_phdrs = \ - (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \ - int i; \ - for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \ - if (gate_phdrs[i].p_type == PT_LOAD) { \ - DUMP_WRITE((void *) gate_phdrs[i].p_vaddr, \ - PAGE_ALIGN(gate_phdrs[i].p_memsz)); \ - break; \ - } \ - } \ -} while (0) - /* * format for entries in the Global Offset Table */ diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 4138282..db10b1e 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -45,6 +45,8 @@ endif obj-$(CONFIG_DMAR) += pci-dma.o obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o +obj-$(CONFIG_BINFMT_ELF) += elfcore.o + # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. 
CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 diff --git a/arch/ia64/kernel/elfcore.c b/arch/ia64/kernel/elfcore.c new file mode 100644 index 0000000..57a2298 --- /dev/null +++ b/arch/ia64/kernel/elfcore.c @@ -0,0 +1,64 @@ +#include +#include +#include +#include + +#include + + +Elf64_Half elf_core_extra_phdrs(void) +{ + return GATE_EHDR->e_phnum; +} + +int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, + unsigned long limit) +{ + const struct elf_phdr *const gate_phdrs = + (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); + int i; + Elf64_Off ofs = 0; + + for (i = 0; i < GATE_EHDR->e_phnum; ++i) { + struct elf_phdr phdr = gate_phdrs[i]; + + if (phdr.p_type == PT_LOAD) { + phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); + phdr.p_filesz = phdr.p_memsz; + if (ofs == 0) { + ofs = phdr.p_offset = offset; + offset += phdr.p_filesz; + } else { + phdr.p_offset = ofs; + } + } else { + phdr.p_offset += ofs; + } + phdr.p_paddr = 0; /* match other core phdrs */ + *size += sizeof(phdr); + if (*size > limit || !dump_write(file, &phdr, sizeof(phdr))) + return 0; + } + return 1; +} + +int elf_core_write_extra_data(struct file *file, size_t *size, + unsigned long limit) +{ + const struct elf_phdr *const gate_phdrs = + (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); + int i; + + for (i = 0; i < GATE_EHDR->e_phnum; ++i) { + if (gate_phdrs[i].p_type == PT_LOAD) { + void *addr = (void *)gate_phdrs[i].p_vaddr; + size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz); + + *size += memsz; + if (*size > limit || !dump_write(file, addr, memsz)) + return 0; + break; + } + } + return 1; +} -- cgit v1.1 From 8d9032bbe4671dc481261ccd4e161cd96e54b118 Mon Sep 17 00:00:00 2001 From: Daisuke HATAYAMA Date: Fri, 5 Mar 2010 13:44:10 -0800 Subject: elf coredump: add extended numbering support The current ELF dumper implementation can produce broken corefiles if program headers exceed 65535. This number is determined by the number of vmas which the process have. In particular, some extreme programs may use more than 65535 vmas. (If you google max_map_count, you can find some users facing this problem.) This kind of program never be able to generate correct coredumps. This patch implements ``extended numbering'' that uses sh_info field of the first section header instead of e_phnum field in order to represent upto 4294967295 vmas. This is supported by AMD64-ABI(http://www.x86-64.org/documentation.html) and Solaris(http://docs.sun.com/app/docs/doc/817-1984/). Of course, we are preparing patches for gdb and binutils. 
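[editor's sketch] A hedged, userspace-side view of how a consumer recovers the real count (the kernel writer side mirrors it): when e_phnum holds the PN_XNUM escape value, the count lives in sh_info of section header 0.

#include <elf.h>
#include <stddef.h>

static size_t real_phnum(const Elf64_Ehdr *ehdr, const Elf64_Shdr *shdr0)
{
	if (ehdr->e_phnum != PN_XNUM)	/* PN_XNUM == 0xffff */
		return ehdr->e_phnum;
	return shdr0->sh_info;		/* extended numbering */
}
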
Signed-off-by: Daisuke HATAYAMA Cc: "Luck, Tony" Cc: Jeff Dike Cc: David Howells Cc: Greg Ungerer Cc: Roland McGrath Cc: Oleg Nesterov Cc: Ingo Molnar Cc: Alexander Viro Cc: Andi Kleen Cc: Alan Cox Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/elfcore.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/elfcore.c b/arch/ia64/kernel/elfcore.c index 57a2298..bac1639 100644 --- a/arch/ia64/kernel/elfcore.c +++ b/arch/ia64/kernel/elfcore.c @@ -62,3 +62,19 @@ int elf_core_write_extra_data(struct file *file, size_t *size, } return 1; } + +size_t elf_core_extra_data_size(void) +{ + const struct elf_phdr *const gate_phdrs = + (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); + int i; + size_t size = 0; + + for (i = 0; i < GATE_EHDR->e_phnum; ++i) { + if (gate_phdrs[i].p_type == PT_LOAD) { + size += PAGE_ALIGN(gate_phdrs[i].p_memsz); + break; + } + } + return size; +} -- cgit v1.1 From 52cf25d0ab7f78eeecc59ac652ed5090f69b619e Mon Sep 17 00:00:00 2001 From: Emese Revfy Date: Tue, 19 Jan 2010 02:58:23 +0100 Subject: Driver core: Constify struct sysfs_ops in struct kobj_type Constify struct sysfs_ops. This is part of the ops structure constification effort started by Arjan van de Ven et al. Benefits of this constification: * prevents modification of data that is shared (referenced) by many other structure instances at runtime * detects/prevents accidental (but not intentional) modification attempts on archs that enforce read-only kernel data at runtime * potentially better optimized code as the compiler can assume that the const data cannot be changed * the compiler/linker move const data into .rodata and therefore exclude them from false sharing Signed-off-by: Emese Revfy Acked-by: David Teigland Acked-by: Matt Domsch Acked-by: Maciej Sosnowski Acked-by: Hans J. Koch Acked-by: Pekka Enberg Acked-by: Jens Axboe Acked-by: Stephen Hemminger Signed-off-by: Greg Kroah-Hartman --- arch/ia64/kernel/topology.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 8f06035..b3a5818 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * return ret; } -static struct sysfs_ops cache_sysfs_ops = { +static const struct sysfs_ops cache_sysfs_ops = { .show = cache_show }; -- cgit v1.1 From e28cbf22933d0c0ccaf3c4c27a1a263b41f73859 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 10 Mar 2010 15:21:19 -0800 Subject: improve sys_newuname() for compat architectures On an architecture that supports 32-bit compat we need to override the reported machine in uname with the 32-bit value. Instead of doing this separately in every architecture introduce a COMPAT_UTS_MACHINE define in and apply it directly in sys_newuname(). Signed-off-by: Christoph Hellwig Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Paul Mundt Cc: Jeff Dike Cc: Hirokazu Takata Cc: Thomas Gleixner Cc: Ingo Molnar Cc: H. 
Peter Anvin Cc: Al Viro Cc: Arnd Bergmann Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: "Luck, Tony" Cc: James Morris Cc: Andreas Schwab Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/compat.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h index dfcf75b..f90edc8 100644 --- a/arch/ia64/include/asm/compat.h +++ b/arch/ia64/include/asm/compat.h @@ -5,7 +5,8 @@ */ #include -#define COMPAT_USER_HZ 100 +#define COMPAT_USER_HZ 100 +#define COMPAT_UTS_MACHINE "i686\0\0\0" typedef u32 compat_size_t; typedef s32 compat_ssize_t; -- cgit v1.1 From dacbe41f776db0a5a9aee1e41594f405c95778a5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 10 Mar 2010 15:22:46 -0800 Subject: ptrace: move user_enable_single_step & co prototypes to linux/ptrace.h While in theory user_enable_single_step/user_disable_single_step/ user_enable_blockstep could also be provided as an inline or macro there's no good reason to do so, and having the prototype in one places keeps code size and confusion down. Roland said: The original thought there was that user_enable_single_step() et al might well be only an instruction or three on a sane machine (as if we have any of those!), and since there is only one call site inlining would be beneficial. But I agree that there is no strong reason to care about inlining it. As to the arch changes, there is only one thought I'd add to the record. It was always my thinking that for an arch where PTRACE_SINGLESTEP does text-modifying breakpoint insertion, user_enable_single_step() should not be provided. That is, arch_has_single_step()=>true means that there is an arch facility with "pure" semantics that does not have any unexpected side effects. Inserting a breakpoint might do very unexpected strange things in multi-threaded situations. Aside from that, it is a peculiar side effect that user_{enable,disable}_single_step() should cause COW de-sharing of text pages and so forth. For PTRACE_SINGLESTEP, all these peculiarities are the status quo ante for that arch, so having arch_ptrace() itself do those is one thing. But for building other things in the future, it is nicer to have a uniform "pure" semantics that arch-independent code can expect. OTOH, all such arch issues are really up to the arch maintainer. As of today, there is nothing but ptrace using user_enable_single_step() et al so it's a distinction without a practical difference. If/when there are other facilities that use user_enable_single_step() and might care, the affected arch's can revisit the question when someone cares about the quality of the arch support for said new facility. 
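[editor's sketch] Roughly what ends up centralized, not the exact hunk: one set of prototypes in linux/ptrace.h replaces the per-arch copies removed below.

/* in include/linux/ptrace.h (sketch) */
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
extern void user_enable_block_step(struct task_struct *);
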
Signed-off-by: Christoph Hellwig Cc: Oleg Nesterov Cc: Roland McGrath Acked-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/ptrace.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h index 14055c6..7ae9c3f 100644 --- a/arch/ia64/include/asm/ptrace.h +++ b/arch/ia64/include/asm/ptrace.h @@ -319,11 +319,7 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) ptrace_attach_sync_user_rbs(child) #define arch_has_single_step() (1) - extern void user_enable_single_step(struct task_struct *); - extern void user_disable_single_step(struct task_struct *); - #define arch_has_block_step() (1) - extern void user_enable_block_step(struct task_struct *); #endif /* !__KERNEL__ */ -- cgit v1.1 From 66ed5ef8b44374def8461a0a05d4afc34d4ad684 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Wed, 10 Mar 2010 15:23:25 -0800 Subject: pci-dma: ia64: use include/linux/pci-dma.h Signed-off-by: FUJITA Tomonori Cc: Tony Luck Cc: Jesse Barnes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/Kconfig | 3 +++ arch/ia64/include/asm/pci.h | 14 +------------- 2 files changed, 4 insertions(+), 13 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 9a50d7d..4d4f418 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -53,6 +53,9 @@ config MMU bool default y +config NEED_DMA_MAP_STATE + def_bool y + config SWIOTLB bool diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index 55281aa..4adf227 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h @@ -56,19 +56,7 @@ pcibios_penalize_isa_irq (int irq, int active) #include -/* pci_unmap_{single,page} is not a nop, thus... 
*/ -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ - dma_addr_t ADDR_NAME; -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ - __u32 LEN_NAME; -#define pci_unmap_addr(PTR, ADDR_NAME) \ - ((PTR)->ADDR_NAME) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ - (((PTR)->ADDR_NAME) = (VAL)) -#define pci_unmap_len(PTR, LEN_NAME) \ - ((PTR)->LEN_NAME) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ - (((PTR)->LEN_NAME) = (VAL)) +#include #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, -- cgit v1.1 From f41b177157718abe9a93868bb76e47d4a6f3681d Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Wed, 10 Mar 2010 15:23:30 -0800 Subject: pci-dma: add linux/pci-dma.h to linux/pci.h All the architectures properly set NEED_DMA_MAP_STATE now so we can safely add linux/pci-dma.h to linux/pci.h and remove the linux/pci-dma.h inclusion in arch's asm/pci.h Signed-off-by: FUJITA Tomonori Acked-by: Arnd Bergmann Cc: Jesse Barnes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/pci.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index 4adf227..73b5f78 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h @@ -56,8 +56,6 @@ pcibios_penalize_isa_irq (int irq, int active) #include -#include - #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, -- cgit v1.1 From 6ffdc5774a9ef80e58db398a8307d5b2db2644ce Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 3 Mar 2010 19:57:22 -0800 Subject: ia64: Convert ia64 to use read/update_persistent_clock This patch converts the ia64 architecture to use the generic read_persistent_clock and update_persistent_clock interfaces, reducing the amount of arch specific code we have to maintain, and allowing for further cleanups in the future. I have not built or tested this patch, so help from arch maintainers would be appreciated. Signed-off-by: John Stultz Cc: Tony Luck Cc: Fenghua Yu Cc: Andrew Morton LKML-Reference: <1267675049-12337-8-git-send-email-johnstul@us.ibm.com> Signed-off-by: Thomas Gleixner --- arch/ia64/kernel/time.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 47a1927..653b3c4 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -430,18 +430,16 @@ static int __init rtc_init(void) } module_init(rtc_init); +void read_persistent_clock(struct timespec *ts) +{ + efi_gettimeofday(ts); +} + void __init time_init (void) { register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction); - efi_gettimeofday(&xtime); ia64_init_itm(); - - /* - * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the - * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC). - */ - set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); } /* -- cgit v1.1 From 68ca406930d6380b3be7ada5f15fcf85bfcbd552 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Fri, 19 Feb 2010 00:09:22 -0500 Subject: ACPI: delete the "acpi=ht" boot option acpi=ht was important in 2003 -- before ACPI was universally deployed and enabled by default in the major Linux distributions. At that time, there were a fair number of people who or chose to, or needed to, run with acpi=off, yet also wanted access to Hyper-threading. 
Today we find that many invocations of "acpi=ht" are accidental, and thus is it possible that it is doing more harm than good. In 2.6.34, we warn on invocation of acpi=ht. In 2.6.35, we delete the boot option. Signed-off-by: Len Brown --- arch/ia64/include/asm/acpi.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index 21adbd7..837dc82 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -94,7 +94,6 @@ ia64_acpi_release_global_lock (unsigned int *lock) #define acpi_noirq 0 /* ACPI always enabled on IA64 */ #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */ #define acpi_strict 1 /* no ACPI spec workarounds on IA64 */ -#define acpi_ht 0 /* no HT-only mode on IA64 */ #endif #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ static inline void disable_acpi(void) { } -- cgit v1.1 From d8191fa4a33fdc817277da4f2b7f771ff605a41c Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Mon, 22 Feb 2010 12:11:39 -0700 Subject: ACPI: processor: driver doesn't need to evaluate _PDC Now that the early _PDC evaluation path knows how to correctly evaluate _PDC on only physically present processors, there's no need for the processor driver to evaluate it later when it loads. To cover the hotplug case, push _PDC evaluation down into the hotplug paths. Cc: x86@kernel.org Cc: Tony Luck Acked-by: Venkatesh Pallipadi Signed-off-by: Alex Chiang Signed-off-by: Len Brown --- arch/ia64/kernel/acpi.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index a7ca07f..f1c9f70 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -907,6 +908,8 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu) cpu_set(cpu, cpu_present_map); ia64_cpu_to_sapicid[cpu] = physid; + acpi_processor_set_pdc(handle); + *pcpu = cpu; return (0); } -- cgit v1.1 From 5a0e3ad6af8660be21ca98a971cd00f331318c05 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Mar 2010 17:04:11 +0900 Subject: include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h percpu.h is included by sched.h and module.h and thus ends up being included when building most .c files. percpu.h includes slab.h which in turn includes gfp.h making everything defined by the two files universally available and complicating inclusion dependencies. percpu.h -> slab.h dependency is about to be removed. Prepare for this change by updating users of gfp and slab facilities include those headers directly instead of assuming availability. As this conversion needs to touch large number of source files, the following script is used as the basis of conversion. http://userweb.kernel.org/~tj/misc/slabh-sweep.py The script does the followings. * Scan files for gfp and slab usages and update includes such that only the necessary includes are there. ie. if only gfp is used, gfp.h, if slab is used, slab.h. * When the script inserts a new include, it looks at the include blocks and try to put the new include such that its order conforms to its surrounding. It's put in the include block which contains core kernel includes, in the same order that the rest are ordered - alphabetical, Christmas tree, rev-Xmas-tree or at the end if there doesn't seem to be any matching order. 
* If the script can't find a place to put a new include (mostly because the file doesn't have fitting include block), it prints out an error message indicating which .h file needs to be added to the file. The conversion was done in the following steps. 1. The initial automatic conversion of all .c files updated slightly over 4000 files, deleting around 700 includes and adding ~480 gfp.h and ~3000 slab.h inclusions. The script emitted errors for ~400 files. 2. Each error was manually checked. Some didn't need the inclusion, some needed manual addition while adding it to implementation .h or embedding .c file was more appropriate for others. This step added inclusions to around 150 files. 3. The script was run again and the output was compared to the edits from #2 to make sure no file was left behind. 4. Several build tests were done and a couple of problems were fixed. e.g. lib/decompress_*.c used malloc/free() wrappers around slab APIs requiring slab.h to be added manually. 5. The script was run on all .h files but without automatically editing them as sprinkling gfp.h and slab.h inclusions around .h files could easily lead to inclusion dependency hell. Most gfp.h inclusion directives were ignored as stuff from gfp.h was usually wildly available and often used in preprocessor macros. Each slab.h inclusion directive was examined and added manually as necessary. 6. percpu.h was updated not to include slab.h. 7. Build test were done on the following configurations and failures were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my distributed build env didn't work with gcov compiles) and a few more options had to be turned off depending on archs to make things build (like ipr on powerpc/64 which failed due to missing writeq). * x86 and x86_64 UP and SMP allmodconfig and a custom test config. * powerpc and powerpc64 SMP allmodconfig * sparc and sparc64 SMP allmodconfig * ia64 SMP allmodconfig * s390 SMP allmodconfig * alpha SMP allmodconfig * um on x86_64 SMP allmodconfig 8. percpu.h modifications were reverted so that it could be applied as a separate patch and serve as bisection point. Given the fact that I had only a couple of failures from tests on step 6, I'm fairly confident about the coverage of this conversion patch. If there is a breakage, it's likely to be something in one of the arch headers which should be easily discoverable easily on most builds of the specific arch. 
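[editor's sketch] The rule the sweep enforces, in miniature; file and function names are invented: any file that uses slab or gfp facilities includes the headers itself instead of inheriting them through percpu.h.

/* some_driver.c */
#include <linux/slab.h>		/* kmalloc()/kfree() */
#include <linux/gfp.h>		/* GFP_KERNEL */

static int *alloc_one(void)
{
	return kmalloc(sizeof(int), GFP_KERNEL);
}
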
Signed-off-by: Tejun Heo Guess-its-ok-by: Christoph Lameter Cc: Ingo Molnar Cc: Lee Schermerhorn --- arch/ia64/include/asm/dmi.h | 1 + arch/ia64/kernel/acpi-ext.c | 1 + arch/ia64/kernel/acpi.c | 1 + arch/ia64/kernel/cpufreq/acpi-cpufreq.c | 1 + arch/ia64/kernel/efi.c | 1 + arch/ia64/kernel/iosapic.c | 1 + arch/ia64/kernel/irq_ia64.c | 1 - arch/ia64/kernel/mca.c | 1 + arch/ia64/kernel/mca_drv.c | 1 + arch/ia64/kernel/pci-swiotlb.c | 1 + arch/ia64/kernel/perfmon.c | 1 + arch/ia64/kernel/process.c | 2 +- arch/ia64/kernel/ptrace.c | 1 - arch/ia64/kernel/topology.c | 1 + arch/ia64/kernel/uncached.c | 2 +- arch/ia64/kvm/kvm-ia64.c | 2 +- arch/ia64/mm/discontig.c | 1 + arch/ia64/mm/hugetlbpage.c | 1 - arch/ia64/mm/tlb.c | 1 + arch/ia64/sn/kernel/bte.c | 1 + arch/ia64/sn/kernel/io_acpi_init.c | 1 + arch/ia64/sn/kernel/io_common.c | 1 + arch/ia64/sn/kernel/io_init.c | 1 + arch/ia64/sn/kernel/irq.c | 1 + arch/ia64/sn/kernel/msi_sn.c | 1 + arch/ia64/sn/pci/pci_dma.c | 1 + arch/ia64/sn/pci/pcibr/pcibr_provider.c | 1 + arch/ia64/sn/pci/tioca_provider.c | 1 + arch/ia64/sn/pci/tioce_provider.c | 1 + arch/ia64/xen/grant-table.c | 1 + 30 files changed, 27 insertions(+), 6 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/dmi.h b/arch/ia64/include/asm/dmi.h index 00eb1b1..1ed4c8f 100644 --- a/arch/ia64/include/asm/dmi.h +++ b/arch/ia64/include/asm/dmi.h @@ -1,6 +1,7 @@ #ifndef _ASM_DMI_H #define _ASM_DMI_H 1 +#include #include /* Use normal IO mappings for DMI */ diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c index b7515bc..8b9318d 100644 --- a/arch/ia64/kernel/acpi-ext.c +++ b/arch/ia64/kernel/acpi-ext.c @@ -10,6 +10,7 @@ #include #include +#include #include #include diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index f1c9f70..4d1a7e9 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c index 7b43545..b0b4e6e 100644 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index c745d0a..a0f0019 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 95ac77a..7ded766 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -86,6 +86,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index d4093a1..6404793 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include /* for rand_initialize_irq() */ #include diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 378b483..a0220dc 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -85,6 +85,7 @@ #include #include #include +#include #include #include diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index f94aaa8..09b4d68 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 
53292ab..3095654 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -1,6 +1,7 @@ /* Glue code to lib/swiotlb.c */ #include +#include #include #include #include diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 703062c..ab985f7 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index d92765c..53f1648 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -15,11 +15,11 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index b61afbb..0dec7f7 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -11,7 +11,6 @@ */ #include #include -#include #include #include #include diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index b3a5818..28f299d 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index a595823..c4696d2 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -18,9 +18,9 @@ #include #include #include -#include #include #include +#include #include #include #include diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 26e0e08..73c5c2b 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -23,8 +23,8 @@ #include #include #include -#include #include +#include #include #include #include diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 8d586d1..6162032 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index b0f6157..1841ee7 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index f3de9d7..5dfd916 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c index c6d6b62..cad775a 100644 --- a/arch/ia64/sn/kernel/bte.c +++ b/arch/ia64/sn/kernel/bte.c @@ -19,6 +19,7 @@ #include #include #include +#include #include diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c index 66f633b..8cdcb17 100644 --- a/arch/ia64/sn/kernel/io_acpi_init.c +++ b/arch/ia64/sn/kernel/io_acpi_init.c @@ -13,6 +13,7 @@ #include #include "xtalk/hubdev.h" #include +#include /* diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 308e659..4433dd0 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index ee774c3..98079f2 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -6,6 +6,7 @@ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved. 
*/ +#include #include #include #include diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 40d6eed..13c15d9 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index fbbfb97..ebfdd6a 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 98b6849..a9d310d 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -9,6 +9,7 @@ * a description of how these routines should be used. */ +#include #include #include #include diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index d13e5a2..3cb5cf3 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index efb4545..4d4536e 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index 012f3b8..27faba0 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c index 777dd9a..48cca37 100644 --- a/arch/ia64/xen/grant-table.c +++ b/arch/ia64/xen/grant-table.c @@ -22,6 +22,7 @@ #include #include +#include #include #include -- cgit v1.1 From 57283776b2b821ba4d592f61cad04d0293412740 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Thu, 11 Mar 2010 12:20:11 -0700 Subject: ACPI: pci_root: pass acpi_pci_root to arch-specific scan The acpi_pci_root structure contains all the individual items (acpi_device, domain, bus number) we pass to pci_acpi_scan_root(), so just pass the single acpi_pci_root pointer directly. This will make it easier to add _CBA support later. For _CBA, we need the entire downstream bus range, not just the base bus number. We have that in the acpi_pci_root structure, so passing the pointer makes it available to the arch-specific code. 
Signed-off-by: Bjorn Helgaas Reviewed-by: Kenji Kaneshige Signed-off-by: Len Brown --- arch/ia64/pci/pci.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 64aff52..aa2533a 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -335,8 +335,11 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) } struct pci_bus * __devinit -pci_acpi_scan_root(struct acpi_device *device, int domain, int bus) +pci_acpi_scan_root(struct acpi_pci_root *root) { + struct acpi_device *device = root->device; + int domain = root->segment; + int bus = root->secondary.start; struct pci_controller *controller; unsigned int windows = 0; struct pci_bus *pbus; -- cgit v1.1 From 1527bc8b928dd1399c3d3467dd47d9ede210978a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 1 Feb 2010 15:03:07 +0100 Subject: bitops: Optimize hweight() by making use of compile-time evaluation Rename the extisting runtime hweight() implementations to __arch_hweight(), rename the compile-time versions to __const_hweight() and then have hweight() pick between them. Suggested-by: H. Peter Anvin Signed-off-by: Peter Zijlstra LKML-Reference: <20100318111929.GB11152@aftab> Acked-by: H. Peter Anvin LKML-Reference: <1265028224.24455.154.camel@laptop> Signed-off-by: H. Peter Anvin --- arch/ia64/include/asm/bitops.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index 6ebc229..9da3df6 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -437,17 +437,18 @@ __fls (unsigned long x) * hweightN: returns the hamming weight (i.e. the number * of bits set) of a N-bit word */ -static __inline__ unsigned long -hweight64 (unsigned long x) +static __inline__ unsigned long __arch_hweight64(unsigned long x) { unsigned long result; result = ia64_popcnt(x); return result; } -#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful) -#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) -#define hweight8(x) (unsigned int) hweight64((x) & 0xfful) +#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful)) +#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful)) +#define __arch_hweight8(x) ((unsigned int) __arch_hweight64((x) & 0xfful)) + +#include #endif /* __KERNEL__ */ -- cgit v1.1 From 87bf6e7de1134f48681fd2ce4b7c1ec45458cb6d Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Mon, 12 Apr 2010 19:35:35 +0900 Subject: KVM: fix the handling of dirty bitmaps to avoid overflows Int is not long enough to store the size of a dirty bitmap. This patch fixes this problem with the introduction of a wrapper function to calculate the sizes of dirty bitmaps. Note: in mark_page_dirty(), we have to consider the fact that __set_bit() takes the offset as int, not long. 
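[editor's sketch] The wrapper being introduced, sketched here for reference (the real helper lives in common KVM headers); returning unsigned long is the point, since an int can overflow for very large slots, and it matches the open-coded expression removed below.

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
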
Signed-off-by: Takuya Yoshikawa Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 73c5c2b..7f3c0a2 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1802,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm, { struct kvm_memory_slot *memslot; int r, i; - long n, base; + long base; + unsigned long n; unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); @@ -1815,7 +1816,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm, if (!memslot->dirty_bitmap) goto out; - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; + n = kvm_dirty_bitmap_bytes(memslot); base = memslot->base_gfn / BITS_PER_LONG; for (i = 0; i < n/sizeof(long); ++i) { @@ -1831,7 +1832,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r; - int n; + unsigned long n; struct kvm_memory_slot *memslot; int is_dirty = 0; @@ -1850,7 +1851,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, if (is_dirty) { kvm_flush_remote_tlbs(kvm); memslot = &kvm->memslots->memslots[log->slot]; - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; + n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); } r = 0; -- cgit v1.1 From 2c2df8418ac7908eec4558407b83f16739006c54 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 30 Mar 2010 01:07:02 -0700 Subject: x86, acpi/irq: Introduce apci_isa_irq_to_gsi There are a number of cases where the current code makes the assumption that isa irqs identity map to the first 16 acpi global system intereupts. In most instances that assumption is correct as that is the required behaviour in dual i8259 mode and the default behavior in ioapic mode. However there are some systems out there that take advantage of acpis interrupt remapping for the isa irqs to have a completely different mapping of isa_irq to gsi. Introduce acpi_isa_irq_to_gsi to perform this mapping explicitly in the code that needs it. Initially this will be just the current assumed identity mapping to ensure it's introduction does not cause regressions. Signed-off-by: Eric W. Biederman LKML-Reference: <1269936436-7039-1-git-send-email-ebiederm@xmission.com> Signed-off-by: H. Peter Anvin --- arch/ia64/kernel/acpi.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 4d1a7e9..c6c90f3 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -785,6 +785,14 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) return 0; } +int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) +{ + if (isa_irq >= 16) + return -1; + *gsi = isa_irq; + return 0; +} + /* * ACPI based hotplug CPU support */ -- cgit v1.1 From 308eb7add8adaca8088c28a3f7610069b70d1ad6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 May 2010 12:10:39 +0200 Subject: ia64: add sparse annotation to __ia64_per_cpu_var() ia64 has its own optimized percpu accessor - __ia64_per_cpu_var(). Add percpu sparse annotations to it. 
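[editor's sketch] A usage sketch with a made-up variable name: with the annotation, sparse verifies that only genuine per-cpu symbols reach the accessor, while the __force cast keeps the direct access ia64 relies on.

DEFINE_PER_CPU(int, nr_foo);		/* hypothetical per-cpu variable */

static int read_nr_foo(void)
{
	return __ia64_per_cpu_var(nr_foo);	/* ok: nr_foo is per-cpu */
}
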
Signed-off-by: Tejun Heo Reviewed-by: Christoph Lameter --- arch/ia64/include/asm/percpu.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h index f7c00a5..1bd4082 100644 --- a/arch/ia64/include/asm/percpu.h +++ b/arch/ia64/include/asm/percpu.h @@ -39,7 +39,10 @@ extern void *per_cpu_init(void); * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly * more efficient. */ -#define __ia64_per_cpu_var(var) var +#define __ia64_per_cpu_var(var) (*({ \ + __verify_pcpu_ptr(&(var)); \ + ((typeof(var) __kernel __force *)&(var)); \ +})) #include -- cgit v1.1 From 0ddc9324b1a842afd77e8e86698b1d1d2ffed022 Mon Sep 17 00:00:00 2001 From: Andreas Dilger Date: Fri, 14 May 2010 11:13:27 +0200 Subject: add descriptive comment for TIF_MEMDIE task flag declaration. Signed-off-by: Andreas Dilger Acked-by: KOSAKI Motohiro Signed-off-by: Jiri Kosina --- arch/ia64/include/asm/thread_info.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index 8ce2e38..b6a5ba2 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -102,7 +102,7 @@ struct thread_info { #define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ #define TIF_NOTIFY_RESUME 6 /* resumption notification requested */ #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ -#define TIF_MEMDIE 17 +#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ #define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */ #define TIF_FREEZE 20 /* is freezing for suspend */ -- cgit v1.1 From 1114b68468fa2359e15c75f415793b9dd0e5d837 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 27 Apr 2010 16:24:22 +0200 Subject: sn_hwperf: Kill BKL usage This driver always gave up the BKL in its ioctl function, so just convert it to unlocked_ioctl and remove the BKL here. 
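[editor's sketch] The shape of the conversion, reduced to invented names: the handler loses the inode argument, returns long, stops touching the BKL, and is wired up through .unlocked_ioctl.

static long foo_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	/* handle cmd without lock_kernel()/unlock_kernel() */
	return 0;
}

static const struct file_operations foo_fops = {
	.unlocked_ioctl	= foo_ioctl,
};
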
Signed-off-by: Arnd Bergmann Signed-off-by: Frederic Weisbecker --- arch/ia64/sn/kernel/sn2/sn_hwperf.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 55ac3c4..d68fe0f 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -682,8 +681,7 @@ static int sn_hwperf_map_err(int hwperf_err) /* * ioctl for "sn_hwperf" misc device */ -static int -sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg) +static long sn_hwperf_ioctl(struct file *fp, u32 op, unsigned long arg) { struct sn_hwperf_ioctl_args a; struct cpuinfo_ia64 *cdata; @@ -699,8 +697,6 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg) int i; int j; - unlock_kernel(); - /* only user requests are allowed here */ if ((op & SN_HWPERF_OP_MASK) < 10) { r = -EINVAL; @@ -859,12 +855,11 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg) error: vfree(p); - lock_kernel(); return r; } static const struct file_operations sn_hwperf_fops = { - .ioctl = sn_hwperf_ioctl, + .unlocked_ioctl = sn_hwperf_ioctl, }; static struct miscdevice sn_hwperf_dev = { -- cgit v1.1 From 5c0d0920a270b9f2aa20c1cecb162703da32e766 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Fri, 12 Mar 2010 08:45:39 +0800 Subject: KVM: ia64: fix the error code of ioctl KVM_IA64_VCPU_GET_STACK failure The ioctl KVM_IA64_VCPU_GET_STACK does not set the error code if copy_to_user() fail, and 0 will be return, we should use -EFAULT instead of 0 in this case, so this patch fixed it. Signed-off-by: Wei Yongjun Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 7f3c0a2..38d5130 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1535,8 +1535,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, goto out; if (copy_to_user(user_stack, stack, - sizeof(struct kvm_ia64_vcpu_stack))) + sizeof(struct kvm_ia64_vcpu_stack))) { + r = -EFAULT; goto out; + } break; } -- cgit v1.1 From 600f1ec3761671307935a583c46f17fff0fa9b72 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Fri, 12 Mar 2010 10:11:15 +0800 Subject: KVM: ia64: fix the error of ioctl KVM_IRQ_LINE if no irq chip If no irq chip in kernel, ioctl KVM_IRQ_LINE will return -EFAULT. But I see in other place such as KVM_[GET|SET]IRQCHIP, -ENXIO is return. So this patch used -ENXIO instead of -EFAULT. 
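[editor's sketch] The pattern being restored, with dst/src/len as placeholders: a failed copy_to_user() must surface as -EFAULT rather than falling through to a success return.

if (copy_to_user(dst, src, len))
	return -EFAULT;		/* never report success on a faulted copy */
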
Signed-off-by: Wei Yongjun Signed-off-by: Marcelo Tosatti --- arch/ia64/kvm/kvm-ia64.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 38d5130..d7bac1f 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -979,11 +979,13 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; + r = -ENXIO; if (irqchip_in_kernel(kvm)) { __s32 status; status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event.irq, irq_event.level); if (ioctl == KVM_IRQ_LINE_STATUS) { + r = -EFAULT; irq_event.status = status; if (copy_to_user(argp, &irq_event, sizeof irq_event)) -- cgit v1.1 From 90d83dc3d49f5101addae962ccc1b4aff66b68d8 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 19 Apr 2010 17:41:23 +0800 Subject: KVM: use the correct RCU API for PROVE_RCU=y The RCU/SRCU API have already changed for proving RCU usage. I got the following dmesg when PROVE_RCU=y because we used incorrect API. This patch coverts rcu_deference() to srcu_dereference() or family API. =================================================== [ INFO: suspicious rcu_dereference_check() usage. ] --------------------------------------------------- arch/x86/kvm/mmu.c:3020 invoked rcu_dereference_check() without protection! other info that might help us debug this: rcu_scheduler_active = 1, debug_locks = 0 2 locks held by qemu-system-x86/8550: #0: (&kvm->slots_lock){+.+.+.}, at: [] kvm_set_memory_region+0x29/0x50 [kvm] #1: (&(&kvm->mmu_lock)->rlock){+.+...}, at: [] kvm_arch_commit_memory_region+0xa6/0xe2 [kvm] stack backtrace: Pid: 8550, comm: qemu-system-x86 Not tainted 2.6.34-rc4-tip-01028-g939eab1 #27 Call Trace: [] lockdep_rcu_dereference+0xaa/0xb3 [] kvm_mmu_calculate_mmu_pages+0x44/0x7d [kvm] [] kvm_arch_commit_memory_region+0xb7/0xe2 [kvm] [] __kvm_set_memory_region+0x636/0x6e2 [kvm] [] kvm_set_memory_region+0x37/0x50 [kvm] [] vmx_set_tss_addr+0x46/0x5a [kvm_intel] [] kvm_arch_vm_ioctl+0x17a/0xcf8 [kvm] [] ? unlock_page+0x27/0x2c [] ? __do_fault+0x3a9/0x3e1 [] kvm_vm_ioctl+0x364/0x38d [kvm] [] ? up_read+0x23/0x3d [] vfs_ioctl+0x32/0xa6 [] do_vfs_ioctl+0x495/0x4db [] ? fget_light+0xc2/0x241 [] ? do_sys_open+0x104/0x116 [] ? retint_swapgs+0xe/0x13 [] sys_ioctl+0x47/0x6a [] system_call_fastpath+0x16/0x1b Signed-off-by: Lai Jiangshan Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index d7bac1f..d5f4e91 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1381,7 +1381,7 @@ static void kvm_release_vm_pages(struct kvm *kvm) int i, j; unsigned long base_gfn; - slots = rcu_dereference(kvm->memslots); + slots = kvm_memslots(kvm); for (i = 0; i < slots->nmemslots; i++) { memslot = &slots->memslots[i]; base_gfn = memslot->base_gfn; -- cgit v1.1 From f3d46f9d3194e0329216002a8724d4c0957abc79 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 17 May 2010 14:33:53 +1000 Subject: atomic_t: Cast to volatile when accessing atomic variables In preparation for removing volatile from the atomic_t definition, this patch adds a volatile cast to all the atomic read functions. 
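[editor's sketch] Why the cast matters, with an illustrative flag variable: without a volatile-qualified access, the compiler could hoist the load out of a polling loop once volatile disappears from the atomic_t definition itself.

static atomic_t flag;

static void wait_for_flag(void)
{
	while (atomic_read(&flag) == 0)	/* must re-load ->counter each pass */
		cpu_relax();
}
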
Signed-off-by: Anton Blanchard Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/atomic.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 88405cb..4e19484 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -21,8 +21,8 @@ #define ATOMIC_INIT(i) ((atomic_t) { (i) }) #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) -#define atomic_read(v) ((v)->counter) -#define atomic64_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) +#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) #define atomic64_set(v,i) (((v)->counter) = (i)) -- cgit v1.1 From 82b22c887e02fc6ebeebc8ec43fb1d348e2a6a58 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 6 May 2010 19:36:06 +0200 Subject: [IA64] removing redundant ifdef Pointless to use #ifdef CONFIG_NUMA in code that is already inside another #ifdef CONFIG_NUMA. Signed-off-by: Jiri Olsa Signed-off-by: Tony Luck --- arch/ia64/include/asm/mmzone.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/mmzone.h b/arch/ia64/include/asm/mmzone.h index f2ca320..e0de617 100644 --- a/arch/ia64/include/asm/mmzone.h +++ b/arch/ia64/include/asm/mmzone.h @@ -19,16 +19,12 @@ static inline int pfn_to_nid(unsigned long pfn) { -#ifdef CONFIG_NUMA extern int paddr_to_nid(unsigned long); int nid = paddr_to_nid(pfn << PAGE_SHIFT); if (nid < 0) return 0; else return nid; -#else - return 0; -#endif } #ifdef CONFIG_IA64_DIG /* DIG systems are small */ -- cgit v1.1 From 80aa9bf0d8083554967400d48dbec078aa8f128e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 5 Apr 2010 12:05:31 -0700 Subject: [IA64] arch/ia64/hp/common/sba_iommu.c: Rename dev_info to adi There is a macro called dev_info that prints struct device specific information. Having variables with the same name can be confusing and prevents conversion of the macro to a function. Rename the existing dev_info variables to something else in preparation to converting the dev_info macro to a function. Signed-off-by: Joe Perches Signed-off-by: Tony Luck --- arch/ia64/hp/common/sba_iommu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index e14c492..4ce8d13 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -2046,13 +2046,13 @@ acpi_sba_ioc_add(struct acpi_device *device) struct ioc *ioc; acpi_status status; u64 hpa, length; - struct acpi_device_info *dev_info; + struct acpi_device_info *adi; status = hp_acpi_csr_space(device->handle, &hpa, &length); if (ACPI_FAILURE(status)) return 1; - status = acpi_get_object_info(device->handle, &dev_info); + status = acpi_get_object_info(device->handle, &adi); if (ACPI_FAILURE(status)) return 1; @@ -2060,13 +2060,13 @@ acpi_sba_ioc_add(struct acpi_device *device) * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI * root bridges, and its CSR space includes the IOC function. 
*/ - if (strncmp("HWP0001", dev_info->hardware_id.string, 7) == 0) { + if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) { hpa += ZX1_IOC_OFFSET; /* zx1 based systems default to kernel page size iommu pages */ if (!iovp_shift) iovp_shift = min(PAGE_SHIFT, 16); } - kfree(dev_info); + kfree(adi); /* * default anything not caught above or specified on cmdline to 4k -- cgit v1.1 From 93f7c93bee6382e01c34ea0c34ff8fb98c648734 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 26 Mar 2010 23:02:47 +0100 Subject: [IA64] Use set_cpus_allowed_ptr Use set_cpus_allowed_ptr rather than set_cpus_allowed. Signed-off-by: Julia Lawall Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/sn2/sn_hwperf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 55ac3c4..f6c1c5f 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c @@ -629,9 +629,9 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) else { /* migrate the task before calling SAL */ save_allowed = current->cpus_allowed; - set_cpus_allowed(current, cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); sn_hwperf_call_sal(op_info); - set_cpus_allowed(current, save_allowed); + set_cpus_allowed_ptr(current, &save_allowed); } } r = op_info->ret; -- cgit v1.1 From 552dce3a071f0de2a84023fbba7f3b4ac36602cd Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 26 Mar 2010 23:02:23 +0100 Subject: [IA64] Use set_cpus_allowed_ptr Use set_cpus_allowed_ptr rather than set_cpus_allowed. Signed-off-by: Julia Lawall Signed-off-by: Tony Luck --- arch/ia64/kernel/cpufreq/acpi-cpufreq.c | 10 +++++----- arch/ia64/kernel/salinfo.c | 5 ++--- arch/ia64/kernel/topology.c | 4 ++-- 3 files changed, 9 insertions(+), 10 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c index b0b4e6e..22f6152 100644 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c @@ -113,7 +113,7 @@ processor_get_freq ( dprintk("processor_get_freq\n"); saved_mask = current->cpus_allowed; - set_cpus_allowed(current, cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); if (smp_processor_id() != cpu) goto migrate_end; @@ -121,7 +121,7 @@ processor_get_freq ( ret = processor_get_pstate(&value); if (ret) { - set_cpus_allowed(current, saved_mask); + set_cpus_allowed_ptr(current, &saved_mask); printk(KERN_WARNING "get performance failed with error %d\n", ret); ret = 0; @@ -131,7 +131,7 @@ processor_get_freq ( ret = (clock_freq*1000); migrate_end: - set_cpus_allowed(current, saved_mask); + set_cpus_allowed_ptr(current, &saved_mask); return ret; } @@ -151,7 +151,7 @@ processor_set_freq ( dprintk("processor_set_freq\n"); saved_mask = current->cpus_allowed; - set_cpus_allowed(current, cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); if (smp_processor_id() != cpu) { retval = -EAGAIN; goto migrate_end; @@ -208,7 +208,7 @@ processor_set_freq ( retval = 0; migrate_end: - set_cpus_allowed(current, saved_mask); + set_cpus_allowed_ptr(current, &saved_mask); return (retval); } diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index e6676fc..aa8b5fa 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -404,10 +404,9 @@ static void call_on_cpu(int cpu, void (*fn)(void *), void *arg) { cpumask_t save_cpus_allowed = 
current->cpus_allowed; - cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu); - set_cpus_allowed(current, new_cpus_allowed); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); (*fn)(arg); - set_cpus_allowed(current, save_cpus_allowed); + set_cpus_allowed_ptr(current, &save_cpus_allowed); } static void diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 28f299d..0baa1bb 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -361,12 +361,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) return 0; oldmask = current->cpus_allowed; - retval = set_cpus_allowed(current, cpumask_of_cpu(cpu)); + retval = set_cpus_allowed_ptr(current, cpumask_of(cpu)); if (unlikely(retval)) return retval; retval = cpu_cache_sysfs_init(cpu); - set_cpus_allowed(current, oldmask); + set_cpus_allowed_ptr(current, &oldmask); if (unlikely(retval < 0)) return retval; -- cgit v1.1 From 7683a3f9748f7adfbe47e33002a4f710ab557293 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 28 Feb 2010 19:58:14 +0900 Subject: [IA64] use __ratelimit Replace open-coded rate limiting logic with __ratelimit(). Signed-off-by: Akinobu Mita Signed-off-by: Tony Luck --- arch/ia64/kernel/irq_ia64.c | 9 +++------ arch/ia64/kernel/unaligned.c | 24 +++++------------------- 2 files changed, 8 insertions(+), 25 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 6404793..f14c35f 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -467,13 +468,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) sp = ia64_getreg(_IA64_REG_SP); if ((sp - bsp) < 1024) { - static unsigned char count; - static long last_time; + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); - if (time_after(jiffies, last_time + 5 * HZ)) - count = 0; - if (++count < 5) { - last_time = jiffies; + if (__ratelimit(&ratelimit)) { printk("ia64_handle_irq: DANGER: less than " "1KB of free stack space!!\n" "(bsp=0x%lx, sp=%lx)\n", bsp, sp); diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 776dd40..622772b 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -1283,24 +1284,9 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs) /* * Make sure we log the unaligned access, so that user/sysadmin can notice it and * eventually fix the program. However, we don't want to do that for every access so we - * pace it with jiffies. This isn't really MP-safe, but it doesn't really have to be - * either... + * pace it with jiffies. */ -static int -within_logging_rate_limit (void) -{ - static unsigned long count, last_time; - - if (time_after(jiffies, last_time + 5 * HZ)) - count = 0; - if (count < 5) { - last_time = jiffies; - count++; - return 1; - } - return 0; - -} +static DEFINE_RATELIMIT_STATE(logging_rate_limit, 5 * HZ, 5); void ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) @@ -1337,7 +1323,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) if (!no_unaligned_warning && !(current->thread.flags & IA64_THREAD_UAC_NOPRINT) && - within_logging_rate_limit()) + __ratelimit(&logging_rate_limit)) { char buf[200]; /* comm[] is at most 16 bytes... 
*/ size_t len; @@ -1370,7 +1356,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) } } } else { - if (within_logging_rate_limit()) { + if (__ratelimit(&logging_rate_limit)) { printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n", ifa, regs->cr_iip + ipsr->ri); if (unaligned_dump_stack) -- cgit v1.1 From 0c3b96e44739cdba7a75030107a2b47a15ee5f60 Mon Sep 17 00:00:00 2001 From: "npiggin@suse.de" Date: Fri, 7 May 2010 14:34:33 -0700 Subject: [IA64] invoke oom-killer from page fault As explained in commit 1c0fe6e3bd, we want to call the architecture independent oom killer when getting an unexplained OOM from handle_mm_fault, rather than simply killing current. Acked-by: David Rientjes Signed-off-by: Nick Piggin Signed-off-by: Tony Luck --- arch/ia64/mm/fault.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 19261a9..0799fea 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -148,7 +148,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re if ((vma->vm_flags & mask) != mask) goto bad_area; - survive: /* * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the @@ -276,13 +275,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re out_of_memory: up_read(&mm->mmap_sem); - if (is_global_init(current)) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } - printk(KERN_CRIT "VM: killing process %s\n", current->comm); - if (user_mode(regs)) - do_group_exit(SIGKILL); - goto no_context; + if (!user_mode(regs)) + goto no_context; + pagefault_out_of_memory(); } -- cgit v1.1 From 2a2ae2426bba944ce6dbcad35e1580df57aafcf1 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Wed, 12 May 2010 09:30:27 -0700 Subject: [IA64] Drop duplicated "config IOMMU_HELPER" One entry for this in arch/ia64/Kconfig should be enough. Reported-by: Christoph Egger Signed-off-by: Tony Luck --- arch/ia64/Kconfig | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 4d4f418..9676100 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -59,9 +59,6 @@ config NEED_DMA_MAP_STATE config SWIOTLB bool -config IOMMU_HELPER - bool - config GENERIC_LOCKBREAK def_bool n -- cgit v1.1 From 0ee75bead83da4791e5cbf659806c54d8ee40f12 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 28 Apr 2010 15:39:01 +0300 Subject: KVM: Let vcpu structure alignment be determined at runtime vmx and svm vcpus have different contents and therefore may have different alignment requirements. Let each specify its required alignment.
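With the extra parameter, each backend hands kvm_init() the size and alignment of its own vcpu container instead of relying on a single global default; the ia64 hunk below simply passes 0 to keep the previous behaviour. As a rough illustration only (my_backend_ops and struct my_backend_vcpu are invented, and the other architectures' call sites are not part of this patch), a backend with an alignment-sensitive vcpu structure would register itself along these lines:

    static int __init my_backend_init(void)
    {
            /* size and alignment come from the backend's own vcpu type */
            return kvm_init(&my_backend_ops,
                            sizeof(struct my_backend_vcpu),
                            __alignof__(struct my_backend_vcpu),
                            THIS_MODULE);
    }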
Signed-off-by: Avi Kivity --- arch/ia64/kvm/vmm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c index 7a62f75..f0b9cac 100644 --- a/arch/ia64/kvm/vmm.c +++ b/arch/ia64/kvm/vmm.c @@ -51,7 +51,7 @@ static int __init kvm_vmm_init(void) vmm_fpswa_interface = fpswa_interface; /*Register vmm data to kvm side*/ - return kvm_init(&vmm_info, 1024, THIS_MODULE); + return kvm_init(&vmm_info, 1024, 0, THIS_MODULE); } static void __exit kvm_vmm_exit(void) -- cgit v1.1 From ffdf91856c812646aa276711fa7e3ad80b00764a Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 26 May 2010 14:43:14 -0700 Subject: ia64: ptrace_attach_sync_user_rbs: avoid "task->signal != NULL" checks Preparation to make task->signal immutable, no functional changes. It doesn't matter which pointer we check under tasklist to ensure the task was not released, ->signal or ->sighand. But we are going to make ->signal refcountable, change the code to use ->sighand. Note: this code doesn't need this check and tasklist_lock at all, it should be converted to use lock_task_sighand(). And, the code under SIGNAL_STOP_STOPPED check looks wrong. Signed-off-by: Oleg Nesterov Cc: Fenghua Yu Cc: Roland McGrath Cc: Stanislaw Gruszka Cc: Tony Luck Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/ptrace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 0dec7f7..7c7909f 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -638,7 +638,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child) */ read_lock(&tasklist_lock); - if (child->signal) { + if (child->sighand) { spin_lock_irq(&child->sighand->siglock); if (child->state == TASK_STOPPED && !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) { @@ -662,7 +662,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child) * job control stop, so that SIGCONT can be used to wake it up. */ read_lock(&tasklist_lock); - if (child->signal) { + if (child->sighand) { spin_lock_irq(&child->sighand->siglock); if (child->state == TASK_TRACED && (child->signal->flags & SIGNAL_STOP_STOPPED)) { -- cgit v1.1 From 8aee5c89e181fb30076febae1c8eafc0f75493ca Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Wed, 26 May 2010 14:44:15 -0700 Subject: ia64: remove unnecessary sync_single_range_* in swiotlb_dma_ops sync_single_range_for_cpu and sync_single_range_for_device hooks in swiotlb_dma_ops are unnecessary because sync_single_for_cpu and sync_single_for_device are used there. 
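The reasoning is that a partial-buffer ("range") sync can always be expressed through the whole-buffer hook by offsetting the address and shrinking the length, so duplicating range callbacks in this ops table buys nothing. A toy sketch of that equivalence, using invented types rather than the real struct dma_map_ops:

    struct toy_dma_ops {
            void (*sync_single_for_cpu)(unsigned long dma_addr,
                                        unsigned long size);
    };

    /* A range sync is just the whole-buffer hook applied to the sub-range. */
    static void toy_sync_range_for_cpu(struct toy_dma_ops *ops,
                                       unsigned long dma_addr,
                                       unsigned long offset,
                                       unsigned long size)
    {
            ops->sync_single_for_cpu(dma_addr + offset, size);
    }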
Signed-off-by: FUJITA Tomonori Cc: Tony Luck Cc: Fenghua Yu Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/pci-swiotlb.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 3095654..d9485d9 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -31,8 +31,6 @@ struct dma_map_ops swiotlb_dma_ops = { .unmap_sg = swiotlb_unmap_sg_attrs, .sync_single_for_cpu = swiotlb_sync_single_for_cpu, .sync_single_for_device = swiotlb_sync_single_for_device, - .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, - .sync_single_range_for_device = swiotlb_sync_single_range_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, .dma_supported = swiotlb_dma_supported, -- cgit v1.1 From 18e98307de0d746cb0845ebf66535ce2184c25a2 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Wed, 26 May 2010 14:44:32 -0700 Subject: asm-generic: add NEED_SG_DMA_LENGTH to define sg_dma_len() There are only two ways to define sg_dma_len(); use sg->dma_length or sg->length. This patch introduces NEED_SG_DMA_LENGTH that enables architectures to choose sg->dma_length or sg->length. Signed-off-by: FUJITA Tomonori Cc: Arnd Bergmann Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Matt Turner Cc: Russell King Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/Kconfig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 9676100..f8afdd2 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -56,6 +56,9 @@ config MMU config NEED_DMA_MAP_STATE def_bool y +config NEED_SG_DMA_LENGTH + def_bool y + config SWIOTLB bool -- cgit v1.1 From 1ef04370d823a811d2cca9f237097559a6b99b12 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Wed, 26 May 2010 14:44:34 -0700 Subject: asm-generic: remove ARCH_HAS_SG_CHAIN in scatterlist.h There are more architectures that don't support ARCH_HAS_SG_CHAIN than those that support it. This removes ARCH_HAS_SG_CHAIN in asm-generic/scatterlist.h and lets architectures define it. It's clearer than defining ARCH_HAS_SG_CHAIN in asm-generic/scatterlist.h and undefining it in architectures that don't support it. Signed-off-by: FUJITA Tomonori Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/scatterlist.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h index d8e9896..f299a4f 100644 --- a/arch/ia64/include/asm/scatterlist.h +++ b/arch/ia64/include/asm/scatterlist.h @@ -1,6 +1,7 @@ #ifndef _ASM_IA64_SCATTERLIST_H #define _ASM_IA64_SCATTERLIST_H +#include /* * It used to be that ISA_DMA_THRESHOLD had something to do with the * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart * from the aha1542.c driver, which can only DMA up to 24 bits) is * to determine if a given device can do DMA to pages above 4GB; * that's 4GB - 1.
*/ #define ISA_DMA_THRESHOLD 0xffffffff - -#include +#define ARCH_HAS_SG_CHAIN #endif /* _ASM_IA64_SCATTERLIST_H */ -- cgit v1.1 From 3bccd996276b108c138e8176793a26ecef54d573 Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Wed, 26 May 2010 14:44:59 -0700 Subject: numa: ia64: use generic percpu var numa_node_id() implementation ia64: Use generic percpu implementation of numa_node_id() + initialize per cpu 'numa_node' + remove ia64 cpu_to_node() macro; use generic + define CONFIG_USE_PERCPU_NUMA_NODE_ID when NUMA configured Signed-off-by: Lee Schermerhorn Reviewed-by: Christoph Lameter Cc: Tejun Heo Cc: Mel Gorman Cc: Christoph Lameter Cc: Nick Piggin Cc: David Rientjes Cc: Eric Whitney Cc: KAMEZAWA Hiroyuki Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: "Luck, Tony" Cc: Pekka Enberg Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/Kconfig | 4 ++++ arch/ia64/include/asm/topology.h | 5 ----- arch/ia64/kernel/smpboot.c | 6 ++++++ 3 files changed, 10 insertions(+), 5 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index f8afdd2..00ba087 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -498,6 +498,10 @@ config HAVE_ARCH_NODEDATA_EXTENSION def_bool y depends on NUMA +config USE_PERCPU_NUMA_NODE_ID + def_bool y + depends on NUMA + config ARCH_PROC_KCORE_TEXT def_bool y depends on PROC_KCORE diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index d323071..09f6467 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h @@ -26,11 +26,6 @@ #define RECLAIM_DISTANCE 15 /* - * Returns the number of the node containing CPU 'cpu' - */ -#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu]) - -/* * Returns a bitmask of CPUs on Node 'node'. */ #define cpumask_of_node(node) ((node) == -1 ? \ diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index e5230b2..8aae2d9 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -390,6 +390,11 @@ smp_callin (void) fix_b0_for_bsp(); + /* + * numa_node_id() works after this. + */ + set_numa_node(cpu_to_node_map[cpuid]); + ipi_call_lock_irq(); spin_lock(&vector_lock); /* Setup the per cpu irq handling data structures */ @@ -632,6 +637,7 @@ void __devinit smp_prepare_boot_cpu(void) { cpu_set(smp_processor_id(), cpu_online_map); cpu_set(smp_processor_id(), cpu_callin_map); + set_numa_node(cpu_to_node_map[smp_processor_id()]); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; paravirt_post_smp_prepare_boot_cpu(); } -- cgit v1.1 From fd1197f1131a1f1d8bc192f9cfbbe17e305f17f3 Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Wed, 26 May 2010 14:45:01 -0700 Subject: numa: ia64: support numa_mem_id() for memoryless nodes Enable 'HAVE_MEMORYLESS_NODES' by default when NUMA configured on ia64. Initialize percpu 'numa_mem' variable when starting secondary cpus. Generic initialization will handle the boot cpu. Nothing uses 'numa_mem_id()' yet. A subsequent patch will modify slab to use this. Signed-off-by: Lee Schermerhorn Cc: Tejun Heo Cc: Mel Gorman Cc: Christoph Lameter Cc: Nick Piggin Cc: David Rientjes Cc: Eric Whitney Cc: KAMEZAWA Hiroyuki Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H.
Peter Anvin" Cc: "Luck, Tony" Cc: Pekka Enberg Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/Kconfig | 4 ++++ arch/ia64/kernel/smpboot.c | 1 + 2 files changed, 5 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 00ba087..9561082 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -502,6 +502,10 @@ config USE_PERCPU_NUMA_NODE_ID def_bool y depends on NUMA +config HAVE_MEMORYLESS_NODES + def_bool y + depends on NUMA + config ARCH_PROC_KCORE_TEXT def_bool y depends on PROC_KCORE diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 8aae2d9..518e876 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -394,6 +394,7 @@ smp_callin (void) * numa_node_id() works after this. */ set_numa_node(cpu_to_node_map[cpuid]); + set_numa_mem(local_memory_node(cpu_to_node_map[cpuid])); ipi_call_lock_irq(); spin_lock(&vector_lock); -- cgit v1.1 From 4ec37de89d8c758ee8115e0e64b3f994910789ee Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Thu, 27 May 2010 15:35:13 -0700 Subject: [IA64] Fix build breakage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit 0ac0c0d0f837c499afd02a802f9cf52d3027fa3b cpusets: randomize node rotor used in cpuset_mem_spread_node() Jack Steiner fixed a problem with too many small tasks being assigned to node 0. Copy his code to ia64 to avoid build error. arch/ia64/kernel/smpboot.c:641: error: ‘cpu_to_node_map’ undeclared (first use in this function) In commit 3bccd996276b108c138e8176793a26ecef54d573 numa: ia64: use generic percpu var numa_node_id() implementation Lee Schermerhorn added some set_numa_node() calls - but these only work on CONFIG_NUMA=y configurations. Surround the calls with #ifdef CONFIG_NUMA Signed-off-by: Tony Luck --- arch/ia64/kernel/smpboot.c | 4 ++++ arch/ia64/mm/numa.c | 17 +++++++++++++++++ 2 files changed, 21 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 518e876..6a1380e 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -390,11 +390,13 @@ smp_callin (void) fix_b0_for_bsp(); +#ifdef CONFIG_NUMA /* * numa_node_id() works after this. */ set_numa_node(cpu_to_node_map[cpuid]); set_numa_mem(local_memory_node(cpu_to_node_map[cpuid])); +#endif ipi_call_lock_irq(); spin_lock(&vector_lock); @@ -638,7 +640,9 @@ void __devinit smp_prepare_boot_cpu(void) { cpu_set(smp_processor_id(), cpu_online_map); cpu_set(smp_processor_id(), cpu_callin_map); +#ifdef CONFIG_NUMA set_numa_node(cpu_to_node_map[smp_processor_id()]); +#endif per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; paravirt_post_smp_prepare_boot_cpu(); } diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index 3efea7d..2437718 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -50,6 +51,22 @@ paddr_to_nid(unsigned long paddr) return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0); } +/* + * Return the bit number of a random bit set in the nodemask. + * (returns -1 if nodemask is empty) + */ +int __node_random(const nodemask_t *maskp) +{ + int w, bit = -1; + + w = nodes_weight(*maskp); + if (w) + bit = bitmap_ord_to_pos(maskp->bits, + get_random_int() % w, MAX_NUMNODES); + return bit; +} +EXPORT_SYMBOL(__node_random); + #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA) /* * Because of holes evaluate on section limits. 
-- cgit v1.1 From b3f2f6cd1ff935ecac9a5346904b899d7af689fe Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 30 May 2010 10:08:03 -0700 Subject: ia64: revert __node_random addition This partially reverts commit 4ec37de89d8c758ee8115e0e64b3f994910789ee ("[IA64] Fix build breakage"), since the commit that made it necessary got reverted earlier (see commit 35926ff5fba8, 'Revert "cpusets: randomize node rotor used in cpuset_mem_spread_node()"') Even if we ever re-introduce this, there is no reason to make __node_random be some architecture-specific function. Signed-off-by: Linus Torvalds --- arch/ia64/mm/numa.c | 17 ----------------- 1 file changed, 17 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index 2437718..3efea7d 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -51,22 +50,6 @@ paddr_to_nid(unsigned long paddr) return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0); } -/* - * Return the bit number of a random bit set in the nodemask. - * (returns -1 if nodemask is empty) - */ -int __node_random(const nodemask_t *maskp) -{ - int w, bit = -1; - - w = nodes_weight(*maskp); - if (w) - bit = bitmap_ord_to_pos(maskp->bits, - get_random_int() % w, MAX_NUMNODES); - return bit; -} -EXPORT_SYMBOL(__node_random); - #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA) /* * Because of holes evaluate on section limits. -- cgit v1.1 From 3499f4d0d1159a21245d6071f8af6a71b86a78bc Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 26 May 2010 17:57:05 +0200 Subject: KVM: ia64: Add missing spin_unlock in kvm_arch_hardware_enable() Add a spin_unlock missing on the error path. The semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @@ expression E1; @@ * spin_lock(E1,...); <+... when != E1 if (...) { ... when != E1 * return ...; } ...+> * spin_unlock(E1,...); // Signed-off-by: Julia Lawall Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index d5f4e91..21b70137 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -144,6 +144,7 @@ int kvm_arch_hardware_enable(void *garbage) VP_INIT_ENV : VP_INIT_ENV_INITALIZE, __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); if (status != 0) { + spin_unlock(&vp_lock); printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); return -EINVAL; } -- cgit v1.1 From b70f4e85bfc4d7000036355b714a92d5c574f1be Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Wed, 30 Jun 2010 10:46:16 -0700 Subject: [IA64] Fix spinaphore down_spin() Typo in down_spin() meant it only read the low 32 bits of the "serve" value, instead of the full 64 bits. This results in the system hanging when the values in ticket/serve get larger than 32 bits. A big enough system running the right test can hit this in just a few hours.
Broken since 883a3acf5b0d4782ac35981227a0d094e8b44850 [IA64] Re-implement spinaphores using ticket lock concepts Reported via IRC by Bjorn Helgaas Signed-off-by: Tony Luck --- arch/ia64/mm/tlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index 5dfd916..7b3cdc6 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -121,7 +121,7 @@ static inline void down_spin(struct spinaphore *ss) ia64_invala(); for (;;) { - asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); + asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); if (time_before(t, serve)) return; cpu_relax(); -- cgit v1.1
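The ld4.c.nc / ld8.c.nc difference matters because ss->serve is a 64-bit counter: a 4-byte load keeps only the low half, so once ticket/serve values cross 2^32 the comparison in down_spin() is made against a truncated number and a waiter can spin forever. A tiny user-space analogue of the truncation (nothing ia64-specific, purely illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t ticket = 0x100000007ULL;   /* my ticket, past 2^32 */
            uint64_t serve  = 0x100000009ULL;   /* tickets served so far */

            uint32_t serve32 = (uint32_t)serve; /* what a 4-byte load yields */

            /* Full-width compare: the ticket has been served. */
            printf("64-bit compare: served = %d\n", ticket < serve);
            /* Truncated compare: 0x100000007 is not < 9, so the waiter
             * would keep spinning even though it was served long ago. */
            printf("32-bit compare: served = %d\n", ticket < serve32);
            return 0;
    }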