Diffstat (limited to 'arch/ia64')
29 files changed, 94 insertions, 144 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e0f5b6d..fcf3b43 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -22,6 +22,10 @@ config IA64
 	select HAVE_KVM
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select GENERIC_PENDING_IRQ if SMP
+	select IRQ_PER_CPU
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
@@ -678,28 +682,6 @@ source "arch/ia64/kvm/Kconfig"
 
 source "lib/Kconfig"
 
-#
-# Use the generic interrupt handling code in kernel/irq/:
-#
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
-config GENERIC_PENDING_IRQ
-	bool
-	depends on GENERIC_HARDIRQS && SMP
-	default y
-
-config IRQ_PER_CPU
-	bool
-	default y
-
 config IOMMU_HELPER
 	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 3ded8fe..1d7bca0 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -233,3 +233,4 @@ CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_T10DIF=y
+CONFIG_MISC_DEVICES=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 3a98b2d..b11fa88 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -208,3 +208,4 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_CRYPTO_MD5=y
+CONFIG_MISC_DEVICES=y
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 3a078ad..331de72 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -202,7 +202,7 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
 }
 
 static int
-simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+simscsi_queuecommand_lck (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
 	unsigned int target_id = sc->device->id;
 	char fname[MAX_ROOT_LEN+16];
@@ -326,6 +326,8 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	return 0;
 }
 
+static DEF_SCSI_QCMD(simscsi_queuecommand)
+
 static int
 simscsi_host_reset (struct scsi_cmnd *sc)
 {
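The simscsi change above follows the tree-wide SCSI queuecommand lock push-down: the driver's handler is renamed with an _lck suffix and DEF_SCSI_QCMD() generates a wrapper that matches the new host-template prototype and takes the host lock itself. A minimal sketch of the pattern ("mydrv" is a hypothetical driver name, not from this patch):

    static int mydrv_queuecommand_lck(struct scsi_cmnd *sc,
                                      void (*done)(struct scsi_cmnd *))
    {
            /* issue the command, then complete via done() as before */
            done(sc);
            return 0;
    }

    /*
     * Expands to mydrv_queuecommand(struct Scsi_Host *, struct scsi_cmnd *),
     * which acquires shost->host_lock around a call to the _lck variant.
     */
    static DEF_SCSI_QCMD(mydrv_queuecommand)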
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 13633da..bff0824 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -390,8 +390,7 @@ static void rs_unthrottle(struct tty_struct * tty)
 }
 
 
-static int rs_ioctl(struct tty_struct *tty, struct file * file,
-		    unsigned int cmd, unsigned long arg)
+static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
 {
 	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
 	    (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index a2e7368..4336d08 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -12,6 +12,8 @@
 
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
+#define DMA_ERROR_CODE 0
+
 extern struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index c7f0f06..8428525 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -46,7 +46,7 @@ do {									\
 } while (0)
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -100,23 +100,26 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
 {
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	{
-		register unsigned long r8 __asm ("r8");
+		register unsigned long r8 __asm ("r8") = 0;
+		unsigned long prev;
 		__asm__ __volatile__(
 			"	mf;;					\n"
 			"	mov ar.ccv=%3;;				\n"
 			"[1:]	cmpxchg4.acq %0=[%1],%2,ar.ccv		\n"
 			"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"
 			"[2:]"
-			: "=r" (r8)
+			: "=r" (prev)
 			: "r" (uaddr), "r" (newval),
 			  "rO" ((long) (unsigned) oldval)
 			: "memory");
+		*uval = prev;
 		return r8;
 	}
 }
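The futex.h hunk adopts the sanitized cmpxchg calling convention: the return value now carries only the fault status, while the value actually read from the user futex word comes back through the new *uval out-parameter. A sketch of how a caller is expected to use it (variable names are illustrative):

    u32 curval;
    int ret;

    ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval);
    if (ret)
            return ret;             /* -EFAULT: the user access faulted */
    if (curval != oldval)
            return -EAGAIN;         /* raced with another updater; retry */
    /* cmpxchg succeeded */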
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index cc8335e..e5a6c35 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -426,6 +426,11 @@ extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size)
 extern void iounmap (volatile void __iomem *addr);
 extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
 extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
+static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
+{
+	return ioremap(phys_addr, size);
+}
+
 
 /*
  * String version of IO memory access ops:
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 2f229e5..2689ee5 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -590,6 +590,10 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
+#define __KVM_HAVE_ARCH_VM_ALLOC 1
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ASSEMBLY__*/
 
 #endif
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 41b6d31..961a16f 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -189,6 +189,7 @@ get_order (unsigned long size)
 # define pgprot_val(x)	((x).pgprot)
 
 # define __pte(x)	((pte_t) { (x) } )
+# define __pmd(x)	((pmd_t) { (x) } )
 # define __pgprot(x)	((pgprot_t) { (x) } )
 
 #else /* !STRICT_MM_TYPECHECKS */
diff --git a/arch/ia64/include/asm/perfmon.h b/arch/ia64/include/asm/perfmon.h
index 7f3333d..d551183 100644
--- a/arch/ia64/include/asm/perfmon.h
+++ b/arch/ia64/include/asm/perfmon.h
@@ -7,7 +7,7 @@
 #define _ASM_IA64_PERFMON_H
 
 /*
- * perfmon comamnds supported on all CPU models
+ * perfmon commands supported on all CPU models
 */
 #define PFM_WRITE_PMCS		0x01
 #define PFM_WRITE_PMDS		0x02
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 348e44d..03afe79 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -717,8 +717,9 @@ prefetchw (const void *x)
 #define spin_lock_prefetch(x)	prefetchw(x)
 
 extern unsigned long boot_option_idle_override;
-extern unsigned long idle_halt;
-extern unsigned long idle_nomwait;
+
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
+			 IDLE_NOMWAIT, IDLE_POLL};
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index 215d545..3027e75 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -25,20 +25,8 @@
 #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
 #endif
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
 #include <asm/intrinsics.h>
 
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed long		count;
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-};
-
 #define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
 #define RWSEM_ACTIVE_BIAS		(1L)
 #define RWSEM_ACTIVE_MASK		(0xffffffffL)
@@ -46,26 +34,6 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-#define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-	  LIST_HEAD_INIT((name).wait_list) }
-
-#define DECLARE_RWSEM(name) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-static inline void
-init_rwsem (struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
-
 /*
  * lock for reading
 */
@@ -174,9 +142,4 @@ __downgrade_write (struct rw_semaphore *sem)
 #define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
 #define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* _ASM_IA64_RWSEM_H */
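With the rwsem.h hunk above, the ia64 header no longer defines struct rw_semaphore, __RWSEM_INITIALIZER, init_rwsem() or rwsem_is_locked(); those now come from the generic <linux/rwsem.h>, and the arch header only supplies the atomic count primitives. Callers are unaffected, e.g. (a trivial sketch; "my_sem" and "reader" are hypothetical):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(my_sem);       /* generic initializer */

    static void reader(void)
    {
            down_read(&my_sem);         /* generic slow path handles contention */
            /* ... read shared state ... */
            up_read(&my_sem);
    }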
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h
index 96fc623..ed28bcd 100644
--- a/arch/ia64/include/asm/xen/hypercall.h
+++ b/arch/ia64/include/asm/xen/hypercall.h
@@ -107,7 +107,7 @@ extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
 static inline int
 xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
 {
-	return _hypercall2(int, sched_op_new, cmd, arg);
+	return _hypercall2(int, sched_op, cmd, arg);
 }
 
 static inline long
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index c6c90f3..90ebceb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -477,6 +477,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 	if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
 		return;
 
+	if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
+		printk_once(KERN_WARNING
+			    "node_cpuid[%ld] is too small, may not be able to use all cpus\n",
+			    ARRAY_SIZE(node_cpuid));
+		return;
+	}
 	pxm = get_processor_proximity_domain(pa);
 
 	/* record this node in proximity bitmap */
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 9a26015..38c07b8 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -633,7 +633,7 @@ ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
 	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
 	desc = irq_desc + irq;
 	desc->status |= IRQ_PER_CPU;
-	desc->chip = &irq_type_ia64_lsapic;
+	set_irq_chip(irq, &irq_type_ia64_lsapic);
 	if (action)
 		setup_irq(irq, action);
 	set_irq_handler(irq, handle_percpu_irq);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 1753f6a..80d50b8 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -582,6 +582,8 @@ out:
 	/* Get the CPE error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
 
+	local_irq_disable();
+
 	return IRQ_HANDLED;
 }
 
@@ -1859,7 +1861,8 @@ ia64_mca_cpu_init(void *cpu_data)
 			data = mca_bootmem();
 			first_time = 0;
 		} else
-			data = __get_free_pages(GFP_KERNEL, get_order(sz));
+			data = (void *)__get_free_pages(GFP_KERNEL,
+							get_order(sz));
 		if (!data)
 			panic("Could not allocate MCA memory for cpu %d\n",
 			      cpu);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 39e534f..89accc6 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -617,11 +617,14 @@ pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 	return get_unmapped_area(file, addr, len, pgoff, flags);
 }
 
+/* forward declaration */
+static const struct dentry_operations pfmfs_dentry_operations;
 
 static struct dentry *
 pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
+	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
+			PFMFS_MAGIC);
 }
 
 static struct file_system_type pfm_fs_type = {
@@ -829,10 +832,9 @@ pfm_rvmalloc(unsigned long size)
 	unsigned long addr;
 
 	size = PAGE_ALIGN(size);
-	mem  = vmalloc(size);
+	mem  = vzalloc(size);
 	if (mem) {
 		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
-		memset(mem, 0, size);
 		addr = (unsigned long)mem;
 		while (size > 0) {
 			pfm_reserve_page(addr);
@@ -1542,7 +1544,7 @@ pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
  * any operations on the root directory. However, we need a non-trivial
  * d_name - pfm: will go nicely and kill the special-casing in procfs.
  */
-static struct vfsmount *pfmfs_mnt;
+static struct vfsmount *pfmfs_mnt __read_mostly;
 
 static int __init
 init_pfm_fs(void)
@@ -2185,7 +2187,7 @@ static const struct file_operations pfm_file_ops = {
 };
 
 static int
-pfmfs_delete_dentry(struct dentry *dentry)
+pfmfs_delete_dentry(const struct dentry *dentry)
 {
 	return 1;
 }
@@ -2233,7 +2235,6 @@ pfm_alloc_file(pfm_context_t *ctx)
 	}
 
 	path.mnt = mntget(pfmfs_mnt);
-	path.dentry->d_op = &pfmfs_dentry_operations;
 
 	d_add(path.dentry, inode);
 	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
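The pfm_rvmalloc() hunk above converts to vzalloc(), which returns virtually contiguous, already-zeroed memory, making the explicit memset() redundant. The general replacement pattern, in isolation (sketch; "buf" and "size" are illustrative):

    #include <linux/vmalloc.h>

    void *buf;

    buf = vzalloc(size);        /* was: vmalloc(size) followed by memset(buf, 0, size) */
    if (!buf)
            goto fail;          /* allocation failure handled as before */
    /* ... use the zeroed buffer ... */
    vfree(buf);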
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 16f1c7b..6d33c5c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -53,12 +53,8 @@
 
 void (*ia64_mark_idle)(int);
 
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
 void (*pm_idle) (void);
 EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index dabeefe2..be450a3 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -293,6 +293,7 @@ smp_flush_tlb_all (void)
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
+	cpumask_var_t cpus;
 	preempt_disable();
 	/* this happens for the common case of a single-threaded fork():  */
 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
@@ -301,9 +302,15 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		preempt_enable();
 		return;
 	}
-
-	smp_call_function_many(mm_cpumask(mm),
-		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
+		smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
+			mm, 1);
+	} else {
+		cpumask_copy(cpus, mm_cpumask(mm));
+		smp_call_function_many(cpus,
+			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+		free_cpumask_var(cpus);
+	}
 	local_irq_disable();
 	local_finish_flush_tlb_mm(mm);
 	local_irq_enable();
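The smp_flush_tlb_mm() rework above snapshots mm_cpumask() into a separately allocated mask, presumably so the set of target CPUs cannot change while smp_call_function_many() walks it, and falls back to a global IPI when the allocation fails. The allocate-or-fall-back idiom, in isolation ("flush_fn" stands in for the real callback):

    cpumask_var_t cpus;

    if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
            /* no memory: broadcast to all CPUs; correct, just wider than needed */
            smp_call_function(flush_fn, mm, 1);
    } else {
            cpumask_copy(cpus, mm_cpumask(mm));     /* stable private copy */
            smp_call_function_many(cpus, flush_fn, mm, 1);
            free_cpumask_var(cpus);
    }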
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ed6f22e..156ad80 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -168,7 +168,7 @@ timer_interrupt (int irq, void *dev_id)
 {
 	unsigned long new_itm;
 
-	if (unlikely(cpu_is_offline(smp_processor_id()))) {
+	if (cpu_is_offline(smp_processor_id())) {
 		return IRQ_HANDLED;
 	}
 
@@ -190,19 +190,10 @@ timer_interrupt (int irq, void *dev_id)
 
 		new_itm += local_cpu_data->itm_delta;
 
-		if (smp_processor_id() == time_keeper_id) {
-			/*
-			 * Here we are in the timer irq handler. We have irqs locally
-			 * disabled, but we don't know if the timer_bh is running on
-			 * another CPU. We need to avoid to SMP race by acquiring the
-			 * xtime_lock.
-			 */
-			write_seqlock(&xtime_lock);
-			do_timer(1);
-			local_cpu_data->itm_next = new_itm;
-			write_sequnlock(&xtime_lock);
-		} else
-			local_cpu_data->itm_next = new_itm;
+		if (smp_processor_id() == time_keeper_id)
+			xtime_update(1);
+
+		local_cpu_data->itm_next = new_itm;
 
 		if (time_after(new_itm, ia64_get_itc()))
 			break;
@@ -222,7 +213,7 @@ skip_process_time_accounting:
 		 * comfort, we increase the safety margin by
 		 * intentionally dropping the next tick(s). We do NOT
 		 * update itm.next because that would force us to call
-		 * do_timer() which in turn would let our clock run
+		 * xtime_update() which in turn would let our clock run
 		 * too fast (with the potentially devastating effect
 		 * of losing monotony of time).
 		 */
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a4d044..787de4a 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -198,7 +198,7 @@ SECTIONS {
 	/* Per-cpu data: */
 	. = ALIGN(PERCPU_PAGE_SIZE);
-	PERCPU_VADDR(PERCPU_ADDR, :percpu)
+	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
 	__phys_per_cpu_start = __per_cpu_load;
 	/*
 	 * ensure percpu data fits
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f56a631..8213efe 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -662,6 +662,7 @@ again:
 		goto vcpu_run_fail;
 
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	vcpu->mode = IN_GUEST_MODE;
 	kvm_guest_enter();
 
 	/*
@@ -683,6 +684,7 @@ again:
 	 */
 	barrier();
 	kvm_guest_exit();
+	vcpu->mode = OUTSIDE_GUEST_MODE;
 	preempt_enable();
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -749,7 +751,7 @@ out:
 	return r;
 }
 
-static struct kvm *kvm_alloc_kvm(void)
+struct kvm *kvm_arch_alloc_vm(void)
 {
 	struct kvm *kvm;
 
@@ -760,7 +762,7 @@ static struct kvm *kvm_alloc_kvm(void)
 	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
 
 	if (!vm_base)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
 
 	kvm = (struct kvm *)(vm_base +
@@ -806,10 +808,12 @@ static void kvm_build_io_pmt(struct kvm *kvm)
 #define GUEST_PHYSICAL_RR4	0x2739
 #define VMM_INIT_RR		0x1660
 
-static void kvm_init_vm(struct kvm *kvm)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
 	BUG_ON(!kvm);
 
+	kvm->arch.is_sn2 = ia64_platform_is("sn2");
+
 	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
 	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
 	kvm->arch.vmm_init_rr = VMM_INIT_RR;
@@ -823,21 +827,8 @@ static void kvm_init_vm(struct kvm *kvm)
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
-}
-
-struct kvm *kvm_arch_create_vm(void)
-{
-	struct kvm *kvm = kvm_alloc_kvm();
-
-	if (IS_ERR(kvm))
-		return ERR_PTR(-ENOMEM);
-
-	kvm->arch.is_sn2 = ia64_platform_is("sn2");
-
-	kvm_init_vm(kvm);
-
-	return kvm;
+
+	return 0;
 }
 
 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
@@ -962,7 +953,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		r = kvm_setup_default_irq_routing(kvm);
 		if (r) {
+			mutex_lock(&kvm->slots_lock);
 			kvm_ioapic_destroy(kvm);
+			mutex_unlock(&kvm->slots_lock);
 			goto out;
 		}
 		break;
@@ -1357,7 +1350,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }
 
-static void free_kvm(struct kvm *kvm)
+void kvm_arch_free_vm(struct kvm *kvm)
 {
 	unsigned long vm_base = kvm->arch.vm_base;
 
@@ -1399,9 +1392,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 #endif
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
-	kvm_free_physmem(kvm);
-	cleanup_srcu_struct(&kvm->srcu);
-	free_kvm(kvm);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
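The kvm-ia64.c changes above convert the architecture to the __KVM_HAVE_ARCH_VM_ALLOC scheme: instead of the arch owning the whole kvm_arch_create_vm() path, the generic core allocates and frees the VM through arch hooks and calls kvm_arch_init_vm() for setup, which can now fail cleanly. Roughly, the common create path then looks like this (a simplified sketch of the generic flow, not part of this diff):

    static struct kvm *kvm_create_vm(void)
    {
            int r;
            struct kvm *kvm = kvm_arch_alloc_vm();  /* arch decides size and placement */

            if (!kvm)
                    return ERR_PTR(-ENOMEM);

            r = kvm_arch_init_vm(kvm);              /* arch-specific init, may fail */
            if (r) {
                    kvm_arch_free_vm(kvm);
                    return ERR_PTR(r);
            }

            /* ... generic init: srcu, memslots, io buses ... */
            return kvm;
    }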
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index fb8f9f5..f1e17d3 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -130,7 +130,7 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
 
 	local_irq_save(psr);
 
-	/*Intercept the acces for PIB range*/
+	/*Intercept the access for PIB range*/
 	if (iot == GPFN_PIB) {
 		if (!dir)
 			lsapic_write(vcpu, src_pa, s, *dest);
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 1841ee7..5ca674b 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -38,7 +38,7 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, taddr);
 		if (pmd)
-			pte = pte_alloc_map(mm, pmd, taddr);
+			pte = pte_alloc_map(mm, NULL, pmd, taddr);
 	}
 	return pte;
 }
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index dbc4cbe..77db0b5 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -592,7 +592,7 @@ void __cpuinit sn_cpu_init(void)
 	/*
 	 * Don't check status. The SAL call is not supported on all PROMs
 	 * but a failure is harmless.
-	 * Architechtuallly, cpu_init is always called twice on cpu 0. We
+	 * Architecturally, cpu_init is always called twice on cpu 0. We
 	 * should set cpu_number on cpu 0 once.
 	 */
 	if (cpuid == 0) {
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 4d4536e..9c271be 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -509,7 +509,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
 * use the GART mapped mode.
 */
 static u64
-tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
+tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
 {
 	u64 mapaddr;
 
diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c
index fd66b04..419c862 100644
--- a/arch/ia64/xen/suspend.c
+++ b/arch/ia64/xen/suspend.c
@@ -37,19 +37,14 @@ xen_mm_unpin_all(void)
 	/* nothing */
 }
 
-void xen_pre_device_suspend(void)
-{
-	/* nothing */
-}
-
 void
-xen_pre_suspend()
+xen_arch_pre_suspend()
 {
 	/* nothing */
 }
 
 void
-xen_post_suspend(int suspend_cancelled)
+xen_arch_post_suspend(int suspend_cancelled)
 {
 	if (suspend_cancelled)
 		return;
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index c1c5445..1f8244a 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -139,14 +139,11 @@ consider_steal_time(unsigned long new_itm)
 			run_posix_cpu_timers(p);
 		delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
 
-		if (cpu == time_keeper_id) {
-			write_seqlock(&xtime_lock);
-			do_timer(stolen + blocked);
-			local_cpu_data->itm_next = delta_itm + new_itm;
-			write_sequnlock(&xtime_lock);
-		} else {
-			local_cpu_data->itm_next = delta_itm + new_itm;
-		}
+		if (cpu == time_keeper_id)
+			xtime_update(stolen + blocked);
+
+		local_cpu_data->itm_next = delta_itm + new_itm;
+
 		per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
 		per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
 	}
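Both tick paths in this series (arch/ia64/kernel/time.c and arch/ia64/xen/time.c above) replace the open-coded write_seqlock(&xtime_lock)/do_timer()/write_sequnlock() sequence with xtime_update(), which takes the lock internally; per-CPU state such as itm_next no longer needs to be written under the global lock. The resulting handler shape, in isolation (sketch; "ticks" is illustrative):

    if (smp_processor_id() == time_keeper_id)
            xtime_update(ticks);    /* takes xtime_lock and calls do_timer(ticks) */

    /* per-cpu timer reload needs no global lock */
    local_cpu_data->itm_next = new_itm;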