Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/Kconfig                  |  19
-rw-r--r--   arch/ia64/hp/common/hwsw_iommu.c   |   9
-rw-r--r--   arch/ia64/include/asm/io.h         |  24
-rw-r--r--   arch/ia64/include/asm/iommu.h      |   1
-rw-r--r--   arch/ia64/include/asm/kvm_host.h   |   6
-rw-r--r--   arch/ia64/include/asm/machvec.h    |  22
-rw-r--r--   arch/ia64/include/asm/meminit.h    |   1
-rw-r--r--   arch/ia64/include/asm/sal.h        |  15
-rw-r--r--   arch/ia64/include/asm/sn/sn_sal.h  |  45
-rw-r--r--   arch/ia64/kernel/acpi.c            |  29
-rw-r--r--   arch/ia64/kernel/pci-dma.c         |   9
-rw-r--r--   arch/ia64/kernel/perfmon.c         |   5
-rw-r--r--   arch/ia64/kernel/setup.c           |   9
-rw-r--r--   arch/ia64/kvm/Kconfig              |   2
-rw-r--r--   arch/ia64/kvm/Makefile             |   8
-rw-r--r--   arch/ia64/kvm/kvm-ia64.c           |  92
-rw-r--r--   arch/ia64/kvm/kvm_fw.c             |   9
-rw-r--r--   arch/ia64/kvm/process.c            |   2
-rw-r--r--   arch/ia64/kvm/vcpu.h               |   5
-rw-r--r--   arch/ia64/mm/discontig.c           |   1
-rw-r--r--   arch/ia64/uv/kernel/setup.c        |   6
21 files changed, 179 insertions(+), 140 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 27eec714..6bd91ed 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -148,6 +148,7 @@ config IA64_GENERIC
 	select ACPI_NUMA
 	select SWIOTLB
 	select PCI_MSI
+	select DMAR
 	help
 	  This selects the system type of your hardware.  A "generic"
 	  kernel will run on any supported IA-64 system.  However, if you configure
@@ -585,7 +586,7 @@ source "fs/Kconfig.binfmt"
 
 endmenu
 
-menu "Power management and ACPI"
+menu "Power management and ACPI options"
 
 source "kernel/power/Kconfig"
 
@@ -641,6 +642,8 @@ source "net/Kconfig"
 
 source "drivers/Kconfig"
 
+source "arch/ia64/hp/sim/Kconfig"
+
 config MSPEC
 	tristate "Memory special operations driver"
 	depends on IA64
@@ -652,6 +655,12 @@ config MSPEC
 
 source "fs/Kconfig"
 
+source "arch/ia64/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
 source "arch/ia64/kvm/Kconfig"
 
 source "lib/Kconfig"
@@ -678,11 +687,3 @@ config IRQ_PER_CPU
 
 config IOMMU_HELPER
 	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
-
-source "arch/ia64/hp/sim/Kconfig"
-
-source "arch/ia64/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 88b6e6f3..2769dbf 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -13,19 +13,12 @@
  */
 
 #include <linux/device.h>
+#include <linux/swiotlb.h>
 
 #include <asm/machvec.h>
 
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
-extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
-extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
-extern ia64_mv_dma_map_single_attrs	swiotlb_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	swiotlb_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		swiotlb_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs;
-extern ia64_mv_dma_supported		swiotlb_dma_supported;
-extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
 
 /* hwiommu declarations & definitions: */
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 7f25750..0d9d16e 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -434,28 +434,4 @@ extern void memset_io(volatile void __iomem *s, int c, long n);
 
 # endif /* __KERNEL__ */
 
-/*
- * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
- * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
- * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
- * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
- * over BIO-level virtual merging.
- */
-extern unsigned long ia64_max_iommu_merge_mask;
-#if 1
-#define BIO_VMERGE_BOUNDARY	0
-#else
-/*
- * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
- * replaced by dma_merge_mask() or something of that sort.  Note: the only way
- * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
- * expanded into:
- *
- *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask)
- *
- * which is precisely what we want.
- */
-#define BIO_VMERGE_BOUNDARY	(ia64_max_iommu_merge_mask + 1)
-#endif
-
 #endif /* _ASM_IA64_IO_H */
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 5fb2bb9..0490794 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -11,6 +11,5 @@ extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern void iommu_dma_init(void);
 extern void machvec_init(const char *name);
-extern int forbid_dac;
 
 #endif
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 85db124..c60d324 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -365,7 +365,8 @@ struct kvm_vcpu_arch {
 	long itc_offset;
 	unsigned long itc_check;
 	unsigned long timer_check;
-	unsigned long timer_pending;
+	unsigned int timer_pending;
+	unsigned int timer_fired;
 
 	unsigned long vrr[8];
 	unsigned long ibr[8];
@@ -417,6 +418,9 @@ struct kvm_arch {
 	struct list_head assigned_dev_head;
 	struct dmar_domain *intel_iommu_domain;
 	struct hlist_head irq_ack_notifier_list;
+
+	unsigned long irq_sources_bitmap;
+	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
 };
 
 union cpuid3_t {
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 1ea28bc..59c17e4 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,6 +11,7 @@
 #define _ASM_IA64_MACHVEC_H
 
 #include <linux/types.h>
+#include <linux/swiotlb.h>
 
 /* forward declarations: */
 struct device;
@@ -298,27 +299,6 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # endif /* CONFIG_IA64_GENERIC */
 
 /*
- * Declare default routines which aren't declared anywhere else:
- */
-extern ia64_mv_dma_init			swiotlb_init;
-extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
-extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
-extern ia64_mv_dma_map_single		swiotlb_map_single;
-extern ia64_mv_dma_map_single_attrs	swiotlb_map_single_attrs;
-extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
-extern ia64_mv_dma_unmap_single_attrs	swiotlb_unmap_single_attrs;
-extern ia64_mv_dma_map_sg		swiotlb_map_sg;
-extern ia64_mv_dma_map_sg_attrs		swiotlb_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
-extern ia64_mv_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs;
-extern ia64_mv_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device	swiotlb_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device	swiotlb_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
-extern ia64_mv_dma_supported		swiotlb_dma_supported;
-
-/*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
  */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 6bc96ee..c0cea37 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -48,7 +48,6 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
  */
 #define GRANULEROUNDDOWN(n)	((n) & ~(IA64_GRANULE_SIZE-1))
 #define GRANULEROUNDUP(n)	(((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
-#define ORDERROUNDDOWN(n)	((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
 
 #ifdef CONFIG_NUMA
 extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h
index ea310c0..966797a 100644
--- a/arch/ia64/include/asm/sal.h
+++ b/arch/ia64/include/asm/sal.h
@@ -337,11 +337,24 @@ typedef struct sal_log_record_header {
 #define sal_log_severity_fatal		1
 #define sal_log_severity_corrected	2
 
+/*
+ * Error Recovery Info (ERI) bit decode.  From SAL Spec section B.2.2 Table B-3
+ * Error Section Error_Recovery_Info Field Definition.
+ */
+#define ERI_NOT_VALID		0x0	/* Error Recovery Field is not valid */
+#define ERI_NOT_ACCESSIBLE	0x30	/* Resource not accessible */
+#define ERI_CONTAINMENT_WARN	0x22	/* Corrupt data propagated */
+#define ERI_UNCORRECTED_ERROR	0x20	/* Uncorrected error */
+#define ERI_COMPONENT_RESET	0x24	/* Component must be reset */
+#define ERI_CORR_ERROR_LOG	0x21	/* Corrected error, needs logging */
+#define ERI_CORR_ERROR_THRESH	0x29	/* Corrected error threshold exceeded */
+
 /* Definition of log section header structures */
 typedef struct sal_log_sec_header
 {
 	efi_guid_t guid;		/* Unique Section ID */
 	sal_log_revision_t revision;	/* Major and Minor revision of Section */
-	u16 reserved;
+	u8 error_recovery_info;		/* Platform error recovery status */
+	u8 reserved;
 	u32 len;			/* Section length */
 } sal_log_section_hdr_t;
diff --git a/arch/ia64/include/asm/sn/sn_sal.h b/arch/ia64/include/asm/sn/sn_sal.h
index 57e649d..e310fc0 100644
--- a/arch/ia64/include/asm/sn/sn_sal.h
+++ b/arch/ia64/include/asm/sn/sn_sal.h
@@ -90,6 +90,8 @@
 #define  SN_SAL_SET_CPU_NUMBER			   0x02000068
 
 #define  SN_SAL_KERNEL_LAUNCH_EVENT		   0x02000069
+#define  SN_SAL_WATCHLIST_ALLOC			   0x02000070
+#define  SN_SAL_WATCHLIST_FREE			   0x02000071
 
 /*
  * Service-specific constants
@@ -1185,4 +1187,47 @@ ia64_sn_kernel_launch_event(void)
 	SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
 	return rv.status;
 }
+
+union sn_watchlist_u {
+	u64	val;
+	struct {
+		u64	blade	: 16,
+			size	: 32,
+			filler	: 16;
+	};
+};
+
+static inline int
+sn_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
+				unsigned long *intr_mmr_offset)
+{
+	struct ia64_sal_retval rv;
+	unsigned long addr;
+	union sn_watchlist_u size_blade;
+	int watchlist;
+
+	addr = (unsigned long)mq;
+	size_blade.size = mq_size;
+	size_blade.blade = blade;
+
+	/*
+	 * bios returns watchlist number or negative error number.
+	 */
+	ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_ALLOC, addr,
+			size_blade.val, (u64)intr_mmr_offset,
+			(u64)&watchlist, 0, 0, 0);
+	if (rv.status < 0)
+		return rv.status;
+
+	return watchlist;
+}
+
+static inline int
+sn_mq_watchlist_free(int blade, int watchlist_num)
+{
+	struct ia64_sal_retval rv;
+	ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_FREE, blade,
+			watchlist_num, 0, 0, 0, 0, 0);
+	return rv.status;
+}
 #endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 0635015..bd7acc7 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -678,6 +678,30 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
 	return 0;
 }
 
+int __init early_acpi_boot_init(void)
+{
+	int ret;
+
+	/*
+	 * do a partial walk of MADT to determine how many CPUs
+	 * we have including offline CPUs
+	 */
+	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
+		printk(KERN_ERR PREFIX "Can't find MADT\n");
+		return 0;
+	}
+
+	ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
+		acpi_parse_lsapic, NR_CPUS);
+	if (ret < 1)
+		printk(KERN_ERR PREFIX
+		       "Error parsing MADT - no LAPIC entries\n");
+
+	return 0;
+}
+
+
+
 int __init acpi_boot_init(void)
 {
 
@@ -701,11 +725,6 @@ int __init acpi_boot_init(void)
 		printk(KERN_ERR PREFIX
 		       "Error parsing LAPIC address override entry\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
-	    < 1)
-		printk(KERN_ERR PREFIX
-		       "Error parsing MADT - no LAPIC entries\n");
-
 	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
 	    < 0)
 		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 10a75b5..dbdb778 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -12,13 +12,11 @@
 #include <asm/machvec.h>
 #include <linux/dma-mapping.h>
 
-#include <asm/machvec.h>
 #include <asm/system.h>
 
 #ifdef CONFIG_DMAR
 
 #include <linux/kernel.h>
-#include <linux/string.h>
 
 #include <asm/page.h>
 #include <asm/iommu.h>
@@ -89,13 +87,6 @@ int iommu_dma_supported(struct device *dev, u64 mask)
 {
 	struct dma_mapping_ops *ops = get_dma_ops(dev);
 
-#ifdef CONFIG_PCI
-	if (mask > 0xffffffff && forbid_dac > 0) {
-		dev_info(dev, "Disallowing DAC for device\n");
-		return 0;
-	}
-#endif
-
 	if (ops->dma_supported_op)
 		return ops->dma_supported_op(dev, mask);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index ada4605..6543a55 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1995,11 +1995,6 @@ pfm_close(struct inode *inode, struct file *filp)
 		return -EBADF;
 	}
 
-	if (filp->f_flags & FASYNC) {
-		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
-		pfm_do_fasync(-1, filp, ctx, 0);
-	}
-
 	PROTECT_CTX(ctx, flags);
 
 	state = ctx->ctx_state;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ae79117..865af27 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -359,7 +359,7 @@ reserve_memory (void)
 	}
 #endif
 
-#ifdef CONFIG_CRASH_KERNEL
+#ifdef CONFIG_CRASH_DUMP
 	if (reserve_elfcorehdr(&rsvd_region[n].start,
 			       &rsvd_region[n].end) == 0)
 		n++;
@@ -561,8 +561,12 @@ setup_arch (char **cmdline_p)
 #ifdef CONFIG_ACPI
 	/* Initialize the ACPI boot-time table parser */
 	acpi_table_init();
+	early_acpi_boot_init();
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+	prefill_possible_map();
+#endif
 	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
 		32 : cpus_weight(early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
@@ -853,9 +857,6 @@ void __init
 setup_per_cpu_areas (void)
 {
 	/* start_kernel() requires this... */
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-	prefill_possible_map();
-#endif
 }
 
 /*
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 8e99fed..f833a0b 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -20,6 +20,8 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM && EXPERIMENTAL
+	# for device assignment:
+	depends on PCI
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	---help---
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index cf37f8f..3ab4d6d 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -29,13 +29,18 @@ define cmd_offsets
 	 echo ""; \
 	 echo "#endif" ) > $@
 endef
+
 # We use internal rules to avoid the "is up to date" message from make
-arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c
+arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c \
+			$(wildcard $(srctree)/arch/ia64/include/asm/*.h)\
+			$(wildcard $(srctree)/include/linux/*.h)
 	$(call if_changed_dep,cc_s_c)
 
 $(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
 	$(call cmd,offsets)
 
+FORCE : $(obj)/$(offsets-file)
+
 #
 # Makefile for Kernel-based Virtual Machine module
 #
@@ -53,7 +58,6 @@ endif
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
 
-FORCE : $(obj)/$(offsets-file)
 EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
 kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
 	vtlb.o process.o
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index a312c9e..af1464f 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -385,6 +385,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	struct kvm *kvm = vcpu->kvm;
 	struct call_data call_data;
 	int i;
+
 	call_data.ptc_g_data = p->u.ptc_g_data;
 
 	for (i = 0; i < KVM_MAX_VCPUS; i++) {
@@ -418,33 +419,41 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	ktime_t kt;
 	long itc_diff;
 	unsigned long vcpu_now_itc;
-
 	unsigned long expires;
 	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
 	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
 	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
 
-	vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+	if (irqchip_in_kernel(vcpu->kvm)) {
 
-	if (time_after(vcpu_now_itc, vpd->itm)) {
-		vcpu->arch.timer_check = 1;
-		return 1;
-	}
-	itc_diff = vpd->itm - vcpu_now_itc;
-	if (itc_diff < 0)
-		itc_diff = -itc_diff;
+		vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
 
-	expires = div64_u64(itc_diff, cyc_per_usec);
-	kt = ktime_set(0, 1000 * expires);
-	vcpu->arch.ht_active = 1;
-	hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+		if (time_after(vcpu_now_itc, vpd->itm)) {
+			vcpu->arch.timer_check = 1;
+			return 1;
+		}
+		itc_diff = vpd->itm - vcpu_now_itc;
+		if (itc_diff < 0)
+			itc_diff = -itc_diff;
+
+		expires = div64_u64(itc_diff, cyc_per_usec);
+		kt = ktime_set(0, 1000 * expires);
+
+		down_read(&vcpu->kvm->slots_lock);
+		vcpu->arch.ht_active = 1;
+		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
 
-	if (irqchip_in_kernel(vcpu->kvm)) {
 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 		kvm_vcpu_block(vcpu);
 		hrtimer_cancel(p_ht);
 		vcpu->arch.ht_active = 0;
+
+		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+				vcpu->arch.mp_state =
+					KVM_MP_STATE_RUNNABLE;
+		up_read(&vcpu->kvm->slots_lock);
+
+		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
 			return -EINTR;
 		return 1;
@@ -484,10 +493,6 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 static const int kvm_vti_max_exit_handlers =
 		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
 
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
 static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p_exit_data;
@@ -600,8 +605,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 again:
 	preempt_disable();
-
-	kvm_prepare_guest_switch(vcpu);
 	local_irq_disable();
 
 	if (signal_pending(current)) {
@@ -614,7 +617,7 @@ again:
 
 	vcpu->guest_mode = 1;
 	kvm_guest_enter();
-
+	down_read(&vcpu->kvm->slots_lock);
 	r = vti_vcpu_run(vcpu, kvm_run);
 	if (r < 0) {
 		local_irq_enable();
@@ -634,9 +637,8 @@ again:
 	 * But we need to prevent reordering, hence this barrier():
 	 */
 	barrier();
-
 	kvm_guest_exit();
-
+	up_read(&vcpu->kvm->slots_lock);
 	preempt_enable();
 
 	r = kvm_handle_exit(kvm_run, vcpu);
@@ -671,15 +673,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	vcpu_load(vcpu);
 
+	if (vcpu->sigset_active)
+		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
-		vcpu_put(vcpu);
-		return -EAGAIN;
+		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+		r = -EAGAIN;
+		goto out;
 	}
 
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
 	if (vcpu->mmio_needed) {
 		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 		kvm_set_mmio_data(vcpu);
@@ -687,7 +690,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_needed = 0;
 	}
 	r = __vcpu_run(vcpu, kvm_run);
-
+out:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
@@ -778,6 +781,9 @@ static void kvm_init_vm(struct kvm *kvm)
 	kvm_build_io_pmt(kvm);
 
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
+
+	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
+	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
 }
 
 struct kvm *kvm_arch_create_vm(void)
@@ -941,9 +947,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			mutex_lock(&kvm->lock);
-			kvm_ioapic_set_irq(kvm->arch.vioapic,
-					irq_event.irq,
-					irq_event.level);
+			kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+				    irq_event.irq, irq_event.level);
 			mutex_unlock(&kvm->lock);
 			r = 0;
 		}
@@ -1123,15 +1128,16 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
 	wait_queue_head_t *q;
 
 	vcpu  = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+	q = &vcpu->wq;
+
 	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
 		goto out;
 
-	q = &vcpu->wq;
-
-	if (waitqueue_active(q)) {
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+	if (waitqueue_active(q))
 		wake_up_interruptible(q);
-	}
+
 out:
+	vcpu->arch.timer_fired = 1;
 	vcpu->arch.timer_check = 1;
 	return HRTIMER_NORESTART;
 }
@@ -1700,12 +1706,14 @@ static void vcpu_kick_intr(void *info)
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
 	int ipi_pcpu = vcpu->cpu;
+	int cpu = get_cpu();
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
 
-	if (vcpu->guest_mode)
+	if (vcpu->guest_mode && cpu != ipi_pcpu)
 		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+	put_cpu();
 }
 
@@ -1715,13 +1723,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
 
 	if (!test_and_set_bit(vec, &vpd->irr[0])) {
 		vcpu->arch.irq_new_pending = 1;
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-			kvm_vcpu_kick(vcpu);
-		else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
-			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-			if (waitqueue_active(&vcpu->wq))
-				wake_up_interruptible(&vcpu->wq);
-		}
+		kvm_vcpu_kick(vcpu);
 		return 1;
 	}
 	return 0;
@@ -1791,7 +1793,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return 0;
+	return vcpu->arch.timer_fired;
 }
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index 0c69d9e..cb7600b 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -286,6 +286,12 @@ static  u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
 	return index;
 }
 
+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.timer_pending = 1;
+	vcpu->arch.timer_fired = 0;
+}
+
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 
@@ -304,11 +310,10 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		break;
 	case PAL_HALT_LIGHT:
 	{
-		vcpu->arch.timer_pending = 1;
 		INIT_PAL_STATUS_SUCCESS(result);
+		prepare_for_halt(vcpu);
 		if (kvm_highest_pending_irq(vcpu) == -1)
 			ret = kvm_emulate_halt(vcpu);
-
 	}
 	break;
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 3417783..8008173 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -713,7 +713,7 @@ void leave_hypervisor_tail(void)
 		if (!(VCPU(v, itv) & (1 << 16))) {
 			vcpu_pend_interrupt(v, VCPU(v, itv)
 						& 0xff);
-		VMX(v, itc_check) = 0;
+			VMX(v, itc_check) = 0;
 		} else {
 			v->arch.timer_pending = 1;
 		}
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 341e3fe..e9b2a4e 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -384,6 +384,10 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define MODE_IND(psr)	\
 	(((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
 
+#ifndef CONFIG_SMP
+#define _vmm_raw_spin_lock(x)	 do {}while(0)
+#define _vmm_raw_spin_unlock(x)	 do {}while(0)
+#else
 #define _vmm_raw_spin_lock(x)		\
 	do {	\
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x); \
@@ -403,6 +407,7 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 	do { barrier();				\
 	     ((spinlock_t *)x)->raw_lock.lock = 0; } \
 	while (0)
+#endif
 
 void vmm_spin_lock(spinlock_t *lock);
 void vmm_spin_unlock(spinlock_t *lock);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d8c5fcd..d85ba98 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -635,7 +635,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int node)
 			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
 #endif
 	start = GRANULEROUNDDOWN(start);
-	start = ORDERROUNDDOWN(start);
 	end = GRANULEROUNDUP(end);
 	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
 				     end >> PAGE_SHIFT);
diff --git a/arch/ia64/uv/kernel/setup.c b/arch/ia64/uv/kernel/setup.c
index cf5f28a..7a5ae63 100644
--- a/arch/ia64/uv/kernel/setup.c
+++ b/arch/ia64/uv/kernel/setup.c
@@ -19,6 +19,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
 
 #ifdef CONFIG_IA64_SGI_UV
 int sn_prom_type;
+long sn_partition_id;
+EXPORT_SYMBOL(sn_partition_id);
+long sn_coherency_id;
+EXPORT_SYMBOL_GPL(sn_coherency_id);
+long sn_region_size;
+EXPORT_SYMBOL(sn_region_size);
 #endif
 
 struct redir_addr {