43 files changed, 669 insertions, 780 deletions
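Most of this series converts Xen frontend and backend drivers from open-coded xenbus_scanf() reads of optional unsigned values to the new xenbus_read_unsigned() helper added in drivers/xen/xenbus/xenbus_xs.c. The sketch below is illustrative only and is not part of the patch: the node name "feature-foo" and the two wrapper functions are invented for the example, while xenbus_scanf() and xenbus_read_unsigned() are the interfaces shown in the diff.

#include <xen/xenbus.h>

/* Old pattern: xenbus_scanf() plus an explicit fallback when the node is absent. */
static unsigned int read_feature_foo_old(struct xenbus_device *dev)
{
	unsigned int val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-foo", "%u", &val) <= 0)
		val = 0;	/* node missing or unreadable: use the default */
	return val;
}

/* New pattern: the default value is folded into a single call. */
static unsigned int read_feature_foo_new(struct xenbus_device *dev)
{
	return xenbus_read_unsigned(dev->otherend, "feature-foo", 0);
}

The same transformation is applied throughout the block, net, input, video, SCSI and TPM drivers below.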
diff --git a/MAINTAINERS b/MAINTAINERS
index bbb4450..3d3ed12 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13354,7 +13354,6 @@ F:	drivers/media/tuners/tuner-xc2028.*
 XEN HYPERVISOR INTERFACE
 M:	Boris Ostrovsky <boris.ostrovsky@oracle.com>
-M:	David Vrabel <david.vrabel@citrix.com>
 M:	Juergen Gross <jgross@suse.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 9d874db..3522cba 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -1,87 +1 @@
-/******************************************************************************
- * hypercall.h
- *
- * Linux-specific hypervisor handling.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _ASM_ARM_XEN_HYPERCALL_H
-#define _ASM_ARM_XEN_HYPERCALL_H
-
-#include <linux/bug.h>
-
-#include <xen/interface/xen.h>
-#include <xen/interface/sched.h>
-#include <xen/interface/platform.h>
-
-long privcmd_call(unsigned call, unsigned long a1,
-		unsigned long a2, unsigned long a3,
-		unsigned long a4, unsigned long a5);
-int HYPERVISOR_xen_version(int cmd, void *arg);
-int HYPERVISOR_console_io(int cmd, int count, char *str);
-int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
-int HYPERVISOR_sched_op(int cmd, void *arg);
-int HYPERVISOR_event_channel_op(int cmd, void *arg);
-unsigned long HYPERVISOR_hvm_op(int op, void *arg);
-int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
-int HYPERVISOR_physdev_op(int cmd, void *arg);
-int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
-int HYPERVISOR_tmem_op(void *arg);
-int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
-int HYPERVISOR_platform_op_raw(void *arg);
-static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
-{
-	op->interface_version = XENPF_INTERFACE_VERSION;
-	return HYPERVISOR_platform_op_raw(op);
-}
-int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
-
-static inline int
-HYPERVISOR_suspend(unsigned long start_info_mfn)
-{
-	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
-
-	/* start_info_mfn is unused on ARM */
-	return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
-}
-
-static inline void
-MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
-			unsigned int new_val, unsigned long flags)
-{
-	BUG();
-}
-
-static inline void
-MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
-		int count, int *success_count, domid_t domid)
-{
-	BUG();
-}
-
-#endif /* _ASM_ARM_XEN_HYPERCALL_H */
+#include <xen/arm/hypercall.h>
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index 9525151..d6e7709 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -1,39 +1 @@
-#ifndef _ASM_ARM_XEN_HYPERVISOR_H
-#define _ASM_ARM_XEN_HYPERVISOR_H
-
-#include <linux/init.h>
-
-extern struct shared_info *HYPERVISOR_shared_info;
-extern struct start_info *xen_start_info;
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
-	PARAVIRT_LAZY_NONE,
-	PARAVIRT_LAZY_MMU,
-	PARAVIRT_LAZY_CPU,
-};
-
-static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
-	return PARAVIRT_LAZY_NONE;
-}
-
-extern struct dma_map_ops *xen_dma_ops;
-
-#ifdef CONFIG_XEN
-void __init xen_early_init(void);
-#else
-static inline void xen_early_init(void) { return; }
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static inline void xen_arch_register_cpu(int num)
-{
-}
-
-static inline void xen_arch_unregister_cpu(int num)
-{
-}
-#endif
-
-#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
+#include <xen/arm/hypervisor.h>
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 75d5968..88c0d75 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -1,85 +1 @@
-/******************************************************************************
- * Guest OS interface to ARM Xen.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- */
-
-#ifndef _ASM_ARM_XEN_INTERFACE_H
-#define _ASM_ARM_XEN_INTERFACE_H
-
-#include <linux/types.h>
-
-#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
-
-#define __DEFINE_GUEST_HANDLE(name, type) \
-	typedef struct { union { type *p; uint64_aligned_t q; }; } \
-		__guest_handle_ ## name
-
-#define DEFINE_GUEST_HANDLE_STRUCT(name) \
-	__DEFINE_GUEST_HANDLE(name, struct name)
-#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
-#define GUEST_HANDLE(name) __guest_handle_ ## name
-
-#define set_xen_guest_handle(hnd, val)			\
-	do {						\
-		if (sizeof(hnd) == 8)			\
-			*(uint64_t *)&(hnd) = 0;	\
-		(hnd).p = val;				\
-	} while (0)
-
-#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
-
-#ifndef __ASSEMBLY__
-/* Explicitly size integers that represent pfns in the interface with
- * Xen so that we can have one ABI that works for 32 and 64 bit guests.
- * Note that this means that the xen_pfn_t type may be capable of
- * representing pfn's which the guest cannot represent in its own pfn
- * type. However since pfn space is controlled by the guest this is
- * fine since it simply wouldn't be able to create any sure pfns in
- * the first place.
- */
-typedef uint64_t xen_pfn_t;
-#define PRI_xen_pfn "llx"
-typedef uint64_t xen_ulong_t;
-#define PRI_xen_ulong "llx"
-typedef int64_t xen_long_t;
-#define PRI_xen_long "llx"
-/* Guest handles for primitive C types. */
-__DEFINE_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_GUEST_HANDLE(uint, unsigned int);
-DEFINE_GUEST_HANDLE(char);
-DEFINE_GUEST_HANDLE(int);
-DEFINE_GUEST_HANDLE(void);
-DEFINE_GUEST_HANDLE(uint64_t);
-DEFINE_GUEST_HANDLE(uint32_t);
-DEFINE_GUEST_HANDLE(xen_pfn_t);
-DEFINE_GUEST_HANDLE(xen_ulong_t);
-
-/* Maximum number of virtual CPUs in multi-processor guests. */
-#define MAX_VIRT_CPUS 1
-
-struct arch_vcpu_info { };
-struct arch_shared_info { };
-
-/* TODO: Move pvclock definitions some place arch independent */
-struct pvclock_vcpu_time_info {
-	u32 version;
-	u32 pad0;
-	u64 tsc_timestamp;
-	u64 system_time;
-	u32 tsc_to_system_mul;
-	s8 tsc_shift;
-	u8 flags;
-	u8 pad[2];
-} __attribute__((__packed__)); /* 32 bytes */
-
-/* It is OK to have a 12 bytes struct with no padding because it is packed */
-struct pvclock_wall_clock {
-	u32 version;
-	u32 sec;
-	u32 nsec;
-	u32 sec_hi;
-} __attribute__((__packed__));
-#endif
-
-#endif /* _ASM_ARM_XEN_INTERFACE_H */
+#include <xen/arm/interface.h>
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 95ce6ac..b3ef061 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1,98 +1 @@
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-mapping.h>
-
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs);
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs);
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long page_pfn = page_to_xen_pfn(page);
-	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
-	unsigned long compound_pages =
-		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
-	bool local = (page_pfn <= dev_pfn) &&
-		(dev_pfn - page_pfn < compound_pages);
-
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can span across
-	 * multiple Xen pages, it's not possible for it to contain a
-	 * mix of local and foreign Xen pages. So if the first xen_pfn
-	 * == mfn the page is local otherwise it's a foreign page
-	 * grant-mapped in dom0. If the page is local we can safely
-	 * call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (local)
-		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
-	else
-		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-	 * multiple Xen page, it's not possible to have a mix of local and
-	 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
-	 * foreign mfn will always return false. If the page is local we can
-	 * safely call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (pfn_valid(pfn)) {
-		if (__generic_dma_ops(hwdev)->unmap_page)
-			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-	} else
-		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
-			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (__generic_dma_ops(hwdev)->sync_single_for_device)
-			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+#include <xen/arm/page-coherent.h>
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 415dbc6..31bbc80 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1,122 +1 @@
-#ifndef _ASM_ARM_XEN_PAGE_H
-#define _ASM_ARM_XEN_PAGE_H
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#include <linux/pfn.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-
-#include <xen/xen.h>
-#include <xen/interface/grant_table.h>
-
-#define phys_to_machine_mapping_valid(pfn) (1)
-
-/* Xen machine address */
-typedef struct xmaddr {
-	phys_addr_t maddr;
-} xmaddr_t;
-
-/* Xen pseudo-physical address */
-typedef struct xpaddr {
-	phys_addr_t paddr;
-} xpaddr_t;
-
-#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
-#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })
-
-#define INVALID_P2M_ENTRY	(~0UL)
-
-/*
- * The pseudo-physical frame (pfn) used in all the helpers is always based
- * on Xen page granularity (i.e 4KB).
- *
- * A Linux page may be split across multiple non-contiguous Xen page so we
- * have to keep track with frame based on 4KB page granularity.
- *
- * PV drivers should never make a direct usage of those helpers (particularly
- * pfn_to_gfn and gfn_to_pfn).
- */
-
-unsigned long __pfn_to_mfn(unsigned long pfn);
-extern struct rb_root phys_to_mach;
-
-/* Pseudo-physical <-> Guest conversion */
-static inline unsigned long pfn_to_gfn(unsigned long pfn)
-{
-	return pfn;
-}
-
-static inline unsigned long gfn_to_pfn(unsigned long gfn)
-{
-	return gfn;
-}
-
-/* Pseudo-physical <-> BUS conversion */
-static inline unsigned long pfn_to_bfn(unsigned long pfn)
-{
-	unsigned long mfn;
-
-	if (phys_to_mach.rb_node != NULL) {
-		mfn = __pfn_to_mfn(pfn);
-		if (mfn != INVALID_P2M_ENTRY)
-			return mfn;
-	}
-
-	return pfn;
-}
-
-static inline unsigned long bfn_to_pfn(unsigned long bfn)
-{
-	return bfn;
-}
-
-#define bfn_to_local_pfn(bfn)	bfn_to_pfn(bfn)
-
-/* VIRT <-> GUEST conversion */
-#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
-#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
-
-/* Only used in PV code. But ARM guests are always HVM. */
-static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
-{
-	BUG();
-}
-
-/* TODO: this shouldn't be here but it is because the frontend drivers
- * are using it (its rolled in headers) even though we won't hit the code path.
- * So for right now just punt with this.
- */
-static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
-{
-	BUG();
-	return NULL;
-}
-
-extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
-				   struct gnttab_map_grant_ref *kmap_ops,
-				   struct page **pages, unsigned int count);
-
-extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
-				     struct gnttab_unmap_grant_ref *kunmap_ops,
-				     struct page **pages, unsigned int count);
-
-bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
-		unsigned long nr_pages);
-
-static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	return __set_phys_to_machine(pfn, mfn);
-}
-
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
-bool xen_arch_need_swiotlb(struct device *dev,
-			   phys_addr_t phys,
-			   dma_addr_t dev_addr);
-unsigned long xen_get_swiotlb_free_pages(unsigned int order);
-
-#endif /* _ASM_ARM_XEN_PAGE_H */
+#include <xen/arm/page.h>
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f193414..4986dc0 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -372,8 +372,7 @@ static int __init xen_guest_init(void)
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
-	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
-				       sizeof(struct vcpu_info));
+	xen_vcpu_info = alloc_percpu(struct vcpu_info);
 	if (xen_vcpu_info == NULL)
 		return -ENOMEM;
diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h
index 74b0c42..3522cba 100644
--- a/arch/arm64/include/asm/xen/hypercall.h
+++ b/arch/arm64/include/asm/xen/hypercall.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/hypercall.h>
+#include <xen/arm/hypercall.h>
diff --git a/arch/arm64/include/asm/xen/hypervisor.h b/arch/arm64/include/asm/xen/hypervisor.h
index f263da8..d6e7709 100644
--- a/arch/arm64/include/asm/xen/hypervisor.h
+++ b/arch/arm64/include/asm/xen/hypervisor.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/hypervisor.h>
+#include <xen/arm/hypervisor.h>
diff --git a/arch/arm64/include/asm/xen/interface.h b/arch/arm64/include/asm/xen/interface.h
index 44457ae..88c0d75 100644
--- a/arch/arm64/include/asm/xen/interface.h
+++ b/arch/arm64/include/asm/xen/interface.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/interface.h>
+#include <xen/arm/interface.h>
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2052102..b3ef061 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/page-coherent.h>
+#include <xen/arm/page-coherent.h>
diff --git a/arch/arm64/include/asm/xen/page.h b/arch/arm64/include/asm/xen/page.h
index bed87ec..31bbc80 100644
--- a/arch/arm64/include/asm/xen/page.h
+++ b/arch/arm64/include/asm/xen/page.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/page.h>
+#include <xen/arm/page.h>
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 476b574..ec23d8e 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -1,13 +1,17 @@
 #ifndef _ASM_X86_E820_H
 #define _ASM_X86_E820_H

-#ifdef CONFIG_EFI
+/*
+ * E820_X_MAX is the maximum size of the extended E820 table. The extended
+ * table may contain up to 3 extra E820 entries per possible NUMA node, so we
+ * make room for 3 * MAX_NUMNODES possible entries, beyond the standard 128.
+ * Also note that E820_X_MAX *must* be defined before we include uapi/asm/e820.h.
+ */
 #include <linux/numa.h>
 #define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
-#else /* ! CONFIG_EFI */
-#define E820_X_MAX E820MAX
-#endif
+
 #include <uapi/asm/e820.h>
+
 #ifndef __ASSEMBLY__
 /* see comment in arch/x86/kernel/e820.c */
 extern struct e820map *e820;
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index bedfab98..e1fb269 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -264,8 +264,8 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 	return 0;

 error:
-	dev_err(&dev->dev,
-		"Xen PCI frontend has not registered MSI/MSI-X support!\n");
+	dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
+		type == PCI_CAP_ID_MSI ? "" : "-X", irq);
 	return irq;
 }
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f8960fc..8c394e30 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -41,7 +41,7 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 unsigned long xen_released_pages;

 /* E820 map used during setting up memory. */
-static struct e820entry xen_e820_map[E820MAX] __initdata;
+static struct e820entry xen_e820_map[E820_X_MAX] __initdata;
 static u32 xen_e820_map_entries __initdata;

 /*
@@ -750,7 +750,7 @@ char * __init xen_memory_setup(void)
 	max_pfn = min(max_pfn, xen_start_info->nr_pages);
 	mem_end = PFN_PHYS(max_pfn);

-	memmap.nr_entries = E820MAX;
+	memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
 	set_xen_guest_handle(memmap.buffer, xen_e820_map);

 	op = xen_initial_domain() ?
@@ -923,7 +923,7 @@ char * __init xen_auto_xlated_memory_setup(void)
 	int i;
 	int rc;

-	memmap.nr_entries = E820MAX;
+	memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
 	set_xen_guest_handle(memmap.buffer, xen_e820_map);

 	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3cc6d1d..415e79b 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -533,13 +533,11 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
 	struct xenbus_device *dev = be->dev;
 	struct xen_blkif *blkif = be->blkif;
 	int err;
-	int state = 0, discard_enable;
+	int state = 0;
 	struct block_device *bdev = be->blkif->vbd.bdev;
 	struct request_queue *q = bdev_get_queue(bdev);

-	err = xenbus_scanf(XBT_NIL, dev->nodename, "discard-enable", "%d",
-			   &discard_enable);
-	if (err == 1 && !discard_enable)
+	if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
 		return;

 	if (blk_queue_discard(q)) {
@@ -1039,30 +1037,24 @@ static int connect_ring(struct backend_info *be)
 		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
 		return -ENOSYS;
 	}
-	err = xenbus_scanf(XBT_NIL, dev->otherend,
-			   "feature-persistent", "%u", &pers_grants);
-	if (err <= 0)
-		pers_grants = 0;
-
+	pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
+					   0);
 	be->blkif->vbd.feature_gnt_persistent = pers_grants;
 	be->blkif->vbd.overflow_max_grants = 0;

 	/*
	 * Read the number of hardware queues from frontend.
	 */
-	err = xenbus_scanf(XBT_NIL, dev->otherend, "multi-queue-num-queues",
-			   "%u", &requested_num_queues);
-	if (err < 0) {
-		requested_num_queues = 1;
-	} else {
-		if (requested_num_queues > xenblk_max_queues
-		    || requested_num_queues == 0) {
-			/* Buggy or malicious guest. */
-			xenbus_dev_fatal(dev, err,
-					"guest requested %u queues, exceeding the maximum of %u.",
-					requested_num_queues, xenblk_max_queues);
-			return -ENOSYS;
-		}
+	requested_num_queues = xenbus_read_unsigned(dev->otherend,
+						    "multi-queue-num-queues",
+						    1);
+	if (requested_num_queues > xenblk_max_queues
+	    || requested_num_queues == 0) {
+		/* Buggy or malicious guest. */
+		xenbus_dev_fatal(dev, err,
+				 "guest requested %u queues, exceeding the maximum of %u.",
+				 requested_num_queues, xenblk_max_queues);
+		return -ENOSYS;
 	}
 	be->blkif->nr_rings = requested_num_queues;
 	if (xen_blkif_alloc_rings(be->blkif))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c000fdf..b2bdfa8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1758,17 +1758,13 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	const char *message = NULL;
 	struct xenbus_transaction xbt;
 	int err;
-	unsigned int i, max_page_order = 0;
-	unsigned int ring_page_order = 0;
+	unsigned int i, max_page_order;
+	unsigned int ring_page_order;

-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "max-ring-page-order", "%u", &max_page_order);
-	if (err != 1)
-		info->nr_ring_pages = 1;
-	else {
-		ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
-		info->nr_ring_pages = 1 << ring_page_order;
-	}
+	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
+					      "max-ring-page-order", 0);
+	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
+	info->nr_ring_pages = 1 << ring_page_order;

 	for (i = 0; i < info->nr_rings; i++) {
 		struct blkfront_ring_info *rinfo = &info->rinfo[i];
@@ -1877,18 +1873,14 @@ again:

 static int negotiate_mq(struct blkfront_info *info)
 {
-	unsigned int backend_max_queues = 0;
-	int err;
+	unsigned int backend_max_queues;
 	unsigned int i;

 	BUG_ON(info->nr_rings);

 	/* Check if backend supports multiple queues. */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "multi-queue-max-queues", "%u", &backend_max_queues);
-	if (err < 0)
-		backend_max_queues = 1;
-
+	backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
+						  "multi-queue-max-queues", 1);
 	info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
 	/* We need at least one ring. */
 	if (!info->nr_rings)
@@ -2196,7 +2188,6 @@ static void blkfront_setup_discard(struct blkfront_info *info)
 	int err;
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
-	unsigned int discard_secure;

 	info->feature_discard = 1;
 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
@@ -2207,10 +2198,9 @@
 		info->discard_granularity = discard_granularity;
 		info->discard_alignment = discard_alignment;
 	}
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "discard-secure", "%u", &discard_secure);
-	if (err > 0)
-		info->feature_secdiscard = !!discard_secure;
+	info->feature_secdiscard =
+		!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
+				       0);
 }

 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
@@ -2302,16 +2292,11 @@ out_of_memory:
  */
 static void blkfront_gather_backend_features(struct blkfront_info *info)
 {
-	int err;
-	int barrier, flush, discard, persistent;
 	unsigned int indirect_segments;

 	info->feature_flush = 0;
 	info->feature_fua = 0;

-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-barrier", "%d", &barrier);
-
 	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
@@ -2319,7 +2304,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
	 *
	 * If there are barriers, then we use flush.
	 */
-	if (err > 0 && barrier) {
+	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
 		info->feature_flush = 1;
 		info->feature_fua = 1;
 	}
@@ -2328,35 +2313,23 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
	 * And if there is "feature-flush-cache" use that above
	 * barriers.
	 */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-flush-cache", "%d", &flush);
-
-	if (err > 0 && flush) {
+	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
+				 0)) {
 		info->feature_flush = 1;
 		info->feature_fua = 0;
 	}

-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-discard", "%d", &discard);
-
-	if (err > 0 && discard)
+	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
 		blkfront_setup_discard(info);

-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-persistent", "%d", &persistent);
-	if (err <= 0)
-		info->feature_persistent = 0;
-	else
-		info->feature_persistent = persistent;
+	info->feature_persistent =
+		xenbus_read_unsigned(info->xbdev->otherend,
+				     "feature-persistent", 0);

-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-max-indirect-segments", "%u",
-			   &indirect_segments);
-	if (err <= 0)
-		info->max_indirect_segments = 0;
-	else
-		info->max_indirect_segments = min(indirect_segments,
-						  xen_blkif_max_segments);
+	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
+					"feature-max-indirect-segments", 0);
+	info->max_indirect_segments = min(indirect_segments,
+					  xen_blkif_max_segments);
 }

 /*
@@ -2421,11 +2394,9 @@ static void blkfront_connect(struct blkfront_info *info)
	 * provide this. Assume physical sector size to be the same as
	 * sector_size in that case.
	 */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "physical-sector-size", "%u", &physical_sector_size);
-	if (err != 1)
-		physical_sector_size = sector_size;
-
+	physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
+						    "physical-sector-size",
+						    sector_size);
 	blkfront_gather_backend_features(info);
 	for (i = 0; i < info->nr_rings; i++) {
 		err = blkfront_setup_indirect(&info->rinfo[i]);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 62028f4..50072cc 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -337,18 +337,14 @@ static int tpmfront_resume(struct xenbus_device *dev)
 static void backend_changed(struct xenbus_device *dev,
		enum xenbus_state backend_state)
 {
-	int val;
-
 	switch (backend_state) {
 	case XenbusStateInitialised:
 	case XenbusStateConnected:
 		if (dev->state == XenbusStateConnected)
 			break;

-		if (xenbus_scanf(XBT_NIL, dev->otherend,
-				 "feature-protocol-v2", "%d", &val) < 0)
-			val = 0;
-		if (!val) {
+		if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
+					  0)) {
 			xenbus_dev_fatal(dev, -EINVAL,
					 "vTPM protocol 2 required");
 			return;
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 227fbd2..3900875 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -108,7 +108,8 @@ static irqreturn_t input_handler(int rq, void *dev_id)
 static int xenkbd_probe(struct xenbus_device *dev,
				  const struct xenbus_device_id *id)
 {
-	int ret, i, abs;
+	int ret, i;
+	unsigned int abs;
 	struct xenkbd_info *info;
 	struct input_dev *kbd, *ptr;
@@ -127,8 +128,7 @@ static int xenkbd_probe(struct xenbus_device *dev,
 	if (!info->page)
 		goto error_nomem;

-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
-		abs = 0;
+	abs = xenbus_read_unsigned(dev->otherend, "feature-abs-pointer", 0);
 	if (abs) {
 		ret = xenbus_write(XBT_NIL, dev->nodename,
				   "request-abs-pointer", "1");
@@ -322,11 +322,8 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
 	case XenbusStateInitWait:
InitWait:
-		ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-				   "feature-abs-pointer", "%d", &val);
-		if (ret < 0)
-			val = 0;
-		if (val) {
+		if (xenbus_read_unsigned(info->xbdev->otherend,
+					 "feature-abs-pointer", 0)) {
 			ret = xenbus_write(XBT_NIL, info->xbdev->nodename,
					   "request-abs-pointer", "1");
 			if (ret)
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 55a4488..3124eae 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -785,12 +785,9 @@ static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
 	struct xenvif *vif = container_of(watch, struct xenvif,
					  mcast_ctrl_watch);
 	struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
-	int val;

-	if (xenbus_scanf(XBT_NIL, dev->otherend,
-			 "request-multicast-control", "%d", &val) < 0)
-		val = 0;
-	vif->multicast_control = !!val;
+	vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
+					"request-multicast-control", 0);
 }

 static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
@@ -934,14 +931,11 @@ static void connect(struct backend_info *be)
 	/* Check whether the frontend requested multiple queues
	 * and read the number requested.
	 */
-	err = xenbus_scanf(XBT_NIL, dev->otherend,
-			   "multi-queue-num-queues",
-			   "%u", &requested_num_queues);
-	if (err < 0) {
-		requested_num_queues = 1; /* Fall back to single queue */
-	} else if (requested_num_queues > xenvif_max_queues) {
+	requested_num_queues = xenbus_read_unsigned(dev->otherend,
+					"multi-queue-num-queues", 1);
+	if (requested_num_queues > xenvif_max_queues) {
 		/* buggy or malicious guest */
-		xenbus_dev_fatal(dev, err,
+		xenbus_dev_fatal(dev, -EINVAL,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenvif_max_queues);
 		return;
@@ -1134,7 +1128,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 	struct xenvif *vif = be->vif;
 	struct xenbus_device *dev = be->dev;
 	unsigned int rx_copy;
-	int err, val;
+	int err;

 	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
@@ -1150,10 +1144,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 	if (!rx_copy)
 		return -EOPNOTSUPP;

-	if (xenbus_scanf(XBT_NIL, dev->otherend,
-			 "feature-rx-notify", "%d", &val) < 0)
-		val = 0;
-	if (!val) {
+	if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
 		/* - Reduce drain timeout to poll more frequently for
		 *   Rx requests.
		 * - Disable Rx stall detection.
@@ -1162,34 +1153,21 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 		be->vif->stall_timeout = 0;
 	}

-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
-			 "%d", &val) < 0)
-		val = 0;
-	vif->can_sg = !!val;
+	vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);

 	vif->gso_mask = 0;

-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
+	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
 		vif->gso_mask |= GSO_BIT(TCPV4);

-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
+	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
 		vif->gso_mask |= GSO_BIT(TCPV6);

-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
-			 "%d", &val) < 0)
-		val = 0;
-	vif->ip_csum = !val;
+	vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
+					     "feature-no-csum-offload", 0);

-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
-			 "%d", &val) < 0)
-		val = 0;
-	vif->ipv6_csum = !!val;
+	vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
+						"feature-ipv6-csum-offload", 0);

 	return 0;
 }
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e085c8c..a479cd9 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1169,43 +1169,23 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
	netdev_features_t features)
 {
 	struct netfront_info *np = netdev_priv(dev);
-	int val;

-	if (features & NETIF_F_SG) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
-				 "%d", &val) < 0)
-			val = 0;
+	if (features & NETIF_F_SG &&
+	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
+		features &= ~NETIF_F_SG;

-		if (!val)
-			features &= ~NETIF_F_SG;
-	}
-
-	if (features & NETIF_F_IPV6_CSUM) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-				 "feature-ipv6-csum-offload", "%d", &val) < 0)
-			val = 0;
-
-		if (!val)
-			features &= ~NETIF_F_IPV6_CSUM;
-	}
-
-	if (features & NETIF_F_TSO) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-				 "feature-gso-tcpv4", "%d", &val) < 0)
-			val = 0;
+	if (features & NETIF_F_IPV6_CSUM &&
+	    !xenbus_read_unsigned(np->xbdev->otherend,
+				  "feature-ipv6-csum-offload", 0))
+		features &= ~NETIF_F_IPV6_CSUM;

-		if (!val)
-			features &= ~NETIF_F_TSO;
-	}
+	if (features & NETIF_F_TSO &&
+	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
+		features &= ~NETIF_F_TSO;

-	if (features & NETIF_F_TSO6) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-				 "feature-gso-tcpv6", "%d", &val) < 0)
-			val = 0;
-
-		if (!val)
-			features &= ~NETIF_F_TSO6;
-	}
+	if (features & NETIF_F_TSO6 &&
+	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
+		features &= ~NETIF_F_TSO6;

 	return features;
 }
@@ -1823,18 +1803,13 @@ static int talk_to_netback(struct xenbus_device *dev,
 	info->netdev->irq = 0;

 	/* Check if backend supports multiple queues */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "multi-queue-max-queues", "%u", &max_queues);
-	if (err < 0)
-		max_queues = 1;
+	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
+					  "multi-queue-max-queues", 1);
 	num_queues = min(max_queues, xennet_max_queues);

 	/* Check feature-split-event-channels */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-split-event-channels", "%u",
-			   &feature_split_evtchn);
-	if (err < 0)
-		feature_split_evtchn = 0;
+	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
+					"feature-split-event-channels", 0);

 	/* Read mac addr. */
 	err = xen_net_read_mac(dev, info->netdev->dev_addr);
@@ -1968,16 +1943,10 @@ static int xennet_connect(struct net_device *dev)
 	struct netfront_info *np = netdev_priv(dev);
 	unsigned int num_queues = 0;
 	int err;
-	unsigned int feature_rx_copy;
 	unsigned int j = 0;
 	struct netfront_queue *queue = NULL;

-	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-			   "feature-rx-copy", "%u", &feature_rx_copy);
-	if (err != 1)
-		feature_rx_copy = 0;
-
-	if (!feature_rx_copy) {
+	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
 		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
 		return -ENODEV;
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index d6ff5e8..8fc2e95 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1038,10 +1038,8 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
 			err = -ENOMEM;
 			goto out;
 		}
-		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
-				   &state);
-		if (err != 1)
-			state = XenbusStateUnknown;
+		state = xenbus_read_unsigned(pdev->xdev->otherend, str,
+					     XenbusStateUnknown);

 		if (state != XenbusStateClosing)
 			continue;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9dc8687..9aa1fe1 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -79,10 +79,13 @@ struct vscsifrnt_shadow {
 	/* command between backend and frontend */
 	unsigned char act;
+	uint8_t nr_segments;
 	uint16_t rqid;
+	uint16_t ref_rqid;

 	unsigned int nr_grants;		/* number of grants in gref[] */
 	struct scsiif_request_segment *sg;	/* scatter/gather elements */
+	struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];

 	/* Do reset or abort function. */
 	wait_queue_head_t wq_reset;	/* reset work queue */
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
 	scsifront_wake_up(info);
 }

-static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+static int scsifront_do_request(struct vscsifrnt_info *info,
+				struct vscsifrnt_shadow *shadow)
 {
 	struct vscsiif_front_ring *ring = &(info->ring);
 	struct vscsiif_request *ring_req;
+	struct scsi_cmnd *sc = shadow->sc;
 	uint32_t id;
+	int i, notify;
+
+	if (RING_FULL(&info->ring))
+		return -EBUSY;

 	id = scsifront_get_rqid(info);	/* use id in response */
 	if (id >= VSCSIIF_MAX_REQS)
-		return NULL;
+		return -EBUSY;

-	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
+	info->shadow[id] = shadow;
+	shadow->rqid = id;

+	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
 	ring->req_prod_pvt++;

-	ring_req->rqid = (uint16_t)id;
+	ring_req->rqid        = id;
+	ring_req->act         = shadow->act;
+	ring_req->ref_rqid    = shadow->ref_rqid;
+	ring_req->nr_segments = shadow->nr_segments;

-	return ring_req;
-}
+	ring_req->id      = sc->device->id;
+	ring_req->lun     = sc->device->lun;
+	ring_req->channel = sc->device->channel;
+	ring_req->cmd_len = sc->cmd_len;

-static void scsifront_do_request(struct vscsifrnt_info *info)
-{
-	struct vscsiif_front_ring *ring = &(info->ring);
-	int notify;
+	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
+	ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+	for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
+		ring_req->seg[i] = shadow->seg[i];

 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
 	if (notify)
 		notify_remote_via_irq(info->irq);
+
+	return 0;
 }

-static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+static void scsifront_gnttab_done(struct vscsifrnt_info *info,
+				  struct vscsifrnt_shadow *shadow)
 {
-	struct vscsifrnt_shadow *s = info->shadow[id];
 	int i;

-	if (s->sc->sc_data_direction == DMA_NONE)
+	if (shadow->sc->sc_data_direction == DMA_NONE)
 		return;

-	for (i = 0; i < s->nr_grants; i++) {
-		if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+	for (i = 0; i < shadow->nr_grants; i++) {
+		if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
 			shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
				     "grant still in use by backend\n");
 			BUG();
 		}
-		gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+		gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
 	}

-	kfree(s->sg);
+	kfree(shadow->sg);
 }

 static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
				   struct vscsiif_response *ring_rsp)
 {
+	struct vscsifrnt_shadow *shadow;
 	struct scsi_cmnd *sc;
 	uint32_t id;
 	uint8_t sense_len;

 	id = ring_rsp->rqid;
-	sc = info->shadow[id]->sc;
+	shadow = info->shadow[id];
+	sc = shadow->sc;

 	BUG_ON(sc == NULL);

-	scsifront_gnttab_done(info, id);
+	scsifront_gnttab_done(info, shadow);
 	scsifront_put_rqid(info, id);

 	sc->result = ring_rsp->rslt;
@@ -366,7 +391,6 @@ static void scsifront_finish_all(struct vscsifrnt_info *info)

 static int map_data_for_request(struct vscsifrnt_info *info,
				struct scsi_cmnd *sc,
-				struct vscsiif_request *ring_req,
				struct vscsifrnt_shadow *shadow)
 {
 	grant_ref_t gref_head;
@@ -379,7 +403,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 	struct scatterlist *sg;
 	struct scsiif_request_segment *seg;

-	ring_req->nr_segments = 0;
 	if (sc->sc_data_direction == DMA_NONE || !data_len)
 		return 0;
@@ -398,7 +421,7 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 		if (!shadow->sg)
 			return -ENOMEM;
 	}
-	seg = shadow->sg ? : ring_req->seg;
+	seg = shadow->sg ? : shadow->seg;

 	err = gnttab_alloc_grant_references(seg_grants + data_grants,
					    &gref_head);
@@ -423,9 +446,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
					info->dev->otherend_id,
					xen_page_to_gfn(page), 1);
 			shadow->gref[ref_cnt] = ref;
-			ring_req->seg[ref_cnt].gref   = ref;
-			ring_req->seg[ref_cnt].offset = (uint16_t)off;
-			ring_req->seg[ref_cnt].length = (uint16_t)bytes;
+			shadow->seg[ref_cnt].gref   = ref;
+			shadow->seg[ref_cnt].offset = (uint16_t)off;
+			shadow->seg[ref_cnt].length = (uint16_t)bytes;

 			page++;
 			len -= bytes;
@@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 	}

 	if (seg_grants)
-		ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
+		shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
 	else
-		ring_req->nr_segments = (uint8_t)ref_cnt;
+		shadow->nr_segments = (uint8_t)ref_cnt;
 	shadow->nr_grants = ref_cnt;

 	return 0;
 }

-static struct vscsiif_request *scsifront_command2ring(
-	struct vscsifrnt_info *info, struct scsi_cmnd *sc,
-	struct vscsifrnt_shadow *shadow)
-{
-	struct vscsiif_request *ring_req;
-
-	memset(shadow, 0, sizeof(*shadow));
-
-	ring_req = scsifront_pre_req(info);
-	if (!ring_req)
-		return NULL;
-
-	info->shadow[ring_req->rqid] = shadow;
-	shadow->rqid = ring_req->rqid;
-
-	ring_req->id      = sc->device->id;
-	ring_req->lun     = sc->device->lun;
-	ring_req->channel = sc->device->channel;
-	ring_req->cmd_len = sc->cmd_len;
-
-	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
-
-	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
-
-	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
-	ring_req->timeout_per_command = sc->request->timeout / HZ;
-
-	return ring_req;
-}
-
 static int scsifront_enter(struct vscsifrnt_info *info)
 {
 	if (info->pause)
@@ -536,36 +529,25 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
				  struct scsi_cmnd *sc)
 {
 	struct vscsifrnt_info *info = shost_priv(shost);
-	struct vscsiif_request *ring_req;
 	struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
 	unsigned long flags;
 	int err;
-	uint16_t rqid;
+
+	sc->result = 0;
+	memset(shadow, 0, sizeof(*shadow));
+
+	shadow->sc  = sc;
+	shadow->act = VSCSIIF_ACT_SCSI_CDB;

 	spin_lock_irqsave(shost->host_lock, flags);
 	if (scsifront_enter(info)) {
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
-	if (RING_FULL(&info->ring))
-		goto busy;
-
-	ring_req = scsifront_command2ring(info, sc, shadow);
-	if (!ring_req)
-		goto busy;
-
-	sc->result = 0;
-
-	rqid = ring_req->rqid;
-	ring_req->act = VSCSIIF_ACT_SCSI_CDB;

-	shadow->sc  = sc;
-	shadow->act = VSCSIIF_ACT_SCSI_CDB;
-
-	err = map_data_for_request(info, sc, ring_req, shadow);
+	err = map_data_for_request(info, sc, shadow);
 	if (err < 0) {
 		pr_debug("%s: err %d\n", __func__, err);
-		scsifront_put_rqid(info, rqid);
 		scsifront_return(info);
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		if (err == -ENOMEM)
@@ -575,7 +557,11 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
 		return 0;
 	}

-	scsifront_do_request(info);
+	if (scsifront_do_request(info, shadow)) {
+		scsifront_gnttab_done(info, shadow);
+		goto busy;
+	}
+
 	scsifront_return(info);
 	spin_unlock_irqrestore(shost->host_lock, flags);
@@ -598,26 +584,30 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 	struct Scsi_Host *host = sc->device->host;
 	struct vscsifrnt_info *info = shost_priv(host);
 	struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
-	struct vscsiif_request *ring_req;
 	int err = 0;

-	shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
+	shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
 	if (!shadow)
 		return FAILED;

+	shadow->act = act;
+	shadow->rslt_reset = RSLT_RESET_WAITING;
+	shadow->sc = sc;
+	shadow->ref_rqid = s->rqid;
+	init_waitqueue_head(&shadow->wq_reset);
+
 	spin_lock_irq(host->host_lock);
 	for (;;) {
-		if (!RING_FULL(&info->ring)) {
-			ring_req = scsifront_command2ring(info, sc, shadow);
-			if (ring_req)
-				break;
-		}
-		if (err || info->pause) {
-			spin_unlock_irq(host->host_lock);
-			kfree(shadow);
-			return FAILED;
-		}
+		if (scsifront_enter(info))
+			goto fail;
+
+		if (!scsifront_do_request(info, shadow))
+			break;
+
+		scsifront_return(info);
+		if (err)
+			goto fail;
 		info->wait_ring_available = 1;
 		spin_unlock_irq(host->host_lock);
 		err = wait_event_interruptible(info->wq_sync,
@@ -625,22 +615,6 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 		spin_lock_irq(host->host_lock);
 	}

-	if (scsifront_enter(info)) {
-		spin_unlock_irq(host->host_lock);
-		return FAILED;
-	}
-
-	ring_req->act = act;
-	ring_req->ref_rqid = s->rqid;
-
-	shadow->act = act;
-	shadow->rslt_reset = RSLT_RESET_WAITING;
-	init_waitqueue_head(&shadow->wq_reset);
-
-	ring_req->nr_segments = 0;
-
-	scsifront_do_request(info);
-
 	spin_unlock_irq(host->host_lock);
 	err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
 	spin_lock_irq(host->host_lock);
@@ -659,6 +633,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 	scsifront_return(info);
 	spin_unlock_irq(host->host_lock);
 	return err;
+
+fail:
+	spin_unlock_irq(host->host_lock);
+	kfree(shadow);
+	return FAILED;
 }

 static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
@@ -1060,13 +1039,9 @@ static void scsifront_read_backend_params(struct xenbus_device *dev,
					  struct vscsifrnt_info *info)
 {
 	unsigned int sg_grant, nr_segs;
-	int ret;
 	struct Scsi_Host *host = info->host;

-	ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
-			   &sg_grant);
-	if (ret != 1)
-		sg_grant = 0;
+	sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0);
 	nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
 	nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
 	nr_segs = min_t(unsigned int, nr_segs,
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 0567d51..d0115a7 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -633,7 +633,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
				  enum xenbus_state backend_state)
 {
 	struct xenfb_info *info = dev_get_drvdata(&dev->dev);
-	int val;

 	switch (backend_state) {
 	case XenbusStateInitialising:
@@ -657,16 +656,12 @@ InitWait:
 		if (dev->state != XenbusStateConnected)
 			goto InitWait; /* no InitWait seen yet, fudge it */

-		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-				 "request-update", "%d", &val) < 0)
-			val = 0;
-		if (val)
+		if (xenbus_read_unsigned(info->xbdev->otherend,
					 "request-update", 0))
 			info->update_wanted = 1;

-		if (xenbus_scanf(XBT_NIL, dev->otherend,
-				 "feature-resize", "%d", &val) < 0)
-			val = 0;
-		info->feature_resize = val;
+		info->feature_resize = xenbus_read_unsigned(dev->otherend,
							"feature-resize", 0);
 		break;

 	case XenbusStateClosed:
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e4db19e..db107fa 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -180,7 +180,6 @@ static void __balloon_append(struct page *page)
 static void balloon_append(struct page *page)
 {
 	__balloon_append(page);
-	adjust_managed_page_count(page, -1);
 }

 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
@@ -201,8 +200,6 @@ static struct page *balloon_retrieve(bool require_lowmem)
 	else
 		balloon_stats.balloon_low--;

-	adjust_managed_page_count(page, 1);
-
 	return page;
 }
@@ -478,7 +475,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
#endif

 		/* Relinquish the page back to the allocator. */
-		__free_reserved_page(page);
+		free_reserved_page(page);
 	}

 	balloon_stats.current_pages += rc;
@@ -509,6 +506,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			state = BP_EAGAIN;
 			break;
 		}
+		adjust_managed_page_count(page, -1);
 		scrub_page(page);
 		list_add(&page->lru, &pages);
 	}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index adc19ce..fd8e872 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -947,7 +947,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
 			continue;
 		if (status.status != EVTCHNSTAT_virq)
 			continue;
-		if (status.u.virq == virq && status.vcpu == cpu) {
+		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
 			rc = port;
 			break;
 		}
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 7a47c4c..1bf55a3 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -127,18 +127,21 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
 	struct gntalloc_gref *gref, *next;

 	readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
-	rc = -ENOMEM;
 	for (i = 0; i < op->count; i++) {
 		gref = kzalloc(sizeof(*gref), GFP_KERNEL);
-		if (!gref)
+		if (!gref) {
+			rc = -ENOMEM;
 			goto undo;
+		}
 		list_add_tail(&gref->next_gref, &queue_gref);
 		list_add_tail(&gref->next_file, &queue_file);
 		gref->users = 1;
 		gref->file_index = op->index + i * PAGE_SIZE;
 		gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
-		if (!gref->page)
+		if (!gref->page) {
+			rc = -ENOMEM;
 			goto undo;
+		}

 		/* Grant foreign access to the page. */
 		rc = gnttab_grant_foreign_access(op->domid,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index bb95212..2ef2b61 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1007,7 +1007,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)

 	vma->vm_ops = &gntdev_vmops;

-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;

 	if (use_ptemod)
 		vma->vm_flags |= VM_DONTCOPY;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index b59c9455..112ce42 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -125,8 +125,4 @@ static struct pci_driver platform_driver = {
 	.id_table =       platform_pci_tbl,
 };

-static int __init platform_pci_init(void)
-{
-	return pci_register_driver(&platform_driver);
-}
-device_initcall(platform_pci_init);
+builtin_pci_driver(platform_driver);
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 5ce878c..3f0aee0 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -362,7 +362,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
 	int err = 0;
 	int num_devs;
 	int domain, bus, slot, func;
-	int substate;
+	unsigned int substate;
 	int i, len;
 	char state_str[64];
 	char dev_str[64];
@@ -395,10 +395,8 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
				 "configuration");
 			goto out;
 		}
-		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
-				   "%d", &substate);
-		if (err != 1)
-			substate = XenbusStateUnknown;
+		substate = xenbus_read_unsigned(pdev->xdev->nodename, state_str,
+						XenbusStateUnknown);

 		switch (substate) {
 		case XenbusStateInitialising:
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1e8be12..6c0ead4 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -538,6 +538,8 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)

 	nonseekable_open(inode, filp);

+	filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+
 	u = kzalloc(sizeof(*u), GFP_KERNEL);
 	if (u == NULL)
 		return -ENOMEM;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 33a31cf..4bdf654 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -702,7 +702,7 @@ device_initcall(xenbus_probe_initcall);
  */
 static int __init xenstored_local_init(void)
 {
-	int err = 0;
+	int err = -ENOMEM;
 	unsigned long page = 0;
 	struct evtchn_alloc_unbound alloc_unbound;
@@ -826,7 +826,7 @@ static int __init xenbus_init(void)
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
-	proc_mkdir("xen", NULL);
+	proc_create_mount_point("xen");
#endif

out_error:
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 04f7f85..37929df 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -224,13 +224,7 @@ static int read_frontend_details(struct xenbus_device *xendev)

 int xenbus_dev_is_online(struct xenbus_device *dev)
 {
-	int rc, val;
-
-	rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
-	if (rc != 1)
-		val = 0; /* no online node present */
-
-	return val;
+	return !!xenbus_read_unsigned(dev->nodename, "online", 0);
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 22f7cd7..6afb993 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -559,6 +559,21 @@ int xenbus_scanf(struct xenbus_transaction t,
 }
 EXPORT_SYMBOL_GPL(xenbus_scanf);

+/* Read an (optional) unsigned value. */
+unsigned int xenbus_read_unsigned(const char *dir, const char *node,
+				  unsigned int default_val)
+{
+	unsigned int val;
+	int ret;
+
+	ret = xenbus_scanf(XBT_NIL, dir, node, "%u", &val);
+	if (ret <= 0)
+		val = default_val;
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(xenbus_read_unsigned);
+
 /* Single printf and write: returns -errno or 0. */
 int xenbus_printf(struct xenbus_transaction t,
		  const char *dir, const char *node, const char *fmt, ...)
@@ -672,7 +687,7 @@ static bool xen_strict_xenbus_quirk(void)
 }

 static void xs_reset_watches(void)
 {
-	int err, supported = 0;
+	int err;

 	if (!xen_hvm_domain() || xen_initial_domain())
 		return;
@@ -680,9 +695,8 @@ static void xs_reset_watches(void)
 	if (xen_strict_xenbus_quirk())
 		return;

-	err = xenbus_scanf(XBT_NIL, "control",
-			"platform-feature-xs_reset_watches", "%d", &supported);
-	if (err != 1 || !supported)
+	if (!xenbus_read_unsigned("control",
+				  "platform-feature-xs_reset_watches", 0))
 		return;

 	err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 5f2dc20..7eb3cef 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -479,6 +479,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
 	}
 	return ent;
 }
+EXPORT_SYMBOL(proc_create_mount_point);

 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
					struct proc_dir_entry *parent,
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index bbba5d2..109876a2 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -195,7 +195,6 @@ static inline bool is_empty_pde(const struct proc_dir_entry *pde)
 {
 	return S_ISDIR(pde->mode) && !pde->proc_iops;
 }
-struct proc_dir_entry *proc_create_mount_point(const char *name);

 /*
  * inode.c
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 368c7ad..2d2bf59 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -21,6 +21,7 @@ extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
					      struct proc_dir_entry *, void *);
 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
					      struct proc_dir_entry *);
+struct proc_dir_entry *proc_create_mount_point(const char *name);

 extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
					       struct proc_dir_entry *,
@@ -56,6 +57,7 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
		struct proc_dir_entry *parent,const char *dest) { return NULL;}
 static inline struct proc_dir_entry *proc_mkdir(const char *name,
	struct proc_dir_entry *parent) {return NULL;}
NULL;} +static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; } static inline struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h new file mode 100644 index 0000000..9d874db --- /dev/null +++ b/include/xen/arm/hypercall.h @@ -0,0 +1,87 @@ +/****************************************************************************** + * hypercall.h + * + * Linux-specific hypervisor handling. + * + * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef _ASM_ARM_XEN_HYPERCALL_H +#define _ASM_ARM_XEN_HYPERCALL_H + +#include <linux/bug.h> + +#include <xen/interface/xen.h> +#include <xen/interface/sched.h> +#include <xen/interface/platform.h> + +long privcmd_call(unsigned call, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5); +int HYPERVISOR_xen_version(int cmd, void *arg); +int HYPERVISOR_console_io(int cmd, int count, char *str); +int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); +int HYPERVISOR_sched_op(int cmd, void *arg); +int HYPERVISOR_event_channel_op(int cmd, void *arg); +unsigned long HYPERVISOR_hvm_op(int op, void *arg); +int HYPERVISOR_memory_op(unsigned int cmd, void *arg); +int HYPERVISOR_physdev_op(int cmd, void *arg); +int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); +int HYPERVISOR_tmem_op(void *arg); +int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type); +int HYPERVISOR_platform_op_raw(void *arg); +static inline int HYPERVISOR_platform_op(struct xen_platform_op *op) +{ + op->interface_version = XENPF_INTERFACE_VERSION; + return HYPERVISOR_platform_op_raw(op); +} +int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr); + +static inline int +HYPERVISOR_suspend(unsigned long start_info_mfn) +{ + struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; + + /* start_info_mfn is unused on ARM */ + return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); +} + +static inline void +MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, + unsigned int new_val, unsigned long flags) +{ + BUG(); +} + +static inline void +MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, + int count, int *success_count, domid_t domid) +{ + BUG(); +} + +#endif /* _ASM_ARM_XEN_HYPERCALL_H */ diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h new file mode 100644 index 0000000..9525151 --- /dev/null +++ b/include/xen/arm/hypervisor.h @@ -0,0 +1,39 @@ +#ifndef _ASM_ARM_XEN_HYPERVISOR_H +#define _ASM_ARM_XEN_HYPERVISOR_H + +#include <linux/init.h> + +extern struct shared_info *HYPERVISOR_shared_info; +extern struct start_info *xen_start_info; + +/* Lazy mode for batching updates / context switch */ +enum paravirt_lazy_mode { + PARAVIRT_LAZY_NONE, + PARAVIRT_LAZY_MMU, + PARAVIRT_LAZY_CPU, +}; + +static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) +{ + return PARAVIRT_LAZY_NONE; +} + +extern struct dma_map_ops *xen_dma_ops; + +#ifdef CONFIG_XEN +void __init xen_early_init(void); +#else +static inline void xen_early_init(void) { return; } +#endif + +#ifdef CONFIG_HOTPLUG_CPU +static inline void xen_arch_register_cpu(int num) +{ +} + +static inline void xen_arch_unregister_cpu(int num) +{ +} +#endif + +#endif /* _ASM_ARM_XEN_HYPERVISOR_H */ diff --git a/include/xen/arm/interface.h b/include/xen/arm/interface.h new file mode 100644 index 0000000..75d5968 --- /dev/null +++ b/include/xen/arm/interface.h @@ -0,0 +1,85 @@ +/****************************************************************************** + * Guest OS interface to ARM Xen. 
+ * + * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 + */ + +#ifndef _ASM_ARM_XEN_INTERFACE_H +#define _ASM_ARM_XEN_INTERFACE_H + +#include <linux/types.h> + +#define uint64_aligned_t uint64_t __attribute__((aligned(8))) + +#define __DEFINE_GUEST_HANDLE(name, type) \ + typedef struct { union { type *p; uint64_aligned_t q; }; } \ + __guest_handle_ ## name + +#define DEFINE_GUEST_HANDLE_STRUCT(name) \ + __DEFINE_GUEST_HANDLE(name, struct name) +#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) +#define GUEST_HANDLE(name) __guest_handle_ ## name + +#define set_xen_guest_handle(hnd, val) \ + do { \ + if (sizeof(hnd) == 8) \ + *(uint64_t *)&(hnd) = 0; \ + (hnd).p = val; \ + } while (0) + +#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op + +#ifndef __ASSEMBLY__ +/* Explicitly size integers that represent pfns in the interface with + * Xen so that we can have one ABI that works for 32 and 64 bit guests. + * Note that this means that the xen_pfn_t type may be capable of + * representing pfn's which the guest cannot represent in its own pfn + * type. However since pfn space is controlled by the guest this is + * fine since it simply wouldn't be able to create any sure pfns in + * the first place. + */ +typedef uint64_t xen_pfn_t; +#define PRI_xen_pfn "llx" +typedef uint64_t xen_ulong_t; +#define PRI_xen_ulong "llx" +typedef int64_t xen_long_t; +#define PRI_xen_long "llx" +/* Guest handles for primitive C types. */ +__DEFINE_GUEST_HANDLE(uchar, unsigned char); +__DEFINE_GUEST_HANDLE(uint, unsigned int); +DEFINE_GUEST_HANDLE(char); +DEFINE_GUEST_HANDLE(int); +DEFINE_GUEST_HANDLE(void); +DEFINE_GUEST_HANDLE(uint64_t); +DEFINE_GUEST_HANDLE(uint32_t); +DEFINE_GUEST_HANDLE(xen_pfn_t); +DEFINE_GUEST_HANDLE(xen_ulong_t); + +/* Maximum number of virtual CPUs in multi-processor guests. 
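The guest-handle macros above keep hypercall argument layouts identical for 32-bit and 64-bit guests: a handle is always an 8-byte, 8-byte-aligned slot, and set_xen_guest_handle() clears the whole slot before storing the pointer. A small sketch, with an invented struct name, showing the expansion in use:

	#include <linux/types.h>
	#include <xen/interface/xen.h>

	/* "xen_example" is an invented type, used only to show the macros in use */
	struct xen_example { uint32_t value; };
	DEFINE_GUEST_HANDLE_STRUCT(xen_example);	/* typedefs __guest_handle_xen_example */

	struct xen_example_op {
		GUEST_HANDLE(xen_example) buf;		/* always an 8-byte, aligned slot */
		uint32_t nr_entries;
	};

	static void example_fill(struct xen_example_op *op,
				 struct xen_example *array, uint32_t n)
	{
		/* zeroes the full 64-bit slot, then stores the (possibly 32-bit) pointer */
		set_xen_guest_handle(op->buf, array);
		op->nr_entries = n;
	}
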
*/ +#define MAX_VIRT_CPUS 1 + +struct arch_vcpu_info { }; +struct arch_shared_info { }; + +/* TODO: Move pvclock definitions some place arch independent */ +struct pvclock_vcpu_time_info { + u32 version; + u32 pad0; + u64 tsc_timestamp; + u64 system_time; + u32 tsc_to_system_mul; + s8 tsc_shift; + u8 flags; + u8 pad[2]; +} __attribute__((__packed__)); /* 32 bytes */ + +/* It is OK to have a 12 bytes struct with no padding because it is packed */ +struct pvclock_wall_clock { + u32 version; + u32 sec; + u32 nsec; + u32 sec_hi; +} __attribute__((__packed__)); +#endif + +#endif /* _ASM_ARM_XEN_INTERFACE_H */ diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h new file mode 100644 index 0000000..95ce6ac --- /dev/null +++ b/include/xen/arm/page-coherent.h @@ -0,0 +1,98 @@ +#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H +#define _ASM_ARM_XEN_PAGE_COHERENT_H + +#include <asm/page.h> +#include <linux/dma-mapping.h> + +void __xen_dma_map_page(struct device *hwdev, struct page *page, + dma_addr_t dev_addr, unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs); +void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs); +void __xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir); + +void __xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir); + +static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) +{ + return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); +} + +static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) +{ + __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); +} + +static inline void xen_dma_map_page(struct device *hwdev, struct page *page, + dma_addr_t dev_addr, unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + unsigned long page_pfn = page_to_xen_pfn(page); + unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); + unsigned long compound_pages = + (1<<compound_order(page)) * XEN_PFN_PER_PAGE; + bool local = (page_pfn <= dev_pfn) && + (dev_pfn - page_pfn < compound_pages); + + /* + * Dom0 is mapped 1:1, while the Linux page can span across + * multiple Xen pages, it's not possible for it to contain a + * mix of local and foreign Xen pages. So if the first xen_pfn + * == mfn the page is local otherwise it's a foreign page + * grant-mapped in dom0. If the page is local we can safely + * call the native dma_ops function, otherwise we call the xen + * specific function. + */ + if (local) + __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); + else + __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); +} + +static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + unsigned long pfn = PFN_DOWN(handle); + /* + * Dom0 is mapped 1:1, while the Linux page can be spanned accross + * multiple Xen page, it's not possible to have a mix of local and + * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a + * foreign mfn will always return false. If the page is local we can + * safely call the native dma_ops function, otherwise we call the xen + * specific function. 
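To make the local/foreign test in xen_dma_map_page() above concrete, here is a sketch with assumed numbers: a 64 KB kernel PAGE_SIZE (so XEN_PFN_PER_PAGE = 64 KB / 4 KB = 16) and an order-2 compound page, giving (1 << 2) * 16 = 64 Xen frames behind the Linux page:

	#include <linux/types.h>

	/* assumed: 64 KB kernel pages and an order-2 compound allocation; dev_addr
	 * is "local" only when its 4 KB frame lies among the 64 frames backing the
	 * page, otherwise it is a grant mapping of a foreign page */
	static bool example_dev_addr_is_local(unsigned long page_pfn,
					      unsigned long dev_pfn)
	{
		unsigned long frames = (1 << 2) * 16;	/* order 2, 16 Xen frames per page */

		return page_pfn <= dev_pfn && dev_pfn - page_pfn < frames;
	}
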
+ */ + if (pfn_valid(pfn)) { + if (__generic_dma_ops(hwdev)->unmap_page) + __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); + } else + __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); +} + +static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + unsigned long pfn = PFN_DOWN(handle); + if (pfn_valid(pfn)) { + if (__generic_dma_ops(hwdev)->sync_single_for_cpu) + __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); + } else + __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); +} + +static inline void xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + unsigned long pfn = PFN_DOWN(handle); + if (pfn_valid(pfn)) { + if (__generic_dma_ops(hwdev)->sync_single_for_device) + __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); + } else + __xen_dma_sync_single_for_device(hwdev, handle, size, dir); +} + +#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h new file mode 100644 index 0000000..415dbc6 --- /dev/null +++ b/include/xen/arm/page.h @@ -0,0 +1,122 @@ +#ifndef _ASM_ARM_XEN_PAGE_H +#define _ASM_ARM_XEN_PAGE_H + +#include <asm/page.h> +#include <asm/pgtable.h> + +#include <linux/pfn.h> +#include <linux/types.h> +#include <linux/dma-mapping.h> + +#include <xen/xen.h> +#include <xen/interface/grant_table.h> + +#define phys_to_machine_mapping_valid(pfn) (1) + +/* Xen machine address */ +typedef struct xmaddr { + phys_addr_t maddr; +} xmaddr_t; + +/* Xen pseudo-physical address */ +typedef struct xpaddr { + phys_addr_t paddr; +} xpaddr_t; + +#define XMADDR(x) ((xmaddr_t) { .maddr = (x) }) +#define XPADDR(x) ((xpaddr_t) { .paddr = (x) }) + +#define INVALID_P2M_ENTRY (~0UL) + +/* + * The pseudo-physical frame (pfn) used in all the helpers is always based + * on Xen page granularity (i.e 4KB). + * + * A Linux page may be split across multiple non-contiguous Xen page so we + * have to keep track with frame based on 4KB page granularity. + * + * PV drivers should never make a direct usage of those helpers (particularly + * pfn_to_gfn and gfn_to_pfn). + */ + +unsigned long __pfn_to_mfn(unsigned long pfn); +extern struct rb_root phys_to_mach; + +/* Pseudo-physical <-> Guest conversion */ +static inline unsigned long pfn_to_gfn(unsigned long pfn) +{ + return pfn; +} + +static inline unsigned long gfn_to_pfn(unsigned long gfn) +{ + return gfn; +} + +/* Pseudo-physical <-> BUS conversion */ +static inline unsigned long pfn_to_bfn(unsigned long pfn) +{ + unsigned long mfn; + + if (phys_to_mach.rb_node != NULL) { + mfn = __pfn_to_mfn(pfn); + if (mfn != INVALID_P2M_ENTRY) + return mfn; + } + + return pfn; +} + +static inline unsigned long bfn_to_pfn(unsigned long bfn) +{ + return bfn; +} + +#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn) + +/* VIRT <-> GUEST conversion */ +#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT)) +#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT)) + +/* Only used in PV code. But ARM guests are always HVM. */ +static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) +{ + BUG(); +} + +/* TODO: this shouldn't be here but it is because the frontend drivers + * are using it (its rolled in headers) even though we won't hit the code path. + * So for right now just punt with this. 
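The frame helpers above deliberately work in Xen's 4 KB granularity even when the kernel page size is larger, so a driver can pass virt_to_gfn() straight to the grant-table API, much as shared-ring pages are typically granted. A minimal sketch with a hypothetical helper name:

	#include <xen/grant_table.h>
	#include <xen/page.h>
	#include <xen/xenbus.h>

	/* sketch (hypothetical helper): grant the peer domain read/write access to
	 * the single 4 KB Xen frame behind vaddr; returns a grant ref or -errno */
	static int example_grant_one_frame(struct xenbus_device *dev, void *vaddr)
	{
		return gnttab_grant_foreign_access(dev->otherend_id,
						   virt_to_gfn(vaddr), 0 /* writable */);
	}
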
+ */ +static inline pte_t *lookup_address(unsigned long address, unsigned int *level) +{ + BUG(); + return NULL; +} + +extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, + struct gnttab_map_grant_ref *kmap_ops, + struct page **pages, unsigned int count); + +extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, + struct gnttab_unmap_grant_ref *kunmap_ops, + struct page **pages, unsigned int count); + +bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); +bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn, + unsigned long nr_pages); + +static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) +{ + return __set_phys_to_machine(pfn, mfn); +} + +#define xen_remap(cookie, size) ioremap_cache((cookie), (size)) +#define xen_unmap(cookie) iounmap((cookie)) + +bool xen_arch_need_swiotlb(struct device *dev, + phys_addr_t phys, + dma_addr_t dev_addr); +unsigned long xen_get_swiotlb_free_pages(unsigned int order); + +#endif /* _ASM_ARM_XEN_PAGE_H */ diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index 32b944b..271ba62 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -151,6 +151,10 @@ __scanf(4, 5) int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...); +/* Read an (optional) unsigned value. */ +unsigned int xenbus_read_unsigned(const char *dir, const char *node, + unsigned int default_val); + /* Single printf and write: returns -errno or 0. */ __printf(4, 5) int xenbus_printf(struct xenbus_transaction t, |