-rw-r--r--  sys/amd64/include/xen/xen-os.h | 31
-rw-r--r--  sys/i386/include/xen/xen-os.h | 27
-rw-r--r--  sys/xen/hvm.h | 94
-rw-r--r--  sys/xen/interface/arch-arm.h | 176
-rw-r--r--  sys/xen/interface/arch-arm/hvm/save.h | 39
-rw-r--r--  sys/xen/interface/arch-ia64.h | 20
-rw-r--r--  sys/xen/interface/arch-ia64/debug_op.h | 99
-rw-r--r--  sys/xen/interface/arch-ia64/hvm/memmap.h | 91
-rw-r--r--  sys/xen/interface/arch-ia64/hvm/save.h | 208
-rw-r--r--  sys/xen/interface/arch-ia64/sioemu.h | 92
-rw-r--r--  sys/xen/interface/arch-x86/cpuid.h | 2
-rw-r--r--  sys/xen/interface/arch-x86/hvm/save.h | 194
-rw-r--r--  sys/xen/interface/arch-x86/xen-mca.h | 207
-rw-r--r--  sys/xen/interface/arch-x86/xen-x86_32.h | 18
-rw-r--r--  sys/xen/interface/arch-x86/xen-x86_64.h | 12
-rw-r--r--  sys/xen/interface/arch-x86/xen.h | 54
-rw-r--r--  sys/xen/interface/arch-x86_32.h | 2
-rw-r--r--  sys/xen/interface/arch-x86_64.h | 16
-rw-r--r--  sys/xen/interface/domctl.h | 476
-rw-r--r--  sys/xen/interface/elfnote.h | 32
-rw-r--r--  sys/xen/interface/event_channel.h | 84
-rw-r--r--  sys/xen/interface/features.h | 21
-rw-r--r--  sys/xen/interface/grant_table.h | 295
-rw-r--r--  sys/xen/interface/hvm/hvm_info_table.h | 33
-rw-r--r--  sys/xen/interface/hvm/hvm_op.h | 160
-rw-r--r--  sys/xen/interface/hvm/ioreq.h | 63
-rw-r--r--  sys/xen/interface/hvm/params.h | 80
-rw-r--r--  sys/xen/interface/hvm/save.h | 29
-rw-r--r--  sys/xen/interface/hvm/vmx_assist.h | 122
-rw-r--r--  sys/xen/interface/io/blkif.h | 4
-rw-r--r--  sys/xen/interface/io/fsif.h | 192
-rw-r--r--  sys/xen/interface/io/libxenvchan.h | 97
-rw-r--r--  sys/xen/interface/io/netif.h | 2
-rw-r--r--  sys/xen/interface/io/pciif.h | 37
-rw-r--r--  sys/xen/interface/io/protocols.h | 6
-rw-r--r--  sys/xen/interface/io/ring.h | 22
-rw-r--r--  sys/xen/interface/io/usbif.h | 151
-rw-r--r--  sys/xen/interface/io/vscsiif.h | 105
-rw-r--r--  sys/xen/interface/io/xs_wire.h | 6
-rw-r--r--  sys/xen/interface/kexec.h | 21
-rw-r--r--  sys/xen/interface/mem_event.h | 80
-rw-r--r--  sys/xen/interface/memory.h | 148
-rw-r--r--  sys/xen/interface/nmi.h | 2
-rw-r--r--  sys/xen/interface/physdev.h | 135
-rw-r--r--  sys/xen/interface/platform.h | 203
-rw-r--r--  sys/xen/interface/sched.h | 24
-rw-r--r--  sys/xen/interface/sysctl.h | 409
-rw-r--r--  sys/xen/interface/tmem.h | 148
-rw-r--r--  sys/xen/interface/trace.h | 58
-rw-r--r--  sys/xen/interface/vcpu.h | 39
-rw-r--r--  sys/xen/interface/version.h | 3
-rw-r--r--  sys/xen/interface/xen-compat.h | 2
-rw-r--r--  sys/xen/interface/xen.h | 234
-rw-r--r--  sys/xen/interface/xenoprof.h | 18
-rw-r--r--  sys/xen/interface/xsm/flask_op.h | 193
-rw-r--r--  sys/xen/xenstore/xenstore.c | 1
56 files changed, 4568 insertions, 549 deletions
diff --git a/sys/amd64/include/xen/xen-os.h b/sys/amd64/include/xen/xen-os.h
index 6941558..8974323 100644
--- a/sys/amd64/include/xen/xen-os.h
+++ b/sys/amd64/include/xen/xen-os.h
@@ -13,16 +13,21 @@
#define CONFIG_X86_PAE
#endif
+#ifdef LOCORE
+#define __ASSEMBLY__
+#endif
+
#if !defined(__XEN_INTERFACE_VERSION__)
-/*
- * Can update to a more recent version when we implement
- * the hypercall page
- */
-#define __XEN_INTERFACE_VERSION__ 0x00030204
+#define __XEN_INTERFACE_VERSION__ 0x00030208
#endif
+#define GRANT_REF_INVALID 0xffffffff
+
#include <xen/interface/xen.h>
+/* Everything below this point is not included by assembler (.S) files. */
+#ifndef __ASSEMBLY__
+
/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);
@@ -43,10 +48,6 @@ static inline void rep_nop(void)
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
-
-/* Everything below this point is not included by assembler (.S) files. */
-#ifndef __ASSEMBLY__
-
void printk(const char *fmt, ...);
/* some function prototypes */
@@ -128,14 +129,14 @@ do { \
#else
#endif
-#ifndef mb
-#define mb() __asm__ __volatile__("mfence":::"memory")
+#ifndef xen_mb
+#define xen_mb() mb()
#endif
-#ifndef rmb
-#define rmb() __asm__ __volatile__("lfence":::"memory");
+#ifndef xen_rmb
+#define xen_rmb() rmb()
#endif
-#ifndef wmb
-#define wmb() barrier()
+#ifndef xen_wmb
+#define xen_wmb() wmb()
#endif
#ifdef SMP
#define smp_mb() mb()
diff --git a/sys/i386/include/xen/xen-os.h b/sys/i386/include/xen/xen-os.h
index e90109d..257202e 100644
--- a/sys/i386/include/xen/xen-os.h
+++ b/sys/i386/include/xen/xen-os.h
@@ -12,16 +12,21 @@
#define CONFIG_X86_PAE
#endif
+#ifdef LOCORE
+#define __ASSEMBLY__
+#endif
+
#if !defined(__XEN_INTERFACE_VERSION__)
-/*
- * Can update to a more recent version when we implement
- * the hypercall page
- */
-#define __XEN_INTERFACE_VERSION__ 0x00030204
+#define __XEN_INTERFACE_VERSION__ 0x00030208
#endif
+#define GRANT_REF_INVALID 0xffffffff
+
#include <xen/interface/xen.h>
+/* Everything below this point is not included by assembler (.S) files. */
+#ifndef __ASSEMBLY__
+
/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);
@@ -85,9 +90,6 @@ static inline void rep_nop(void)
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
-
-/* Everything below this point is not included by assembler (.S) files. */
-#ifndef __ASSEMBLY__
#include <sys/types.h>
void printk(const char *fmt, ...);
@@ -168,6 +170,15 @@ do { \
#endif
+#ifndef xen_mb
+#define xen_mb() mb()
+#endif
+#ifndef xen_rmb
+#define xen_rmb() rmb()
+#endif
+#ifndef xen_wmb
+#define xen_wmb() wmb()
+#endif
#ifdef SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
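Both xen-os.h headers now route the interface's xen_mb()/xen_rmb()/xen_wmb() through the kernel's native mb()/rmb()/wmb() instead of open-coding fence instructions. A minimal sketch of why the shared-ring code needs an explicit write barrier; the ring layout and the __sync_synchronize() stub are illustrative only, not code from the tree:

    #include <stdint.h>

    /* Stand-in for the renamed wrapper; in the kernel, xen_wmb() now
     * expands to the native wmb(). */
    #define xen_wmb()   __sync_synchronize()

    /* Hypothetical single-producer request ring. */
    struct demo_sring {
        volatile uint32_t req_prod;   /* producer index polled by the backend */
        uint32_t          req[16];    /* request slots */
    };

    static void
    demo_ring_put(struct demo_sring *s, uint32_t prod, uint32_t val)
    {
        s->req[prod & 15] = val;  /* fill the slot first */
        xen_wmb();                /* payload must be visible before the index */
        s->req_prod = prod + 1;   /* the backend may consume from here on */
    }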
diff --git a/sys/xen/hvm.h b/sys/xen/hvm.h
new file mode 100644
index 0000000..338a107
--- /dev/null
+++ b/sys/xen/hvm.h
@@ -0,0 +1,94 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __XEN_HVM_H__
+#define __XEN_HVM_H__
+
+#include <xen/interface/hvm/params.h>
+
+/**
+ * \brief Wrapper function to obtain a HVM parameter value.
+ *
+ * \param index HVM parameter index; see <xen/interface/hvm/params.h>.
+ *
+ * \returns 0 on failure; the value of the parameter otherwise.
+ */
+static inline unsigned long
+hvm_get_parameter(int index)
+{
+ struct xen_hvm_param xhv;
+ int error;
+
+ xhv.domid = DOMID_SELF;
+ xhv.index = index;
+ error = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
+ if (error) {
+ printf("%s: error %d trying to get %d\n", __func__,
+ error, index);
+ return (0);
+ }
+ return (xhv.value);
+}
+
+/** The callback method types for Hypervisor event delivery to our domain. */
+enum {
+ HVM_CB_TYPE_GSI,
+ HVM_CB_TYPE_PCI_INTX,
+ HVM_CB_TYPE_VECTOR,
+ HVM_CB_TYPE_MASK = 0xFF,
+ HVM_CB_TYPE_SHIFT = 56
+};
+
+/** Format for specifying a GSI type callback. */
+enum {
+ HVM_CB_GSI_GSI_MASK = 0xFFFFFFFF,
+ HVM_CB_GSI_GSI_SHIFT = 0
+};
+#define HVM_CALLBACK_GSI(gsi) \
+ (((uint64_t)HVM_CB_TYPE_GSI << HVM_CB_TYPE_SHIFT) \
+ | ((gsi) & HVM_CB_GSI_GSI_MASK) << HVM_CB_GSI_GSI_SHIFT)
+
+/** Format for specifying a virtual PCI interrupt line GSI style callback. */
+enum {
+ HVM_CB_PCI_INTX_INTPIN_MASK = 0x3,
+ HVM_CB_PCI_INTX_INTPIN_SHIFT = 0,
+ HVM_CB_PCI_INTX_SLOT_MASK = 0x1F,
+ HVM_CB_PCI_INTX_SLOT_SHIFT = 11,
+};
+#define HVM_CALLBACK_PCI_INTX(slot, pin) \
+ (((uint64_t)HVM_CB_TYPE_PCI_INTX << HVM_CB_TYPE_SHIFT) \
+ | (((slot) & HVM_CB_PCI_INTX_SLOT_MASK) << HVM_CB_PCI_INTX_SLOT_SHIFT) \
+ | (((pin) & HVM_CB_PCI_INTX_INTPIN_MASK) << HVM_CB_PCI_INTX_INTPIN_SHIFT))
+
+/** Format for specifying a direct IDT vector injection style callback. */
+enum {
+ HVM_CB_VECTOR_VECTOR_MASK = 0xFFFFFFFF,
+ HVM_CB_VECTOR_VECTOR_SHIFT = 0
+};
+#define HVM_CALLBACK_VECTOR(vector) \
+ (((uint64_t)HVM_CB_TYPE_VECTOR << HVM_CB_TYPE_SHIFT) \
+ | (((vector) & HVM_CB_GSI_GSI_MASK) << HVM_CB_GSI_GSI_SHIFT))
+
+void xen_hvm_set_callback(device_t);
+void xen_hvm_suspend(void);
+void xen_hvm_resume(void);
+#endif /* __XEN_HVM_H__ */
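A hedged sketch of how the pieces above fit together: hvm_get_parameter() reads an HVM parameter, and the HVM_CALLBACK_* macros build the value written to HVM_PARAM_CALLBACK_IRQ to select how Xen delivers events. HVM_PARAM_CALLBACK_IRQ, HVM_PARAM_STORE_PFN, HVMOP_set_param and HYPERVISOR_hvm_op come from the Xen public headers; the error handling and the store-page check are illustrative, not what xen_hvm_set_callback() actually does:

    /* Sketch only -- not the FreeBSD implementation. */
    static int
    demo_set_callback_vector(uint8_t vector)
    {
        struct xen_hvm_param xhv;

        /* Sanity check: the XenStore page parameter should already be set. */
        if (hvm_get_parameter(HVM_PARAM_STORE_PFN) == 0)
            return (ENXIO);

        xhv.domid = DOMID_SELF;
        xhv.index = HVM_PARAM_CALLBACK_IRQ;
        xhv.value = HVM_CALLBACK_VECTOR(vector);
        return (HYPERVISOR_hvm_op(HVMOP_set_param, &xhv));
    }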
diff --git a/sys/xen/interface/arch-arm.h b/sys/xen/interface/arch-arm.h
new file mode 100644
index 0000000..f18bafa
--- /dev/null
+++ b/sys/xen/interface/arch-arm.h
@@ -0,0 +1,176 @@
+/******************************************************************************
+ * arch-arm.h
+ *
+ * Guest OS interface to ARM Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright 2011 (C) Citrix Systems
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_ARM_H__
+#define __XEN_PUBLIC_ARCH_ARM_H__
+
+/* hypercall calling convention
+ * ----------------------------
+ *
+ * A hypercall is issued using the ARM HVC instruction.
+ *
+ * A hypercall can take up to 5 arguments. These are passed in
+ * registers, the first argument in r0, the second argument in r1, the
+ * third in r2, the fourth in r3 and the fifth in r4.
+ *
+ * The hypercall number is passed in r12.
+ *
+ * The HVC ISS must contain a Xen specific TAG: XEN_HYPERCALL_TAG.
+ *
+ * The return value is in r0.
+ *
+ * The hypercall will clobber r12 and the argument registers used by
+ * that hypercall (except r0 which is the return value) i.e. a 2
+ * argument hypercall will clobber r1 and a 4 argument hypercall will
+ * clobber r1, r2 and r3.
+ *
+ */
+
+#define XEN_HYPERCALL_TAG 0XEA1
+
+
+#ifndef __ASSEMBLY__
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } __guest_handle_ ## name
+
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+ ___DEFINE_XEN_GUEST_HANDLE(name, type); \
+ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
+#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
+#ifdef __XEN_TOOLS__
+#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
+#endif
+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
+
+struct cpu_user_regs
+{
+ uint32_t r0;
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+ uint32_t r5;
+ uint32_t r6;
+ uint32_t r7;
+ uint32_t r8;
+ uint32_t r9;
+ uint32_t r10;
+ union {
+ uint32_t r11;
+ uint32_t fp;
+ };
+ uint32_t r12;
+
+ uint32_t sp; /* r13 - SP: Valid for Hyp. frames only, o/w banked (see below) */
+
+ /* r14 - LR: is the same physical register as LR_usr */
+ union {
+ uint32_t lr; /* r14 - LR: Valid for Hyp. Same physical register as lr_usr. */
+ uint32_t lr_usr;
+ };
+
+ uint32_t pc; /* Return IP */
+ uint32_t cpsr; /* Return mode */
+ uint32_t pad0; /* Doubleword-align the kernel half of the frame */
+
+ /* Outer guest frame only from here on... */
+
+ uint32_t r8_fiq, r9_fiq, r10_fiq, r11_fiq, r12_fiq;
+
+ uint32_t sp_usr; /* LR_usr is the same register as LR, see above */
+
+ uint32_t sp_svc, sp_abt, sp_und, sp_irq, sp_fiq;
+ uint32_t lr_svc, lr_abt, lr_und, lr_irq, lr_fiq;
+
+ uint32_t spsr_svc, spsr_abt, spsr_und, spsr_irq, spsr_fiq;
+
+ uint32_t pad1; /* Doubleword-align the user half of the frame */
+};
+typedef struct cpu_user_regs cpu_user_regs_t;
+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+
+typedef uint64_t xen_pfn_t;
+#define PRI_xen_pfn PRIx64
+
+/* Maximum number of virtual CPUs in legacy multi-processor guests. */
+/* Only one. All other VCPUS must use VCPUOP_register_vcpu_info */
+#define XEN_LEGACY_MAX_VCPUS 1
+
+typedef uint32_t xen_ulong_t;
+
+struct vcpu_guest_context {
+ struct cpu_user_regs user_regs; /* User-level CPU registers */
+
+ uint32_t sctlr;
+ uint32_t ttbr0, ttbr1, ttbcr;
+};
+typedef struct vcpu_guest_context vcpu_guest_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+struct arch_vcpu_info { };
+typedef struct arch_vcpu_info arch_vcpu_info_t;
+
+struct arch_shared_info { };
+typedef struct arch_shared_info arch_shared_info_t;
+typedef uint64_t xen_callback_t;
+
+#endif /* ifndef __ASSEMBLY__ */
+
+/* PSR bits (CPSR, SPSR)*/
+
+/* 0-4: Mode */
+#define PSR_MODE_MASK 0x1f
+#define PSR_MODE_USR 0x10
+#define PSR_MODE_FIQ 0x11
+#define PSR_MODE_IRQ 0x12
+#define PSR_MODE_SVC 0x13
+#define PSR_MODE_MON 0x16
+#define PSR_MODE_ABT 0x17
+#define PSR_MODE_HYP 0x1a
+#define PSR_MODE_UND 0x1b
+#define PSR_MODE_SYS 0x1f
+
+#define PSR_THUMB (1<<5) /* Thumb Mode enable */
+#define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */
+#define PSR_IRQ_MASK (1<<7) /* Interrupt mask */
+#define PSR_ABT_MASK (1<<8) /* Asynchronous Abort mask */
+#define PSR_BIG_ENDIAN (1<<9) /* Big Endian Mode */
+#define PSR_JAZELLE (1<<24) /* Jazelle Mode */
+
+#endif /* __XEN_PUBLIC_ARCH_ARM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
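The hypercall convention documented in the comment block of arch-arm.h can be illustrated with a small inline-assembly stub: r12 carries the hypercall number, r0-r4 the arguments, the HVC immediate is XEN_HYPERCALL_TAG, and the result comes back in r0. This is only a sketch of the documented convention for ARMv7 with the virtualization extensions, not code from the FreeBSD or Xen trees:

    /* Two-argument hypercall sketch (GCC extended asm). */
    static inline long
    demo_arm_hypercall2(unsigned long nr, unsigned long a0, unsigned long a1)
    {
        register unsigned long r0  __asm__("r0")  = a0;  /* arg 1 / return value */
        register unsigned long r1  __asm__("r1")  = a1;  /* arg 2, clobbered */
        register unsigned long r12 __asm__("r12") = nr;  /* hypercall number */

        __asm__ __volatile__(
            "hvc #0xEA1"            /* XEN_HYPERCALL_TAG */
            : "+r" (r0), "+r" (r1), "+r" (r12)
            :
            : "memory");
        return (r0);
    }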
diff --git a/sys/xen/interface/arch-arm/hvm/save.h b/sys/xen/interface/arch-arm/hvm/save.h
new file mode 100644
index 0000000..ec61298
--- /dev/null
+++ b/sys/xen/interface/arch-arm/hvm/save.h
@@ -0,0 +1,39 @@
+/*
+ * Structure definitions for HVM state that is held by Xen and must
+ * be saved along with the domain's memory and device-model state.
+ *
+ * Copyright (c) 2012 Citrix Systems Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_HVM_SAVE_ARM_H__
+#define __XEN_PUBLIC_HVM_SAVE_ARM_H__
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/sys/xen/interface/arch-ia64.h b/sys/xen/interface/arch-ia64.h
index 0c43cee..c9da5d4 100644
--- a/sys/xen/interface/arch-ia64.h
+++ b/sys/xen/interface/arch-ia64.h
@@ -49,10 +49,11 @@
#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
#define uint64_aligned_t uint64_t
-#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
+#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
#ifndef __ASSEMBLY__
typedef unsigned long xen_pfn_t;
@@ -66,7 +67,7 @@ typedef unsigned long xen_pfn_t;
/* Maximum number of virtual CPUs in multi-processor guests. */
/* WARNING: before changing this, check that shared_info fits on a page */
-#define MAX_VIRT_CPUS 64
+#define XEN_LEGACY_MAX_VCPUS 64
/* IO ports location for PV. */
#define IO_PORTS_PADDR 0x00000ffffc000000UL
@@ -198,6 +199,15 @@ struct mapped_regs {
unsigned long rrs[8]; // region registers
unsigned long krs[8]; // kernel registers
unsigned long tmp[16]; // temp registers (e.g. for hyperprivops)
+
+ /* itc paravirtualization
+ * vAR.ITC = mAR.ITC + itc_offset
+ * itc_last is the value most recently passed to
+ * the guest OS, in order to prevent the ITC
+ * from going backwards.
+ */
+ unsigned long itc_offset;
+ unsigned long itc_last;
};
};
};
@@ -392,6 +402,7 @@ struct vcpu_guest_context {
#define VGCF_EXTRA_REGS (1UL << 1) /* Set extra regs. */
#define VGCF_SET_CR_IRR (1UL << 2) /* Set cr_irr[0:3]. */
#define VGCF_online (1UL << 3) /* make this vcpu online */
+#define VGCF_SET_AR_ITC (1UL << 4) /* set pv ar.itc. itc_offset, itc_last */
unsigned long flags; /* VGCF_* flags */
struct vcpu_guest_context_regs regs;
@@ -453,6 +464,11 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
/* unexpose the foreign domain's p2m table into privileged domain */
#define IA64_DOM0VP_unexpose_foreign_p2m 13
+/* get memmap_info and memmap. It is possible to map the page directly
+ via foreign page mapping, but that races with the writer.
+ This hypercall avoids such a race. */
+#define IA64_DOM0VP_get_memmap 14
+
// flags for page assignement to pseudo physical address space
#define _ASSIGN_readonly 0
#define ASSIGN_readonly (1UL << _ASSIGN_readonly)
diff --git a/sys/xen/interface/arch-ia64/debug_op.h b/sys/xen/interface/arch-ia64/debug_op.h
new file mode 100644
index 0000000..8f47358
--- /dev/null
+++ b/sys/xen/interface/arch-ia64/debug_op.h
@@ -0,0 +1,99 @@
+/******************************************************************************
+ * debug_op.h
+ *
+ * Copyright (c) 2007 Tristan Gingold <tgingold@free.fr>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_IA64_DEBUG_OP_H__
+#define __XEN_PUBLIC_IA64_DEBUG_OP_H__
+
+/* Set/Get extra conditions to break. */
+#define XEN_IA64_DEBUG_OP_SET_FLAGS 1
+#define XEN_IA64_DEBUG_OP_GET_FLAGS 2
+
+/* Break on kernel single step. */
+#define XEN_IA64_DEBUG_ON_KERN_SSTEP (1 << 0)
+
+/* Break on kernel debug (breakpoint or watch point). */
+#define XEN_IA64_DEBUG_ON_KERN_DEBUG (1 << 1)
+
+/* Break on kernel taken branch. */
+#define XEN_IA64_DEBUG_ON_KERN_TBRANCH (1 << 2)
+
+/* Break on interrupt injection. */
+#define XEN_IA64_DEBUG_ON_EXTINT (1 << 3)
+
+/* Break on interrupt injection. */
+#define XEN_IA64_DEBUG_ON_EXCEPT (1 << 4)
+
+/* Break on event injection. */
+#define XEN_IA64_DEBUG_ON_EVENT (1 << 5)
+
+/* Break on privop/virtualized instruction (slow path only). */
+#define XEN_IA64_DEBUG_ON_PRIVOP (1 << 6)
+
+/* Break on emulated PAL call (at entry). */
+#define XEN_IA64_DEBUG_ON_PAL (1 << 7)
+
+/* Break on emulated SAL call (at entry). */
+#define XEN_IA64_DEBUG_ON_SAL (1 << 8)
+
+/* Break on emulated EFI call (at entry). */
+#define XEN_IA64_DEBUG_ON_EFI (1 << 9)
+
+/* Break on rfi emulation (slow path only, before exec). */
+#define XEN_IA64_DEBUG_ON_RFI (1 << 10)
+
+/* Break on address translation switch. */
+#define XEN_IA64_DEBUG_ON_MMU (1 << 11)
+
+/* Break on bad guest physical address. */
+#define XEN_IA64_DEBUG_ON_BAD_MPA (1 << 12)
+
+/* Force psr.ss bit. */
+#define XEN_IA64_DEBUG_FORCE_SS (1 << 13)
+
+/* Force psr.db bit. */
+#define XEN_IA64_DEBUG_FORCE_DB (1 << 14)
+
+/* Break on ITR/PTR. */
+#define XEN_IA64_DEBUG_ON_TR (1 << 15)
+
+/* Break on ITC/PTC.L/PTC.G/PTC.GA. */
+#define XEN_IA64_DEBUG_ON_TC (1 << 16)
+
+/* Get translation cache. */
+#define XEN_IA64_DEBUG_OP_GET_TC 3
+
+/* Translate virtual address to guest physical address. */
+#define XEN_IA64_DEBUG_OP_TRANSLATE 4
+
+union xen_ia64_debug_op {
+ uint64_t flags;
+ struct xen_ia64_debug_vtlb {
+ uint64_t nbr; /* IN/OUT */
+ XEN_GUEST_HANDLE_64(ia64_tr_entry_t) tr; /* IN/OUT */
+ } vtlb;
+};
+typedef union xen_ia64_debug_op xen_ia64_debug_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_ia64_debug_op_t);
+
+#endif /* __XEN_PUBLIC_IA64_DEBUG_OP_H__ */
diff --git a/sys/xen/interface/arch-ia64/hvm/memmap.h b/sys/xen/interface/arch-ia64/hvm/memmap.h
new file mode 100644
index 0000000..68d14f3
--- /dev/null
+++ b/sys/xen/interface/arch-ia64/hvm/memmap.h
@@ -0,0 +1,91 @@
+/******************************************************************************
+ * memmap.h
+ *
+ * Copyright (c) 2008 Tristan Gingold <tgingold AT free fr>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_HVM_MEMMAP_IA64_H__
+#define __XEN_PUBLIC_HVM_MEMMAP_IA64_H__
+
+#define MEM_G (1UL << 30)
+#define MEM_M (1UL << 20)
+#define MEM_K (1UL << 10)
+
+/* Guest physical address of IO ports space. */
+#define MMIO_START (3 * MEM_G)
+#define MMIO_SIZE (512 * MEM_M)
+
+#define VGA_IO_START 0xA0000UL
+#define VGA_IO_SIZE 0x20000
+
+#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
+#define LEGACY_IO_SIZE (64 * MEM_M)
+
+#define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE)
+#define IO_PAGE_SIZE XEN_PAGE_SIZE
+
+#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
+#define STORE_PAGE_SIZE XEN_PAGE_SIZE
+
+#define BUFFER_IO_PAGE_START (STORE_PAGE_START + STORE_PAGE_SIZE)
+#define BUFFER_IO_PAGE_SIZE XEN_PAGE_SIZE
+
+#define BUFFER_PIO_PAGE_START (BUFFER_IO_PAGE_START + BUFFER_IO_PAGE_SIZE)
+#define BUFFER_PIO_PAGE_SIZE XEN_PAGE_SIZE
+
+#define IO_SAPIC_START 0xfec00000UL
+#define IO_SAPIC_SIZE 0x100000
+
+#define PIB_START 0xfee00000UL
+#define PIB_SIZE 0x200000
+
+#define GFW_START (4 * MEM_G - 16 * MEM_M)
+#define GFW_SIZE (16 * MEM_M)
+
+/* domVTI */
+#define GPFN_FRAME_BUFFER 0x1 /* VGA framebuffer */
+#define GPFN_LOW_MMIO 0x2 /* Low MMIO range */
+#define GPFN_PIB 0x3 /* PIB base */
+#define GPFN_IOSAPIC 0x4 /* IOSAPIC base */
+#define GPFN_LEGACY_IO 0x5 /* Legacy I/O base */
+#define GPFN_HIGH_MMIO 0x6 /* High MMIO range */
+
+/* Nvram belongs to GFW memory space */
+#define NVRAM_SIZE (MEM_K * 64)
+#define NVRAM_START (GFW_START + 10 * MEM_M)
+
+#define NVRAM_VALID_SIG 0x4650494e45584948 /* "HIXENIPF" */
+struct nvram_save_addr {
+ unsigned long addr;
+ unsigned long signature;
+};
+
+#endif /* __XEN_PUBLIC_HVM_MEMMAP_IA64_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/sys/xen/interface/arch-ia64/hvm/save.h b/sys/xen/interface/arch-ia64/hvm/save.h
new file mode 100644
index 0000000..c44e913
--- /dev/null
+++ b/sys/xen/interface/arch-ia64/hvm/save.h
@@ -0,0 +1,208 @@
+/******************************************************************************
+ * save_types.h
+ *
+ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_HVM_SAVE_IA64_H__
+#define __XEN_PUBLIC_HVM_SAVE_IA64_H__
+
+#include "../../hvm/save.h"
+#include "../../arch-ia64.h"
+
+/*
+ * Save/restore header: general info about the save file.
+ */
+
+/* x86 uses 0x54381286 */
+#define HVM_FILE_MAGIC 0x343641492f6e6558UL /* "Xen/IA64" */
+#define HVM_FILE_VERSION 0x0000000000000001UL
+
+struct hvm_save_header {
+ uint64_t magic; /* Must be HVM_FILE_MAGIC */
+ uint64_t version; /* File format version */
+ uint64_t changeset; /* Version of Xen that saved this file */
+ uint64_t cpuid[5]; /* CPUID[0x01][%eax] on the saving machine */
+};
+
+DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
+
+/*
+ * CPU
+ */
+struct hvm_hw_ia64_cpu {
+ uint64_t ipsr;
+};
+DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_ia64_cpu);
+
+/*
+ * CPU
+ */
+struct hvm_hw_ia64_vpd {
+ struct vpd vpd;
+};
+DECLARE_HVM_SAVE_TYPE(VPD, 3, struct hvm_hw_ia64_vpd);
+
+/*
+ * device dependency
+ * vacpi => viosapic => vlsapic
+ */
+/*
+ * vlsapic
+ */
+struct hvm_hw_ia64_vlsapic {
+ uint64_t insvc[4];
+ uint64_t vhpi; // ??? should this be saved in vpd
+ uint8_t xtp;
+ uint8_t pal_init_pending;
+ uint8_t pad[2];
+};
+DECLARE_HVM_SAVE_TYPE(VLSAPIC, 4, struct hvm_hw_ia64_vlsapic);
+/* set
+ * unconditionally set v->arch.irq_new_pending = 1
+ * unconditionally set v->arch.irq_new_condition = 0
+ */
+
+/*
+ * vtime
+ */
+/* itc, itm, itv are saved by arch vcpu context */
+struct hvm_hw_ia64_vtime {
+ uint64_t itc;
+ uint64_t itm;
+
+ uint64_t last_itc;
+ uint64_t pending;
+};
+DECLARE_HVM_SAVE_TYPE(VTIME, 5, struct hvm_hw_ia64_vtime);
+/*
+ * calculate v->vtm.vtm_offset
+ * ??? Or should vtm_offset be set by leave_hypervisor_tail()?
+ * start vtm_timer if necessary by vtm_set_itm().
+ * ??? Or should vtm_timer be set by leave_hypervisor_tail()?
+ *
+ * ??? or should be done by schedule_tail()
+ * => schedule_tail() should do.
+ */
+
+/*
+ * viosapic
+ */
+#define VIOSAPIC_NUM_PINS 48
+
+/* To share VT-d code which uses vioapic_redir_entry.
+ * Although on ia64 this is for the viosapic, we have to use
+ * vioapic_redir_entry instead of viosapic_redir_entry.
+ */
+union vioapic_redir_entry
+{
+ uint64_t bits;
+ struct {
+ uint8_t vector;
+
+ uint8_t delivery_mode : 3;
+ uint8_t reserve1 : 1;
+ uint8_t delivery_status: 1;
+ uint8_t polarity : 1;
+ uint8_t reserve2 : 1;
+ uint8_t trig_mode : 1;
+
+ uint8_t mask : 1;
+ uint8_t reserve3 : 7;
+
+ uint8_t reserved[3];
+ uint16_t dest_id;
+ } fields;
+};
+
+struct hvm_hw_ia64_viosapic {
+ uint64_t irr;
+ uint64_t isr;
+ uint32_t ioregsel;
+ uint32_t pad;
+ uint64_t lowest_vcpu_id;
+ uint64_t base_address;
+ union vioapic_redir_entry redirtbl[VIOSAPIC_NUM_PINS];
+};
+DECLARE_HVM_SAVE_TYPE(VIOSAPIC, 6, struct hvm_hw_ia64_viosapic);
+
+/*
+ * vacpi
+ * PM timer
+ */
+struct vacpi_regs {
+ union {
+ struct {
+ uint32_t pm1a_sts:16;/* PM1a_EVT_BLK.PM1a_STS: status register */
+ uint32_t pm1a_en:16; /* PM1a_EVT_BLK.PM1a_EN: enable register */
+ };
+ uint32_t evt_blk;
+ };
+ uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
+};
+
+struct hvm_hw_ia64_vacpi {
+ struct vacpi_regs regs;
+};
+DECLARE_HVM_SAVE_TYPE(VACPI, 7, struct hvm_hw_ia64_vacpi);
+/* update last_gtime and setup timer of struct vacpi */
+
+/*
+ * opt_feature: identity mapping of regions 4, 5 and 7.
+ * As of c/s 16396:d2935f9c217f of xen-ia64-devel.hg, the
+ * opt_feature hypercall supports only region 4, 5 and 7 identity mappings,
+ * so struct hvm_hw_ia64_identity_mappings covers only those.
+ * This separate structure exists so that the save format does not have
+ * to track changes to the xen/ia64 internal structure, struct
+ * opt_feature.
+ *
+ * If opt_feature is extended in the future, a new structure will be added.
+ */
+struct hvm_hw_ia64_identity_mapping {
+ uint64_t on; /* on/off */
+ uint64_t pgprot; /* The page protection bit mask of the pte. */
+ uint64_t key; /* A protection key. */
+};
+
+struct hvm_hw_ia64_identity_mappings {
+ struct hvm_hw_ia64_identity_mapping im_reg4;/* Region 4 identity mapping */
+ struct hvm_hw_ia64_identity_mapping im_reg5;/* Region 5 identity mapping */
+ struct hvm_hw_ia64_identity_mapping im_reg7;/* Region 7 identity mapping */
+};
+DECLARE_HVM_SAVE_TYPE(OPT_FEATURE_IDENTITY_MAPPINGS, 8, struct hvm_hw_ia64_identity_mappings);
+
+/*
+ * Largest type-code in use
+ */
+#define HVM_SAVE_CODE_MAX 8
+
+#endif /* __XEN_PUBLIC_HVM_SAVE_IA64_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/sys/xen/interface/arch-ia64/sioemu.h b/sys/xen/interface/arch-ia64/sioemu.h
new file mode 100644
index 0000000..d48da1a
--- /dev/null
+++ b/sys/xen/interface/arch-ia64/sioemu.h
@@ -0,0 +1,92 @@
+/******************************************************************************
+ * sioemu.h
+ *
+ * Copyright (c) 2008 Tristan Gingold <tgingold@free.fr>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_IA64_SIOEMU_H__
+#define __XEN_PUBLIC_IA64_SIOEMU_H__
+
+/* SIOEMU specific hypercalls.
+ The numbers are the minor part of FW_HYPERCALL_SIOEMU. */
+
+/* Defines the callback entry point. r8=ip, r9=data.
+ Must be called per-vcpu. */
+#define SIOEMU_HYPERCALL_SET_CALLBACK 0x01
+
+/* Finish sioemu fw initialization and start firmware. r8=ip. */
+#define SIOEMU_HYPERCALL_START_FW 0x02
+
+/* Add IO pages in physmap. */
+#define SIOEMU_HYPERCALL_ADD_IO_PHYSMAP 0x03
+
+/* Get wallclock time. */
+#define SIOEMU_HYPERCALL_GET_TIME 0x04
+
+/* Flush cache. */
+#define SIOEMU_HYPERCALL_FLUSH_CACHE 0x07
+
+/* Get freq base. */
+#define SIOEMU_HYPERCALL_FREQ_BASE 0x08
+
+/* Return from callback. */
+#define SIOEMU_HYPERCALL_CALLBACK_RETURN 0x09
+
+/* Deliver an interrupt. */
+#define SIOEMU_HYPERCALL_DELIVER_INT 0x0a
+
+/* SIOEMU callback reason. */
+
+/* An event (from event channel) has to be delivered. */
+#define SIOEMU_CB_EVENT 0x00
+
+/* Emulate an IO access. */
+#define SIOEMU_CB_IO_EMULATE 0x01
+
+/* An IPI is sent to a dead vcpu. */
+#define SIOEMU_CB_WAKEUP_VCPU 0x02
+
+/* A SAL hypercall is executed. */
+#define SIOEMU_CB_SAL_ASSIST 0x03
+
+#ifndef __ASSEMBLY__
+struct sioemu_callback_info {
+ /* Saved registers. */
+ unsigned long ip;
+ unsigned long psr;
+ unsigned long ifs;
+ unsigned long nats;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+
+ /* Callback parameters. */
+ unsigned long cause;
+ unsigned long arg0;
+ unsigned long arg1;
+ unsigned long arg2;
+ unsigned long arg3;
+ unsigned long _pad2[2];
+ unsigned long r2;
+};
+#endif /* __ASSEMBLY__ */
+#endif /* __XEN_PUBLIC_IA64_SIOEMU_H__ */
diff --git a/sys/xen/interface/arch-x86/cpuid.h b/sys/xen/interface/arch-x86/cpuid.h
index 34fd72d..d9bd627 100644
--- a/sys/xen/interface/arch-x86/cpuid.h
+++ b/sys/xen/interface/arch-x86/cpuid.h
@@ -24,7 +24,7 @@
* Copyright (c) 2007 Citrix Systems, Inc.
*
* Authors:
- * Keir Fraser <keir.fraser@citrix.com>
+ * Keir Fraser <keir@xen.org>
*/
#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
diff --git a/sys/xen/interface/arch-x86/hvm/save.h b/sys/xen/interface/arch-x86/hvm/save.h
index 73b3909..9ae645d 100644
--- a/sys/xen/interface/arch-x86/hvm/save.h
+++ b/sys/xen/interface/arch-x86/hvm/save.h
@@ -38,7 +38,7 @@ struct hvm_save_header {
uint32_t version; /* File format version */
uint64_t changeset; /* Version of Xen that saved this file */
uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */
- uint32_t pad0;
+ uint32_t gtsc_khz; /* Guest's TSC frequency in kHz */
};
DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
@@ -46,6 +46,8 @@ DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
/*
* Processor
+ *
+ * Compat: Pre-3.4 didn't have msr_tsc_aux
*/
struct hvm_hw_cpu {
@@ -123,9 +125,116 @@ struct hvm_hw_cpu {
uint32_t tr_arbytes;
uint32_t ldtr_arbytes;
- uint32_t sysenter_cs;
- uint32_t padding0;
+ uint64_t sysenter_cs;
+ uint64_t sysenter_esp;
+ uint64_t sysenter_eip;
+
+ /* msr for em64t */
+ uint64_t shadow_gs;
+
+ /* msr content saved/restored. */
+ uint64_t msr_flags;
+ uint64_t msr_lstar;
+ uint64_t msr_star;
+ uint64_t msr_cstar;
+ uint64_t msr_syscall_mask;
+ uint64_t msr_efer;
+ uint64_t msr_tsc_aux;
+
+ /* guest's idea of what rdtsc() would return */
+ uint64_t tsc;
+
+ /* pending event, if any */
+ union {
+ uint32_t pending_event;
+ struct {
+ uint8_t pending_vector:8;
+ uint8_t pending_type:3;
+ uint8_t pending_error_valid:1;
+ uint32_t pending_reserved:19;
+ uint8_t pending_valid:1;
+ };
+ };
+ /* error code for pending event */
+ uint32_t error_code;
+};
+
+struct hvm_hw_cpu_compat {
+ uint8_t fpu_regs[512];
+
+ uint64_t rax;
+ uint64_t rbx;
+ uint64_t rcx;
+ uint64_t rdx;
+ uint64_t rbp;
+ uint64_t rsi;
+ uint64_t rdi;
+ uint64_t rsp;
+ uint64_t r8;
+ uint64_t r9;
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+
+ uint64_t rip;
+ uint64_t rflags;
+
+ uint64_t cr0;
+ uint64_t cr2;
+ uint64_t cr3;
+ uint64_t cr4;
+
+ uint64_t dr0;
+ uint64_t dr1;
+ uint64_t dr2;
+ uint64_t dr3;
+ uint64_t dr6;
+ uint64_t dr7;
+
+ uint32_t cs_sel;
+ uint32_t ds_sel;
+ uint32_t es_sel;
+ uint32_t fs_sel;
+ uint32_t gs_sel;
+ uint32_t ss_sel;
+ uint32_t tr_sel;
+ uint32_t ldtr_sel;
+
+ uint32_t cs_limit;
+ uint32_t ds_limit;
+ uint32_t es_limit;
+ uint32_t fs_limit;
+ uint32_t gs_limit;
+ uint32_t ss_limit;
+ uint32_t tr_limit;
+ uint32_t ldtr_limit;
+ uint32_t idtr_limit;
+ uint32_t gdtr_limit;
+
+ uint64_t cs_base;
+ uint64_t ds_base;
+ uint64_t es_base;
+ uint64_t fs_base;
+ uint64_t gs_base;
+ uint64_t ss_base;
+ uint64_t tr_base;
+ uint64_t ldtr_base;
+ uint64_t idtr_base;
+ uint64_t gdtr_base;
+ uint32_t cs_arbytes;
+ uint32_t ds_arbytes;
+ uint32_t es_arbytes;
+ uint32_t fs_arbytes;
+ uint32_t gs_arbytes;
+ uint32_t ss_arbytes;
+ uint32_t tr_arbytes;
+ uint32_t ldtr_arbytes;
+
+ uint64_t sysenter_cs;
uint64_t sysenter_esp;
uint64_t sysenter_eip;
@@ -139,6 +248,7 @@ struct hvm_hw_cpu {
uint64_t msr_cstar;
uint64_t msr_syscall_mask;
uint64_t msr_efer;
+ /*uint64_t msr_tsc_aux; COMPAT */
/* guest's idea of what rdtsc() would return */
uint64_t tsc;
@@ -158,8 +268,22 @@ struct hvm_hw_cpu {
uint32_t error_code;
};
-DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu);
+static inline int _hvm_hw_fix_cpu(void *h) {
+ struct hvm_hw_cpu *new=h;
+ struct hvm_hw_cpu_compat *old=h;
+
+ /* If we copy from the end backwards, we should
+ * be able to do the modification in-place */
+ new->error_code=old->error_code;
+ new->pending_event=old->pending_event;
+ new->tsc=old->tsc;
+ new->msr_tsc_aux=0;
+ return 0;
+}
+
+DECLARE_HVM_SAVE_TYPE_COMPAT(CPU, 2, struct hvm_hw_cpu, \
+ struct hvm_hw_cpu_compat, _hvm_hw_fix_cpu);
/*
* PIC
@@ -220,12 +344,7 @@ DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic);
* IO-APIC
*/
-#ifdef __ia64__
-#define VIOAPIC_IS_IOSAPIC 1
-#define VIOAPIC_NUM_PINS 24
-#else
#define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */
-#endif
struct hvm_hw_vioapic {
uint64_t base_address;
@@ -244,13 +363,8 @@ struct hvm_hw_vioapic {
uint8_t trig_mode:1;
uint8_t mask:1;
uint8_t reserve:7;
-#if !VIOAPIC_IS_IOSAPIC
uint8_t reserved[4];
uint8_t dest_id;
-#else
- uint8_t reserved[3];
- uint16_t dest_id;
-#endif
} fields;
} redirtbl[VIOAPIC_NUM_PINS];
};
@@ -266,6 +380,7 @@ struct hvm_hw_lapic {
uint64_t apic_base_msr;
uint32_t disabled; /* VLAPIC_xx_DISABLED */
uint32_t timer_divisor;
+ uint64_t tdt_msr;
};
DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic);
@@ -287,7 +402,7 @@ struct hvm_hw_pci_irqs {
* Indexed by: device*4 + INTx#.
*/
union {
- DECLARE_BITMAP(i, 32*4);
+ unsigned long i[16 / sizeof (unsigned long)]; /* DECLARE_BITMAP(i, 32*4); */
uint64_t pad[2];
};
};
@@ -300,7 +415,7 @@ struct hvm_hw_isa_irqs {
* Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
*/
union {
- DECLARE_BITMAP(i, 16);
+ unsigned long i[1]; /* DECLARE_BITMAP(i, 16); */
uint64_t pad[1];
};
};
@@ -421,9 +536,54 @@ struct hvm_hw_mtrr {
DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
+/*
+ * The save area of XSAVE/XRSTOR.
+ */
+
+struct hvm_hw_cpu_xsave {
+ uint64_t xfeature_mask;
+ uint64_t xcr0; /* Updated by XSETBV */
+ uint64_t xcr0_accum; /* Updated by XSETBV */
+ struct {
+ struct { char x[512]; } fpu_sse;
+
+ struct {
+ uint64_t xstate_bv; /* Updated by XRSTOR */
+ uint64_t reserved[7];
+ } xsave_hdr; /* The 64-byte header */
+
+ struct { char x[0]; } ymm; /* YMM */
+ } save_area;
+};
+
+#define CPU_XSAVE_CODE 16
+
+/*
+ * Viridian hypervisor context.
+ */
+
+struct hvm_viridian_domain_context {
+ uint64_t hypercall_gpa;
+ uint64_t guest_os_id;
+};
+
+DECLARE_HVM_SAVE_TYPE(VIRIDIAN_DOMAIN, 15, struct hvm_viridian_domain_context);
+
+struct hvm_viridian_vcpu_context {
+ uint64_t apic_assist;
+};
+
+DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context);
+
+struct hvm_vmce_vcpu {
+ uint64_t caps;
+};
+
+DECLARE_HVM_SAVE_TYPE(VMCE_VCPU, 18, struct hvm_vmce_vcpu);
+
/*
* Largest type-code in use
*/
-#define HVM_SAVE_CODE_MAX 14
+#define HVM_SAVE_CODE_MAX 18
#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
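The compat machinery above lets a loader accept both CPU record layouts: a record with the pre-3.4 length is widened in place by _hvm_hw_fix_cpu() (which zeroes msr_tsc_aux) before being treated as a current struct hvm_hw_cpu. A hedged sketch of that pattern; the loader, its error handling and the assumption that the buffer is at least sizeof(struct hvm_hw_cpu) are illustrative:

    static int
    demo_load_cpu_record(void *rec, size_t len)
    {
        if (len == sizeof(struct hvm_hw_cpu_compat)) {
            /* Old layout: expand in place. */
            if (_hvm_hw_fix_cpu(rec) != 0)
                return (EINVAL);
        } else if (len != sizeof(struct hvm_hw_cpu)) {
            return (EINVAL);
        }
        /* rec now points at a current-format struct hvm_hw_cpu. */
        return (0);
    }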
diff --git a/sys/xen/interface/arch-x86/xen-mca.h b/sys/xen/interface/arch-x86/xen-mca.h
index 103d41f..dca6b3e 100644
--- a/sys/xen/interface/arch-x86/xen-mca.h
+++ b/sys/xen/interface/arch-x86/xen-mca.h
@@ -56,13 +56,20 @@
/* Hypercall */
#define __HYPERVISOR_mca __HYPERVISOR_arch_0
-#define XEN_MCA_INTERFACE_VERSION 0x03000001
+/*
+ * The xen-unstable repo has interface version 0x03000001; our interface
+ * is incompatible with that and any future minor revisions, so we
+ * choose a different version number range that is numerically less
+ * than that used in xen-unstable.
+ */
+#define XEN_MCA_INTERFACE_VERSION 0x01ecc003
-/* IN: Dom0 calls hypercall from MC event handler. */
-#define XEN_MC_CORRECTABLE 0x0
-/* IN: Dom0/DomU calls hypercall from MC trap handler. */
-#define XEN_MC_TRAP 0x1
-/* XEN_MC_CORRECTABLE and XEN_MC_TRAP are mutually exclusive. */
+/* IN: Dom0 calls hypercall to retrieve nonurgent telemetry */
+#define XEN_MC_NONURGENT 0x0001
+/* IN: Dom0/DomU calls hypercall to retrieve urgent telemetry */
+#define XEN_MC_URGENT 0x0002
+/* IN: Dom0 acknowledges previously-fetched telemetry */
+#define XEN_MC_ACK 0x0004
/* OUT: All is ok */
#define XEN_MC_OK 0x0
@@ -97,6 +104,7 @@
#define MC_TYPE_GLOBAL 0
#define MC_TYPE_BANK 1
#define MC_TYPE_EXTENDED 2
+#define MC_TYPE_RECOVERY 3
struct mcinfo_common {
uint16_t type; /* structure type */
@@ -106,19 +114,24 @@ struct mcinfo_common {
#define MC_FLAG_CORRECTABLE (1 << 0)
#define MC_FLAG_UNCORRECTABLE (1 << 1)
-
+#define MC_FLAG_RECOVERABLE (1 << 2)
+#define MC_FLAG_POLLED (1 << 3)
+#define MC_FLAG_RESET (1 << 4)
+#define MC_FLAG_CMCI (1 << 5)
+#define MC_FLAG_MCE (1 << 6)
/* contains global x86 mc information */
struct mcinfo_global {
struct mcinfo_common common;
/* running domain at the time in error (most likely the impacted one) */
uint16_t mc_domid;
+ uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */
uint32_t mc_socketid; /* physical socket of the physical core */
uint16_t mc_coreid; /* physical impacted core */
uint16_t mc_core_threadid; /* core thread of physical core */
- uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */
- uint64_t mc_gstatus; /* global status */
+ uint32_t mc_apicid;
uint32_t mc_flags;
+ uint64_t mc_gstatus; /* global status */
};
/* contains bank local x86 mc information */
@@ -132,6 +145,8 @@ struct mcinfo_bank {
uint64_t mc_addr; /* bank address, only valid
* if addr bit is set in mc_status */
uint64_t mc_misc;
+ uint64_t mc_ctrl2;
+ uint64_t mc_tsc;
};
@@ -150,20 +165,120 @@ struct mcinfo_extended {
* multiple times. */
uint32_t mc_msrs; /* Number of msr with valid values. */
- struct mcinfo_msr mc_msr[5];
+ /*
+ * Currently Intel extended MSR (32/64) include all gp registers
+ * and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be
+ * useful at present. So expand this array to 16/32 to leave room.
+ */
+ struct mcinfo_msr mc_msr[sizeof(void *) * 4];
+};
+
+/* Recovery Action flags. Giving recovery result information to DOM0 */
+
+/* Xen takes successful recovery action, the error is recovered */
+#define REC_ACTION_RECOVERED (0x1 << 0)
+/* No action is performed by XEN */
+#define REC_ACTION_NONE (0x1 << 1)
+/* It's possible DOM0 might take action ownership in some case */
+#define REC_ACTION_NEED_RESET (0x1 << 2)
+
+/* Different Recovery Action types, if the action is performed successfully,
+ * REC_ACTION_RECOVERED flag will be returned.
+ */
+
+/* Page Offline Action */
+#define MC_ACTION_PAGE_OFFLINE (0x1 << 0)
+/* CPU offline Action */
+#define MC_ACTION_CPU_OFFLINE (0x1 << 1)
+/* L3 cache disable Action */
+#define MC_ACTION_CACHE_SHRINK (0x1 << 2)
+
+/* The interface below is used between Xen and Dom0 for passing Xen's
+ * recovery action information to Dom0.
+ * Usage scenario: after offlining a broken page, Xen may pass its page-offline
+ * recovery action result to Dom0. Dom0 saves the information in
+ * non-volatile memory for further proactive actions, such as offlining the
+ * easily-broken page earlier on the next reboot.
+ */
+struct page_offline_action
+{
+ /* Params for passing the offlined page number to DOM0 */
+ uint64_t mfn;
+ uint64_t status;
+};
+
+struct cpu_offline_action
+{
+ /* Params for passing the identity of the offlined CPU to DOM0 */
+ uint32_t mc_socketid;
+ uint16_t mc_coreid;
+ uint16_t mc_core_threadid;
+};
+
+#define MAX_UNION_SIZE 16
+struct mcinfo_recovery
+{
+ struct mcinfo_common common;
+ uint16_t mc_bank; /* bank nr */
+ uint8_t action_flags;
+ uint8_t action_types;
+ union {
+ struct page_offline_action page_retire;
+ struct cpu_offline_action cpu_offline;
+ uint8_t pad[MAX_UNION_SIZE];
+ } action_info;
};
+
#define MCINFO_HYPERCALLSIZE 1024
#define MCINFO_MAXSIZE 768
+#define MCINFO_FLAGS_UNCOMPLETE 0x1
struct mc_info {
/* Number of mcinfo_* entries in mi_data */
uint32_t mi_nentries;
-
- uint8_t mi_data[MCINFO_MAXSIZE - sizeof(uint32_t)];
+ uint32_t flags;
+ uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8];
};
typedef struct mc_info mc_info_t;
-
+DEFINE_XEN_GUEST_HANDLE(mc_info_t);
+
+#define __MC_MSR_ARRAYSIZE 8
+#define __MC_NMSRS 1
+#define MC_NCAPS 7 /* 7 CPU feature flag words */
+#define MC_CAPS_STD_EDX 0 /* cpuid level 0x00000001 (%edx) */
+#define MC_CAPS_AMD_EDX 1 /* cpuid level 0x80000001 (%edx) */
+#define MC_CAPS_TM 2 /* cpuid level 0x80860001 (TransMeta) */
+#define MC_CAPS_LINUX 3 /* Linux-defined */
+#define MC_CAPS_STD_ECX 4 /* cpuid level 0x00000001 (%ecx) */
+#define MC_CAPS_VIA 5 /* cpuid level 0xc0000001 */
+#define MC_CAPS_AMD_ECX 6 /* cpuid level 0x80000001 (%ecx) */
+
+struct mcinfo_logical_cpu {
+ uint32_t mc_cpunr;
+ uint32_t mc_chipid;
+ uint16_t mc_coreid;
+ uint16_t mc_threadid;
+ uint32_t mc_apicid;
+ uint32_t mc_clusterid;
+ uint32_t mc_ncores;
+ uint32_t mc_ncores_active;
+ uint32_t mc_nthreads;
+ int32_t mc_cpuid_level;
+ uint32_t mc_family;
+ uint32_t mc_vendor;
+ uint32_t mc_model;
+ uint32_t mc_step;
+ char mc_vendorid[16];
+ char mc_brandid[64];
+ uint32_t mc_cpu_caps[MC_NCAPS];
+ uint32_t mc_cache_size;
+ uint32_t mc_cache_alignment;
+ int32_t mc_nmsrvals;
+ struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE];
+};
+typedef struct mcinfo_logical_cpu xen_mc_logical_cpu_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t);
/*
@@ -181,12 +296,12 @@ typedef struct mc_info mc_info_t;
* struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi);
*/
#define x86_mcinfo_first(_mi) \
- (struct mcinfo_common *)((_mi)->mi_data)
+ ((struct mcinfo_common *)(_mi)->mi_data)
/* Prototype:
* struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic);
*/
#define x86_mcinfo_next(_mic) \
- (struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)
+ ((struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size))
/* Prototype:
* void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type);
@@ -228,14 +343,15 @@ typedef struct mc_info mc_info_t;
#define XEN_MC_fetch 1
struct xen_mc_fetch {
/* IN/OUT variables. */
- uint32_t flags;
-
-/* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */
-/* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */
+ uint32_t flags; /* IN: XEN_MC_NONURGENT, XEN_MC_URGENT,
+ XEN_MC_ACK if ack'ing an earlier fetch */
+ /* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED,
+ XEN_MC_NODATA, XEN_MC_NOMATCH */
+ uint32_t _pad0;
+ uint64_t fetch_id; /* OUT: id for ack, IN: id we are ack'ing */
/* OUT variables. */
- uint32_t fetch_idx; /* only useful for Dom0 for the notify hypercall */
- struct mc_info mc_info;
+ XEN_GUEST_HANDLE(mc_info_t) data;
};
typedef struct xen_mc_fetch xen_mc_fetch_t;
DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t);
@@ -250,7 +366,6 @@ struct xen_mc_notifydomain {
uint16_t mc_domid; /* The unprivileged domain to notify. */
uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify.
* Usually echo'd value from the fetch hypercall. */
- uint32_t fetch_idx; /* echo'd value from the fetch hypercall. */
/* IN/OUT variables. */
uint32_t flags;
@@ -261,6 +376,47 @@ struct xen_mc_notifydomain {
typedef struct xen_mc_notifydomain xen_mc_notifydomain_t;
DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t);
+#define XEN_MC_physcpuinfo 3
+struct xen_mc_physcpuinfo {
+ /* IN/OUT */
+ uint32_t ncpus;
+ uint32_t _pad0;
+ /* OUT */
+ XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info;
+};
+
+#define XEN_MC_msrinject 4
+#define MC_MSRINJ_MAXMSRS 8
+struct xen_mc_msrinject {
+ /* IN */
+ uint32_t mcinj_cpunr; /* target processor id */
+ uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */
+ uint32_t mcinj_count; /* 0 .. count-1 in array are valid */
+ uint32_t _pad0;
+ struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS];
+};
+
+/* Flags for mcinj_flags above; bits 16-31 are reserved */
+#define MC_MSRINJ_F_INTERPOSE 0x1
+
+#define XEN_MC_mceinject 5
+struct xen_mc_mceinject {
+ unsigned int mceinj_cpunr; /* target processor id */
+};
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#define XEN_MC_inject_v2 6
+#define XEN_MC_INJECT_TYPE_MASK 0x7
+#define XEN_MC_INJECT_TYPE_MCE 0x0
+#define XEN_MC_INJECT_TYPE_CMCI 0x1
+
+#define XEN_MC_INJECT_CPU_BROADCAST 0x8
+
+struct xen_mc_inject_v2 {
+ uint32_t flags;
+ struct xenctl_cpumap cpumap;
+};
+#endif
struct xen_mc {
uint32_t cmd;
@@ -268,7 +424,12 @@ struct xen_mc {
union {
struct xen_mc_fetch mc_fetch;
struct xen_mc_notifydomain mc_notifydomain;
- uint8_t pad[MCINFO_HYPERCALLSIZE];
+ struct xen_mc_physcpuinfo mc_physcpuinfo;
+ struct xen_mc_msrinject mc_msrinject;
+ struct xen_mc_mceinject mc_mceinject;
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+ struct xen_mc_inject_v2 mc_inject_v2;
+#endif
} u;
};
typedef struct xen_mc xen_mc_t;
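The tightened x86_mcinfo_first()/x86_mcinfo_next() macros are the intended way to walk the variable-length telemetry buffer returned by XEN_MC_fetch. A sketch of that traversal using only types defined in this header; the per-record handling is left as comments:

    static void
    demo_walk_mcinfo(struct mc_info *mi)
    {
        struct mcinfo_common *mic = x86_mcinfo_first(mi);
        uint32_t i;

        for (i = 0; i < mi->mi_nentries; i++) {
            switch (mic->type) {
            case MC_TYPE_GLOBAL:
                /* struct mcinfo_global: impacted domain/vcpu, global status */
                break;
            case MC_TYPE_BANK:
                /* struct mcinfo_bank: per-bank status, address, misc */
                break;
            case MC_TYPE_RECOVERY:
                /* struct mcinfo_recovery: recovery action flags and result */
                break;
            }
            mic = x86_mcinfo_next(mic);
        }
    }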
diff --git a/sys/xen/interface/arch-x86/xen-x86_32.h b/sys/xen/interface/arch-x86/xen-x86_32.h
index 7a1db42..906e74a 100644
--- a/sys/xen/interface/arch-x86/xen-x86_32.h
+++ b/sys/xen/interface/arch-x86/xen-x86_32.h
@@ -24,30 +24,18 @@
* Copyright (c) 2004-2007, K A Fraser
*/
-#include <sys/param.h>
-#include <sys/types.h>
-
#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
/*
* Hypercall interface:
- * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
+ * Input: %ebx, %ecx, %edx, %esi, %edi, %ebp (arguments 1-6)
* Output: %eax
* Access is via hypercall page (set up by guest loader or via a Xen MSR):
* call hypercall_page + hypercall-number * 32
* Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
*/
-#if __XEN_INTERFACE_VERSION__ < 0x00030203
-/*
- * Legacy hypercall interface:
- * As above, except the entry sequence to the hypervisor is:
- * mov $hypercall-number*32,%eax ; int $0x82
- */
-#define TRAP_INSTR "int $0x82"
-#endif
-
/*
* These flat segments are in the Xen-private section of every GDT. Since these
* are also present in the initial GDT, many OSes will be able to avoid
@@ -111,8 +99,8 @@
__guest_handle_ ## name; \
typedef struct { union { type *p; uint64_aligned_t q; }; } \
__guest_handle_64_ ## name
-#undef set_xen_guest_handle
-#define set_xen_guest_handle(hnd, val) \
+#undef set_xen_guest_handle_raw
+#define set_xen_guest_handle_raw(hnd, val) \
do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \
(hnd).p = val; \
} while ( 0 )
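The rename keeps the 32-bit override above intact: set_xen_guest_handle() is now a thin wrapper around set_xen_guest_handle_raw(), which on tools builds zeroes the 64-bit-aligned slot before storing the guest pointer. A minimal sketch of filling a handle; demo_byte_t is a hypothetical handle type, not one defined by these headers:

    typedef uint8_t demo_byte_t;
    DEFINE_XEN_GUEST_HANDLE(demo_byte_t);

    static void
    demo_fill(XEN_GUEST_HANDLE(demo_byte_t) *hnd, demo_byte_t *buf)
    {
        /* Zeroes the full 64-bit slot on 32-bit tools builds, then
         * stores the guest pointer. */
        set_xen_guest_handle(*hnd, buf);
    }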
diff --git a/sys/xen/interface/arch-x86/xen-x86_64.h b/sys/xen/interface/arch-x86/xen-x86_64.h
index 1e54cf9..2654d24 100644
--- a/sys/xen/interface/arch-x86/xen-x86_64.h
+++ b/sys/xen/interface/arch-x86/xen-x86_64.h
@@ -29,23 +29,13 @@
/*
* Hypercall interface:
- * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5)
+ * Input: %rdi, %rsi, %rdx, %r10, %r8, %r9 (arguments 1-6)
* Output: %rax
* Access is via hypercall page (set up by guest loader or via a Xen MSR):
* call hypercall_page + hypercall-number * 32
* Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi)
*/
-#if __XEN_INTERFACE_VERSION__ < 0x00030203
-/*
- * Legacy hypercall interface:
- * As above, except the entry sequence to the hypervisor is:
- * mov $hypercall-number*32,%eax ; syscall
- * Clobbered: %rcx, %r11, argument registers (as above)
- */
-#define TRAP_INSTR "syscall"
-#endif
-
/*
* 64-bit segment selectors
* These flat segments are in the Xen-private section of every GDT. Since these
diff --git a/sys/xen/interface/arch-x86/xen.h b/sys/xen/interface/arch-x86/xen.h
index 2c878ef..1c186d7 100644
--- a/sys/xen/interface/arch-x86/xen.h
+++ b/sys/xen/interface/arch-x86/xen.h
@@ -24,6 +24,8 @@
* Copyright (c) 2004-2006, K A Fraser
*/
+#include "../xen.h"
+
#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
#define __XEN_PUBLIC_ARCH_X86_XEN_H__
@@ -32,8 +34,7 @@
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } __guest_handle_ ## name
#else
-#error "using old handle"
-#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef type * __guest_handle_ ## name
#endif
@@ -43,15 +44,16 @@
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
-#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
+#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
#endif
+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
#if defined(__i386__)
-#include <xen/interface/arch-x86/xen-x86_32.h>
+#include "xen-x86_32.h"
#elif defined(__x86_64__)
-#include <xen/interface/arch-x86/xen-x86_64.h>
+#include "xen-x86_64.h"
#endif
#ifndef __ASSEMBLY__
@@ -63,6 +65,11 @@ typedef unsigned long xen_pfn_t;
* SEGMENT DESCRIPTOR TABLES
*/
/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_gdt(const xen_pfn_t frames[], unsigned int entries);
+ * `
+ */
+/*
* A number of GDT entries are reserved by Xen. These are not situated at the
* start of the GDT because some stupid OSes export hard-coded selector values
* in their ABI. These hard-coded values are always near the start of the GDT,
@@ -72,15 +79,28 @@ typedef unsigned long xen_pfn_t;
#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
-/* Maximum number of virtual CPUs in multi-processor guests. */
-#define MAX_VIRT_CPUS 32
+/* Maximum number of virtual CPUs in legacy multi-processor guests. */
+#define XEN_LEGACY_MAX_VCPUS 32
#ifndef __ASSEMBLY__
typedef unsigned long xen_ulong_t;
/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp);
+ * `
+ * Sets the stack segment and pointer for the current vcpu.
+ */
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_trap_table(const struct trap_info traps[]);
+ * `
+ */
+/*
* Send an array of these to HYPERVISOR_set_trap_table().
+ * Terminate the array with a sentinel entry, with traps[].address==0.
* The privilege level specifies which modes may enter a trap via a software
* interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
* privilege levels as follows:
@@ -147,7 +167,7 @@ struct vcpu_guest_context {
unsigned int event_callback_cs; /* compat CS of event cb */
unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */
};
- } u;
+ };
#else
unsigned long syscall_callback_eip;
#endif
@@ -175,6 +195,24 @@ typedef struct arch_shared_info arch_shared_info_t;
#endif /* !__ASSEMBLY__ */
/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_fpu_taskswitch(int set);
+ * `
+ * Sets (if set!=0) or clears (if set==0) CR0.TS.
+ */
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_debugreg(int regno, unsigned long value);
+ *
+ * ` unsigned long
+ * ` HYPERVISOR_get_debugreg(int regno);
+ * For 0<=reg<=7, returns the debug register value.
+ * For other values of reg, returns ((unsigned long)-EINVAL).
+ * (Unfortunately, this interface is defective.)
+ */
+
+/*
* Prefix forces emulation of some non-trapping instructions.
* Currently only CPUID.
*/
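The updated commentary in this header notes that HYPERVISOR_set_trap_table() takes a sentinel-terminated array (traps[].address == 0). A hedged sketch of registering a single handler; struct trap_info (vector, flags, cs, address) is declared elsewhere in this header, FLAT_KERNEL_CS comes from the per-arch sub-headers, and demo_breakpoint_handler is a placeholder:

    extern void demo_breakpoint_handler(void);

    static void
    demo_register_traps(void)
    {
        static struct trap_info traps[] = {
            { 3 /* #BP */, 3 /* DPL 3 */, FLAT_KERNEL_CS,
              (unsigned long)demo_breakpoint_handler },
            { 0, 0, 0, 0 },  /* sentinel: address == 0 ends the table */
        };

        HYPERVISOR_set_trap_table(traps);
    }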
diff --git a/sys/xen/interface/arch-x86_32.h b/sys/xen/interface/arch-x86_32.h
index 1572fe3..45842b2 100644
--- a/sys/xen/interface/arch-x86_32.h
+++ b/sys/xen/interface/arch-x86_32.h
@@ -24,4 +24,4 @@
* Copyright (c) 2004-2006, K A Fraser
*/
-#include <xen/interface/arch-x86/xen.h>
+#include "arch-x86/xen.h"
diff --git a/sys/xen/interface/arch-x86_64.h b/sys/xen/interface/arch-x86_64.h
index fbb2639..409805f 100644
--- a/sys/xen/interface/arch-x86_64.h
+++ b/sys/xen/interface/arch-x86_64.h
@@ -25,3 +25,19 @@
*/
#include "arch-x86/xen.h"
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_set_callbacks(unsigned long event_selector,
+ * ` unsigned long event_address,
+ * ` unsigned long failsafe_selector,
+ * ` unsigned long failsafe_address);
+ * `
+ * Register for callbacks on events. When an event (from an event
+ * channel) occurs, event_address is used as the value of eip.
+ *
+ * A similar callback occurs if the segment selectors are invalid.
+ * failsafe_address is used as the value of eip.
+ *
+ * On x86_64, event_selector and failsafe_selector are ignored (???).
+ */
diff --git a/sys/xen/interface/domctl.h b/sys/xen/interface/domctl.h
index e7ddd5f..240ceb9 100644
--- a/sys/xen/interface/domctl.h
+++ b/sys/xen/interface/domctl.h
@@ -33,40 +33,37 @@
#endif
#include "xen.h"
+#include "grant_table.h"
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
-
-struct xenctl_cpumap {
- XEN_GUEST_HANDLE_64(uint8_t) bitmap;
- uint32_t nr_cpus;
-};
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000008
/*
* NB. xen_domctl.domain is an IN/OUT parameter for this operation.
* If it is specified as zero, an id is auto-allocated and returned.
*/
-#define XEN_DOMCTL_createdomain 1
+/* XEN_DOMCTL_createdomain */
struct xen_domctl_createdomain {
/* IN parameters */
uint32_t ssidref;
xen_domain_handle_t handle;
/* Is this an HVM guest (as opposed to a PV guest)? */
-#define _XEN_DOMCTL_CDF_hvm_guest 0
-#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest)
+#define _XEN_DOMCTL_CDF_hvm_guest 0
+#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest)
/* Use hardware-assisted paging if available? */
-#define _XEN_DOMCTL_CDF_hap 1
-#define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap)
+#define _XEN_DOMCTL_CDF_hap 1
+#define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap)
+ /* Should domain memory integrity be verified by tboot during Sx? */
+#define _XEN_DOMCTL_CDF_s3_integrity 2
+#define XEN_DOMCTL_CDF_s3_integrity (1U<<_XEN_DOMCTL_CDF_s3_integrity)
+ /* Disable out-of-sync shadow page tables? */
+#define _XEN_DOMCTL_CDF_oos_off 3
+#define XEN_DOMCTL_CDF_oos_off (1U<<_XEN_DOMCTL_CDF_oos_off)
uint32_t flags;
};
typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
-#define XEN_DOMCTL_destroydomain 2
-#define XEN_DOMCTL_pausedomain 3
-#define XEN_DOMCTL_unpausedomain 4
-#define XEN_DOMCTL_resumedomain 27
-
-#define XEN_DOMCTL_getdomaininfo 5
+/* XEN_DOMCTL_getdomaininfo */
struct xen_domctl_getdomaininfo {
/* OUT variables. */
domid_t domain; /* Also echoed in domctl.domain */
@@ -91,34 +88,34 @@ struct xen_domctl_getdomaininfo {
/* Being debugged. */
#define _XEN_DOMINF_debugged 6
#define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged)
- /* CPU to which this domain is bound. */
-#define XEN_DOMINF_cpumask 255
-#define XEN_DOMINF_cpushift 8
/* XEN_DOMINF_shutdown guest-supplied code. */
#define XEN_DOMINF_shutdownmask 255
#define XEN_DOMINF_shutdownshift 16
uint32_t flags; /* XEN_DOMINF_* */
uint64_aligned_t tot_pages;
uint64_aligned_t max_pages;
+ uint64_aligned_t shr_pages;
+ uint64_aligned_t paged_pages;
uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
uint64_aligned_t cpu_time;
uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
uint32_t ssidref;
xen_domain_handle_t handle;
+ uint32_t cpupool;
};
typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
-#define XEN_DOMCTL_getmemlist 6
+/* XEN_DOMCTL_getmemlist */
struct xen_domctl_getmemlist {
/* IN variables. */
/* Max entries to write to output buffer. */
uint64_aligned_t max_pfns;
/* Start index in guest's page list. */
uint64_aligned_t start_pfn;
- XEN_GUEST_HANDLE_64(uint64_t) buffer;
+ XEN_GUEST_HANDLE_64(uint64) buffer;
/* OUT variables. */
uint64_aligned_t num_pfns;
};
@@ -126,7 +123,7 @@ typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
-#define XEN_DOMCTL_getpageframeinfo 7
+/* XEN_DOMCTL_getpageframeinfo */
#define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28
#define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28)
@@ -137,6 +134,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
#define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */
+#define XEN_DOMCTL_PFINFO_XALLOC (0xeU<<28) /* allocate-only page */
+#define XEN_DOMCTL_PFINFO_PAGEDTAB (0x8U<<28)
#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
struct xen_domctl_getpageframeinfo {
@@ -150,21 +149,29 @@ typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
-#define XEN_DOMCTL_getpageframeinfo2 8
+/* XEN_DOMCTL_getpageframeinfo2 */
struct xen_domctl_getpageframeinfo2 {
/* IN variables. */
uint64_aligned_t num;
/* IN/OUT variables. */
- XEN_GUEST_HANDLE_64(uint32_t) array;
+ XEN_GUEST_HANDLE_64(uint32) array;
};
typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
+/* XEN_DOMCTL_getpageframeinfo3 */
+struct xen_domctl_getpageframeinfo3 {
+ /* IN variables. */
+ uint64_aligned_t num;
+ /* IN/OUT variables. */
+ XEN_GUEST_HANDLE_64(xen_pfn_t) array;
+};
+
/*
* Control shadow pagetables operation
*/
-#define XEN_DOMCTL_shadow_op 10
+/* XEN_DOMCTL_shadow_op */
/* Disable shadow mode. */
#define XEN_DOMCTL_SHADOW_OP_OFF 0
@@ -229,7 +236,7 @@ struct xen_domctl_shadow_op {
uint32_t mb; /* Shadow memory allocation in MB */
/* OP_PEEK / OP_CLEAN */
- XEN_GUEST_HANDLE_64(uint8_t) dirty_bitmap;
+ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
struct xen_domctl_shadow_op_stats stats;
};
@@ -237,7 +244,7 @@ typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
-#define XEN_DOMCTL_max_mem 11
+/* XEN_DOMCTL_max_mem */
struct xen_domctl_max_mem {
/* IN variables. */
uint64_aligned_t max_memkb;
@@ -246,8 +253,8 @@ typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
-#define XEN_DOMCTL_setvcpucontext 12
-#define XEN_DOMCTL_getvcpucontext 13
+/* XEN_DOMCTL_setvcpucontext */
+/* XEN_DOMCTL_getvcpucontext */
struct xen_domctl_vcpucontext {
uint32_t vcpu; /* IN */
XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
@@ -256,7 +263,7 @@ typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
-#define XEN_DOMCTL_getvcpuinfo 14
+/* XEN_DOMCTL_getvcpuinfo */
struct xen_domctl_getvcpuinfo {
/* IN variables. */
uint32_t vcpu;
@@ -272,8 +279,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
/* Get/set which physical cpus a vcpu can execute on. */
-#define XEN_DOMCTL_setvcpuaffinity 9
-#define XEN_DOMCTL_getvcpuaffinity 25
+/* XEN_DOMCTL_setvcpuaffinity */
+/* XEN_DOMCTL_getvcpuaffinity */
struct xen_domctl_vcpuaffinity {
uint32_t vcpu; /* IN */
struct xenctl_cpumap cpumap; /* IN/OUT */
@@ -282,7 +289,7 @@ typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
-#define XEN_DOMCTL_max_vcpus 15
+/* XEN_DOMCTL_max_vcpus */
struct xen_domctl_max_vcpus {
uint32_t max; /* maximum number of vcpus */
};
@@ -290,10 +297,12 @@ typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
-#define XEN_DOMCTL_scheduler_op 16
+/* XEN_DOMCTL_scheduler_op */
/* Scheduler types. */
#define XEN_SCHEDULER_SEDF 4
#define XEN_SCHEDULER_CREDIT 5
+#define XEN_SCHEDULER_CREDIT2 6
+#define XEN_SCHEDULER_ARINC653 7
/* Set or get info? */
#define XEN_DOMCTL_SCHEDOP_putinfo 0
#define XEN_DOMCTL_SCHEDOP_getinfo 1
@@ -312,13 +321,16 @@ struct xen_domctl_scheduler_op {
uint16_t weight;
uint16_t cap;
} credit;
+ struct xen_domctl_sched_credit2 {
+ uint16_t weight;
+ } credit2;
} u;
};
typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
-#define XEN_DOMCTL_setdomainhandle 17
+/* XEN_DOMCTL_setdomainhandle */
struct xen_domctl_setdomainhandle {
xen_domain_handle_t handle;
};
@@ -326,7 +338,7 @@ typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
-#define XEN_DOMCTL_setdebugging 18
+/* XEN_DOMCTL_setdebugging */
struct xen_domctl_setdebugging {
uint8_t enable;
};
@@ -334,7 +346,7 @@ typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
-#define XEN_DOMCTL_irq_permission 19
+/* XEN_DOMCTL_irq_permission */
struct xen_domctl_irq_permission {
uint8_t pirq;
uint8_t allow_access; /* flag to specify enable/disable of IRQ access */
@@ -343,7 +355,7 @@ typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
-#define XEN_DOMCTL_iomem_permission 20
+/* XEN_DOMCTL_iomem_permission */
struct xen_domctl_iomem_permission {
uint64_aligned_t first_mfn;/* first page (physical page number) in range */
uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
@@ -353,7 +365,7 @@ typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
-#define XEN_DOMCTL_ioport_permission 21
+/* XEN_DOMCTL_ioport_permission */
struct xen_domctl_ioport_permission {
     uint32_t first_port; /* first port in range */
uint32_t nr_ports; /* size of port range */
@@ -363,7 +375,7 @@ typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
-#define XEN_DOMCTL_hypercall_init 22
+/* XEN_DOMCTL_hypercall_init */
struct xen_domctl_hypercall_init {
uint64_aligned_t gmfn; /* GMFN to be initialised */
};
@@ -371,7 +383,7 @@ typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
-#define XEN_DOMCTL_arch_setup 23
+/* XEN_DOMCTL_arch_setup */
#define _XEN_DOMAINSETUP_hvm_guest 0
#define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest)
#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */
@@ -391,35 +403,33 @@ typedef struct xen_domctl_arch_setup {
DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
-#define XEN_DOMCTL_settimeoffset 24
+/* XEN_DOMCTL_settimeoffset */
struct xen_domctl_settimeoffset {
int32_t time_offset_seconds; /* applied to domain wallclock time */
};
typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
-
-#define XEN_DOMCTL_gethvmcontext 33
-#define XEN_DOMCTL_sethvmcontext 34
+/* XEN_DOMCTL_gethvmcontext */
+/* XEN_DOMCTL_sethvmcontext */
typedef struct xen_domctl_hvmcontext {
uint32_t size; /* IN/OUT: size of buffer / bytes filled */
- XEN_GUEST_HANDLE_64(uint8_t) buffer; /* IN/OUT: data, or call
- * gethvmcontext with NULL
- * buffer to get size
- * req'd */
+ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call
+ * gethvmcontext with NULL
+ * buffer to get size req'd */
} xen_domctl_hvmcontext_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
-#define XEN_DOMCTL_set_address_size 35
-#define XEN_DOMCTL_get_address_size 36
+/* XEN_DOMCTL_set_address_size */
+/* XEN_DOMCTL_get_address_size */
typedef struct xen_domctl_address_size {
uint32_t size;
} xen_domctl_address_size_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
-#define XEN_DOMCTL_real_mode_area 26
+/* XEN_DOMCTL_real_mode_area */
struct xen_domctl_real_mode_area {
uint32_t log; /* log2 of Real Mode Area size */
};
@@ -427,10 +437,12 @@ typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t);
-#define XEN_DOMCTL_sendtrigger 28
+/* XEN_DOMCTL_sendtrigger */
#define XEN_DOMCTL_SENDTRIGGER_NMI 0
#define XEN_DOMCTL_SENDTRIGGER_RESET 1
#define XEN_DOMCTL_SENDTRIGGER_INIT 2
+#define XEN_DOMCTL_SENDTRIGGER_POWER 3
+#define XEN_DOMCTL_SENDTRIGGER_SLEEP 4
struct xen_domctl_sendtrigger {
uint32_t trigger; /* IN */
uint32_t vcpu; /* IN */
@@ -440,19 +452,19 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
/* Assign PCI device to HVM guest. Sets up IOMMU structures. */
-#define XEN_DOMCTL_assign_device 37
-#define XEN_DOMCTL_test_assign_device 45
-#define XEN_DOMCTL_deassign_device 47
+/* XEN_DOMCTL_assign_device */
+/* XEN_DOMCTL_test_assign_device */
+/* XEN_DOMCTL_deassign_device */
struct xen_domctl_assign_device {
- uint32_t machine_bdf; /* machine PCI ID of assigned device */
+ uint32_t machine_sbdf; /* machine PCI ID of assigned device */
};
typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
-/* Retrieve sibling devices infomation of machine_bdf */
-#define XEN_DOMCTL_get_device_group 50
+/* Retrieve sibling devices information of machine_sbdf */
+/* XEN_DOMCTL_get_device_group */
struct xen_domctl_get_device_group {
- uint32_t machine_bdf; /* IN */
+ uint32_t machine_sbdf; /* IN */
uint32_t max_sdevs; /* IN */
uint32_t num_sdevs; /* OUT */
XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */
@@ -461,12 +473,13 @@ typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t);
/* Pass-through interrupts: bind real irq -> hvm devfn. */
-#define XEN_DOMCTL_bind_pt_irq 38
-#define XEN_DOMCTL_unbind_pt_irq 48
+/* XEN_DOMCTL_bind_pt_irq */
+/* XEN_DOMCTL_unbind_pt_irq */
typedef enum pt_irq_type_e {
PT_IRQ_TYPE_PCI,
PT_IRQ_TYPE_ISA,
PT_IRQ_TYPE_MSI,
+ PT_IRQ_TYPE_MSI_TRANSLATE,
} pt_irq_type_t;
struct xen_domctl_bind_pt_irq {
uint32_t machine_irq;
@@ -485,6 +498,7 @@ struct xen_domctl_bind_pt_irq {
struct {
uint8_t gvec;
uint32_t gflags;
+ uint64_aligned_t gtable;
} msi;
} u;
};
@@ -493,7 +507,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t);
/* Bind machine I/O address range -> HVM address range. */
-#define XEN_DOMCTL_memory_mapping 39
+/* XEN_DOMCTL_memory_mapping */
#define DPCI_ADD_MAPPING 1
#define DPCI_REMOVE_MAPPING 0
struct xen_domctl_memory_mapping {
@@ -508,7 +522,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t);
/* Bind machine I/O port range -> HVM I/O port range. */
-#define XEN_DOMCTL_ioport_mapping 40
+/* XEN_DOMCTL_ioport_mapping */
struct xen_domctl_ioport_mapping {
uint32_t first_gport; /* first guest IO port*/
uint32_t first_mport; /* first machine IO port */
@@ -522,7 +536,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t);
/*
* Pin caching type of RAM space for x86 HVM domU.
*/
-#define XEN_DOMCTL_pin_mem_cacheattr 41
+/* XEN_DOMCTL_pin_mem_cacheattr */
/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
#define XEN_DOMCTL_MEM_CACHEATTR_UC 0
#define XEN_DOMCTL_MEM_CACHEATTR_WC 1
@@ -532,20 +546,20 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t);
#define XEN_DOMCTL_MEM_CACHEATTR_UCM 7
struct xen_domctl_pin_mem_cacheattr {
uint64_aligned_t start, end;
- unsigned int type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
+ uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
};
typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
-#define XEN_DOMCTL_set_ext_vcpucontext 42
-#define XEN_DOMCTL_get_ext_vcpucontext 43
+/* XEN_DOMCTL_set_ext_vcpucontext */
+/* XEN_DOMCTL_get_ext_vcpucontext */
struct xen_domctl_ext_vcpucontext {
/* IN: VCPU that this call applies to. */
uint32_t vcpu;
/*
* SET: Size of struct (IN)
- * GET: Size of struct (OUT)
+ * GET: Size of struct (OUT, up to 128 bytes)
*/
uint32_t size;
#if defined(__i386__) || defined(__x86_64__)
@@ -557,6 +571,7 @@ struct xen_domctl_ext_vcpucontext {
uint16_t sysenter_callback_cs;
uint8_t syscall32_disables_events;
uint8_t sysenter_disables_events;
+ uint64_aligned_t mcg_cap;
#endif
};
typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
@@ -565,7 +580,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t);
/*
* Set optimizaton features for a domain
*/
-#define XEN_DOMCTL_set_opt_feature 44
+/* XEN_DOMCTL_set_opt_feature */
struct xen_domctl_set_opt_feature {
#if defined(__ia64__)
struct xen_ia64_opt_feature optf;
@@ -580,7 +595,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t);
/*
* Set the target domain for a domain
*/
-#define XEN_DOMCTL_set_target 46
+/* XEN_DOMCTL_set_target */
struct xen_domctl_set_target {
domid_t target;
};
@@ -589,19 +604,19 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t);
#if defined(__i386__) || defined(__x86_64__)
# define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF
-# define XEN_DOMCTL_set_cpuid 49
+/* XEN_DOMCTL_set_cpuid */
struct xen_domctl_cpuid {
- unsigned int input[2];
- unsigned int eax;
- unsigned int ebx;
- unsigned int ecx;
- unsigned int edx;
+ uint32_t input[2];
+ uint32_t eax;
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
};
typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
#endif
-#define XEN_DOMCTL_subscribe 29
+/* XEN_DOMCTL_subscribe */
struct xen_domctl_subscribe {
uint32_t port; /* IN */
};
@@ -612,12 +627,297 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t);
* Define the maximum machine address size which should be allocated
* to a guest.
*/
-#define XEN_DOMCTL_set_machine_address_size 51
-#define XEN_DOMCTL_get_machine_address_size 52
+/* XEN_DOMCTL_set_machine_address_size */
+/* XEN_DOMCTL_get_machine_address_size */
+
+/*
+ * Do not inject spurious page faults into this domain.
+ */
+/* XEN_DOMCTL_suppress_spurious_page_faults */
+
+/* XEN_DOMCTL_debug_op */
+#define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF 0
+#define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON 1
+struct xen_domctl_debug_op {
+ uint32_t op; /* IN */
+ uint32_t vcpu; /* IN */
+};
+typedef struct xen_domctl_debug_op xen_domctl_debug_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_debug_op_t);
+
+/*
+ * Request a particular record from the HVM context
+ */
+/* XEN_DOMCTL_gethvmcontext_partial */
+typedef struct xen_domctl_hvmcontext_partial {
+ uint32_t type; /* IN: Type of record required */
+ uint32_t instance; /* IN: Instance of that type */
+ XEN_GUEST_HANDLE_64(uint8) buffer; /* OUT: buffer to write record into */
+} xen_domctl_hvmcontext_partial_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t);
+
+/* XEN_DOMCTL_disable_migrate */
+typedef struct xen_domctl_disable_migrate {
+ uint32_t disable; /* IN: 1: disable migration and restore */
+} xen_domctl_disable_migrate_t;
+
+
+/* XEN_DOMCTL_gettscinfo */
+/* XEN_DOMCTL_settscinfo */
+struct xen_guest_tsc_info {
+ uint32_t tsc_mode;
+ uint32_t gtsc_khz;
+ uint32_t incarnation;
+ uint32_t pad;
+ uint64_aligned_t elapsed_nsec;
+};
+typedef struct xen_guest_tsc_info xen_guest_tsc_info_t;
+DEFINE_XEN_GUEST_HANDLE(xen_guest_tsc_info_t);
+typedef struct xen_domctl_tsc_info {
+ XEN_GUEST_HANDLE_64(xen_guest_tsc_info_t) out_info; /* OUT */
+ xen_guest_tsc_info_t info; /* IN */
+} xen_domctl_tsc_info_t;
+
+/* XEN_DOMCTL_gdbsx_guestmemio guest mem io */
+struct xen_domctl_gdbsx_memio {
+ /* IN */
+ uint64_aligned_t pgd3val;/* optional: init_mm.pgd[3] value */
+ uint64_aligned_t gva; /* guest virtual address */
+ uint64_aligned_t uva; /* user buffer virtual address */
+ uint32_t len; /* number of bytes to read/write */
+ uint8_t gwr; /* 0 = read from guest. 1 = write to guest */
+ /* OUT */
+ uint32_t remain; /* bytes remaining to be copied */
+};
+
+/* XEN_DOMCTL_gdbsx_pausevcpu */
+/* XEN_DOMCTL_gdbsx_unpausevcpu */
+struct xen_domctl_gdbsx_pauseunp_vcpu { /* pause/unpause a vcpu */
+ uint32_t vcpu; /* which vcpu */
+};
+
+/* XEN_DOMCTL_gdbsx_domstatus */
+struct xen_domctl_gdbsx_domstatus {
+ /* OUT */
+ uint8_t paused; /* is the domain paused */
+ uint32_t vcpu_id; /* any vcpu in an event? */
+ uint32_t vcpu_ev; /* if yes, what event? */
+};
+
+/*
+ * Memory event operations
+ */
+
+/* XEN_DOMCTL_mem_event_op */
+
+/*
+ * Domain memory paging
+ * Page memory in and out.
+ * Domctl interface to set up and tear down the
+ * pager<->hypervisor interface. Use XENMEM_paging_op*
+ * to perform per-page operations.
+ *
+ * The XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE domctl returns several
+ * non-standard error codes to indicate why paging could not be enabled:
+ * ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
+ * EMLINK - guest has iommu passthrough enabled
+ * EXDEV - guest has PoD enabled
+ * EBUSY - guest has or had paging enabled, ring buffer still active
+ */
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1
+
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE 0
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE 1
+
+/*
+ * Access permissions.
+ *
+ * As with paging, use the domctl for teardown/setup of the
+ * helper<->hypervisor interface.
+ *
+ * There are HVM hypercalls to set the per-page access permissions of every
+ * page in a domain. When one of these permissions--independent, read,
+ * write, and execute--is violated, the VCPU is paused and a memory event
+ * is sent with what happened (see public/mem_event.h).
+ *
+ * The memory event handler can then resume the VCPU and redo the access
+ * with a XENMEM_access_op_resume hypercall.
+ *
+ * The XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE domctl returns several
+ * non-standard error codes to indicate why access could not be enabled:
+ * ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
+ * EBUSY - guest has or had access enabled, ring buffer still active
+ */
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2
+
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1
+/*
+ * Sharing ENOMEM helper.
+ *
+ * As with paging, use the domctl for teardown/setup of the
+ * helper<->hypervisor interface.
+ *
+ * If set up, this ring is used to communicate failed allocations
+ * in the unshare path. XENMEM_sharing_op_resume is used to wake up
+ * vcpus that could not unshare.
+ *
+ * Note that sharing can be turned on (as per the domctl below)
+ * *without* this ring being set up.
+ */
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING 3
+
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE 0
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE 1
+
+/* Use for teardown/setup of helper<->hypervisor interface for paging,
+ * access and sharing.*/
+struct xen_domctl_mem_event_op {
+ uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
+ uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */
+
+ uint32_t port; /* OUT: event channel for ring */
+};
+typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
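
As a concrete illustration of how the op/mode pair above is used, the following sketch enables the access ring for a domain and reads back the event-channel port, using the XEN_DOMCTL_mem_event_op command number defined further down in this header. do_domctl() is a hypothetical helper standing in for however the caller issues the domctl hypercall (e.g. through privcmd); illustrative only, not part of this patch.

#include <string.h>
#include <xen/interface/domctl.h>

extern int do_domctl(struct xen_domctl *domctl);   /* hypothetical */

static int mem_access_enable(domid_t domid, uint32_t *ring_port)
{
    struct xen_domctl domctl;
    int rc;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd               = XEN_DOMCTL_mem_event_op;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain            = domid;
    domctl.u.mem_event_op.op   = XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE;
    domctl.u.mem_event_op.mode = XEN_DOMCTL_MEM_EVENT_OP_ACCESS;

    rc = do_domctl(&domctl);
    if (rc == 0)
        *ring_port = domctl.u.mem_event_op.port;   /* OUT: ring event channel */
    return rc;
}
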
+
+/*
+ * Memory sharing operations
+ */
+/* XEN_DOMCTL_mem_sharing_op.
+ * The CONTROL sub-domctl is used for bringup/teardown. */
+#define XEN_DOMCTL_MEM_SHARING_CONTROL 0
+
+struct xen_domctl_mem_sharing_op {
+ uint8_t op; /* XEN_DOMCTL_MEM_SHARING_* */
+
+ union {
+ uint8_t enable; /* CONTROL */
+ } u;
+};
+typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
+
+struct xen_domctl_audit_p2m {
+ /* OUT error counts */
+ uint64_t orphans;
+ uint64_t m2p_bad;
+ uint64_t p2m_bad;
+};
+typedef struct xen_domctl_audit_p2m xen_domctl_audit_p2m_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_audit_p2m_t);
+
+struct xen_domctl_set_virq_handler {
+ uint32_t virq; /* IN */
+};
+typedef struct xen_domctl_set_virq_handler xen_domctl_set_virq_handler_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_virq_handler_t);
+
+#if defined(__i386__) || defined(__x86_64__)
+/* XEN_DOMCTL_setvcpuextstate */
+/* XEN_DOMCTL_getvcpuextstate */
+struct xen_domctl_vcpuextstate {
+ /* IN: VCPU that this call applies to. */
+ uint32_t vcpu;
+ /*
+ * SET: xfeature support mask of struct (IN)
+ * GET: xfeature support mask of struct (IN/OUT)
+ * The xfeature mask identifies the saving format, so that a
+ * compatible CPU can check the format and decide whether it
+ * can restore.
+ */
+ uint64_aligned_t xfeature_mask;
+ /*
+ * SET: Size of struct (IN)
+ * GET: Size of struct (IN/OUT)
+ */
+ uint64_aligned_t size;
+ XEN_GUEST_HANDLE_64(uint64) buffer;
+};
+typedef struct xen_domctl_vcpuextstate xen_domctl_vcpuextstate_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuextstate_t);
+#endif
+
+/* XEN_DOMCTL_set_access_required: sets whether a memory event listener
+ * must be present to handle page access events: if false, the page
+ * access will revert to full permissions if no one is listening.
+ */
+struct xen_domctl_set_access_required {
+ uint8_t access_required;
+};
+typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
struct xen_domctl {
uint32_t cmd;
+#define XEN_DOMCTL_createdomain 1
+#define XEN_DOMCTL_destroydomain 2
+#define XEN_DOMCTL_pausedomain 3
+#define XEN_DOMCTL_unpausedomain 4
+#define XEN_DOMCTL_getdomaininfo 5
+#define XEN_DOMCTL_getmemlist 6
+#define XEN_DOMCTL_getpageframeinfo 7
+#define XEN_DOMCTL_getpageframeinfo2 8
+#define XEN_DOMCTL_setvcpuaffinity 9
+#define XEN_DOMCTL_shadow_op 10
+#define XEN_DOMCTL_max_mem 11
+#define XEN_DOMCTL_setvcpucontext 12
+#define XEN_DOMCTL_getvcpucontext 13
+#define XEN_DOMCTL_getvcpuinfo 14
+#define XEN_DOMCTL_max_vcpus 15
+#define XEN_DOMCTL_scheduler_op 16
+#define XEN_DOMCTL_setdomainhandle 17
+#define XEN_DOMCTL_setdebugging 18
+#define XEN_DOMCTL_irq_permission 19
+#define XEN_DOMCTL_iomem_permission 20
+#define XEN_DOMCTL_ioport_permission 21
+#define XEN_DOMCTL_hypercall_init 22
+#define XEN_DOMCTL_arch_setup 23
+#define XEN_DOMCTL_settimeoffset 24
+#define XEN_DOMCTL_getvcpuaffinity 25
+#define XEN_DOMCTL_real_mode_area 26
+#define XEN_DOMCTL_resumedomain 27
+#define XEN_DOMCTL_sendtrigger 28
+#define XEN_DOMCTL_subscribe 29
+#define XEN_DOMCTL_gethvmcontext 33
+#define XEN_DOMCTL_sethvmcontext 34
+#define XEN_DOMCTL_set_address_size 35
+#define XEN_DOMCTL_get_address_size 36
+#define XEN_DOMCTL_assign_device 37
+#define XEN_DOMCTL_bind_pt_irq 38
+#define XEN_DOMCTL_memory_mapping 39
+#define XEN_DOMCTL_ioport_mapping 40
+#define XEN_DOMCTL_pin_mem_cacheattr 41
+#define XEN_DOMCTL_set_ext_vcpucontext 42
+#define XEN_DOMCTL_get_ext_vcpucontext 43
+#define XEN_DOMCTL_set_opt_feature 44
+#define XEN_DOMCTL_test_assign_device 45
+#define XEN_DOMCTL_set_target 46
+#define XEN_DOMCTL_deassign_device 47
+#define XEN_DOMCTL_unbind_pt_irq 48
+#define XEN_DOMCTL_set_cpuid 49
+#define XEN_DOMCTL_get_device_group 50
+#define XEN_DOMCTL_set_machine_address_size 51
+#define XEN_DOMCTL_get_machine_address_size 52
+#define XEN_DOMCTL_suppress_spurious_page_faults 53
+#define XEN_DOMCTL_debug_op 54
+#define XEN_DOMCTL_gethvmcontext_partial 55
+#define XEN_DOMCTL_mem_event_op 56
+#define XEN_DOMCTL_mem_sharing_op 57
+#define XEN_DOMCTL_disable_migrate 58
+#define XEN_DOMCTL_gettscinfo 59
+#define XEN_DOMCTL_settscinfo 60
+#define XEN_DOMCTL_getpageframeinfo3 61
+#define XEN_DOMCTL_setvcpuextstate 62
+#define XEN_DOMCTL_getvcpuextstate 63
+#define XEN_DOMCTL_set_access_required 64
+#define XEN_DOMCTL_audit_p2m 65
+#define XEN_DOMCTL_set_virq_handler 66
+#define XEN_DOMCTL_gdbsx_guestmemio 1000
+#define XEN_DOMCTL_gdbsx_pausevcpu 1001
+#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
+#define XEN_DOMCTL_gdbsx_domstatus 1003
uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
domid_t domain;
union {
@@ -626,6 +926,7 @@ struct xen_domctl {
struct xen_domctl_getmemlist getmemlist;
struct xen_domctl_getpageframeinfo getpageframeinfo;
struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
+ struct xen_domctl_getpageframeinfo3 getpageframeinfo3;
struct xen_domctl_vcpuaffinity vcpuaffinity;
struct xen_domctl_shadow_op shadow_op;
struct xen_domctl_max_mem max_mem;
@@ -641,8 +942,11 @@ struct xen_domctl {
struct xen_domctl_hypercall_init hypercall_init;
struct xen_domctl_arch_setup arch_setup;
struct xen_domctl_settimeoffset settimeoffset;
+ struct xen_domctl_disable_migrate disable_migrate;
+ struct xen_domctl_tsc_info tsc_info;
struct xen_domctl_real_mode_area real_mode_area;
struct xen_domctl_hvmcontext hvmcontext;
+ struct xen_domctl_hvmcontext_partial hvmcontext_partial;
struct xen_domctl_address_size address_size;
struct xen_domctl_sendtrigger sendtrigger;
struct xen_domctl_get_device_group get_device_group;
@@ -655,9 +959,19 @@ struct xen_domctl {
struct xen_domctl_set_opt_feature set_opt_feature;
struct xen_domctl_set_target set_target;
struct xen_domctl_subscribe subscribe;
+ struct xen_domctl_debug_op debug_op;
+ struct xen_domctl_mem_event_op mem_event_op;
+ struct xen_domctl_mem_sharing_op mem_sharing_op;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
+ struct xen_domctl_vcpuextstate vcpuextstate;
#endif
+ struct xen_domctl_set_access_required access_required;
+ struct xen_domctl_audit_p2m audit_p2m;
+ struct xen_domctl_set_virq_handler set_virq_handler;
+ struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
+ struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
+ struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
uint8_t pad[128];
} u;
};
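
Since the command numbers now live next to the cmd field inside struct xen_domctl, a caller fills one structure and selects the union member matching the command. The sketch below creates an HVM, HAP-enabled domain; do_domctl() is again a hypothetical stand-in for the hypercall path, and the block is illustrative only, not part of this patch.

#include <string.h>
#include <xen/interface/domctl.h>

extern int do_domctl(struct xen_domctl *domctl);   /* hypothetical */

static int create_hvm_domain(uint32_t ssidref, xen_domain_handle_t handle,
                             domid_t *new_domid)
{
    struct xen_domctl domctl;
    int rc;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd               = XEN_DOMCTL_createdomain;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain            = 0;            /* 0 => auto-allocate an id */
    domctl.u.createdomain.ssidref = ssidref;
    memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
    domctl.u.createdomain.flags =
        XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap;

    rc = do_domctl(&domctl);
    if (rc == 0)
        *new_domid = domctl.domain;          /* IN/OUT: echoed back */
    return rc;
}
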
diff --git a/sys/xen/interface/elfnote.h b/sys/xen/interface/elfnote.h
index 77be41b..42d76da 100644
--- a/sys/xen/interface/elfnote.h
+++ b/sys/xen/interface/elfnote.h
@@ -162,9 +162,39 @@
#define XEN_ELFNOTE_SUSPEND_CANCEL 14
/*
+ * The (non-default) location at which the initial phys-to-machine map
+ * should be placed by the hypervisor (Dom0) or the tools (DomU).
+ * The kernel must be prepared for this mapping to be established using
+ * large pages, despite such otherwise not being available to guests.
+ * The kernel must also be able to handle the page table pages used for
+ * this mapping not being accessible through the initial mapping.
+ * (Only x86-64 supports this at present.)
+ */
+#define XEN_ELFNOTE_INIT_P2M 15
+
+/*
+ * Whether or not the guest can deal with being passed an initrd not
+ * mapped through its initial page tables.
+ */
+#define XEN_ELFNOTE_MOD_START_PFN 16
+
+/*
+ * The features supported by this kernel (numeric).
+ *
+ * Other than XEN_ELFNOTE_FEATURES on pre-4.2 Xen, this note allows a
+ * kernel to specify support for features that older hypervisors don't
+ * know about. The set of features 4.2 and newer hypervisors will
+ * consider supported by the kernel is the combination of the sets
+ * specified through this and the string note.
+ *
+ * LEGACY: FEATURES
+ */
+#define XEN_ELFNOTE_SUPPORTED_FEATURES 17
+
+/*
* The number of the highest elfnote defined.
*/
-#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL
+#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUPPORTED_FEATURES
/*
* System information exported through crash notes.
diff --git a/sys/xen/interface/event_channel.h b/sys/xen/interface/event_channel.h
index d35cce5..07ff321 100644
--- a/sys/xen/interface/event_channel.h
+++ b/sys/xen/interface/event_channel.h
@@ -1,8 +1,8 @@
/******************************************************************************
* event_channel.h
- *
+ *
* Event channels between domains.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@@ -27,13 +27,52 @@
#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
#define __XEN_PUBLIC_EVENT_CHANNEL_H__
+#include "xen.h"
+
/*
- * Prototype for this hypercall is:
- * int event_channel_op(int cmd, void *args)
- * @cmd == EVTCHNOP_??? (event-channel operation).
- * @args == Operation-specific extra arguments (NULL if none).
+ * `incontents 150 evtchn Event Channels
+ *
+ * Event channels are the basic primitive provided by Xen for event
+ * notifications. An event is the Xen equivalent of a hardware
+ * interrupt. It essentially stores one bit of information; the event
+ * of interest is signalled by transitioning this bit from 0 to 1.
+ *
+ * Notifications are received by a guest via an upcall from Xen,
+ * indicating when an event arrives (setting the bit). Further
+ * notifications are masked until the bit is cleared again (therefore,
+ * guests must check the value of the bit after re-enabling event
+ * delivery to ensure no missed notifications).
+ *
+ * Event notifications can be masked by setting a flag; this is
+ * equivalent to disabling interrupts and can be used to ensure
+ * atomicity of certain operations in the guest kernel.
+ *
+ * Event channels are represented by the evtchn_* fields in
+ * struct shared_info and struct vcpu_info.
*/
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_event_channel_op(enum event_channel_op cmd, void *args)
+ * `
+ * @cmd == EVTCHNOP_* (event-channel operation).
+ * @args == struct evtchn_* Operation-specific extra arguments (NULL if none).
+ */
+
+/* ` enum event_channel_op { // EVTCHNOP_* => struct evtchn_* */
+#define EVTCHNOP_bind_interdomain 0
+#define EVTCHNOP_bind_virq 1
+#define EVTCHNOP_bind_pirq 2
+#define EVTCHNOP_close 3
+#define EVTCHNOP_send 4
+#define EVTCHNOP_status 5
+#define EVTCHNOP_alloc_unbound 6
+#define EVTCHNOP_bind_ipi 7
+#define EVTCHNOP_bind_vcpu 8
+#define EVTCHNOP_unmask 9
+#define EVTCHNOP_reset 10
+/* ` } */
+
typedef uint32_t evtchn_port_t;
DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
@@ -45,7 +84,6 @@ DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
* 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
* 2. <rdom> may be DOMID_SELF, allowing loopback connections.
*/
-#define EVTCHNOP_alloc_unbound 6
struct evtchn_alloc_unbound {
/* IN parameters */
domid_t dom, remote_dom;
@@ -61,9 +99,8 @@ typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
* domain. A fresh port is allocated in the calling domain and returned as
* <local_port>.
* NOTES:
- * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
+ * 1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
*/
-#define EVTCHNOP_bind_interdomain 0
struct evtchn_bind_interdomain {
/* IN parameters. */
domid_t remote_dom;
@@ -85,10 +122,9 @@ typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
* The allocated event channel is bound to the specified vcpu and the
* binding cannot be changed.
*/
-#define EVTCHNOP_bind_virq 1
struct evtchn_bind_virq {
/* IN parameters. */
- uint32_t virq;
+ uint32_t virq; /* enum virq */
uint32_t vcpu;
/* OUT parameters. */
evtchn_port_t port;
@@ -96,12 +132,11 @@ struct evtchn_bind_virq {
typedef struct evtchn_bind_virq evtchn_bind_virq_t;
/*
- * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
+ * EVTCHNOP_bind_pirq: Bind a local event channel to a real IRQ (PIRQ <irq>).
* NOTES:
* 1. A physical IRQ may be bound to at most one event channel per domain.
* 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
*/
-#define EVTCHNOP_bind_pirq 2
struct evtchn_bind_pirq {
/* IN parameters. */
uint32_t pirq;
@@ -118,7 +153,6 @@ typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
* 1. The allocated event channel is bound to the specified vcpu. The binding
* may not be changed.
*/
-#define EVTCHNOP_bind_ipi 7
struct evtchn_bind_ipi {
uint32_t vcpu;
/* OUT parameters. */
@@ -131,7 +165,6 @@ typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
* interdomain then the remote end is placed in the unbound state
* (EVTCHNSTAT_unbound), awaiting a new connection.
*/
-#define EVTCHNOP_close 3
struct evtchn_close {
/* IN parameters. */
evtchn_port_t port;
@@ -142,7 +175,6 @@ typedef struct evtchn_close evtchn_close_t;
* EVTCHNOP_send: Send an event to the remote end of the channel whose local
* endpoint is <port>.
*/
-#define EVTCHNOP_send 4
struct evtchn_send {
/* IN parameters. */
evtchn_port_t port;
@@ -157,7 +189,6 @@ typedef struct evtchn_send evtchn_send_t;
* 2. Only a sufficiently-privileged domain may obtain the status of an event
* channel for which <dom> is not DOMID_SELF.
*/
-#define EVTCHNOP_status 5
struct evtchn_status {
/* IN parameters */
domid_t dom;
@@ -174,13 +205,13 @@ struct evtchn_status {
union {
struct {
domid_t dom;
- } unbound; /* EVTCHNSTAT_unbound */
+ } unbound; /* EVTCHNSTAT_unbound */
struct {
domid_t dom;
evtchn_port_t port;
- } interdomain; /* EVTCHNSTAT_interdomain */
- uint32_t pirq; /* EVTCHNSTAT_pirq */
- uint32_t virq; /* EVTCHNSTAT_virq */
+ } interdomain; /* EVTCHNSTAT_interdomain */
+ uint32_t pirq; /* EVTCHNSTAT_pirq */
+ uint32_t virq; /* EVTCHNSTAT_virq */
} u;
};
typedef struct evtchn_status evtchn_status_t;
@@ -197,7 +228,6 @@ typedef struct evtchn_status evtchn_status_t;
* the channel is allocated (a port that is freed and subsequently reused
* has its binding reset to vcpu0).
*/
-#define EVTCHNOP_bind_vcpu 8
struct evtchn_bind_vcpu {
/* IN parameters. */
evtchn_port_t port;
@@ -209,7 +239,6 @@ typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
* EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
* a notification to the appropriate VCPU if an event is pending.
*/
-#define EVTCHNOP_unmask 9
struct evtchn_unmask {
/* IN parameters. */
evtchn_port_t port;
@@ -222,7 +251,6 @@ typedef struct evtchn_unmask evtchn_unmask_t;
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
*/
-#define EVTCHNOP_reset 10
struct evtchn_reset {
/* IN parameters. */
domid_t dom;
@@ -230,11 +258,13 @@ struct evtchn_reset {
typedef struct evtchn_reset evtchn_reset_t;
/*
- * Argument to event_channel_op_compat() hypercall. Superceded by new
- * event_channel_op() hypercall since 0x00030202.
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_event_channel_op_compat(struct evtchn_op *op)
+ * `
+ * Superseded by the new event_channel_op() hypercall since 0x00030202.
*/
struct evtchn_op {
- uint32_t cmd; /* EVTCHNOP_* */
+ uint32_t cmd; /* enum event_channel_op */
union {
struct evtchn_alloc_unbound alloc_unbound;
struct evtchn_bind_interdomain bind_interdomain;
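
The newly grouped EVTCHNOP_* values and the documented HYPERVISOR_event_channel_op(cmd, args) convention can be exercised as in the sketch below, which offers an unbound port to a peer domain and then notifies it. The guest-side HYPERVISOR_event_channel_op() wrapper is assumed to exist with the documented prototype; illustrative only, not part of this patch.

#include <xen/interface/event_channel.h>

extern int HYPERVISOR_event_channel_op(int cmd, void *arg);

static int evtchn_offer_and_kick(domid_t remote_dom, evtchn_port_t *local_port)
{
    struct evtchn_alloc_unbound alloc = {
        .dom        = DOMID_SELF,
        .remote_dom = remote_dom,
    };
    struct evtchn_send send;
    int rc;

    rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
    if (rc != 0)
        return rc;

    *local_port = alloc.port;          /* OUT parameter */

    send.port = alloc.port;
    return HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}
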
diff --git a/sys/xen/interface/features.h b/sys/xen/interface/features.h
index 05fc5dc..b4533cc 100644
--- a/sys/xen/interface/features.h
+++ b/sys/xen/interface/features.h
@@ -59,6 +59,27 @@
/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
#define XENFEAT_mmu_pt_update_preserve_ad 5
+/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
+#define XENFEAT_highmem_assist 6
+
+/*
+ * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
+ * available pte bits.
+ */
+#define XENFEAT_gnttab_map_avail_bits 7
+
+/* x86: Does this Xen host support the HVM callback vector type? */
+#define XENFEAT_hvm_callback_vector 8
+
+/* x86: pvclock algorithm is safe to use on HVM */
+#define XENFEAT_hvm_safe_pvclock 9
+
+/* x86: pirq can be used by HVM guests */
+#define XENFEAT_hvm_pirqs 10
+
+/* operation as Dom0 is supported */
+#define XENFEAT_dom0 11
+
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
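
A guest would normally probe these XENFEAT_* bits at boot. The sketch below does so through the XENVER_get_features call and struct xen_feature_info from version.h; the HYPERVISOR_xen_version() wrapper is assumed, and the block is illustrative only, not part of this patch.

#include <xen/interface/version.h>
#include <xen/interface/features.h>

extern int HYPERVISOR_xen_version(int cmd, void *arg);

static int xen_feature_present(unsigned int feature)
{
    struct xen_feature_info fi = {
        .submap_idx = feature / 32,    /* which 32-bit submap to fetch */
    };

    if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
        return 0;

    return (fi.submap >> (feature % 32)) & 1;
}

/* e.g. xen_feature_present(XENFEAT_hvm_callback_vector) */
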
diff --git a/sys/xen/interface/grant_table.h b/sys/xen/interface/grant_table.h
index e76ca67..28d9476 100644
--- a/sys/xen/interface/grant_table.h
+++ b/sys/xen/interface/grant_table.h
@@ -1,9 +1,9 @@
/******************************************************************************
* grant_table.h
- *
+ *
* Interface for granting foreign access to page frames, and receiving
* page-ownership transfers.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
@@ -28,6 +28,30 @@
#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
#define __XEN_PUBLIC_GRANT_TABLE_H__
+#include "xen.h"
+
+/*
+ * `incontents 150 gnttab Grant Tables
+ *
+ * Xen's grant tables provide a generic mechanism for memory sharing
+ * between domains. This shared memory interface underpins the split
+ * device drivers for block and network IO.
+ *
+ * Each domain has its own grant table. This is a data structure that
+ * is shared with Xen; it allows the domain to tell Xen what kind of
+ * permissions other domains have on its pages. Entries in the grant
+ * table are identified by grant references. A grant reference is an
+ * integer, which indexes into the grant table. It acts as a
+ * capability which the grantee can use to perform operations on the
+ * granter's memory.
+ *
+ * This capability-based system allows shared-memory communications
+ * between unprivileged domains. A grant reference also encapsulates
+ * the details of a shared page, removing the need for a domain to
+ * know the real machine address of a page it is sharing. This makes
+ * it possible to share memory correctly with domains running in
+ * fully virtualised memory.
+ */
/***********************************
* GRANT TABLE REPRESENTATION
@@ -35,11 +59,12 @@
/* Some rough guidelines on accessing and updating grant-table entries
* in a concurrency-safe manner. For more information, Linux contains a
- * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
- *
+ * reference implementation for guest OSes (drivers/xen/grant-table.c, see
+ * http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob;f=drivers/xen/grant-table.c;hb=HEAD
+ *
* NB. WMB is a no-op on current-generation x86 processors. However, a
* compiler barrier will still be required.
- *
+ *
* Introducing a valid entry into the grant table:
* 1. Write ent->domid.
* 2. Write ent->frame:
@@ -48,7 +73,7 @@
* frame, or zero if none.
* 3. Write memory barrier (WMB).
* 4. Write ent->flags, inc. valid type.
- *
+ *
* Invalidating an unused GTF_permit_access entry:
* 1. flags = ent->flags.
* 2. Observe that !(flags & (GTF_reading|GTF_writing)).
@@ -60,7 +85,7 @@
* This cannot be done directly. Request assistance from the domain controller
* which can set a timeout on the use of a grant entry and take necessary
* action. (NB. This is not yet implemented!).
- *
+ *
* Invalidating an unused GTF_accept_transfer entry:
* 1. flags = ent->flags.
* 2. Observe that !(flags & GTF_transfer_committed). [*]
@@ -78,18 +103,32 @@
*
* Changing a GTF_permit_access from writable to read-only:
* Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
- *
+ *
* Changing a GTF_permit_access from read-only to writable:
* Use SMP-safe bit-setting instruction.
*/
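
The "Introducing a valid entry" steps above translate directly into code for a version 1 table. The sketch assumes the guest has mapped its grant table as an array of grant_entry_v1_t (defined below in this header) and, per the note above, uses a compiler barrier as the write barrier on x86; illustrative only, not part of this patch.

#include <xen/interface/grant_table.h>

/* Per the note above, a compiler barrier suffices as WMB on current x86. */
#define wmb()   __asm__ __volatile__("" ::: "memory")

static void gnttab_permit_access(grant_entry_v1_t *gnttab, grant_ref_t ref,
                                 domid_t domid, uint32_t frame, int readonly)
{
    gnttab[ref].domid = domid;                  /* 1. write ent->domid */
    gnttab[ref].frame = frame;                  /* 2. write ent->frame */
    wmb();                                      /* 3. write memory barrier */
    gnttab[ref].flags = GTF_permit_access |     /* 4. flags, inc. valid type */
                        (readonly ? GTF_readonly : 0);
}
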
/*
+ * Reference to a grant entry in a specified domain's grant table.
+ */
+typedef uint32_t grant_ref_t;
+
+/*
* A grant table comprises a packed array of grant entries in one or more
* page frames shared between Xen and a guest.
* [XEN]: This field is written by Xen and read by the sharing guest.
* [GST]: This field is written by the guest and read by Xen.
*/
-struct grant_entry {
+
+/*
+ * Version 1 of the grant table entry structure is maintained purely
+ * for backwards compatibility. New guests should use version 2.
+ */
+#if __XEN_INTERFACE_VERSION__ < 0x0003020a
+#define grant_entry_v1 grant_entry
+#define grant_entry_v1_t grant_entry_t
+#endif
+struct grant_entry_v1 {
/* GTF_xxx: various type and flag information. [XEN,GST] */
uint16_t flags;
/* The domain being granted foreign privileges. [GST] */
@@ -100,7 +139,14 @@ struct grant_entry {
*/
uint32_t frame;
};
-typedef struct grant_entry grant_entry_t;
+typedef struct grant_entry_v1 grant_entry_v1_t;
+
+/* The first few grant table entries will be preserved across grant table
+ * version changes and may be pre-populated at domain creation by tools.
+ */
+#define GNTTAB_NR_RESERVED_ENTRIES 8
+#define GNTTAB_RESERVED_CONSOLE 0
+#define GNTTAB_RESERVED_XENSTORE 1
/*
* Type of grant entry.
@@ -108,10 +154,13 @@ typedef struct grant_entry grant_entry_t;
* GTF_permit_access: Allow @domid to map/access @frame.
* GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
* to this guest. Xen writes the page number to @frame.
+ * GTF_transitive: Allow @domid to transitively access a subrange of
+ * @trans_grant in @trans_domid. No mappings are allowed.
*/
#define GTF_invalid (0U<<0)
#define GTF_permit_access (1U<<0)
#define GTF_accept_transfer (2U<<0)
+#define GTF_transitive (3U<<0)
#define GTF_type_mask (3U<<0)
/*
@@ -120,6 +169,9 @@ typedef struct grant_entry grant_entry_t;
* GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
* GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
* GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST]
+ * GTF_sub_page: Grant access to only a subrange of the page. @domid
+ * will only be allowed to copy from the grant, and not
+ * map it. [GST]
*/
#define _GTF_readonly (2)
#define GTF_readonly (1U<<_GTF_readonly)
@@ -133,6 +185,8 @@ typedef struct grant_entry grant_entry_t;
#define GTF_PCD (1U<<_GTF_PCD)
#define _GTF_PAT (7)
#define GTF_PAT (1U<<_GTF_PAT)
+#define _GTF_sub_page (8)
+#define GTF_sub_page (1U<<_GTF_sub_page)
/*
* Subflags for GTF_accept_transfer:
@@ -149,17 +203,114 @@ typedef struct grant_entry grant_entry_t;
#define _GTF_transfer_completed (3)
#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
+/*
+ * Version 2 grant table entries. These fulfil the same role as
+ * version 1 entries, but can represent more complicated operations.
+ * Any given domain will have either a version 1 or a version 2 table,
+ * and every entry in the table will be the same version.
+ *
+ * The interface by which domains use grant references does not depend
+ * on the grant table version in use by the other domain.
+ */
+#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
+/*
+ * Version 1 and version 2 grant entries share a common prefix. The
+ * fields of the prefix are documented as part of struct
+ * grant_entry_v1.
+ */
+struct grant_entry_header {
+ uint16_t flags;
+ domid_t domid;
+};
+typedef struct grant_entry_header grant_entry_header_t;
+
+/*
+ * Version 2 of the grant entry structure.
+ */
+union grant_entry_v2 {
+ grant_entry_header_t hdr;
+
+ /*
+ * This member is used for V1-style full page grants, where either:
+ *
+ * -- hdr.type is GTF_accept_transfer, or
+ * -- hdr.type is GTF_permit_access and GTF_sub_page is not set.
+ *
+ * In that case, the frame field has the same semantics as the
+ * field of the same name in the V1 entry structure.
+ */
+ struct {
+ grant_entry_header_t hdr;
+ uint32_t pad0;
+ uint64_t frame;
+ } full_page;
+
+ /*
+ * If the grant type is GTF_permit_access and GTF_sub_page is set,
+ * @domid is allowed to access bytes [@page_off,@page_off+@length)
+ * in frame @frame.
+ */
+ struct {
+ grant_entry_header_t hdr;
+ uint16_t page_off;
+ uint16_t length;
+ uint64_t frame;
+ } sub_page;
+
+ /*
+ * If the grant is GTF_transitive, @domid is allowed to use the
+ * grant @gref in domain @trans_domid, as if it was the local
+ * domain. Obviously, the transitive access must be compatible
+ * with the original grant.
+ *
+ * The current version of Xen does not allow transitive grants
+ * to be mapped.
+ */
+ struct {
+ grant_entry_header_t hdr;
+ domid_t trans_domid;
+ uint16_t pad0;
+ grant_ref_t gref;
+ } transitive;
+
+ uint32_t __spacer[4]; /* Pad to a power of two */
+};
+typedef union grant_entry_v2 grant_entry_v2_t;
+
+typedef uint16_t grant_status_t;
+
+#endif /* __XEN_INTERFACE_VERSION__ */
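
The same ordering discipline applies to version 2 entries: fill in the body of the chosen union member first and publish hdr.flags (which carries the type) last. The sketch below describes a sub-page grant; wmb() is the same compiler-barrier stand-in as in the earlier sketch, and the block is illustrative only, not part of this patch.

#include <xen/interface/grant_table.h>

#define wmb()   __asm__ __volatile__("" ::: "memory")   /* see x86 note above */

static void gnttab_grant_sub_page(union grant_entry_v2 *ent, domid_t domid,
                                  uint64_t frame, uint16_t off, uint16_t len)
{
    ent->sub_page.page_off  = off;
    ent->sub_page.length    = len;
    ent->sub_page.frame     = frame;
    ent->sub_page.hdr.domid = domid;
    wmb();                                    /* make the body visible first */
    ent->sub_page.hdr.flags = GTF_permit_access | GTF_sub_page;
}
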
/***********************************
* GRANT TABLE QUERIES AND USES
*/
-/*
- * Reference to a grant entry in a specified domain's grant table.
+/* ` enum neg_errnoval
+ * ` HYPERVISOR_grant_table_op(enum grant_table_op cmd,
+ * ` void *args,
+ * ` unsigned int count)
+ * `
+ *
+ * @args points to an array of a per-command data structure. The array
+ * has @count members.
*/
-typedef uint32_t grant_ref_t;
-#define GRANT_REF_INVALID 0xffffffff
+/* ` enum grant_table_op { // GNTTABOP_* => struct gnttab_* */
+#define GNTTABOP_map_grant_ref 0
+#define GNTTABOP_unmap_grant_ref 1
+#define GNTTABOP_setup_table 2
+#define GNTTABOP_dump_table 3
+#define GNTTABOP_transfer 4
+#define GNTTABOP_copy 5
+#define GNTTABOP_query_size 6
+#define GNTTABOP_unmap_and_replace 7
+#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
+#define GNTTABOP_set_version 8
+#define GNTTABOP_get_status_frames 9
+#define GNTTABOP_get_version 10
+#define GNTTABOP_swap_grant_ref 11
+#endif /* __XEN_INTERFACE_VERSION__ */
+/* ` } */
/*
* Handle to track a mapping created via a grant reference.
@@ -177,13 +328,12 @@ typedef uint32_t grant_handle_t;
* 2. If GNTMAP_host_map is specified then a mapping will be added at
* either a host virtual address in the current address space, or at
* a PTE at the specified machine address. The type of mapping to
- * perform is selected through the GNTMAP_contains_pte flag, and the
+ * perform is selected through the GNTMAP_contains_pte flag, and the
* address is specified in <host_addr>.
* 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
* host mapping is destroyed by other means then it is *NOT* guaranteed
* to be accounted to the correct grant reference!
*/
-#define GNTTABOP_map_grant_ref 0
struct gnttab_map_grant_ref {
/* IN parameters. */
uint64_t host_addr;
@@ -191,7 +341,7 @@ struct gnttab_map_grant_ref {
grant_ref_t ref;
domid_t dom;
/* OUT parameters. */
- int16_t status; /* GNTST_* */
+ int16_t status; /* => enum grant_status */
grant_handle_t handle;
uint64_t dev_bus_addr;
};
@@ -209,14 +359,13 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
* 3. After executing a batch of unmaps, it is guaranteed that no stale
* mappings will remain in the device or host TLBs.
*/
-#define GNTTABOP_unmap_grant_ref 1
struct gnttab_unmap_grant_ref {
/* IN parameters. */
uint64_t host_addr;
uint64_t dev_bus_addr;
grant_handle_t handle;
/* OUT parameters. */
- int16_t status; /* GNTST_* */
+ int16_t status; /* => enum grant_status */
};
typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
@@ -230,13 +379,12 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
* 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
* 3. Xen may not support more than a single grant-table page per domain.
*/
-#define GNTTABOP_setup_table 2
struct gnttab_setup_table {
/* IN parameters. */
domid_t dom;
uint32_t nr_frames;
/* OUT parameters. */
- int16_t status; /* GNTST_* */
+ int16_t status; /* => enum grant_status */
XEN_GUEST_HANDLE(ulong) frame_list;
};
typedef struct gnttab_setup_table gnttab_setup_table_t;
@@ -246,12 +394,11 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
* GNTTABOP_dump_table: Dump the contents of the grant table to the
* xen console. Debugging use only.
*/
-#define GNTTABOP_dump_table 3
struct gnttab_dump_table {
/* IN parameters. */
domid_t dom;
/* OUT parameters. */
- int16_t status; /* GNTST_* */
+ int16_t status; /* => enum grant_status */
};
typedef struct gnttab_dump_table gnttab_dump_table_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
@@ -260,11 +407,10 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
* GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
* foreign domain has previously registered its interest in the transfer via
* <domid, ref>.
- *
+ *
* Note that, even if the transfer fails, the specified page no longer belongs
* to the calling domain *unless* the error is GNTST_bad_page.
*/
-#define GNTTABOP_transfer 4
struct gnttab_transfer {
/* IN parameters. */
xen_pfn_t mfn;
@@ -299,9 +445,10 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
#define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref)
#define _GNTCOPY_dest_gref (1)
#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
+#define _GNTCOPY_can_fail (2)
+#define GNTCOPY_can_fail (1<<_GNTCOPY_can_fail)
-#define GNTTABOP_copy 5
-typedef struct gnttab_copy {
+struct gnttab_copy {
/* IN parameters. */
struct {
union {
@@ -315,7 +462,8 @@ typedef struct gnttab_copy {
uint16_t flags; /* GNTCOPY_* */
/* OUT parameters. */
int16_t status;
-} gnttab_copy_t;
+};
+typedef struct gnttab_copy gnttab_copy_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
/*
@@ -325,14 +473,13 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
*/
-#define GNTTABOP_query_size 6
struct gnttab_query_size {
/* IN parameters. */
domid_t dom;
/* OUT parameters. */
uint32_t nr_frames;
uint32_t max_nr_frames;
- int16_t status; /* GNTST_* */
+ int16_t status; /* => enum grant_status */
};
typedef struct gnttab_query_size gnttab_query_size_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
@@ -348,21 +495,87 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
* 2. After executing a batch of unmaps, it is guaranteed that no stale
* mappings will remain in the device or host TLBs.
*/
-#define GNTTABOP_unmap_and_replace 7
struct gnttab_unmap_and_replace {
/* IN parameters. */
uint64_t host_addr;
uint64_t new_addr;
grant_handle_t handle;
/* OUT parameters. */
- int16_t status; /* GNTST_* */
+ int16_t status; /* => enum grant_status */
};
typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
+#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
+/*
+ * GNTTABOP_set_version: Request a particular version of the grant
+ * table shared table structure. This operation can only be performed
+ * once in any given domain. It must be performed before any grants
+ * are activated; otherwise, the domain will be stuck with version 1.
+ * The only defined versions are 1 and 2.
+ */
+struct gnttab_set_version {
+ /* IN/OUT parameters */
+ uint32_t version;
+};
+typedef struct gnttab_set_version gnttab_set_version_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_set_version_t);
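
A guest that wants version 2 entries must ask for them before activating any grants, as noted above. A minimal sketch, assuming a guest-side HYPERVISOR_grant_table_op() wrapper with the documented three-argument prototype; illustrative only, not part of this patch.

#include <xen/interface/grant_table.h>

extern long HYPERVISOR_grant_table_op(unsigned int cmd, void *uop,
                                      unsigned int count);

static int gnttab_switch_to_v2(void)
{
    struct gnttab_set_version gsv = { .version = 2 };

    /* On success, gsv.version reports the version actually in effect. */
    return HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
}
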
+
/*
- * Bitfield values for update_pin_status.flags.
+ * GNTTABOP_get_status_frames: Get the list of frames used to store grant
+ * status for <dom>. In grant format version 2, the status is separated
+ * from the other shared grant fields to allow more efficient synchronization
+ * using barriers instead of atomic cmpexch operations.
+ * <nr_frames> specifies the size of the vector <frame_list>.
+ * The frame addresses are returned in the <frame_list>.
+ * Only <nr_frames> addresses are returned, even if the table is larger.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+struct gnttab_get_status_frames {
+ /* IN parameters. */
+ uint32_t nr_frames;
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* => enum grant_status */
+ XEN_GUEST_HANDLE(uint64_t) frame_list;
+};
+typedef struct gnttab_get_status_frames gnttab_get_status_frames_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_get_status_frames_t);
+
+/*
+ * GNTTABOP_get_version: Get the grant table version which is in
+ * effect for domain <dom>.
+ */
+struct gnttab_get_version {
+ /* IN parameters */
+ domid_t dom;
+ uint16_t pad;
+ /* OUT parameters */
+ uint32_t version;
+};
+typedef struct gnttab_get_version gnttab_get_version_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_get_version_t);
+
+/*
+ * GNTTABOP_swap_grant_ref: Swap the contents of two grant entries.
+ */
+struct gnttab_swap_grant_ref {
+ /* IN parameters */
+ grant_ref_t ref_a;
+ grant_ref_t ref_b;
+ /* OUT parameters */
+ int16_t status; /* => enum grant_status */
+};
+typedef struct gnttab_swap_grant_ref gnttab_swap_grant_ref_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_swap_grant_ref_t);
+
+#endif /* __XEN_INTERFACE_VERSION__ */
+
+/*
+ * Bitfield values for gnttab_map_grant_ref.flags.
*/
/* Map the grant entry for access by I/O devices. */
#define _GNTMAP_device_map (0)
@@ -389,9 +602,20 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
#define _GNTMAP_contains_pte (4)
#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
+#define _GNTMAP_can_fail (5)
+#define GNTMAP_can_fail (1<<_GNTMAP_can_fail)
+
+/*
+ * Bits to be placed in guest kernel available PTE bits (architecture
+ * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
+ */
+#define _GNTMAP_guest_avail0 (16)
+#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
+
/*
* Values for error status returns. All errors are -ve.
*/
+/* ` enum grant_status { */
#define GNTST_okay (0) /* Normal return. */
#define GNTST_general_error (-1) /* General undefined error. */
 #define GNTST_bad_domain      (-2) /* Unrecognised domain id.                */
@@ -404,6 +628,8 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
#define GNTST_address_too_big (-11) /* transfer page address too large. */
+#define GNTST_eagain (-12) /* Operation not done; try again. */
+/* ` } */
#define GNTTABOP_error_msgs { \
"okay", \
@@ -417,7 +643,8 @@ DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
"permission denied", \
"bad page", \
"copy arguments cross page boundary", \
- "page address size too large" \
+ "page address size too large", \
+ "operation not done; try again" \
}
#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
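For illustration, a guest that wants the version 2 layout issues GNTTABOP_set_version before activating any grants. A minimal sketch, assuming the usual HYPERVISOR_grant_table_op() hypercall wrapper is available; the helper name is made up:

#include <xen/interface/grant_table.h>

/* Hypothetical helper: switch this domain's grant table to version 2. */
static int
gnttab_select_version2(void)
{
    struct gnttab_set_version gsv;
    int error;

    gsv.version = 2;
    error = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
    if (error != 0)
        return (error);             /* hypercall failed outright */
    /* On return the field reports the version now in effect. */
    return (gsv.version == 2 ? 0 : -1);
}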
diff --git a/sys/xen/interface/hvm/hvm_info_table.h b/sys/xen/interface/hvm/hvm_info_table.h
index dfe34db..36085fa 100644
--- a/sys/xen/interface/hvm/hvm_info_table.h
+++ b/sys/xen/interface/hvm/hvm_info_table.h
@@ -29,13 +29,44 @@
#define HVM_INFO_OFFSET 0x800
#define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET)
+/* Maximum we can support with current vLAPIC ID mapping. */
+#define HVM_MAX_VCPUS 128
+
struct hvm_info_table {
char signature[8]; /* "HVM INFO" */
uint32_t length;
uint8_t checksum;
- uint8_t acpi_enabled;
+
+ /* Should firmware build APIC descriptors (APIC MADT / MP BIOS)? */
uint8_t apic_mode;
+
+ /* How many CPUs does this domain have? */
uint32_t nr_vcpus;
+
+ /*
+ * MEMORY MAP provided by HVM domain builder.
+ * Notes:
+ * 1. page_to_phys(x) = x << 12
+ * 2. If a field is zero, the corresponding range does not exist.
+ */
+ /*
+ * 0x0 to page_to_phys(low_mem_pgend)-1:
+ * RAM below 4GB (except for VGA hole 0xA0000-0xBFFFF)
+ */
+ uint32_t low_mem_pgend;
+ /*
+ * page_to_phys(reserved_mem_pgstart) to 0xFFFFFFFF:
+ * Reserved for special memory mappings
+ */
+ uint32_t reserved_mem_pgstart;
+ /*
+ * 0x100000000 to page_to_phys(high_mem_pgend)-1:
+ * RAM above 4GB
+ */
+ uint32_t high_mem_pgend;
+
+ /* Bitmap of which CPUs are online at boot time. */
+ uint8_t vcpu_online[(HVM_MAX_VCPUS + 7)/8];
};
#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
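The table's one-byte checksum is conventionally chosen so that the first 'length' bytes sum to zero, which makes validating it and testing a VCPU's boot-time online bit straightforward. A hedged sketch, assuming the caller has already mapped the table located at HVM_INFO_PADDR:

#include <xen/interface/hvm/hvm_info_table.h>

static int
hvm_info_valid(const struct hvm_info_table *hvm_info)
{
    static const char sig[8] = "HVM INFO";
    const uint8_t *p = (const uint8_t *)hvm_info;
    uint8_t sum = 0;
    uint32_t i;

    for (i = 0; i < 8; i++)
        if (hvm_info->signature[i] != sig[i])
            return (0);
    for (i = 0; i < hvm_info->length; i++)
        sum += p[i];                /* checksum makes the bytes sum to zero */
    return (sum == 0);
}

static int
hvm_vcpu_online(const struct hvm_info_table *hvm_info, unsigned int vcpu)
{
    if (vcpu >= hvm_info->nr_vcpus || vcpu >= HVM_MAX_VCPUS)
        return (0);
    return ((hvm_info->vcpu_online[vcpu / 8] >> (vcpu % 8)) & 1);
}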
diff --git a/sys/xen/interface/hvm/hvm_op.h b/sys/xen/interface/hvm/hvm_op.h
index f0ada2d..a9aab4b 100644
--- a/sys/xen/interface/hvm/hvm_op.h
+++ b/sys/xen/interface/hvm/hvm_op.h
@@ -21,6 +21,9 @@
#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
#define __XEN_PUBLIC_HVM_HVM_OP_H__
+#include "../xen.h"
+#include "../trace.h"
+
/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
#define HVMOP_set_param 0
#define HVMOP_get_param 1
@@ -73,6 +76,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
/* Flushes all VCPU TLBs: @arg must be NULL. */
#define HVMOP_flush_tlbs 5
+typedef enum {
+ HVMMEM_ram_rw, /* Normal read/write guest RAM */
+ HVMMEM_ram_ro, /* Read-only; writes are discarded */
+ HVMMEM_mmio_dm, /* Reads and writes go to the device model */
+} hvmmem_type_t;
+
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
@@ -106,25 +115,160 @@ typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
#define HVMOP_set_mem_type 8
-typedef enum {
- HVMMEM_ram_rw, /* Normal read/write guest RAM */
- HVMMEM_ram_ro, /* Read-only; writes are discarded */
- HVMMEM_mmio_dm, /* Reads and write go to the device model */
-} hvmmem_type_t;
/* Notify that a region of memory is to be treated in a specific way. */
struct xen_hvm_set_mem_type {
/* Domain to be updated. */
domid_t domid;
/* Memory type */
- hvmmem_type_t hvmmem_type;
+ uint16_t hvmmem_type;
+ /* Number of pages. */
+ uint32_t nr;
/* First pfn. */
uint64_aligned_t first_pfn;
- /* Number of pages. */
- uint64_aligned_t nr;
};
typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+/* Hint from PV drivers for pagetable destruction. */
+#define HVMOP_pagetable_dying 9
+struct xen_hvm_pagetable_dying {
+ /* Domain with a pagetable about to be destroyed. */
+ domid_t domid;
+ uint16_t pad[3]; /* align next field on 8-byte boundary */
+ /* guest physical address of the toplevel pagetable dying */
+ uint64_t gpa;
+};
+typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t);
+
+/* Get the current Xen time, in nanoseconds since system boot. */
+#define HVMOP_get_time 10
+struct xen_hvm_get_time {
+ uint64_t now; /* OUT */
+};
+typedef struct xen_hvm_get_time xen_hvm_get_time_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_time_t);
+
+#define HVMOP_xentrace 11
+struct xen_hvm_xentrace {
+ uint16_t event, extra_bytes;
+ uint8_t extra[TRACE_EXTRA_MAX * sizeof(uint32_t)];
+};
+typedef struct xen_hvm_xentrace xen_hvm_xentrace_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
+
+/* Following tools-only interfaces may change in future. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#define HVMOP_set_mem_access 12
+typedef enum {
+ HVMMEM_access_n,
+ HVMMEM_access_r,
+ HVMMEM_access_w,
+ HVMMEM_access_rw,
+ HVMMEM_access_x,
+ HVMMEM_access_rx,
+ HVMMEM_access_wx,
+ HVMMEM_access_rwx,
+ HVMMEM_access_rx2rw, /* Page starts off as r-x, but automatically
+ * changes to r-w on a write */
+ HVMMEM_access_n2rwx, /* Log access: starts off as n, automatically
+ * goes to rwx, generating an event without
+ * pausing the vcpu */
+ HVMMEM_access_default /* Take the domain default */
+} hvmmem_access_t;
+/* Notify that a region of memory is to have specific access types */
+struct xen_hvm_set_mem_access {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* Memory type */
+ uint16_t hvmmem_access; /* hvm_access_t */
+ /* Number of pages, ignored on setting default access */
+ uint32_t nr;
+ /* First pfn, or ~0ull to set the default access for new pages */
+ uint64_aligned_t first_pfn;
+};
+typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);
+
+#define HVMOP_get_mem_access 13
+/* Get the specific access type for that region of memory */
+struct xen_hvm_get_mem_access {
+ /* Domain to be queried. */
+ domid_t domid;
+ /* Memory type: OUT */
+ uint16_t hvmmem_access; /* hvm_access_t */
+ /* pfn, or ~0ull for default access for new pages. IN */
+ uint64_aligned_t pfn;
+};
+typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
+
+#define HVMOP_inject_trap 14
+/* Inject a trap into a VCPU; it will be taken the next time the VCPU is
+ * scheduled. The caller should know enough about the CPU state before
+ * injecting to understand what effect the trap will have.
+ */
+struct xen_hvm_inject_trap {
+ /* Domain to be queried. */
+ domid_t domid;
+ /* VCPU */
+ uint32_t vcpuid;
+ /* Vector number */
+ uint32_t vector;
+ /* Trap type (HVMOP_TRAP_*) */
+ uint32_t type;
+/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
+# define HVMOP_TRAP_ext_int 0 /* external interrupt */
+# define HVMOP_TRAP_nmi 2 /* nmi */
+# define HVMOP_TRAP_hw_exc 3 /* hardware exception */
+# define HVMOP_TRAP_sw_int 4 /* software interrupt (CD nn) */
+# define HVMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */
+# define HVMOP_TRAP_sw_exc 6 /* INT3 (CC), INTO (CE) */
+ /* Error code, or ~0u to skip */
+ uint32_t error_code;
+ /* Instruction length */
+ uint32_t insn_len;
+ /* CR2 for page faults */
+ uint64_aligned_t cr2;
+};
+typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+#define HVMOP_get_mem_type 15
+/* Return hvmmem_type_t for the specified pfn. */
+struct xen_hvm_get_mem_type {
+ /* Domain to be queried. */
+ domid_t domid;
+ /* OUT variable. */
+ uint16_t mem_type;
+ uint16_t pad[2]; /* align next field on 8-byte boundary */
+ /* IN variable. */
+ uint64_t pfn;
+};
+typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
+
+/* Following tools-only interfaces may change in future. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+/* MSI injection for emulated devices */
+#define HVMOP_inject_msi 16
+struct xen_hvm_inject_msi {
+ /* Domain to be injected */
+ domid_t domid;
+ /* Data -- lower 32 bits */
+ uint32_t data;
+ /* Address (0xfeexxxxx) */
+ uint64_t addr;
+};
+typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
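Of the new sub-ops, HVMOP_get_mem_type is usable by the guest itself (it sits outside the tools-only block). A minimal sketch of querying one pfn, assuming the HYPERVISOR_hvm_op() wrapper:

#include <xen/interface/hvm/hvm_op.h>

static int
hvm_query_mem_type(uint64_t pfn, uint16_t *type)
{
    struct xen_hvm_get_mem_type op;
    int error;

    op.domid = DOMID_SELF;
    op.pfn = pfn;
    error = HYPERVISOR_hvm_op(HVMOP_get_mem_type, &op);
    if (error == 0)
        *type = op.mem_type;    /* HVMMEM_ram_rw, HVMMEM_ram_ro or HVMMEM_mmio_dm */
    return (error);
}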
diff --git a/sys/xen/interface/hvm/ioreq.h b/sys/xen/interface/hvm/ioreq.h
index 5b68730..4022a1d 100644
--- a/sys/xen/interface/hvm/ioreq.h
+++ b/sys/xen/interface/hvm/ioreq.h
@@ -43,32 +43,24 @@
* virq
*/
struct ioreq {
- uint64_t addr; /* physical address */
- uint64_t size; /* size in bytes */
- uint64_t count; /* for rep prefixes */
- uint64_t data; /* data (or paddr of data) */
+ uint64_t addr; /* physical address */
+ uint64_t data; /* data (or paddr of data) */
+ uint32_t count; /* for rep prefixes */
+ uint32_t size; /* size in bytes */
+ uint32_t vp_eport; /* evtchn for notifications to/from device model */
+ uint16_t _pad0;
uint8_t state:4;
- uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
- * of the real data to use. */
- uint8_t dir:1; /* 1=read, 0=write */
+ uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
+ * of the real data to use. */
+ uint8_t dir:1; /* 1=read, 0=write */
uint8_t df:1;
- uint8_t pad:1;
- uint8_t type; /* I/O type */
- uint8_t _pad0[6];
- uint64_t io_count; /* How many IO done on a vcpu */
+ uint8_t _pad1:1;
+ uint8_t type; /* I/O type */
};
typedef struct ioreq ioreq_t;
-struct vcpu_iodata {
- struct ioreq vp_ioreq;
- /* Event channel port, used for notifications to/from the device model. */
- uint32_t vp_eport;
- uint32_t _pad0;
-};
-typedef struct vcpu_iodata vcpu_iodata_t;
-
struct shared_iopage {
- struct vcpu_iodata vcpu_iodata[1];
+ struct ioreq vcpu_ioreq[1];
};
typedef struct shared_iopage shared_iopage_t;
@@ -108,11 +100,32 @@ struct buffered_piopage {
};
#endif /* defined(__ia64__) */
-#define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40
-#define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04)
-#define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08)
-#define ACPI_GPE0_BLK_ADDRESS (ACPI_PM_TMR_BLK_ADDRESS + 0x20)
-#define ACPI_GPE0_BLK_LEN 0x08
+/*
+ * ACPI Control/Event register locations. Location is controlled by a
+ * version number in HVM_PARAM_ACPI_IOPORTS_LOCATION.
+ */
+
+/* Version 0 (default): Traditional Xen locations. */
+#define ACPI_PM1A_EVT_BLK_ADDRESS_V0 0x1f40
+#define ACPI_PM1A_CNT_BLK_ADDRESS_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 0x04)
+#define ACPI_PM_TMR_BLK_ADDRESS_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 0x08)
+#define ACPI_GPE0_BLK_ADDRESS_V0 (ACPI_PM_TMR_BLK_ADDRESS_V0 + 0x20)
+#define ACPI_GPE0_BLK_LEN_V0 0x08
+
+/* Version 1: Locations preferred by modern Qemu. */
+#define ACPI_PM1A_EVT_BLK_ADDRESS_V1 0xb000
+#define ACPI_PM1A_CNT_BLK_ADDRESS_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 0x04)
+#define ACPI_PM_TMR_BLK_ADDRESS_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 0x08)
+#define ACPI_GPE0_BLK_ADDRESS_V1 0xafe0
+#define ACPI_GPE0_BLK_LEN_V1 0x04
+
+/* Compatibility definitions for the default location (version 0). */
+#define ACPI_PM1A_EVT_BLK_ADDRESS ACPI_PM1A_EVT_BLK_ADDRESS_V0
+#define ACPI_PM1A_CNT_BLK_ADDRESS ACPI_PM1A_CNT_BLK_ADDRESS_V0
+#define ACPI_PM_TMR_BLK_ADDRESS ACPI_PM_TMR_BLK_ADDRESS_V0
+#define ACPI_GPE0_BLK_ADDRESS ACPI_GPE0_BLK_ADDRESS_V0
+#define ACPI_GPE0_BLK_LEN ACPI_GPE0_BLK_LEN_V0
+
#endif /* _IOREQ_H_ */
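With vcpu_iodata gone, a device model indexes the shared page by VCPU id and finds the notification port in the request itself. A rough sketch of the dispatch step; handle_io() and evtchn_notify() are placeholders for the emulator's own routines:

#include <xen/interface/hvm/ioreq.h>

extern void handle_io(struct ioreq *req);       /* placeholder */
extern void evtchn_notify(uint32_t port);       /* placeholder */

static void
service_ioreq(struct shared_iopage *iopage, unsigned int vcpu)
{
    struct ioreq *req = &iopage->vcpu_ioreq[vcpu];

    if (req->state != STATE_IOREQ_READY)
        return;
    req->state = STATE_IOREQ_INPROCESS;
    handle_io(req);                             /* emulate the access */
    req->state = STATE_IORESP_READY;
    evtchn_notify(req->vp_eport);               /* per-request event channel */
}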
diff --git a/sys/xen/interface/hvm/params.h b/sys/xen/interface/hvm/params.h
index d846731..55c1b57 100644
--- a/sys/xen/interface/hvm/params.h
+++ b/sys/xen/interface/hvm/params.h
@@ -21,7 +21,7 @@
#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
#define __XEN_PUBLIC_HVM_PARAMS_H__
-#include <xen/interface/hvm/hvm_op.h>
+#include "hvm_op.h"
/*
* Parameter space for HVMOP_{set,get}_param.
@@ -33,6 +33,9 @@
* val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
* Domain = val[47:32], Bus = val[31:16],
* DevFn = val[15: 8], IntX = val[ 1: 0]
+ * val[63:56] == 2: val[7:0] is a vector number, check for
+ * XENFEAT_hvm_callback_vector to know if this delivery
+ * method is available.
* If val == 0 then CPU0 event-channel notifications are not delivered.
*/
#define HVM_PARAM_CALLBACK_IRQ 0
@@ -49,11 +52,19 @@
#define HVM_PARAM_IOREQ_PFN 5
#define HVM_PARAM_BUFIOREQ_PFN 6
+#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#ifdef __ia64__
+
#define HVM_PARAM_NVRAM_FD 7
#define HVM_PARAM_VHPT_SIZE 8
#define HVM_PARAM_BUFPIOREQ_PFN 9
+
+#elif defined(__i386__) || defined(__x86_64__)
+
+/* Expose Viridian interfaces to this HVM guest? */
+#define HVM_PARAM_VIRIDIAN 9
+
#endif
/*
@@ -93,32 +104,49 @@
/* ACPI S state: currently support S0 and S3 on x86. */
#define HVM_PARAM_ACPI_S_STATE 14
-#define HVM_NR_PARAMS 15
+/* TSS used on Intel when CR0.PE=0. */
+#define HVM_PARAM_VM86_TSS 15
-#ifdef XENHVM
-/**
- * Retrieve an HVM setting from the hypervisor.
- *
- * \param index The index of the HVM parameter to retrieve.
- *
- * \return On error, 0. Otherwise the value of the requested parameter.
+/* Boolean: Enable aligning all periodic vpts to reduce interrupts */
+#define HVM_PARAM_VPT_ALIGN 16
+
+/* Console debug shared memory ring and event channel */
+#define HVM_PARAM_CONSOLE_PFN 17
+#define HVM_PARAM_CONSOLE_EVTCHN 18
+
+/*
+ * Select location of ACPI PM1a and TMR control blocks. Currently two locations
+ * are supported, specified by version 0 or 1 in this parameter:
+ * - 0: default, use the old addresses
+ * PM1A_EVT == 0x1f40; PM1A_CNT == 0x1f44; PM_TMR == 0x1f48
+ * - 1: use the new default qemu addresses
+ * PM1A_EVT == 0xb000; PM1A_CNT == 0xb004; PM_TMR == 0xb008
+ * You can find these address definitions in <hvm/ioreq.h>
*/
-static inline unsigned long
-hvm_get_parameter(int index)
-{
- struct xen_hvm_param xhv;
- int error;
-
- xhv.domid = DOMID_SELF;
- xhv.index = index;
- error = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
- if (error) {
- printf("hvm_get_parameter: failed to get %d, error %d\n",
- index, error);
- return (0);
- }
- return (xhv.value);
-}
-#endif
+#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
+
+/* Enable blocking memory events, either async or sync (pausing the vcpu
+ * until a response arrives); onchangeonly requests messages only when the
+ * value changes. */
+#define HVM_PARAM_MEMORY_EVENT_CR0 20
+#define HVM_PARAM_MEMORY_EVENT_CR3 21
+#define HVM_PARAM_MEMORY_EVENT_CR4 22
+#define HVM_PARAM_MEMORY_EVENT_INT3 23
+#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
+
+#define HVMPME_MODE_MASK (3 << 0)
+#define HVMPME_mode_disabled 0
+#define HVMPME_mode_async 1
+#define HVMPME_mode_sync 2
+#define HVMPME_onchangeonly (1 << 2)
+
+/* Boolean: Enable nestedhvm (hvm only) */
+#define HVM_PARAM_NESTEDHVM 24
+
+/* Params for the mem event rings */
+#define HVM_PARAM_PAGING_RING_PFN 27
+#define HVM_PARAM_ACCESS_RING_PFN 28
+#define HVM_PARAM_SHARING_RING_PFN 29
+
+#define HVM_NR_PARAMS 30
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
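Callers of the inline hvm_get_parameter() helper removed above can issue HVMOP_get_param directly. A minimal replacement sketch, assuming the HYPERVISOR_hvm_op() wrapper:

#include <xen/interface/hvm/params.h>

static int
hvm_get_param(uint32_t index, uint64_t *value)
{
    struct xen_hvm_param xhv;
    int error;

    xhv.domid = DOMID_SELF;
    xhv.index = index;          /* e.g. HVM_PARAM_CONSOLE_EVTCHN */
    error = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
    if (error == 0)
        *value = xhv.value;
    return (error);
}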
diff --git a/sys/xen/interface/hvm/save.h b/sys/xen/interface/hvm/save.h
index d45f0c1..58f8433 100644
--- a/sys/xen/interface/hvm/save.h
+++ b/sys/xen/interface/hvm/save.h
@@ -61,13 +61,36 @@ struct hvm_save_descriptor {
* ugliness.
*/
-#define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
- struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; }
+#ifdef __XEN__
+# define DECLARE_HVM_SAVE_TYPE_COMPAT(_x, _code, _type, _ctype, _fix) \
+ static inline int __HVM_SAVE_FIX_COMPAT_##_x(void *h) { return _fix(h); } \
+ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[2];}; \
+ struct __HVM_SAVE_TYPE_COMPAT_##_x { _ctype t; }
+
+# include <xen/lib.h> /* BUG() */
+# define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
+ static inline int __HVM_SAVE_FIX_COMPAT_##_x(void *h) { BUG(); return -1; } \
+ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1];}; \
+ struct __HVM_SAVE_TYPE_COMPAT_##_x { _type t; }
+#else
+# define DECLARE_HVM_SAVE_TYPE_COMPAT(_x, _code, _type, _ctype, _fix) \
+ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[2];}
+
+# define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
+ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1];}
+#endif
#define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t)
#define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x)))
#define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c))
+#ifdef __XEN__
+# define HVM_SAVE_TYPE_COMPAT(_x) typeof (((struct __HVM_SAVE_TYPE_COMPAT_##_x *)(0))->t)
+# define HVM_SAVE_LENGTH_COMPAT(_x) (sizeof (HVM_SAVE_TYPE_COMPAT(_x)))
+
+# define HVM_SAVE_HAS_COMPAT(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->cpt)-1)
+# define HVM_SAVE_FIX_COMPAT(_x, _dst) __HVM_SAVE_FIX_COMPAT_##_x(_dst)
+#endif
/*
 * The series of save records is terminated by a zero-type, zero-length
@@ -81,6 +104,8 @@ DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
#include "../arch-x86/hvm/save.h"
#elif defined(__ia64__)
#include "../arch-ia64/hvm/save.h"
+#elif defined(__arm__)
+#include "../arch-arm/hvm/save.h"
#else
#error "unsupported architecture"
#endif
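Outside the hypervisor the compat machinery collapses to the familiar two-member trick: the record code is encoded in the length of the 'c' array. An illustration with a made-up record type (the code 42 is hypothetical):

#include <xen/interface/hvm/save.h>

struct hvm_widget {
    uint64_t state;
};
DECLARE_HVM_SAVE_TYPE(WIDGET, 42, struct hvm_widget);

/* HVM_SAVE_CODE(WIDGET) == 42 and
 * HVM_SAVE_LENGTH(WIDGET) == sizeof(struct hvm_widget). */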
diff --git a/sys/xen/interface/hvm/vmx_assist.h b/sys/xen/interface/hvm/vmx_assist.h
deleted file mode 100644
index 4ef17fe..0000000
--- a/sys/xen/interface/hvm/vmx_assist.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * vmx_assist.h: Context definitions for the VMXASSIST world switch.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Leendert van Doorn, leendert@watson.ibm.com
- * Copyright (c) 2005, International Business Machines Corporation.
- */
-
-#ifndef _VMX_ASSIST_H_
-#define _VMX_ASSIST_H_
-
-#define VMXASSIST_BASE 0xD0000
-#define VMXASSIST_MAGIC 0x17101966
-#define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8)
-
-#define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12)
-#define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4)
-
-#ifndef __ASSEMBLY__
-
-#define NR_EXCEPTION_HANDLER 32
-#define NR_INTERRUPT_HANDLERS 16
-#define NR_TRAPS (NR_EXCEPTION_HANDLER+NR_INTERRUPT_HANDLERS)
-
-union vmcs_arbytes {
- struct arbyte_fields {
- unsigned int seg_type : 4,
- s : 1,
- dpl : 2,
- p : 1,
- reserved0 : 4,
- avl : 1,
- reserved1 : 1,
- default_ops_size: 1,
- g : 1,
- null_bit : 1,
- reserved2 : 15;
- } fields;
- unsigned int bytes;
-};
-
-/*
- * World switch state
- */
-struct vmx_assist_context {
- uint32_t eip; /* execution pointer */
- uint32_t esp; /* stack pointer */
- uint32_t eflags; /* flags register */
- uint32_t cr0;
- uint32_t cr3; /* page table directory */
- uint32_t cr4;
- uint32_t idtr_limit; /* idt */
- uint32_t idtr_base;
- uint32_t gdtr_limit; /* gdt */
- uint32_t gdtr_base;
- uint32_t cs_sel; /* cs selector */
- uint32_t cs_limit;
- uint32_t cs_base;
- union vmcs_arbytes cs_arbytes;
- uint32_t ds_sel; /* ds selector */
- uint32_t ds_limit;
- uint32_t ds_base;
- union vmcs_arbytes ds_arbytes;
- uint32_t es_sel; /* es selector */
- uint32_t es_limit;
- uint32_t es_base;
- union vmcs_arbytes es_arbytes;
- uint32_t ss_sel; /* ss selector */
- uint32_t ss_limit;
- uint32_t ss_base;
- union vmcs_arbytes ss_arbytes;
- uint32_t fs_sel; /* fs selector */
- uint32_t fs_limit;
- uint32_t fs_base;
- union vmcs_arbytes fs_arbytes;
- uint32_t gs_sel; /* gs selector */
- uint32_t gs_limit;
- uint32_t gs_base;
- union vmcs_arbytes gs_arbytes;
- uint32_t tr_sel; /* task selector */
- uint32_t tr_limit;
- uint32_t tr_base;
- union vmcs_arbytes tr_arbytes;
- uint32_t ldtr_sel; /* ldtr selector */
- uint32_t ldtr_limit;
- uint32_t ldtr_base;
- union vmcs_arbytes ldtr_arbytes;
-
- unsigned char rm_irqbase[2];
-};
-typedef struct vmx_assist_context vmx_assist_context_t;
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _VMX_ASSIST_H_ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/sys/xen/interface/io/blkif.h b/sys/xen/interface/io/blkif.h
index b6c930b..9155662 100644
--- a/sys/xen/interface/io/blkif.h
+++ b/sys/xen/interface/io/blkif.h
@@ -28,8 +28,8 @@
#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__
-#include <xen/interface/io/ring.h>
-#include <xen/interface/grant_table.h>
+#include "ring.h"
+#include "../grant_table.h"
/*
* Front->back notifications: When enqueuing a new request, sending a
diff --git a/sys/xen/interface/io/fsif.h b/sys/xen/interface/io/fsif.h
new file mode 100644
index 0000000..8fc2174
--- /dev/null
+++ b/sys/xen/interface/io/fsif.h
@@ -0,0 +1,192 @@
+/******************************************************************************
+ * fsif.h
+ *
+ * Interface to FS level split device drivers.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2007, Grzegorz Milos, <gm281@cam.ac.uk>.
+ */
+
+#ifndef __XEN_PUBLIC_IO_FSIF_H__
+#define __XEN_PUBLIC_IO_FSIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+#define REQ_FILE_OPEN 1
+#define REQ_FILE_CLOSE 2
+#define REQ_FILE_READ 3
+#define REQ_FILE_WRITE 4
+#define REQ_STAT 5
+#define REQ_FILE_TRUNCATE 6
+#define REQ_REMOVE 7
+#define REQ_RENAME 8
+#define REQ_CREATE 9
+#define REQ_DIR_LIST 10
+#define REQ_CHMOD 11
+#define REQ_FS_SPACE 12
+#define REQ_FILE_SYNC 13
+
+struct fsif_open_request {
+ grant_ref_t gref;
+};
+
+struct fsif_close_request {
+ uint32_t fd;
+};
+
+struct fsif_read_request {
+ uint32_t fd;
+ int32_t pad;
+ uint64_t len;
+ uint64_t offset;
+ grant_ref_t grefs[1]; /* Variable length */
+};
+
+struct fsif_write_request {
+ uint32_t fd;
+ int32_t pad;
+ uint64_t len;
+ uint64_t offset;
+ grant_ref_t grefs[1]; /* Variable length */
+};
+
+struct fsif_stat_request {
+ uint32_t fd;
+};
+
+/* This structure is a copy of some fields of the stat structure, returned
+ * via the ring. */
+struct fsif_stat_response {
+ int32_t stat_mode;
+ uint32_t stat_uid;
+ uint32_t stat_gid;
+ int32_t stat_ret;
+ int64_t stat_size;
+ int64_t stat_atime;
+ int64_t stat_mtime;
+ int64_t stat_ctime;
+};
+
+struct fsif_truncate_request {
+ uint32_t fd;
+ int32_t pad;
+ int64_t length;
+};
+
+struct fsif_remove_request {
+ grant_ref_t gref;
+};
+
+struct fsif_rename_request {
+ uint16_t old_name_offset;
+ uint16_t new_name_offset;
+ grant_ref_t gref;
+};
+
+struct fsif_create_request {
+ int8_t directory;
+ int8_t pad;
+ int16_t pad2;
+ int32_t mode;
+ grant_ref_t gref;
+};
+
+struct fsif_list_request {
+ uint32_t offset;
+ grant_ref_t gref;
+};
+
+#define NR_FILES_SHIFT 0
+#define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */
+#define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT)
+#define ERROR_SIZE 32 /* 32 bits for the error mask */
+#define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT)
+#define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT)
+#define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE)
+#define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT)
+
+struct fsif_chmod_request {
+ uint32_t fd;
+ int32_t mode;
+};
+
+struct fsif_space_request {
+ grant_ref_t gref;
+};
+
+struct fsif_sync_request {
+ uint32_t fd;
+};
+
+
+/* FS operation request */
+struct fsif_request {
+ uint8_t type; /* Type of the request */
+ uint8_t pad;
+ uint16_t id; /* Request ID, copied to the response */
+ uint32_t pad2;
+ union {
+ struct fsif_open_request fopen;
+ struct fsif_close_request fclose;
+ struct fsif_read_request fread;
+ struct fsif_write_request fwrite;
+ struct fsif_stat_request fstat;
+ struct fsif_truncate_request ftruncate;
+ struct fsif_remove_request fremove;
+ struct fsif_rename_request frename;
+ struct fsif_create_request fcreate;
+ struct fsif_list_request flist;
+ struct fsif_chmod_request fchmod;
+ struct fsif_space_request fspace;
+ struct fsif_sync_request fsync;
+ } u;
+};
+typedef struct fsif_request fsif_request_t;
+
+/* FS operation response */
+struct fsif_response {
+ uint16_t id;
+ uint16_t pad1;
+ uint32_t pad2;
+ union {
+ uint64_t ret_val;
+ struct fsif_stat_response fstat;
+ } u;
+};
+
+typedef struct fsif_response fsif_response_t;
+
+#define FSIF_RING_ENTRY_SIZE 64
+
+#define FSIF_NR_READ_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_read_request)) / \
+ sizeof(grant_ref_t) + 1)
+#define FSIF_NR_WRITE_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_write_request)) / \
+ sizeof(grant_ref_t) + 1)
+
+DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response);
+
+#define STATE_INITIALISED "init"
+#define STATE_READY "ready"
+#define STATE_CLOSING "closing"
+#define STATE_CLOSED "closed"
+
+
+#endif
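Every slot in the fsif ring is padded to FSIF_RING_ENTRY_SIZE bytes, and the unused tail of a read or write request is reused for extra grant references, which is where FSIF_NR_READ_GNTS comes from. A sketch of filling a read request in place; the caller is assumed to have granted its destination buffer elsewhere:

#include <xen/interface/io/fsif.h>

static void
fill_read_request(struct fsif_request *req, uint16_t id, uint32_t fd,
    uint64_t offset, uint64_t len, const grant_ref_t *grefs, unsigned int ngrefs)
{
    unsigned int i;

    req->type = REQ_FILE_READ;
    req->id = id;
    req->u.fread.fd = fd;
    req->u.fread.offset = offset;
    req->u.fread.len = len;
    /* grefs[] may legally extend into the slot's padding, up to
     * FSIF_NR_READ_GNTS entries. */
    for (i = 0; i < ngrefs && i < FSIF_NR_READ_GNTS; i++)
        req->u.fread.grefs[i] = grefs[i];
}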
diff --git a/sys/xen/interface/io/libxenvchan.h b/sys/xen/interface/io/libxenvchan.h
new file mode 100644
index 0000000..5c3d3d4
--- /dev/null
+++ b/sys/xen/interface/io/libxenvchan.h
@@ -0,0 +1,97 @@
+/**
+ * @file
+ * @section AUTHORS
+ *
+ * Copyright (C) 2010 Rafal Wojtczuk <rafal@invisiblethingslab.com>
+ *
+ * Authors:
+ * Rafal Wojtczuk <rafal@invisiblethingslab.com>
+ * Daniel De Graaf <dgdegra@tycho.nsa.gov>
+ *
+ * @section LICENSE
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * @section DESCRIPTION
+ *
+ * Originally borrowed from the Qubes OS Project, http://www.qubes-os.org,
+ * this code has been substantially rewritten to use the gntdev and gntalloc
+ * devices instead of raw MFNs and map_foreign_range.
+ *
+ * This is a library for inter-domain communication. A standard Xen ring
+ * buffer is used, with a datagram-based interface built on top. The grant
+ * reference and event channels are shared in XenStore under a user-specified
+ * path.
+ *
+ * The ring.h macros define an asymmetric interface to a shared data structure
+ * that assumes all rings reside in a single contiguous memory space. This is
+ * not suitable for vchan because the interface to the ring is symmetric except
+ * for the setup. Unlike the producer-consumer rings defined in ring.h, the
+ * size of the rings used in vchan are determined at execution time instead of
+ * compile time, so the macros in ring.h cannot be used to access the rings.
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+
+struct ring_shared {
+ uint32_t cons, prod;
+};
+
+#define VCHAN_NOTIFY_WRITE 0x1
+#define VCHAN_NOTIFY_READ 0x2
+
+/**
+ * vchan_interface: primary shared data structure
+ */
+struct vchan_interface {
+ /**
+ * Standard consumer/producer interface, one pair per buffer
+ * left is client write, server read
+ * right is client read, server write
+ */
+ struct ring_shared left, right;
+ /**
+ * size of the rings, which determines their location
+ * 10 - at offset 1024 in ring's page
+ * 11 - at offset 2048 in ring's page
+ * 12+ - uses 2^(N-12) grants to describe the multi-page ring
+ * These should remain constant once the page is shared.
+ * Only one of the two orders can be 10 (or 11).
+ */
+ uint16_t left_order, right_order;
+ /**
+ * Shutdown detection:
+ * 0: client (or server) has exited
+ * 1: client (or server) is connected
+ * 2: client has not yet connected
+ */
+ uint8_t cli_live, srv_live;
+ /**
+ * Notification bits:
+ * VCHAN_NOTIFY_WRITE: send notify when data is written
+ * VCHAN_NOTIFY_READ: send notify when data is read (consumed)
+ * cli_notify is used for the client to inform the server of its action
+ */
+ uint8_t cli_notify, srv_notify;
+ /**
+ * Grant list: ordering is left, right. Must not extend into actual ring
+ * or grow beyond the end of the initial shared page.
+ * These should remain constant once the page is shared, to allow
+ * for possible remapping by a client that restarts.
+ */
+ uint32_t grants[0];
+};
+
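The left_order/right_order fields encode both the size and the placement of each ring. A small sketch of the arithmetic the comment above describes; it is an illustration of the layout rules, not part of the header:

#include <stddef.h>
#include <stdint.h>

static size_t
vchan_ring_size(uint16_t order)
{
    return ((size_t)1 << order);
}

static size_t
vchan_ring_offset(uint16_t order)
{
    if (order == 10)
        return (1024);          /* second kilobyte of the shared page */
    if (order == 11)
        return (2048);          /* second half of the shared page */
    return (0);                 /* larger rings live in separate grants */
}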
diff --git a/sys/xen/interface/io/netif.h b/sys/xen/interface/io/netif.h
index 06e82e7..aca5df4 100644
--- a/sys/xen/interface/io/netif.h
+++ b/sys/xen/interface/io/netif.h
@@ -41,7 +41,7 @@
/*
* This is the 'wire' format for packets:
* Request 1: netif_tx_request -- NETTXF_* (any flags)
- * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info)
+ * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info)
* [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_FLAG_MORE)
* Request 4: netif_tx_request -- NETTXF_more_data
* Request 5: netif_tx_request -- NETTXF_more_data
diff --git a/sys/xen/interface/io/pciif.h b/sys/xen/interface/io/pciif.h
index 0a0ffcc..c4177f3 100644
--- a/sys/xen/interface/io/pciif.h
+++ b/sys/xen/interface/io/pciif.h
@@ -29,15 +29,23 @@
/* xen_pci_sharedinfo flags */
#define _XEN_PCIF_active (0)
-#define XEN_PCIF_active (1<<_XEN_PCI_active)
+#define XEN_PCIF_active (1<<_XEN_PCIF_active)
+#define _XEN_PCIB_AERHANDLER (1)
+#define XEN_PCIB_AERHANDLER (1<<_XEN_PCIB_AERHANDLER)
+#define _XEN_PCIB_active (2)
+#define XEN_PCIB_active (1<<_XEN_PCIB_active)
/* xen_pci_op commands */
-#define XEN_PCI_OP_conf_read (0)
-#define XEN_PCI_OP_conf_write (1)
-#define XEN_PCI_OP_enable_msi (2)
-#define XEN_PCI_OP_disable_msi (3)
-#define XEN_PCI_OP_enable_msix (4)
-#define XEN_PCI_OP_disable_msix (5)
+#define XEN_PCI_OP_conf_read (0)
+#define XEN_PCI_OP_conf_write (1)
+#define XEN_PCI_OP_enable_msi (2)
+#define XEN_PCI_OP_disable_msi (3)
+#define XEN_PCI_OP_enable_msix (4)
+#define XEN_PCI_OP_disable_msix (5)
+#define XEN_PCI_OP_aer_detected (6)
+#define XEN_PCI_OP_aer_resume (7)
+#define XEN_PCI_OP_aer_mmio (8)
+#define XEN_PCI_OP_aer_slotreset (9)
/* xen_pci_op error numbers */
#define XEN_PCI_ERR_success (0)
@@ -82,10 +90,25 @@ struct xen_pci_op {
struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC];
};
+/* Used for PCIe AER handling. */
+struct xen_pcie_aer_op
+{
+
+ /* IN: what action to perform: XEN_PCI_OP_* */
+ uint32_t cmd;
+ /* IN/OUT: returns the aer_op result or carries the error_detected state as input */
+ int32_t err;
+
+ /* IN: which device to touch */
+ uint32_t domain; /* PCI Domain/Segment*/
+ uint32_t bus;
+ uint32_t devfn;
+};
struct xen_pci_sharedinfo {
/* flags - XEN_PCIF_* */
uint32_t flags;
struct xen_pci_op op;
+ struct xen_pcie_aer_op aer_op;
};
#endif /* __XEN_PCI_COMMON_H__ */
diff --git a/sys/xen/interface/io/protocols.h b/sys/xen/interface/io/protocols.h
index fd52934..0b7a2ea 100644
--- a/sys/xen/interface/io/protocols.h
+++ b/sys/xen/interface/io/protocols.h
@@ -26,7 +26,7 @@
#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
-#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
+#define XEN_IO_PROTO_ABI_ARM "arm-abi"
#if defined(__i386__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
@@ -34,8 +34,8 @@
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
#elif defined(__ia64__)
# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
-#elif defined(__powerpc64__)
-# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
+#elif defined(__arm__)
+# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_ARM
#else
# error arch fixup needed here
#endif
diff --git a/sys/xen/interface/io/ring.h b/sys/xen/interface/io/ring.h
index 6b7fd74..1d668c4 100644
--- a/sys/xen/interface/io/ring.h
+++ b/sys/xen/interface/io/ring.h
@@ -53,9 +53,15 @@ typedef unsigned int RING_IDX;
/*
* Calculate size of a shared ring, given the total available space for the
* ring and indexes (_sz), and the name tag of the request/response structure.
- * A ring contains as many entries as will fit, rounded down to the nearest
+ * A ring contains as many entries as will fit, rounded down to the nearest
* power of two (so we can mask with (size-1) to loop around).
*/
+#define __CONST_RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
+ sizeof(((struct _s##_sring *)0)->ring[0])))
+/*
+ * The same for passing in an actual pointer instead of a name tag.
+ */
#define __RING_SIZE(_s, _sz) \
(__RD32(((_sz) - __RING_HEADER_SIZE(_s)) / sizeof((_s)->ring[0])))
@@ -113,7 +119,16 @@ union __name##_sring_entry { \
struct __name##_sring { \
RING_IDX req_prod, req_event; \
RING_IDX rsp_prod, rsp_event; \
- uint8_t pad[48]; \
+ union { \
+ struct { \
+ uint8_t smartpoll_active; \
+ } netif; \
+ struct { \
+ uint8_t msg; \
+ } tapif_user; \
+ uint8_t pvt_pad[4]; \
+ } private; \
+ uint8_t __pad[44]; \
union __name##_sring_entry ring[1]; /* variable-length */ \
}; \
\
@@ -157,7 +172,8 @@ typedef struct __name##_back_ring __name##_back_ring_t
#define SHARED_RING_INIT(_s) do { \
(_s)->req_prod = (_s)->rsp_prod = 0; \
(_s)->req_event = (_s)->rsp_event = 1; \
- (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \
+ (void)memset((_s)->private.pvt_pad, 0, sizeof((_s)->private.pvt_pad)); \
+ (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)
#define FRONT_RING_INIT(_r, _s, __size) do { \
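__CONST_RING_SIZE() lets a ring be sized at compile time from the name tag alone, which the new usbif.h and vscsiif.h headers rely on for their *_RING_SIZE constants. A hedged sketch with a hypothetical "exmpl" ring and a 4096-byte page:

#include <xen/interface/io/ring.h>

struct exmpl_request  { uint64_t id; uint64_t data; };
struct exmpl_response { uint64_t id; int64_t  status; };

DEFINE_RING_TYPES(exmpl, struct exmpl_request, struct exmpl_response);

#define EXMPL_RING_SIZE __CONST_RING_SIZE(exmpl, 4096)

/* The constant can size bookkeeping arrays statically: */
static struct exmpl_request pending_copy[EXMPL_RING_SIZE];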
diff --git a/sys/xen/interface/io/usbif.h b/sys/xen/interface/io/usbif.h
new file mode 100644
index 0000000..6099c29
--- /dev/null
+++ b/sys/xen/interface/io/usbif.h
@@ -0,0 +1,151 @@
+/*
+ * usbif.h
+ *
+ * USB I/O interface for Xen guest OSes.
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_IO_USBIF_H__
+#define __XEN_PUBLIC_IO_USBIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+enum usb_spec_version {
+ USB_VER_UNKNOWN = 0,
+ USB_VER_USB11,
+ USB_VER_USB20,
+ USB_VER_USB30, /* not supported yet */
+};
+
+/*
+ * USB pipe in usbif_request
+ *
+ * bits 0-5 are specific bits for virtual USB driver.
+ * bits 7-31 are standard urb pipe.
+ *
+ * - port number(NEW): bits 0-4
+ * (USB_MAXCHILDREN is 31)
+ *
+ * - operation flag(NEW): bit 5
+ * (0 = submit urb,
+ * 1 = unlink urb)
+ *
+ * - direction: bit 7
+ * (0 = Host-to-Device [Out]
+ * 1 = Device-to-Host [In])
+ *
+ * - device address: bits 8-14
+ *
+ * - endpoint: bits 15-18
+ *
+ * - pipe type: bits 30-31
+ * (00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk)
+ */
+#define usbif_pipeportnum(pipe) ((pipe) & 0x1f)
+#define usbif_setportnum_pipe(pipe, portnum) \
+ ((pipe)|(portnum))
+
+#define usbif_pipeunlink(pipe) ((pipe) & 0x20)
+#define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe))
+#define usbif_setunlink_pipe(pipe) ((pipe)|(0x20))
+
+#define USBIF_BACK_MAX_PENDING_REQS (128)
+#define USBIF_MAX_SEGMENTS_PER_REQUEST (16)
+
+/*
+ * RING for transferring urbs.
+ */
+struct usbif_request_segment {
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t length;
+};
+
+struct usbif_urb_request {
+ uint16_t id; /* request id */
+ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
+
+ /* basic urb parameter */
+ uint32_t pipe;
+ uint16_t transfer_flags;
+ uint16_t buffer_length;
+ union {
+ uint8_t ctrl[8]; /* setup_packet (Ctrl) */
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t start_frame; /* start frame */
+ uint16_t number_of_packets; /* number of ISO packet */
+ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */
+ } isoc;
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t pad[3];
+ } intr;
+
+ struct {
+ uint16_t unlink_id; /* unlink request id */
+ uint16_t pad[3];
+ } unlink;
+
+ } u;
+
+ /* urb data segments */
+ struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST];
+};
+typedef struct usbif_urb_request usbif_urb_request_t;
+
+struct usbif_urb_response {
+ uint16_t id; /* request id */
+ uint16_t start_frame; /* start frame (ISO) */
+ int32_t status; /* status (non-ISO) */
+ int32_t actual_length; /* actual transfer length */
+ int32_t error_count; /* number of ISO errors */
+};
+typedef struct usbif_urb_response usbif_urb_response_t;
+
+DEFINE_RING_TYPES(usbif_urb, struct usbif_urb_request, struct usbif_urb_response);
+#define USB_URB_RING_SIZE __CONST_RING_SIZE(usbif_urb, PAGE_SIZE)
+
+/*
+ * RING for notifying connect/disconnect events to frontend
+ */
+struct usbif_conn_request {
+ uint16_t id;
+};
+typedef struct usbif_conn_request usbif_conn_request_t;
+
+struct usbif_conn_response {
+ uint16_t id; /* request id */
+ uint8_t portnum; /* port number */
+ uint8_t speed; /* usb_device_speed */
+};
+typedef struct usbif_conn_response usbif_conn_response_t;
+
+DEFINE_RING_TYPES(usbif_conn, struct usbif_conn_request, struct usbif_conn_response);
+#define USB_CONN_RING_SIZE __CONST_RING_SIZE(usbif_conn, PAGE_SIZE)
+
+#endif /* __XEN_PUBLIC_IO_USBIF_H__ */
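The pipe word packs port, unlink flag, direction, device address, endpoint and transfer type into 32 bits as documented above. A sketch of composing one for a bulk IN transfer; only the port/unlink helpers come from the header, the remaining shifts simply follow the documented layout:

#include <xen/interface/io/usbif.h>

static uint32_t
build_bulk_in_pipe(void)
{
    uint32_t pipe = 0;

    pipe |= 2;                  /* bits 0-4: virtual port number 2 */
                                /* bit 5 clear: submit, not unlink */
    pipe |= 1u << 7;            /* bit 7: Device-to-Host (IN) */
    pipe |= 3u << 8;            /* bits 8-14: device address 3 */
    pipe |= 1u << 15;           /* bits 15-18: endpoint 1 */
    pipe |= 3u << 30;           /* bits 30-31: 11 = bulk */
    return (pipe);
}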
diff --git a/sys/xen/interface/io/vscsiif.h b/sys/xen/interface/io/vscsiif.h
new file mode 100644
index 0000000..3ce2914
--- /dev/null
+++ b/sys/xen/interface/io/vscsiif.h
@@ -0,0 +1,105 @@
+/******************************************************************************
+ * vscsiif.h
+ *
+ * Based on the blkif.h code.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright(c) FUJITSU Limited 2008.
+ */
+
+#ifndef __XEN__PUBLIC_IO_SCSI_H__
+#define __XEN__PUBLIC_IO_SCSI_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/* command between backend and frontend */
+#define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */
+#define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort*/
+#define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset*/
+
+
+#define VSCSIIF_BACK_MAX_PENDING_REQS 128
+
+/*
+ * Maximum scatter/gather segments per request.
+ *
+ * Balancing the ability to fit at least 16 "vscsiif_request" structures
+ * on one page (4096 bytes) against the number of scatter-gather segments
+ * needed, 26 was chosen as the limit.
+ */
+#define VSCSIIF_SG_TABLESIZE 26
+
+/*
+ * Based on Linux kernel 2.6.18.
+ */
+#define VSCSIIF_MAX_COMMAND_SIZE 16
+#define VSCSIIF_SENSE_BUFFERSIZE 96
+
+
+struct vscsiif_request {
+ uint16_t rqid; /* private guest value, echoed in resp */
+ uint8_t act; /* command between backend and frontend */
+ uint8_t cmd_len;
+
+ uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
+ uint16_t timeout_per_command; /* The backend issues the command with
+ a timeout of twice this value. */
+ uint16_t channel, id, lun;
+ uint16_t padding;
+ uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1)
+ DMA_FROM_DEVICE(2)
+ DMA_NONE(3) requests */
+ uint8_t nr_segments; /* Number of pieces of scatter-gather */
+
+ struct scsiif_request_segment {
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t length;
+ } seg[VSCSIIF_SG_TABLESIZE];
+ uint32_t reserved[3];
+};
+typedef struct vscsiif_request vscsiif_request_t;
+
+struct vscsiif_response {
+ uint16_t rqid;
+ uint8_t padding;
+ uint8_t sense_len;
+ uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
+ int32_t rslt;
+ uint32_t residual_len; /* request bufflen -
+ return the value from physical device */
+ uint32_t reserved[36];
+};
+typedef struct vscsiif_response vscsiif_response_t;
+
+DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
+
+
+#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/sys/xen/interface/io/xs_wire.h b/sys/xen/interface/io/xs_wire.h
index f6a49ab..7e454c4 100644
--- a/sys/xen/interface/io/xs_wire.h
+++ b/sys/xen/interface/io/xs_wire.h
@@ -47,7 +47,9 @@ enum xsd_sockmsg_type
XS_ERROR,
XS_IS_DOMAIN_INTRODUCED,
XS_RESUME,
- XS_SET_TARGET
+ XS_SET_TARGET,
+ XS_RESTRICT,
+ XS_RESET_WATCHES
};
#define XS_WRITE_NONE "NONE"
@@ -60,6 +62,7 @@ struct xsd_errors
int errnum;
const char *errstring;
};
+#ifdef EINVAL
#define XSD_ERROR(x) { x, #x }
/* LINTED: static unused */
static struct xsd_errors xsd_errors[]
@@ -82,6 +85,7 @@ __attribute__((unused))
XSD_ERROR(EAGAIN),
XSD_ERROR(EISCONN)
};
+#endif
struct xsd_sockmsg
{
diff --git a/sys/xen/interface/kexec.h b/sys/xen/interface/kexec.h
index fc19f2f..0425222 100644
--- a/sys/xen/interface/kexec.h
+++ b/sys/xen/interface/kexec.h
@@ -155,27 +155,6 @@ typedef struct xen_kexec_range {
unsigned long start;
} xen_kexec_range_t;
-/* vmcoreinfo stuff */
-#define VMCOREINFO_BYTES (4096)
-#define VMCOREINFO_NOTE_NAME "VMCOREINFO_XEN"
-void arch_crash_save_vmcoreinfo(void);
-void vmcoreinfo_append_str(const char *fmt, ...)
- __attribute__ ((format (printf, 1, 2)));
-#define VMCOREINFO_PAGESIZE(value) \
- vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
-#define VMCOREINFO_SYMBOL(name) \
- vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
-#define VMCOREINFO_SYMBOL_ALIAS(alias, name) \
- vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #alias, (unsigned long)&name)
-#define VMCOREINFO_STRUCT_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%zu\n", #name, sizeof(struct name))
-#define VMCOREINFO_OFFSET(name, field) \
- vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
- (unsigned long)offsetof(struct name, field))
-#define VMCOREINFO_OFFSET_ALIAS(name, field, alias) \
- vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #alias, \
- (unsigned long)offsetof(struct name, field))
-
#endif /* _XEN_PUBLIC_KEXEC_H */
/*
diff --git a/sys/xen/interface/mem_event.h b/sys/xen/interface/mem_event.h
new file mode 100644
index 0000000..5d0bd4c
--- /dev/null
+++ b/sys/xen/interface/mem_event.h
@@ -0,0 +1,80 @@
+/******************************************************************************
+ * mem_event.h
+ *
+ * Memory event common structures.
+ *
+ * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _XEN_PUBLIC_MEM_EVENT_H
+#define _XEN_PUBLIC_MEM_EVENT_H
+
+#include "xen.h"
+#include "io/ring.h"
+
+/* Memory event flags */
+#define MEM_EVENT_FLAG_VCPU_PAUSED (1 << 0)
+#define MEM_EVENT_FLAG_DROP_PAGE (1 << 1)
+#define MEM_EVENT_FLAG_EVICT_FAIL (1 << 2)
+#define MEM_EVENT_FLAG_FOREIGN (1 << 3)
+#define MEM_EVENT_FLAG_DUMMY (1 << 4)
+
+/* Reasons for the memory event request */
+#define MEM_EVENT_REASON_UNKNOWN 0 /* typical reason */
+#define MEM_EVENT_REASON_VIOLATION 1 /* access violation, GFN is address */
+#define MEM_EVENT_REASON_CR0 2 /* CR0 was hit: gfn is CR0 value */
+#define MEM_EVENT_REASON_CR3 3 /* CR3 was hit: gfn is CR3 value */
+#define MEM_EVENT_REASON_CR4 4 /* CR4 was hit: gfn is CR4 value */
+#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */
+#define MEM_EVENT_REASON_SINGLESTEP 6 /* single step was invoked: gla/gfn are RIP */
+
+typedef struct mem_event_st {
+ uint32_t flags;
+ uint32_t vcpu_id;
+
+ uint64_t gfn;
+ uint64_t offset;
+ uint64_t gla; /* if gla_valid */
+
+ uint32_t p2mt;
+
+ uint16_t access_r:1;
+ uint16_t access_w:1;
+ uint16_t access_x:1;
+ uint16_t gla_valid:1;
+ uint16_t available:12;
+
+ uint16_t reason;
+} mem_event_request_t, mem_event_response_t;
+
+DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t);
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/sys/xen/interface/memory.h b/sys/xen/interface/memory.h
index d7b9fff..86d02c8 100644
--- a/sys/xen/interface/memory.h
+++ b/sys/xen/interface/memory.h
@@ -27,6 +27,8 @@
#ifndef __XEN_PUBLIC_MEMORY_H__
#define __XEN_PUBLIC_MEMORY_H__
+#include "xen.h"
+
/*
* Increase or decrease the specified domain's memory reservation. Returns the
* number of extents successfully allocated or freed.
@@ -48,6 +50,11 @@
/* NUMA node to allocate from. */
#define XENMEMF_node(x) (((x) + 1) << 8)
#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
+/* Flag to populate physmap with populate-on-demand entries */
+#define XENMEMF_populate_on_demand (1<<16)
+/* Flag to request allocation only from the node specified */
+#define XENMEMF_exact_node_request (1<<17)
+#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
#endif
struct xen_memory_reservation {
@@ -201,12 +208,18 @@ struct xen_add_to_physmap {
/* Which domain to change the mapping for. */
domid_t domid;
+ /* Number of pages to go through for gmfn_range */
+ uint16_t size;
+
/* Source mapping space. */
#define XENMAPSPACE_shared_info 0 /* shared info page */
#define XENMAPSPACE_grant_table 1 /* grant table page */
-#define XENMAPSPACE_mfn 2 /* usual MFN */
+#define XENMAPSPACE_gmfn 2 /* GMFN */
+#define XENMAPSPACE_gmfn_range 3 /* GMFN range */
unsigned int space;
+#define XENMAPIDX_grant_table_status 0x80000000
+
/* Index into source mapping space. */
xen_ulong_t idx;
@@ -232,29 +245,8 @@ struct xen_remove_from_physmap {
typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
-/*
- * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
- * code on failure. This call only works for auto-translated guests.
- */
-#define XENMEM_translate_gpfn_list 8
-struct xen_translate_gpfn_list {
- /* Which domain to translate for? */
- domid_t domid;
-
- /* Length of list. */
- xen_ulong_t nr_gpfns;
-
- /* List of GPFNs to translate. */
- XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
-
- /*
- * Output list to contain MFN translations. May be the same as the input
- * list (in which case each input GPFN is overwritten with the output MFN).
- */
- XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
-};
-typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
-DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
+/*** REMOVED ***/
+/*#define XENMEM_translate_gpfn_list 8*/
/*
* Returns the pseudo-physical memory map as it was when the domain
@@ -299,6 +291,114 @@ struct xen_foreign_memory_map {
typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
+#define XENMEM_set_pod_target 16
+#define XENMEM_get_pod_target 17
+struct xen_pod_target {
+ /* IN */
+ uint64_t target_pages;
+ /* OUT */
+ uint64_t tot_pages;
+ uint64_t pod_cache_pages;
+ uint64_t pod_entries;
+ /* IN */
+ domid_t domid;
+};
+typedef struct xen_pod_target xen_pod_target_t;
+
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#ifndef uint64_aligned_t
+#define uint64_aligned_t uint64_t
+#endif
+
+/*
+ * Get the number of MFNs saved through memory sharing.
+ * The call never fails.
+ */
+#define XENMEM_get_sharing_freed_pages 18
+#define XENMEM_get_sharing_shared_pages 19
+
+#define XENMEM_paging_op 20
+#define XENMEM_paging_op_nominate 0
+#define XENMEM_paging_op_evict 1
+#define XENMEM_paging_op_prep 2
+
+#define XENMEM_access_op 21
+#define XENMEM_access_op_resume 0
+
+struct xen_mem_event_op {
+ uint8_t op; /* XENMEM_*_op_* */
+ domid_t domain;
+
+
+ /* PAGING_PREP IN: buffer to immediately fill page in */
+ uint64_aligned_t buffer;
+ /* Other OPs */
+ uint64_aligned_t gfn; /* IN: gfn of page being operated on */
+};
+typedef struct xen_mem_event_op xen_mem_event_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
+
+#define XENMEM_sharing_op 22
+#define XENMEM_sharing_op_nominate_gfn 0
+#define XENMEM_sharing_op_nominate_gref 1
+#define XENMEM_sharing_op_share 2
+#define XENMEM_sharing_op_resume 3
+#define XENMEM_sharing_op_debug_gfn 4
+#define XENMEM_sharing_op_debug_mfn 5
+#define XENMEM_sharing_op_debug_gref 6
+#define XENMEM_sharing_op_add_physmap 7
+#define XENMEM_sharing_op_audit 8
+
+#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
+#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
+
+/* The following allows sharing of grant refs. This is useful
+ * for sharing utilities sitting as "filters" in IO backends
+ * (e.g. memshr + blktap(2)). The IO backend is only exposed
+ * to grant references, and this allows sharing of the grefs */
+#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (1ULL << 62)
+
+#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \
+ (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val)
+#define XENMEM_SHARING_OP_FIELD_IS_GREF(field) \
+ ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)
+#define XENMEM_SHARING_OP_FIELD_GET_GREF(field) \
+ ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG))
+
+struct xen_mem_sharing_op {
+ uint8_t op; /* XENMEM_sharing_op_* */
+ domid_t domain;
+
+ union {
+ struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */
+ union {
+ uint64_aligned_t gfn; /* IN: gfn to nominate */
+ uint32_t grant_ref; /* IN: grant ref to nominate */
+ } u;
+ uint64_aligned_t handle; /* OUT: the handle */
+ } nominate;
+ struct mem_sharing_op_share { /* OP_SHARE/ADD_PHYSMAP */
+ uint64_aligned_t source_gfn; /* IN: the gfn of the source page */
+ uint64_aligned_t source_handle; /* IN: handle to the source page */
+ uint64_aligned_t client_gfn; /* IN: the client gfn */
+ uint64_aligned_t client_handle; /* IN: handle to the client page */
+ domid_t client_domain; /* IN: the client domain id */
+ } share;
+ struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
+ union {
+ uint64_aligned_t gfn; /* IN: gfn to debug */
+ uint64_aligned_t mfn; /* IN: mfn to debug */
+ uint32_t gref; /* IN: gref to debug */
+ } u;
+ } debug;
+ } u;
+};
+typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
#endif /* __XEN_PUBLIC_MEMORY_H__ */
/*
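XENMEM_get_pod_target is an ordinary memory_op sub-op whose domid travels in the last field of the argument. A minimal toolstack-side sketch, assuming the HYPERVISOR_memory_op() wrapper and a sufficiently privileged caller:

#include <xen/interface/memory.h>

static int
get_pod_state(domid_t dom, struct xen_pod_target *pod)
{
    pod->domid = dom;
    /* On success tot_pages, pod_cache_pages and pod_entries are filled in. */
    return (HYPERVISOR_memory_op(XENMEM_get_pod_target, pod));
}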
diff --git a/sys/xen/interface/nmi.h b/sys/xen/interface/nmi.h
index b2b8401..2fd21d2 100644
--- a/sys/xen/interface/nmi.h
+++ b/sys/xen/interface/nmi.h
@@ -27,6 +27,8 @@
#ifndef __XEN_PUBLIC_NMI_H__
#define __XEN_PUBLIC_NMI_H__
+#include "xen.h"
+
/*
* NMI reason codes:
* Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
diff --git a/sys/xen/interface/physdev.h b/sys/xen/interface/physdev.h
index 8057277..b78eeba 100644
--- a/sys/xen/interface/physdev.h
+++ b/sys/xen/interface/physdev.h
@@ -21,6 +21,8 @@
#ifndef __XEN_PUBLIC_PHYSDEV_H__
#define __XEN_PUBLIC_PHYSDEV_H__
+#include "xen.h"
+
/*
* Prototype for this hypercall is:
* int physdev_op(int cmd, void *args)
@@ -41,6 +43,29 @@ typedef struct physdev_eoi physdev_eoi_t;
DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
/*
+ * Register a shared page for the hypervisor to indicate whether the guest
+ * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly
+ * once the guest has used this function: the associated event channel
+ * will automatically get unmasked. The page registered is used as a bit
+ * array indexed by Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_gmfn_v1 17
+/*
+ * Register a shared page for the hypervisor to indicate whether the
+ * guest must issue PHYSDEVOP_eoi. This hypercall is very similar to
+ * PHYSDEVOP_pirq_eoi_gmfn_v1 but it doesn't change the semantics of
+ * PHYSDEVOP_eoi. The page registered is used as a bit array indexed by
+ * Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_gmfn_v2 28
+struct physdev_pirq_eoi_gmfn {
+ /* IN */
+ xen_pfn_t gmfn;
+};
+typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t);
+
+/*
* Query the status of an IRQ line.
* @arg == pointer to physdev_irq_status_query structure.
*/
@@ -125,6 +150,7 @@ DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
#define MAP_PIRQ_TYPE_MSI 0x0
#define MAP_PIRQ_TYPE_GSI 0x1
#define MAP_PIRQ_TYPE_UNKNOWN 0x2
+#define MAP_PIRQ_TYPE_MSI_SEG 0x3
#define PHYSDEVOP_map_pirq 13
struct physdev_map_pirq {
@@ -135,7 +161,7 @@ struct physdev_map_pirq {
int index;
/* IN or OUT */
int pirq;
- /* IN */
+ /* IN - high 16 bits hold segment for MAP_PIRQ_TYPE_MSI_SEG */
int bus;
/* IN */
int devfn;
@@ -168,6 +194,31 @@ struct physdev_manage_pci {
typedef struct physdev_manage_pci physdev_manage_pci_t;
DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
+#define PHYSDEVOP_restore_msi 19
+struct physdev_restore_msi {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+};
+typedef struct physdev_restore_msi physdev_restore_msi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t);
+
+#define PHYSDEVOP_manage_pci_add_ext 20
+struct physdev_manage_pci_ext {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+ unsigned is_extfn;
+ unsigned is_virtfn;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+};
+
+typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t);
+
/*
 * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op()
* hypercall since 0x00030202.
@@ -185,6 +236,82 @@ struct physdev_op {
typedef struct physdev_op physdev_op_t;
DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
+#define PHYSDEVOP_setup_gsi 21
+struct physdev_setup_gsi {
+ int gsi;
+ /* IN */
+ uint8_t triggering;
+ /* IN */
+ uint8_t polarity;
+ /* IN */
+};
+
+typedef struct physdev_setup_gsi physdev_setup_gsi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_setup_gsi_t);
+
+/* leave PHYSDEVOP 22 free */
+
+/* type is MAP_PIRQ_TYPE_GSI or MAP_PIRQ_TYPE_MSI
+ * the hypercall returns a free pirq */
+#define PHYSDEVOP_get_free_pirq 23
+struct physdev_get_free_pirq {
+ /* IN */
+ int type;
+ /* OUT */
+ uint32_t pirq;
+};
+
+typedef struct physdev_get_free_pirq physdev_get_free_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_get_free_pirq_t);
+
+#define XEN_PCI_MMCFG_RESERVED 0x1
+
+#define PHYSDEVOP_pci_mmcfg_reserved 24
+struct physdev_pci_mmcfg_reserved {
+ uint64_t address;
+ uint16_t segment;
+ uint8_t start_bus;
+ uint8_t end_bus;
+ uint32_t flags;
+};
+typedef struct physdev_pci_mmcfg_reserved physdev_pci_mmcfg_reserved_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_mmcfg_reserved_t);
+
+#define XEN_PCI_DEV_EXTFN 0x1
+#define XEN_PCI_DEV_VIRTFN 0x2
+#define XEN_PCI_DEV_PXM 0x4
+
+#define PHYSDEVOP_pci_device_add 25
+struct physdev_pci_device_add {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+ uint32_t flags;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ uint32_t optarr[];
+#elif defined(__GNUC__)
+ uint32_t optarr[0];
+#endif
+};
+typedef struct physdev_pci_device_add physdev_pci_device_add_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_add_t);
+
+#define PHYSDEVOP_pci_device_remove 26
+#define PHYSDEVOP_restore_msi_ext 27
+struct physdev_pci_device {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+};
+typedef struct physdev_pci_device physdev_pci_device_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t);
+
/*
* Notify that some PIRQ-bound event channels have been unmasked.
* ** This command is obsolete since interface version 0x00030202 and is **
@@ -206,6 +333,12 @@ DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
+#if __XEN_INTERFACE_VERSION__ < 0x00040200
+#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v1
+#else
+#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v2
+#endif
+
#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
/*
diff --git a/sys/xen/interface/platform.h b/sys/xen/interface/platform.h
index c69fdab..ad51634 100644
--- a/sys/xen/interface/platform.h
+++ b/sys/xen/interface/platform.h
@@ -114,10 +114,110 @@ struct xenpf_platform_quirk {
typedef struct xenpf_platform_quirk xenpf_platform_quirk_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
+#define XENPF_efi_runtime_call 49
+#define XEN_EFI_get_time 1
+#define XEN_EFI_set_time 2
+#define XEN_EFI_get_wakeup_time 3
+#define XEN_EFI_set_wakeup_time 4
+#define XEN_EFI_get_next_high_monotonic_count 5
+#define XEN_EFI_get_variable 6
+#define XEN_EFI_set_variable 7
+#define XEN_EFI_get_next_variable_name 8
+#define XEN_EFI_query_variable_info 9
+#define XEN_EFI_query_capsule_capabilities 10
+#define XEN_EFI_update_capsule 11
+struct xenpf_efi_runtime_call {
+ uint32_t function;
+ /*
+ * This field is generally used for per sub-function flags (defined
+ * below), except for the XEN_EFI_get_next_high_monotonic_count case,
+ * where it holds the single returned value.
+ */
+ uint32_t misc;
+ unsigned long status;
+ union {
+#define XEN_EFI_GET_TIME_SET_CLEARS_NS 0x00000001
+ struct {
+ struct xenpf_efi_time {
+ uint16_t year;
+ uint8_t month;
+ uint8_t day;
+ uint8_t hour;
+ uint8_t min;
+ uint8_t sec;
+ uint32_t ns;
+ int16_t tz;
+ uint8_t daylight;
+ } time;
+ uint32_t resolution;
+ uint32_t accuracy;
+ } get_time;
+
+ struct xenpf_efi_time set_time;
+
+#define XEN_EFI_GET_WAKEUP_TIME_ENABLED 0x00000001
+#define XEN_EFI_GET_WAKEUP_TIME_PENDING 0x00000002
+ struct xenpf_efi_time get_wakeup_time;
+
+#define XEN_EFI_SET_WAKEUP_TIME_ENABLE 0x00000001
+#define XEN_EFI_SET_WAKEUP_TIME_ENABLE_ONLY 0x00000002
+ struct xenpf_efi_time set_wakeup_time;
+
+#define XEN_EFI_VARIABLE_NON_VOLATILE 0x00000001
+#define XEN_EFI_VARIABLE_BOOTSERVICE_ACCESS 0x00000002
+#define XEN_EFI_VARIABLE_RUNTIME_ACCESS 0x00000004
+ struct {
+ XEN_GUEST_HANDLE(void) name; /* UCS-2/UTF-16 string */
+ unsigned long size;
+ XEN_GUEST_HANDLE(void) data;
+ struct xenpf_efi_guid {
+ uint32_t data1;
+ uint16_t data2;
+ uint16_t data3;
+ uint8_t data4[8];
+ } vendor_guid;
+ } get_variable, set_variable;
+
+ struct {
+ unsigned long size;
+ XEN_GUEST_HANDLE(void) name; /* UCS-2/UTF-16 string */
+ struct xenpf_efi_guid vendor_guid;
+ } get_next_variable_name;
+
+ struct {
+ uint32_t attr;
+ uint64_t max_store_size;
+ uint64_t remain_store_size;
+ uint64_t max_size;
+ } query_variable_info;
+
+ struct {
+ XEN_GUEST_HANDLE(void) capsule_header_array;
+ unsigned long capsule_count;
+ uint64_t max_capsule_size;
+ unsigned int reset_type;
+ } query_capsule_capabilities;
+
+ struct {
+ XEN_GUEST_HANDLE(void) capsule_header_array;
+ unsigned long capsule_count;
+ uint64_t sg_list; /* machine address */
+ } update_capsule;
+ } u;
+};
+typedef struct xenpf_efi_runtime_call xenpf_efi_runtime_call_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_efi_runtime_call_t);
+
#define XENPF_firmware_info 50
#define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */
#define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */
#define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */
+#define XEN_FW_EFI_INFO 4 /* from EFI */
+#define XEN_FW_EFI_VERSION 0
+#define XEN_FW_EFI_CONFIG_TABLE 1
+#define XEN_FW_EFI_VENDOR 2
+#define XEN_FW_EFI_MEM_INFO 3
+#define XEN_FW_EFI_RT_VERSION 4
struct xenpf_firmware_info {
/* IN variables. */
uint32_t type;
@@ -148,6 +248,24 @@ struct xenpf_firmware_info {
/* must refer to 128-byte buffer */
XEN_GUEST_HANDLE(uint8) edid;
} vbeddc_info; /* XEN_FW_VBEDDC_INFO */
+ union xenpf_efi_info {
+ uint32_t version;
+ struct {
+ uint64_t addr; /* EFI_CONFIGURATION_TABLE */
+ uint32_t nent;
+ } cfg;
+ struct {
+ uint32_t revision;
+ uint32_t bufsz; /* input, in bytes */
+ XEN_GUEST_HANDLE(void) name; /* UCS-2/UTF-16 string */
+ } vendor;
+ struct {
+ uint64_t addr;
+ uint64_t size;
+ uint64_t attr;
+ uint32_t type;
+ } mem;
+ } efi_info; /* XEN_FW_EFI_INFO */
} u;
};
typedef struct xenpf_firmware_info xenpf_firmware_info_t;
@@ -210,6 +328,7 @@ DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t);
#define XEN_PM_CX 0
#define XEN_PM_PX 1
#define XEN_PM_TX 2
+#define XEN_PM_PDC 3
/* Px sub info type */
#define XEN_PX_PCT 1
@@ -307,11 +426,88 @@ struct xenpf_set_processor_pminfo {
union {
struct xen_processor_power power;/* Cx: _CST/_CSD */
struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */
+ XEN_GUEST_HANDLE(uint32) pdc; /* _PDC */
} u;
};
typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t;
DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t);
+#define XENPF_get_cpuinfo 55
+struct xenpf_pcpuinfo {
+ /* IN */
+ uint32_t xen_cpuid;
+ /* OUT */
+ /* The maximum cpu_id that is present */
+ uint32_t max_present;
+#define XEN_PCPU_FLAGS_ONLINE 1
+ /* Corresponding xen_cpuid is not present */
+#define XEN_PCPU_FLAGS_INVALID 2
+ uint32_t flags;
+ uint32_t apic_id;
+ uint32_t acpi_id;
+};
+typedef struct xenpf_pcpuinfo xenpf_pcpuinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_pcpuinfo_t);
+
+#define XENPF_get_cpu_version 48
+struct xenpf_pcpu_version {
+ /* IN */
+ uint32_t xen_cpuid;
+ /* OUT */
+ /* The maximum cpu_id that is present */
+ uint32_t max_present;
+ char vendor_id[12];
+ uint32_t family;
+ uint32_t model;
+ uint32_t stepping;
+};
+typedef struct xenpf_pcpu_version xenpf_pcpu_version_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_pcpu_version_t);
+
+#define XENPF_cpu_online 56
+#define XENPF_cpu_offline 57
+struct xenpf_cpu_ol
+{
+ uint32_t cpuid;
+};
+typedef struct xenpf_cpu_ol xenpf_cpu_ol_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_cpu_ol_t);
+
+#define XENPF_cpu_hotadd 58
+struct xenpf_cpu_hotadd
+{
+ uint32_t apic_id;
+ uint32_t acpi_id;
+ uint32_t pxm;
+};
+
+#define XENPF_mem_hotadd 59
+struct xenpf_mem_hotadd
+{
+ uint64_t spfn;
+ uint64_t epfn;
+ uint32_t pxm;
+ uint32_t flags;
+};
+
+#define XENPF_core_parking 60
+
+#define XEN_CORE_PARKING_SET 1
+#define XEN_CORE_PARKING_GET 2
+struct xenpf_core_parking {
+ /* IN variables */
+ uint32_t type;
+ /* IN variables: number of CPUs expected to be idled */
+ /* OUT variables: number of CPUs actually idled */
+ uint32_t idle_nums;
+};
+typedef struct xenpf_core_parking xenpf_core_parking_t;
+DEFINE_XEN_GUEST_HANDLE(xenpf_core_parking_t);
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_platform_op(const struct xen_platform_op*);
+ */
struct xen_platform_op {
uint32_t cmd;
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -322,11 +518,18 @@ struct xen_platform_op {
struct xenpf_read_memtype read_memtype;
struct xenpf_microcode_update microcode;
struct xenpf_platform_quirk platform_quirk;
+ struct xenpf_efi_runtime_call efi_runtime_call;
struct xenpf_firmware_info firmware_info;
struct xenpf_enter_acpi_sleep enter_acpi_sleep;
struct xenpf_change_freq change_freq;
struct xenpf_getidletime getidletime;
struct xenpf_set_processor_pminfo set_pminfo;
+ struct xenpf_pcpuinfo pcpu_info;
+ struct xenpf_pcpu_version pcpu_version;
+ struct xenpf_cpu_ol cpu_ol;
+ struct xenpf_cpu_hotadd cpu_add;
+ struct xenpf_mem_hotadd mem_add;
+ struct xenpf_core_parking core_parking;
uint8_t pad[128];
} u;
};
diff --git a/sys/xen/interface/sched.h b/sys/xen/interface/sched.h
index 2227a95..7f87420 100644
--- a/sys/xen/interface/sched.h
+++ b/sys/xen/interface/sched.h
@@ -99,6 +99,29 @@ typedef struct sched_remote_shutdown sched_remote_shutdown_t;
DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
/*
+ * Latch a shutdown code, so that when the domain later shuts down it
+ * reports this code to the control tools.
+ * @arg == as for SCHEDOP_shutdown.
+ */
+#define SCHEDOP_shutdown_code 5
+
+/*
+ * Setup, poke and destroy a domain watchdog timer.
+ * @arg == pointer to sched_watchdog structure.
+ * With id == 0, setup a domain watchdog timer to cause domain shutdown
+ * after timeout, returns watchdog id.
+ * With id != 0 and timeout == 0, destroy domain watchdog timer.
+ * With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
+ */
+#define SCHEDOP_watchdog 6
+struct sched_watchdog {
+ uint32_t id; /* watchdog ID */
+ uint32_t timeout; /* timeout */
+};
+typedef struct sched_watchdog sched_watchdog_t;
+DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t);
+
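A minimal usage sketch of the watchdog semantics described above, assuming a HYPERVISOR_sched_op() wrapper; the 30-second timeout is only an example:

    /* Hypothetical sketch: create, poke, then destroy a domain watchdog. */
    struct sched_watchdog wd = { .id = 0, .timeout = 30 };
    int id = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);  /* id == 0: create */

    wd.id = id;                                 /* poke: id != 0, timeout != 0 */
    wd.timeout = 30;
    HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);

    wd.timeout = 0;                             /* destroy: id != 0, timeout == 0 */
    HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);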
+/*
* Reason codes for SCHEDOP_shutdown. These may be interpreted by control
* software to determine the appropriate action. For the most part, Xen does
* not care about the shutdown code.
@@ -107,6 +130,7 @@ DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
+#define SHUTDOWN_watchdog 4 /* Restart because watchdog timer expired. */
#endif /* __XEN_PUBLIC_SCHED_H__ */
diff --git a/sys/xen/interface/sysctl.h b/sys/xen/interface/sysctl.h
index 6b10954..3225b2a 100644
--- a/sys/xen/interface/sysctl.h
+++ b/sys/xen/interface/sysctl.h
@@ -34,12 +34,12 @@
#include "xen.h"
#include "domctl.h"
-#define XEN_SYSCTL_INTERFACE_VERSION 0x00000006
+#define XEN_SYSCTL_INTERFACE_VERSION 0x00000009
/*
* Read console content from Xen buffer ring.
*/
-#define XEN_SYSCTL_readconsole 1
+/* XEN_SYSCTL_readconsole */
struct xen_sysctl_readconsole {
/* IN: Non-zero -> clear after reading. */
uint8_t clear;
@@ -60,7 +60,7 @@ typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
/* Get trace buffers machine base address */
-#define XEN_SYSCTL_tbuf_op 2
+/* XEN_SYSCTL_tbuf_op */
struct xen_sysctl_tbuf_op {
/* IN variables */
#define XEN_SYSCTL_TBUFOP_get_info 0
@@ -75,7 +75,7 @@ struct xen_sysctl_tbuf_op {
uint32_t evt_mask;
/* OUT variables */
uint64_aligned_t buffer_mfn;
- uint32_t size;
+ uint32_t size; /* Also an IN variable! */
};
typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
@@ -83,7 +83,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
/*
* Get physical information about the host machine
*/
-#define XEN_SYSCTL_physinfo 3
+/* XEN_SYSCTL_physinfo */
/* (x86) The platform supports HVM guests. */
#define _XEN_SYSCTL_PHYSCAP_hvm 0
#define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
@@ -93,30 +93,16 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
struct xen_sysctl_physinfo {
uint32_t threads_per_core;
uint32_t cores_per_socket;
- uint32_t nr_cpus;
- uint32_t nr_nodes;
+ uint32_t nr_cpus; /* # CPUs currently online */
+ uint32_t max_cpu_id; /* Largest possible CPU ID on this host */
+ uint32_t nr_nodes; /* # nodes currently online */
+ uint32_t max_node_id; /* Largest possible node ID on this host */
uint32_t cpu_khz;
uint64_aligned_t total_pages;
uint64_aligned_t free_pages;
uint64_aligned_t scrub_pages;
uint32_t hw_cap[8];
- /*
- * IN: maximum addressable entry in the caller-provided cpu_to_node array.
- * OUT: largest cpu identifier in the system.
- * If OUT is greater than IN then the cpu_to_node array is truncated!
- */
- uint32_t max_cpu_id;
- /*
- * If not NULL, this array is filled with node identifier for each cpu.
- * If a cpu has no node information (e.g., cpu not present) then the
- * sentinel value ~0u is written.
- * The size of this array is specified by the caller in @max_cpu_id.
- * If the actual @max_cpu_id is smaller than the array then the trailing
- * elements of the array will not be written by the sysctl.
- */
- XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
-
/* XEN_SYSCTL_PHYSCAP_??? */
uint32_t capabilities;
};
@@ -126,7 +112,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
/*
* Get the ID of the current scheduler.
*/
-#define XEN_SYSCTL_sched_id 4
+/* XEN_SYSCTL_sched_id */
struct xen_sysctl_sched_id {
/* OUT variable */
uint32_t sched_id;
@@ -135,7 +121,7 @@ typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
/* Interface for controlling Xen software performance counters. */
-#define XEN_SYSCTL_perfc_op 5
+/* XEN_SYSCTL_perfc_op */
/* Sub-operations: */
#define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */
#define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */
@@ -162,7 +148,7 @@ struct xen_sysctl_perfc_op {
typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
-#define XEN_SYSCTL_getdomaininfolist 6
+/* XEN_SYSCTL_getdomaininfolist */
struct xen_sysctl_getdomaininfolist {
/* IN variables. */
domid_t first_domain;
@@ -175,7 +161,7 @@ typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
/* Inject debug keys into Xen. */
-#define XEN_SYSCTL_debug_keys 7
+/* XEN_SYSCTL_debug_keys */
struct xen_sysctl_debug_keys {
/* IN variables. */
XEN_GUEST_HANDLE_64(char) keys;
@@ -185,7 +171,7 @@ typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
/* Get physical CPU information. */
-#define XEN_SYSCTL_getcpuinfo 8
+/* XEN_SYSCTL_getcpuinfo */
struct xen_sysctl_cpuinfo {
uint64_aligned_t idletime;
};
@@ -201,7 +187,7 @@ struct xen_sysctl_getcpuinfo {
typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);
-#define XEN_SYSCTL_availheap 9
+/* XEN_SYSCTL_availheap */
struct xen_sysctl_availheap {
/* IN variables. */
uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */
@@ -213,7 +199,7 @@ struct xen_sysctl_availheap {
typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
-#define XEN_SYSCTL_get_pmstat 10
+/* XEN_SYSCTL_get_pmstat */
struct pm_px_val {
uint64_aligned_t freq; /* Px core frequency */
uint64_aligned_t residency; /* Px residency time */
@@ -239,6 +225,13 @@ struct pm_cx_stat {
uint64_aligned_t idle_time; /* idle time from boot */
XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */
XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */
+ uint64_aligned_t pc2;
+ uint64_aligned_t pc3;
+ uint64_aligned_t pc6;
+ uint64_aligned_t pc7;
+ uint64_aligned_t cc3;
+ uint64_aligned_t cc6;
+ uint64_aligned_t cc7;
};
struct xen_sysctl_get_pmstat {
@@ -262,7 +255,7 @@ struct xen_sysctl_get_pmstat {
typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
-#define XEN_SYSCTL_cpu_hotplug 11
+/* XEN_SYSCTL_cpu_hotplug */
struct xen_sysctl_cpu_hotplug {
/* IN variables */
uint32_t cpu; /* Physical cpu. */
@@ -273,14 +266,363 @@ struct xen_sysctl_cpu_hotplug {
typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t);
+/*
+ * Get/set xen power management, include
+ * 1. cpufreq governors and related parameters
+ */
+/* XEN_SYSCTL_pm_op */
+struct xen_userspace {
+ uint32_t scaling_setspeed;
+};
+typedef struct xen_userspace xen_userspace_t;
+
+struct xen_ondemand {
+ uint32_t sampling_rate_max;
+ uint32_t sampling_rate_min;
+
+ uint32_t sampling_rate;
+ uint32_t up_threshold;
+};
+typedef struct xen_ondemand xen_ondemand_t;
+
+/*
+ * The cpufreq parameter names in this structure match the
+ * corresponding sysfs file names in native Linux.
+ */
+#define CPUFREQ_NAME_LEN 16
+struct xen_get_cpufreq_para {
+ /* IN/OUT variable */
+ uint32_t cpu_num;
+ uint32_t freq_num;
+ uint32_t gov_num;
+
+ /* for all governors */
+ /* OUT variable */
+ XEN_GUEST_HANDLE_64(uint32) affected_cpus;
+ XEN_GUEST_HANDLE_64(uint32) scaling_available_frequencies;
+ XEN_GUEST_HANDLE_64(char) scaling_available_governors;
+ char scaling_driver[CPUFREQ_NAME_LEN];
+
+ uint32_t cpuinfo_cur_freq;
+ uint32_t cpuinfo_max_freq;
+ uint32_t cpuinfo_min_freq;
+ uint32_t scaling_cur_freq;
+
+ char scaling_governor[CPUFREQ_NAME_LEN];
+ uint32_t scaling_max_freq;
+ uint32_t scaling_min_freq;
+
+ /* for specific governor */
+ union {
+ struct xen_userspace userspace;
+ struct xen_ondemand ondemand;
+ } u;
+
+ int32_t turbo_enabled;
+};
+
+struct xen_set_cpufreq_gov {
+ char scaling_governor[CPUFREQ_NAME_LEN];
+};
+
+struct xen_set_cpufreq_para {
+ #define SCALING_MAX_FREQ 1
+ #define SCALING_MIN_FREQ 2
+ #define SCALING_SETSPEED 3
+ #define SAMPLING_RATE 4
+ #define UP_THRESHOLD 5
+
+ uint32_t ctrl_type;
+ uint32_t ctrl_value;
+};
+
+struct xen_sysctl_pm_op {
+ #define PM_PARA_CATEGORY_MASK 0xf0
+ #define CPUFREQ_PARA 0x10
+
+ /* cpufreq command type */
+ #define GET_CPUFREQ_PARA (CPUFREQ_PARA | 0x01)
+ #define SET_CPUFREQ_GOV (CPUFREQ_PARA | 0x02)
+ #define SET_CPUFREQ_PARA (CPUFREQ_PARA | 0x03)
+ #define GET_CPUFREQ_AVGFREQ (CPUFREQ_PARA | 0x04)
+
+ /* set/reset scheduler power saving option */
+ #define XEN_SYSCTL_pm_op_set_sched_opt_smt 0x21
+
+ /* cpuidle max_cstate access command */
+ #define XEN_SYSCTL_pm_op_get_max_cstate 0x22
+ #define XEN_SYSCTL_pm_op_set_max_cstate 0x23
+
+ /* set scheduler migration cost value */
+ #define XEN_SYSCTL_pm_op_set_vcpu_migration_delay 0x24
+ #define XEN_SYSCTL_pm_op_get_vcpu_migration_delay 0x25
+
+ /* enable/disable turbo mode when in dbs governor */
+ #define XEN_SYSCTL_pm_op_enable_turbo 0x26
+ #define XEN_SYSCTL_pm_op_disable_turbo 0x27
+
+ uint32_t cmd;
+ uint32_t cpuid;
+ union {
+ struct xen_get_cpufreq_para get_para;
+ struct xen_set_cpufreq_gov set_gov;
+ struct xen_set_cpufreq_para set_para;
+ uint64_aligned_t get_avgfreq;
+ uint32_t set_sched_opt_smt;
+ uint32_t get_max_cstate;
+ uint32_t set_max_cstate;
+ uint32_t get_vcpu_migration_delay;
+ uint32_t set_vcpu_migration_delay;
+ } u;
+};
+
+/* XEN_SYSCTL_page_offline_op */
+struct xen_sysctl_page_offline_op {
+ /* IN: range of page to be offlined */
+#define sysctl_page_offline 1
+#define sysctl_page_online 2
+#define sysctl_query_page_offline 3
+ uint32_t cmd;
+ uint32_t start;
+ uint32_t end;
+ /* OUT: result of page offline request */
+ /*
+ * bit 0~15: result flags
+ * bit 16~31: owner
+ */
+ XEN_GUEST_HANDLE(uint32) status;
+};
+
+#define PG_OFFLINE_STATUS_MASK (0xFFUL)
+
+/* The result is invalid, i.e. HV does not handle it */
+#define PG_OFFLINE_INVALID (0x1UL << 0)
+
+#define PG_OFFLINE_OFFLINED (0x1UL << 1)
+#define PG_OFFLINE_PENDING (0x1UL << 2)
+#define PG_OFFLINE_FAILED (0x1UL << 3)
+#define PG_OFFLINE_AGAIN (0x1UL << 4)
+
+#define PG_ONLINE_FAILED PG_OFFLINE_FAILED
+#define PG_ONLINE_ONLINED PG_OFFLINE_OFFLINED
+
+#define PG_OFFLINE_STATUS_OFFLINED (0x1UL << 1)
+#define PG_OFFLINE_STATUS_ONLINE (0x1UL << 2)
+#define PG_OFFLINE_STATUS_OFFLINE_PENDING (0x1UL << 3)
+#define PG_OFFLINE_STATUS_BROKEN (0x1UL << 4)
+
+#define PG_OFFLINE_MISC_MASK (0xFFUL << 4)
+
+/* valid when PG_OFFLINE_FAILED or PG_OFFLINE_PENDING */
+#define PG_OFFLINE_XENPAGE (0x1UL << 8)
+#define PG_OFFLINE_DOM0PAGE (0x1UL << 9)
+#define PG_OFFLINE_ANONYMOUS (0x1UL << 10)
+#define PG_OFFLINE_NOT_CONV_RAM (0x1UL << 11)
+#define PG_OFFLINE_OWNED (0x1UL << 12)
+
+#define PG_OFFLINE_BROKEN (0x1UL << 13)
+#define PG_ONLINE_BROKEN PG_OFFLINE_BROKEN
+
+#define PG_OFFLINE_OWNER_SHIFT 16
+
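A hedged sketch of decoding one entry of the status array returned by XEN_SYSCTL_page_offline_op; only the flag names defined above are taken from the header, the printf reporting is illustrative:

    /* Hypothetical sketch: interpret a page-offline status word. */
    static void report_page_offline_status(uint32_t status)
    {
        uint32_t owner = status >> PG_OFFLINE_OWNER_SHIFT;   /* bits 16..31 */

        if (status & PG_OFFLINE_INVALID)
            printf("request not handled by the hypervisor\n");
        else if (status & PG_OFFLINE_OFFLINED)
            printf("page offlined\n");
        else if (status & PG_OFFLINE_PENDING)
            printf("offline pending (owner domid %u)\n",
                   (status & PG_OFFLINE_OWNED) ? owner : 0);
        else if (status & PG_OFFLINE_FAILED)
            printf("offline failed\n");
    }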
+/* XEN_SYSCTL_lockprof_op */
+/* Sub-operations: */
+#define XEN_SYSCTL_LOCKPROF_reset 1 /* Reset all profile data to zero. */
+#define XEN_SYSCTL_LOCKPROF_query 2 /* Get lock profile information. */
+/* Record-type: */
+#define LOCKPROF_TYPE_GLOBAL 0 /* global lock, idx meaningless */
+#define LOCKPROF_TYPE_PERDOM 1 /* per-domain lock, idx is domid */
+#define LOCKPROF_TYPE_N 2 /* number of types */
+struct xen_sysctl_lockprof_data {
+ char name[40]; /* lock name (may include up to 2 %d specifiers) */
+ int32_t type; /* LOCKPROF_TYPE_??? */
+ int32_t idx; /* index (e.g. domain id) */
+ uint64_aligned_t lock_cnt; /* # of times the lock was acquired */
+ uint64_aligned_t block_cnt; /* # of times a CPU waited for the lock */
+ uint64_aligned_t lock_time; /* nsecs lock held */
+ uint64_aligned_t block_time; /* nsecs waited for lock */
+};
+typedef struct xen_sysctl_lockprof_data xen_sysctl_lockprof_data_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_data_t);
+struct xen_sysctl_lockprof_op {
+ /* IN variables. */
+ uint32_t cmd; /* XEN_SYSCTL_LOCKPROF_??? */
+ uint32_t max_elem; /* size of output buffer */
+ /* OUT variables (query only). */
+ uint32_t nr_elem; /* number of elements available */
+ uint64_aligned_t time; /* nsecs of profile measurement */
+ /* profile information (or NULL) */
+ XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data;
+};
+typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
+
+/* XEN_SYSCTL_topologyinfo */
+#define INVALID_TOPOLOGY_ID (~0U)
+struct xen_sysctl_topologyinfo {
+ /*
+ * IN: maximum addressable entry in the caller-provided arrays.
+ * OUT: largest cpu identifier in the system.
+ * If OUT is greater than IN then the arrays are truncated!
+ * If OUT is less than IN then the array tails are not written by sysctl.
+ */
+ uint32_t max_cpu_index;
+
+ /*
+ * If not NULL, these arrays are filled with core/socket/node identifier
+ * for each cpu.
+ * If a cpu has no core/socket/node information (e.g., cpu not present)
+ * then the sentinel value ~0u is written to each array.
+ * The number of array elements written by the sysctl is:
+ * min(@max_cpu_index_IN,@max_cpu_index_OUT)+1
+ */
+ XEN_GUEST_HANDLE_64(uint32) cpu_to_core;
+ XEN_GUEST_HANDLE_64(uint32) cpu_to_socket;
+ XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
+};
+typedef struct xen_sysctl_topologyinfo xen_sysctl_topologyinfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t);
+
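A sketch of how a toolstack caller might fill this request, assuming set_xen_guest_handle() from the public headers and a do_sysctl() hypercall wrapper (the wrapper name is an assumption):

    /* Hypothetical sketch: query CPU topology for up to 64 CPUs. */
    #define NR_MAX 64
    uint32_t core[NR_MAX], socket[NR_MAX], node[NR_MAX];
    struct xen_sysctl sysctl = {
        .cmd = XEN_SYSCTL_topologyinfo,
        .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
    };

    sysctl.u.topologyinfo.max_cpu_index = NR_MAX - 1;            /* IN */
    set_xen_guest_handle(sysctl.u.topologyinfo.cpu_to_core, core);
    set_xen_guest_handle(sysctl.u.topologyinfo.cpu_to_socket, socket);
    set_xen_guest_handle(sysctl.u.topologyinfo.cpu_to_node, node);

    if (do_sysctl(&sysctl) == 0 &&
        sysctl.u.topologyinfo.max_cpu_index > NR_MAX - 1)
        /* OUT > IN: the arrays were truncated to NR_MAX entries. */;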
+/* XEN_SYSCTL_numainfo */
+#define INVALID_NUMAINFO_ID (~0U)
+struct xen_sysctl_numainfo {
+ /*
+ * IN: maximum addressable entry in the caller-provided arrays.
+ * OUT: largest node identifier in the system.
+ * If OUT is greater than IN then the arrays are truncated!
+ */
+ uint32_t max_node_index;
+
+ /* NB. Entries are 0 if node is not present. */
+ XEN_GUEST_HANDLE_64(uint64) node_to_memsize;
+ XEN_GUEST_HANDLE_64(uint64) node_to_memfree;
+
+ /*
+ * Array, of size (max_node_index+1)^2, listing memory access distances
+ * between nodes. If an entry has no node distance information (e.g., node
+ * not present) then the value ~0u is written.
+ *
+ * Note that the array rows must be indexed by multiplying by the minimum
+ * of the caller-provided max_node_index and the returned value of
+ * max_node_index. That is, if the largest node index in the system is
+ * smaller than the caller can handle, a smaller 2-d array is constructed
+ * within the space provided by the caller. When this occurs, trailing
+ * space provided by the caller is not modified. If the largest node index
+ * in the system is larger than the caller can handle, then a 2-d array of
+ * the maximum size handleable by the caller is constructed.
+ */
+ XEN_GUEST_HANDLE_64(uint32) node_to_node_distance;
+};
+typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
+
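A short worked helper for the distance-matrix indexing rule described above: the row stride is min(IN, OUT) + 1, where IN is the max_node_index the caller passed in and OUT is the value the hypervisor returned:

    /* Hypothetical sketch: look up the distance from node 'from' to node 'to'. */
    static uint32_t node_distance(const uint32_t *dist,
                                  uint32_t max_node_in,  /* caller-provided IN */
                                  uint32_t max_node_out, /* hypervisor OUT */
                                  uint32_t from, uint32_t to)
    {
        uint32_t stride = (max_node_in < max_node_out ? max_node_in
                                                      : max_node_out) + 1;
        return dist[from * stride + to];   /* ~0u if no information */
    }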
+/* XEN_SYSCTL_cpupool_op */
+#define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */
+#define XEN_SYSCTL_CPUPOOL_OP_DESTROY 2 /* D */
+#define XEN_SYSCTL_CPUPOOL_OP_INFO 3 /* I */
+#define XEN_SYSCTL_CPUPOOL_OP_ADDCPU 4 /* A */
+#define XEN_SYSCTL_CPUPOOL_OP_RMCPU 5 /* R */
+#define XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN 6 /* M */
+#define XEN_SYSCTL_CPUPOOL_OP_FREEINFO 7 /* F */
+#define XEN_SYSCTL_CPUPOOL_PAR_ANY 0xFFFFFFFF
+struct xen_sysctl_cpupool_op {
+ uint32_t op; /* IN */
+ uint32_t cpupool_id; /* IN: CDIARM OUT: CI */
+ uint32_t sched_id; /* IN: C OUT: I */
+ uint32_t domid; /* IN: M */
+ uint32_t cpu; /* IN: AR */
+ uint32_t n_dom; /* OUT: I */
+ struct xenctl_cpumap cpumap; /* OUT: IF */
+};
+typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
+
+#define ARINC653_MAX_DOMAINS_PER_SCHEDULE 64
+/*
+ * This structure is used to pass a new ARINC653 schedule from a
+ * privileged domain (ie dom0) to Xen.
+ */
+struct xen_sysctl_arinc653_schedule {
+ /* major_frame holds the time for the new schedule's major frame
+ * in nanoseconds. */
+ uint64_aligned_t major_frame;
+ /* num_sched_entries holds how many of the entries in the
+ * sched_entries[] array are valid. */
+ uint8_t num_sched_entries;
+ /* The sched_entries array holds the actual schedule entries. */
+ struct {
+ /* dom_handle must match a domain's UUID */
+ xen_domain_handle_t dom_handle;
+ /* If a domain has multiple VCPUs, vcpu_id specifies which one
+ * this schedule entry applies to. It should be set to 0 if
+ * there is only one VCPU for the domain. */
+ unsigned int vcpu_id;
+ /* runtime specifies the amount of time that should be allocated
+ * to this VCPU per major frame. It is specified in nanoseconds */
+ uint64_aligned_t runtime;
+ } sched_entries[ARINC653_MAX_DOMAINS_PER_SCHEDULE];
+};
+typedef struct xen_sysctl_arinc653_schedule xen_sysctl_arinc653_schedule_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_arinc653_schedule_t);
+
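A hedged sketch of filling a two-domain ARINC653 schedule; dom_a_handle and dom_b_handle are placeholder UUIDs, not defined in this header:

    /* Hypothetical sketch: a 10 ms major frame split 6 ms / 4 ms. */
    struct xen_sysctl_arinc653_schedule sched;
    memset(&sched, 0, sizeof(sched));

    sched.major_frame = 10000000ULL;                 /* nanoseconds */
    sched.num_sched_entries = 2;

    memcpy(sched.sched_entries[0].dom_handle, dom_a_handle,
           sizeof(xen_domain_handle_t));
    sched.sched_entries[0].vcpu_id = 0;
    sched.sched_entries[0].runtime = 6000000ULL;

    memcpy(sched.sched_entries[1].dom_handle, dom_b_handle,
           sizeof(xen_domain_handle_t));
    sched.sched_entries[1].vcpu_id = 0;
    sched.sched_entries[1].runtime = 4000000ULL;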
+struct xen_sysctl_credit_schedule {
+ /* Length of timeslice in milliseconds */
+#define XEN_SYSCTL_CSCHED_TSLICE_MAX 1000
+#define XEN_SYSCTL_CSCHED_TSLICE_MIN 1
+ unsigned tslice_ms;
+ /* Rate limit (minimum timeslice) in microseconds */
+#define XEN_SYSCTL_SCHED_RATELIMIT_MAX 500000
+#define XEN_SYSCTL_SCHED_RATELIMIT_MIN 100
+ unsigned ratelimit_us;
+};
+typedef struct xen_sysctl_credit_schedule xen_sysctl_credit_schedule_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit_schedule_t);
+
+/* XEN_SYSCTL_scheduler_op */
+/* Set or get info? */
+#define XEN_SYSCTL_SCHEDOP_putinfo 0
+#define XEN_SYSCTL_SCHEDOP_getinfo 1
+struct xen_sysctl_scheduler_op {
+ uint32_t cpupool_id; /* Cpupool whose scheduler is to be targeted. */
+ uint32_t sched_id; /* XEN_SCHEDULER_* (domctl.h) */
+ uint32_t cmd; /* XEN_SYSCTL_SCHEDOP_* */
+ union {
+ struct xen_sysctl_sched_arinc653 {
+ XEN_GUEST_HANDLE_64(xen_sysctl_arinc653_schedule_t) schedule;
+ } sched_arinc653;
+ struct xen_sysctl_credit_schedule sched_credit;
+ } u;
+};
+typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t);
struct xen_sysctl {
uint32_t cmd;
+#define XEN_SYSCTL_readconsole 1
+#define XEN_SYSCTL_tbuf_op 2
+#define XEN_SYSCTL_physinfo 3
+#define XEN_SYSCTL_sched_id 4
+#define XEN_SYSCTL_perfc_op 5
+#define XEN_SYSCTL_getdomaininfolist 6
+#define XEN_SYSCTL_debug_keys 7
+#define XEN_SYSCTL_getcpuinfo 8
+#define XEN_SYSCTL_availheap 9
+#define XEN_SYSCTL_get_pmstat 10
+#define XEN_SYSCTL_cpu_hotplug 11
+#define XEN_SYSCTL_pm_op 12
+#define XEN_SYSCTL_page_offline_op 14
+#define XEN_SYSCTL_lockprof_op 15
+#define XEN_SYSCTL_topologyinfo 16
+#define XEN_SYSCTL_numainfo 17
+#define XEN_SYSCTL_cpupool_op 18
+#define XEN_SYSCTL_scheduler_op 19
uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
union {
struct xen_sysctl_readconsole readconsole;
struct xen_sysctl_tbuf_op tbuf_op;
struct xen_sysctl_physinfo physinfo;
+ struct xen_sysctl_topologyinfo topologyinfo;
+ struct xen_sysctl_numainfo numainfo;
struct xen_sysctl_sched_id sched_id;
struct xen_sysctl_perfc_op perfc_op;
struct xen_sysctl_getdomaininfolist getdomaininfolist;
@@ -289,6 +631,11 @@ struct xen_sysctl {
struct xen_sysctl_availheap availheap;
struct xen_sysctl_get_pmstat get_pmstat;
struct xen_sysctl_cpu_hotplug cpu_hotplug;
+ struct xen_sysctl_pm_op pm_op;
+ struct xen_sysctl_page_offline_op page_offline;
+ struct xen_sysctl_lockprof_op lockprof_op;
+ struct xen_sysctl_cpupool_op cpupool_op;
+ struct xen_sysctl_scheduler_op scheduler_op;
uint8_t pad[128];
} u;
};
diff --git a/sys/xen/interface/tmem.h b/sys/xen/interface/tmem.h
new file mode 100644
index 0000000..74bd1c6
--- /dev/null
+++ b/sys/xen/interface/tmem.h
@@ -0,0 +1,148 @@
+/******************************************************************************
+ * tmem.h
+ *
+ * Guest OS interface to Xen Transcendent Memory.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_TMEM_H__
+#define __XEN_PUBLIC_TMEM_H__
+
+#include "xen.h"
+
+/* version of ABI */
+#define TMEM_SPEC_VERSION 1
+
+/* Commands to HYPERVISOR_tmem_op() */
+#define TMEM_CONTROL 0
+#define TMEM_NEW_POOL 1
+#define TMEM_DESTROY_POOL 2
+#define TMEM_NEW_PAGE 3
+#define TMEM_PUT_PAGE 4
+#define TMEM_GET_PAGE 5
+#define TMEM_FLUSH_PAGE 6
+#define TMEM_FLUSH_OBJECT 7
+#define TMEM_READ 8
+#define TMEM_WRITE 9
+#define TMEM_XCHG 10
+
+/* Privileged commands to HYPERVISOR_tmem_op() */
+#define TMEM_AUTH 101
+#define TMEM_RESTORE_NEW 102
+
+/* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */
+#define TMEMC_THAW 0
+#define TMEMC_FREEZE 1
+#define TMEMC_FLUSH 2
+#define TMEMC_DESTROY 3
+#define TMEMC_LIST 4
+#define TMEMC_SET_WEIGHT 5
+#define TMEMC_SET_CAP 6
+#define TMEMC_SET_COMPRESS 7
+#define TMEMC_QUERY_FREEABLE_MB 8
+#define TMEMC_SAVE_BEGIN 10
+#define TMEMC_SAVE_GET_VERSION 11
+#define TMEMC_SAVE_GET_MAXPOOLS 12
+#define TMEMC_SAVE_GET_CLIENT_WEIGHT 13
+#define TMEMC_SAVE_GET_CLIENT_CAP 14
+#define TMEMC_SAVE_GET_CLIENT_FLAGS 15
+#define TMEMC_SAVE_GET_POOL_FLAGS 16
+#define TMEMC_SAVE_GET_POOL_NPAGES 17
+#define TMEMC_SAVE_GET_POOL_UUID 18
+#define TMEMC_SAVE_GET_NEXT_PAGE 19
+#define TMEMC_SAVE_GET_NEXT_INV 20
+#define TMEMC_SAVE_END 21
+#define TMEMC_RESTORE_BEGIN 30
+#define TMEMC_RESTORE_PUT_PAGE 32
+#define TMEMC_RESTORE_FLUSH_PAGE 33
+
+/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
+#define TMEM_POOL_PERSIST 1
+#define TMEM_POOL_SHARED 2
+#define TMEM_POOL_PRECOMPRESSED 4
+#define TMEM_POOL_PAGESIZE_SHIFT 4
+#define TMEM_POOL_PAGESIZE_MASK 0xf
+#define TMEM_POOL_VERSION_SHIFT 24
+#define TMEM_POOL_VERSION_MASK 0xff
+#define TMEM_POOL_RESERVED_BITS 0x00ffff00
+
+/* Bits for client flags (save/restore) */
+#define TMEM_CLIENT_COMPRESS 1
+#define TMEM_CLIENT_FROZEN 2
+
+/* Special errno values */
+#define EFROZEN 1000
+#define EEMPTY 1001
+
+
+#ifndef __ASSEMBLY__
+typedef xen_pfn_t tmem_cli_mfn_t;
+typedef XEN_GUEST_HANDLE(char) tmem_cli_va_t;
+struct tmem_op {
+ uint32_t cmd;
+ int32_t pool_id;
+ union {
+ struct {
+ uint64_t uuid[2];
+ uint32_t flags;
+ uint32_t arg1;
+ } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */
+ struct {
+ uint32_t subop;
+ uint32_t cli_id;
+ uint32_t arg1;
+ uint32_t arg2;
+ uint64_t oid[3];
+ tmem_cli_va_t buf;
+ } ctrl; /* for cmd == TMEM_CONTROL */
+ struct {
+
+ uint64_t oid[3];
+ uint32_t index;
+ uint32_t tmem_offset;
+ uint32_t pfn_offset;
+ uint32_t len;
+ tmem_cli_mfn_t cmfn; /* client machine page frame */
+ } gen; /* for all other cmd ("generic") */
+ } u;
+};
+typedef struct tmem_op tmem_op_t;
+DEFINE_XEN_GUEST_HANDLE(tmem_op_t);
+
+struct tmem_handle {
+ uint32_t pool_id;
+ uint32_t index;
+ uint64_t oid[3];
+};
+#endif
+
+#endif /* __XEN_PUBLIC_TMEM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/sys/xen/interface/trace.h b/sys/xen/interface/trace.h
index a11b30f..0dfabe9 100644
--- a/sys/xen/interface/trace.h
+++ b/sys/xen/interface/trace.h
@@ -38,6 +38,8 @@
#define TRC_MEM 0x0010f000 /* Xen memory trace */
#define TRC_PV 0x0020f000 /* Xen PV traces */
#define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */
+#define TRC_HW 0x0080f000 /* Xen hardware-related traces */
+#define TRC_GUEST 0x0800f000 /* Guest-generated traces */
#define TRC_ALL 0x0ffff000
#define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff)
#define TRC_HD_CYCLE_FLAG (1UL<<31)
@@ -52,14 +54,20 @@
#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */
+#define TRC_SCHED_CLASS 0x00022000 /* Scheduler-specific */
#define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */
+/* Trace classes for Hardware */
+#define TRC_HW_PM 0x00801000 /* Power management traces */
+#define TRC_HW_IRQ 0x00802000 /* Traces relating to the handling of IRQs */
+
/* Trace events per class */
#define TRC_LOST_RECORDS (TRC_GEN + 1)
#define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2)
#define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3)
-#define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1)
+#define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1)
+#define TRC_SCHED_CONTINUE_RUNNING (TRC_SCHED_MIN + 2)
#define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1)
#define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2)
#define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3)
@@ -75,10 +83,17 @@
#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13)
#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14)
#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15)
+#define TRC_SCHED_SHUTDOWN_CODE (TRC_SCHED_VERBOSE + 16)
#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
+#define TRC_MEM_SET_P2M_ENTRY (TRC_MEM + 4)
+#define TRC_MEM_DECREASE_RESERVATION (TRC_MEM + 5)
+#define TRC_MEM_POD_POPULATE (TRC_MEM + 16)
+#define TRC_MEM_POD_ZERO_RECLAIM (TRC_MEM + 17)
+#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18)
+
#define TRC_PV_HYPERCALL (TRC_PV + 1)
#define TRC_PV_TRAP (TRC_PV + 3)
@@ -111,6 +126,7 @@
#define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 15)
/* trace events per subclass */
+#define TRC_HVM_NESTEDFLAG (0x400)
#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
@@ -140,11 +156,37 @@
#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
#define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14)
#define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15)
-#define TRC_HVM_IO_ASSIST (TRC_HVM_HANDLER + 0x16)
-#define TRC_HVM_MMIO_ASSIST (TRC_HVM_HANDLER + 0x17)
+#define TRC_HVM_IOPORT_READ (TRC_HVM_HANDLER + 0x16)
+#define TRC_HVM_IOMEM_READ (TRC_HVM_HANDLER + 0x17)
#define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18)
#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19)
#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
+#define TRC_HVM_RDTSC (TRC_HVM_HANDLER + 0x1a)
+#define TRC_HVM_INTR_WINDOW (TRC_HVM_HANDLER + 0x20)
+#define TRC_HVM_NPF (TRC_HVM_HANDLER + 0x21)
+#define TRC_HVM_REALMODE_EMULATE (TRC_HVM_HANDLER + 0x22)
+#define TRC_HVM_TRAP (TRC_HVM_HANDLER + 0x23)
+#define TRC_HVM_TRAP_DEBUG (TRC_HVM_HANDLER + 0x24)
+#define TRC_HVM_VLAPIC (TRC_HVM_HANDLER + 0x25)
+
+#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216)
+#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217)
+
+/* Trace events for the power management class */
+#define TRC_PM_FREQ_CHANGE (TRC_HW_PM + 0x01)
+#define TRC_PM_IDLE_ENTRY (TRC_HW_PM + 0x02)
+#define TRC_PM_IDLE_EXIT (TRC_HW_PM + 0x03)
+
+/* Trace events for IRQs */
+#define TRC_HW_IRQ_MOVE_CLEANUP_DELAY (TRC_HW_IRQ + 0x1)
+#define TRC_HW_IRQ_MOVE_CLEANUP (TRC_HW_IRQ + 0x2)
+#define TRC_HW_IRQ_BIND_VECTOR (TRC_HW_IRQ + 0x3)
+#define TRC_HW_IRQ_CLEAR_VECTOR (TRC_HW_IRQ + 0x4)
+#define TRC_HW_IRQ_MOVE_FINISH (TRC_HW_IRQ + 0x5)
+#define TRC_HW_IRQ_ASSIGN_VECTOR (TRC_HW_IRQ + 0x6)
+#define TRC_HW_IRQ_UNMAPPED_VECTOR (TRC_HW_IRQ + 0x7)
+#define TRC_HW_IRQ_HANDLED (TRC_HW_IRQ + 0x8)
+
/* This structure represents a single trace buffer record. */
struct t_rec {
@@ -180,6 +222,16 @@ struct t_buf {
/* Records follow immediately after the meta-data header. */
};
+/* Structure used to pass MFNs to the trace buffers back to trace consumers.
+ * Offset is an offset into the mapped structure where the mfn list will be held.
+ * MFNs will be at ((unsigned long *)(t_info))+(t_info->mfn_offset[cpu]).
+ */
+struct t_info {
+ uint16_t tbuf_size; /* Size in pages of each trace buffer */
+ uint16_t mfn_offset[]; /* Offset within t_info structure of the page list per cpu */
+ /* MFN lists immediately after the header */
+};
+
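A small helper sketch for the offset rule in the comment above; t_info is assumed to be already mapped by the trace consumer:

    /* Hypothetical sketch: locate the trace-buffer MFN list for one CPU. */
    static unsigned long *cpu_mfn_list(struct t_info *ti, unsigned int cpu)
    {
        /* mfn_offset[] counts in unsigned-long-sized units from the start of t_info. */
        return (unsigned long *)ti + ti->mfn_offset[cpu];
    }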
#endif /* __XEN_PUBLIC_TRACE_H__ */
/*
diff --git a/sys/xen/interface/vcpu.h b/sys/xen/interface/vcpu.h
index ab65493..d52f121 100644
--- a/sys/xen/interface/vcpu.h
+++ b/sys/xen/interface/vcpu.h
@@ -27,6 +27,8 @@
#ifndef __XEN_PUBLIC_VCPU_H__
#define __XEN_PUBLIC_VCPU_H__
+#include "xen.h"
+
/*
* Prototype for this hypercall is:
* int vcpu_op(int cmd, int vcpuid, void *extra_args)
@@ -185,8 +187,7 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
/*
* Get the physical ID information for a pinned vcpu's underlying physical
 * processor. The physical ID information is architecture-specific.
- * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and
- * greater are reserved.
+ * On x86: id[31:0]=apic_id, id[63:32]=acpi_id.
* This command returns -EINVAL if it is not a valid operation for this VCPU.
*/
#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */
@@ -195,10 +196,36 @@ struct vcpu_get_physid {
};
typedef struct vcpu_get_physid vcpu_get_physid_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
-#define xen_vcpu_physid_to_x86_apicid(physid) \
- ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid)))
-#define xen_vcpu_physid_to_x86_acpiid(physid) \
- ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32)))
+#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid))
+#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32))
+
+/*
+ * Register a memory location to get a secondary copy of the vcpu time
+ * parameters. The master copy still exists as part of the vcpu shared
+ * memory area, and this secondary copy is updated whenever the master copy
+ * is updated (and using the same versioning scheme for synchronisation).
+ *
+ * The intent is that this copy may be mapped (RO) into userspace so
+ * that usermode can compute system time using the time info and the
+ * tsc. Usermode will see an array of vcpu_time_info structures, one
+ * for each vcpu, and choose the right one by an existing mechanism
+ * which allows it to get the current vcpu number (such as via a
+ * segment limit). It can then apply the normal algorithm to compute
+ * system time from the tsc.
+ *
+ * @extra_arg == pointer to vcpu_register_time_info_memory_area structure.
+ */
+#define VCPUOP_register_vcpu_time_memory_area 13
+DEFINE_XEN_GUEST_HANDLE(vcpu_time_info_t);
+struct vcpu_register_time_memory_area {
+ union {
+ XEN_GUEST_HANDLE(vcpu_time_info_t) h;
+ struct vcpu_time_info *v;
+ uint64_t p;
+ } addr;
+};
+typedef struct vcpu_register_time_memory_area vcpu_register_time_memory_area_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t);
#endif /* __XEN_PUBLIC_VCPU_H__ */
diff --git a/sys/xen/interface/version.h b/sys/xen/interface/version.h
index 944ca62..8742c2b 100644
--- a/sys/xen/interface/version.h
+++ b/sys/xen/interface/version.h
@@ -78,6 +78,9 @@ typedef struct xen_feature_info xen_feature_info_t;
/* arg == xen_domain_handle_t. */
#define XENVER_guest_handle 8
+#define XENVER_commandline 9
+typedef char xen_commandline_t[1024];
+
#endif /* __XEN_PUBLIC_VERSION_H__ */
/*
diff --git a/sys/xen/interface/xen-compat.h b/sys/xen/interface/xen-compat.h
index 329be07..d8c55bf 100644
--- a/sys/xen/interface/xen-compat.h
+++ b/sys/xen/interface/xen-compat.h
@@ -27,7 +27,7 @@
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
-#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030209
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040200
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */
diff --git a/sys/xen/interface/xen.h b/sys/xen/interface/xen.h
index 4b444b4..b2f6c50 100644
--- a/sys/xen/interface/xen.h
+++ b/sys/xen/interface/xen.h
@@ -33,6 +33,8 @@
#include "arch-x86/xen.h"
#elif defined(__ia64__)
#include "arch-ia64.h"
+#elif defined(__arm__)
+#include "arch-arm.h"
#else
#error "Unsupported architecture"
#endif
@@ -47,6 +49,7 @@ DEFINE_XEN_GUEST_HANDLE(long);
__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
DEFINE_XEN_GUEST_HANDLE(void);
+DEFINE_XEN_GUEST_HANDLE(uint64_t);
DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#endif
@@ -54,6 +57,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
* HYPERCALLS
*/
+/* `incontents 100 hcalls List of hypercalls
+ * ` enum hypercall_num { // __HYPERVISOR_* => HYPERVISOR_*()
+ */
+
#define __HYPERVISOR_set_trap_table 0
#define __HYPERVISOR_mmu_update 1
#define __HYPERVISOR_set_gdt 2
@@ -91,6 +98,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define __HYPERVISOR_sysctl 35
#define __HYPERVISOR_domctl 36
#define __HYPERVISOR_kexec_op 37
+#define __HYPERVISOR_tmem_op 38
+#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
@@ -102,6 +111,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define __HYPERVISOR_arch_6 54
#define __HYPERVISOR_arch_7 55
+/* ` } */
+
/*
* HYPERCALL COMPATIBILITY.
*/
@@ -135,6 +146,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
* The latter can be allocated only once per guest: they must initially be
* allocated to VCPU0 but can subsequently be re-bound.
*/
+/* ` enum virq { */
#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
@@ -143,6 +155,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
+#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */
+#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
+#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
+#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
@@ -153,26 +169,72 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define VIRQ_ARCH_5 21
#define VIRQ_ARCH_6 22
#define VIRQ_ARCH_7 23
+/* ` } */
#define NR_VIRQS 24
/*
- * MMU-UPDATE REQUESTS
- *
- * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
- * A foreigndom (FD) can be specified (or DOMID_SELF for none).
- * Where the FD has some effect, it is described below.
- * ptr[1:0] specifies the appropriate MMU_* command.
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_mmu_update(const struct mmu_update reqs[],
+ * ` unsigned count, unsigned *done_out,
+ * ` unsigned foreigndom)
+ * `
+ * @reqs is an array of mmu_update_t structures ((ptr, val) pairs).
+ * @count is the length of the above array.
+ * @pdone is an output parameter indicating number of completed operations
+ * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this
+ * hypercall invocation. Can be DOMID_SELF.
+ * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced
+ * in this hypercall invocation. The value of this field
+ * (x) encodes the PFD as follows:
+ * x == 0 => PFD == DOMID_SELF
+ * x != 0 => PFD == x - 1
*
+ * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command.
+ * -------------
* ptr[1:0] == MMU_NORMAL_PT_UPDATE:
- * Updates an entry in a page table. If updating an L1 table, and the new
- * table entry is valid/present, the mapped frame must belong to the FD, if
- * an FD has been specified. If attempting to map an I/O page then the
- * caller assumes the privilege of the FD.
+ * Updates an entry in a page table belonging to PFD. If updating an L1 table,
+ * and the new table entry is valid/present, the mapped frame must belong to
+ * FD. If attempting to map an I/O page then the caller assumes the privilege
+ * of the FD.
* FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
* FD == DOMID_XEN: Map restricted areas of Xen's heap space.
* ptr[:2] -- Machine address of the page-table entry to modify.
* val -- Value to write.
+ *
+ * There are also certain implicit requirements when using this hypercall. The
+ * pages that make up a pagetable must be mapped read-only in the guest.
+ * This prevents uncontrolled guest updates to the pagetable. Xen strictly
+ * enforces this, and will disallow any pagetable update which will end up
+ * mapping a pagetable page RW, and will disallow using any writable page as a
+ * pagetable. In practice it means that when constructing a page table for a
+ * process, thread, etc., we MUST be very diligent in following these rules:
+ * 1). Start with top-level page (PGD or in Xen language: L4). Fill out
+ * the entries.
+ * 2). Keep on going, filling out the upper (PUD or L3), and middle (PMD
+ * or L2).
+ * 3). Start filling out the PTE table (L1) with the PTE entries. Once
+ * done, make sure to set each of those entries to RO (so writeable bit
+ * is unset). Once that has been completed, set the PMD (L2) for this
+ * PTE table as RO.
+ * 4). When completed with all of the PMD (L2) entries, and all of them have
+ * been set to RO, make sure to set RO the PUD (L3). Do the same
+ * operation on PGD (L4) pagetable entries that have a PUD (L3) entry.
+ * 5). Now before you can use those pages (so setting the cr3), you MUST also
+ * pin them so that the hypervisor can verify the entries. This is done
+ * via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame
+ * number of the PGD (L4)). At this point the HYPERVISOR_mmuext_op(
+ * MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be
+ * issued.
+ * For 32-bit guests, the L4 is not used (as there are fewer pagetable levels), so
+ * instead use L3.
+ * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE
+ * hypercall. Also if so desired the OS can also try to write to the PTE
+ * and be trapped by the hypervisor (as the PTE entry is RO).
+ *
+ * To deallocate the pages, the operations are the reverse of the steps
+ * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the
+ * pagetable MUST not be in use (meaning that the cr3 is not set to it).
*
* ptr[1:0] == MMU_MACHPHYS_UPDATE:
* Updates an entry in the machine->pseudo-physical mapping table.
@@ -183,6 +245,72 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
* ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
* As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
* with those in @val.
+ *
+ * @val is usually the machine frame number along with some attributes.
+ * The attributes by default follow the architecture defined bits. Meaning that
+ * if this is an x86-64 machine and a four-level page table layout is used, the layout
+ * of val is:
+ * - 63 if set means No execute (NX)
+ * - 46-13 the machine frame number
+ * - 12 available for guest
+ * - 11 available for guest
+ * - 10 available for guest
+ * - 9 available for guest
+ * - 8 global
+ * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages)
+ * - 6 dirty
+ * - 5 accessed
+ * - 4 page cached disabled
+ * - 3 page write through
+ * - 2 userspace accessible
+ * - 1 writeable
+ * - 0 present
+ *
+ * The one bit that does not fit with the default layout is PAGE_PSE
+ * (also called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to the
+ * HYPERVISOR_mmuext_op serve as the mechanism to mark a mapping as 4MB
+ * (or 2MB) instead of using the PAGE_PSE bit.
+ *
+ * The reason that the PAGE_PSE (bit 7) is not being utilized is due to Xen
+ * using it as the Page Attribute Table (PAT) bit - for details on it please
+ * refer to Intel SDM 10.12. The PAT allows setting the caching attributes of
+ * pages instead of using MTRRs.
+ *
+ * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits):
+ * PAT4 PAT0
+ * +---+----+----+----+-----+----+----+
+ * WC | WC | WB | UC | UC- | WC | WB | <= Linux
+ * +---+----+----+----+-----+----+----+
+ * WC | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots)
+ * +---+----+----+----+-----+----+----+
+ * WC | WP | WC | UC | UC- | WT | WB | <= Xen
+ * +---+----+----+----+-----+----+----+
+ *
+ * The lookup of this index table translates to looking up
+ * Bit 7, Bit 4, and Bit 3 of val entry:
+ *
+ * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3).
+ *
+ * If all bits are off, then we are using PAT0. If bit 3 is turned on,
+ * then we are using PAT1; if bit 3 and bit 4 are on, then PAT2, and so on.
+ *
+ * As you can see, the Linux PAT1 translates to PAT4 under Xen. This means
+ * that if a guest follows Linux's PAT setup and would like to set Write
+ * Combined on pages, it MUST use the PAT4 entry, meaning that Bit 7 (PAGE_PAT) is
+ * set. For example, under Linux it only uses PAT0, PAT1, and PAT2 for the
+ * caching as:
+ *
+ * WB = none (so PAT0)
+ * WC = PWT (bit 3 on)
+ * UC = PWT | PCD (bit 3 and 4 are on).
+ *
+ * To make it work with Xen, the guest needs to translate the WC bit as follows:
+ *
+ * PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3
+ *
+ * And to translate back it would:
+ *
+ * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
*/
#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
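A condensed sketch of the pin-then-load sequence and a subsequent PTE update, as described in the comment above; new_l4, pte_machine_addr, new_pte_val and virt_to_mfn() are assumptions, and error handling is omitted:

    /* Hypothetical sketch: pin a fully built, read-only L4 table and use it. */
    struct mmuext_op op;

    op.cmd = MMUEXT_PIN_L4_TABLE;
    op.arg1.mfn = virt_to_mfn(new_l4);          /* new_l4 is already mapped RO */
    HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);

    op.cmd = MMUEXT_NEW_BASEPTR;                /* now it may back cr3 */
    op.arg1.mfn = virt_to_mfn(new_l4);
    HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);

    /* Later PTE changes go through mmu_update, since the tables stay RO. */
    struct mmu_update req = {
        .ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE, /* MA of the PTE */
        .val = new_pte_val,                             /* MFN plus attribute bits */
    };
    HYPERVISOR_mmu_update(&req, 1, NULL, DOMID_SELF);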
@@ -227,10 +355,24 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
*
* cmd: MMUEXT_FLUSH_CACHE
* No additional arguments. Writes back and flushes cache contents.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
+ * No additional arguments. Writes back and flushes cache contents
+ * on all CPUs in the system.
*
* cmd: MMUEXT_SET_LDT
* linear_addr: Linear address of LDT base (NB. must be page-aligned).
* nr_ents: Number of entries in LDT.
+ *
+ * cmd: MMUEXT_CLEAR_PAGE
+ * mfn: Machine frame number to be cleared.
+ *
+ * cmd: MMUEXT_COPY_PAGE
+ * mfn: Machine frame number of the destination page.
+ * src_mfn: Machine frame number of the source page.
+ *
+ * cmd: MMUEXT_[UN]MARK_SUPER
+ * mfn: Machine frame number of head of superpage to be [un]marked.
*/
#define MMUEXT_PIN_L1_TABLE 0
#define MMUEXT_PIN_L2_TABLE 1
@@ -247,12 +389,18 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define MMUEXT_FLUSH_CACHE 12
#define MMUEXT_SET_LDT 13
#define MMUEXT_NEW_USER_BASEPTR 15
+#define MMUEXT_CLEAR_PAGE 16
+#define MMUEXT_COPY_PAGE 17
+#define MMUEXT_FLUSH_CACHE_GLOBAL 18
+#define MMUEXT_MARK_SUPER 19
+#define MMUEXT_UNMARK_SUPER 20
#ifndef __ASSEMBLY__
struct mmuext_op {
unsigned int cmd;
union {
- /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
+ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
+ * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
xen_pfn_t mfn;
/* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
unsigned long linear_addr;
@@ -262,10 +410,12 @@ struct mmuext_op {
unsigned int nr_ents;
/* TLB_FLUSH_MULTI, INVLPG_MULTI */
#if __XEN_INTERFACE_VERSION__ >= 0x00030205
- XEN_GUEST_HANDLE(void) vcpumask;
+ XEN_GUEST_HANDLE(const_void) vcpumask;
#else
- void *vcpumask;
+ const void *vcpumask;
#endif
+ /* COPY_PAGE */
+ xen_pfn_t src_mfn;
} arg2;
};
typedef struct mmuext_op mmuext_op_t;
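For the new MMUEXT_COPY_PAGE command, a minimal sketch of filling the op; dst_mfn and src_mfn are placeholder frame numbers:

    /* Hypothetical sketch: ask Xen to copy one machine page onto another. */
    struct mmuext_op copy = {
        .cmd          = MMUEXT_COPY_PAGE,
        .arg1.mfn     = dst_mfn,        /* destination frame */
        .arg2.src_mfn = src_mfn,        /* source frame, via the new arg2 member */
    };
    HYPERVISOR_mmuext_op(&copy, 1, NULL, DOMID_SELF);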
@@ -343,6 +493,16 @@ typedef uint16_t domid_t;
#define DOMID_XEN (0x7FF2U)
/*
+ * DOMID_COW is used as the owner of sharable pages */
+#define DOMID_COW (0x7FF3U)
+
+/* DOMID_INVALID is used to identify pages with unknown owner. */
+#define DOMID_INVALID (0x7FF4U)
+
+/* Idle domain. */
+#define DOMID_IDLE (0x7FFFU)
+
+/*
* Send an array of these to HYPERVISOR_mmu_update().
* NB. The fields are natural pointer/address size for this architecture.
*/
@@ -442,7 +602,7 @@ typedef struct vcpu_info vcpu_info_t;
* of this structure remaining constant.
*/
struct shared_info {
- struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
+ struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
/*
* A domain can create "event channels" on which it can send and receive
@@ -501,6 +661,7 @@ typedef struct shared_info shared_info_t;
* a. relocated kernel image
* b. initial ram disk [mod_start, mod_len]
* c. list of allocated page frames [mfn_list, nr_pages]
+ * (unless relocated due to XEN_ELFNOTE_INIT_P2M)
* d. start_info_t structure [register ESI (x86)]
* e. bootstrap page tables [pt_base, CR3 (x86)]
* f. bootstrap stack [register ESP (x86)]
@@ -539,9 +700,14 @@ struct start_info {
unsigned long pt_base; /* VIRTUAL address of page directory. */
unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
- unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
+ unsigned long mod_start; /* VIRTUAL address of pre-loaded module */
+ /* (PFN of pre-loaded module if */
+ /* SIF_MOD_START_PFN set in flags). */
unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
int8_t cmd_line[MAX_GUEST_CMDLINE];
+ /* The pfn range here covers both page table and p->m table frames. */
+ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */
+ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */
};
typedef struct start_info start_info_t;
@@ -554,12 +720,41 @@ typedef struct start_info start_info_t;
/* These flags are passed in the 'flags' field of start_info_t. */
#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
+#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
+#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
+/*
+ * A multiboot module is a package containing modules very similar to a
+ * multiboot module array. The only differences are:
+ * - the array of module descriptors is by convention simply at the beginning
+ * of the multiboot module,
+ * - addresses in the module descriptors are based on the beginning of the
+ * multiboot module,
+ * - the number of modules is determined by a termination descriptor that has
+ * mod_start == 0.
+ *
+ * This allows the package to be built statically and referenced from a
+ * configuration file, and lets the PV guest easily rebase the addresses to
+ * virtual addresses while counting the number of modules at the same time.
+ */
+struct xen_multiboot_mod_list
+{
+ /* Address of first byte of the module */
+ uint32_t mod_start;
+ /* Address of last byte of the module (inclusive) */
+ uint32_t mod_end;
+ /* Address of zero-terminated command line */
+ uint32_t cmdline;
+ /* Unused, must be zero */
+ uint32_t pad;
+};
+
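
To make the new module-package layout concrete, here is a rough sketch (not
taken from this commit) of how a PV guest could count the modules once the
package has been mapped; si is the guest's start_info_t and mod_list_va is an
assumed virtual address at which mod_start has been mapped:

    if (si->flags & SIF_MULTIBOOT_MOD) {
            struct xen_multiboot_mod_list *mod =
                (struct xen_multiboot_mod_list *)mod_list_va;
            unsigned int nmods = 0;

            /* Descriptors sit at the start of the package and end with an
             * entry whose mod_start is zero. */
            while (mod[nmods].mod_start != 0)
                    nmods++;

            /* mod[i].mod_start/mod_end are offsets into the package and can
             * now be rebased to virtual addresses by the guest. */
    }
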
typedef struct dom0_vga_console_info {
uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
#define XEN_VGATYPE_TEXT_MODE_3 0x03
#define XEN_VGATYPE_VESA_LFB 0x23
+#define XEN_VGATYPE_EFI_LFB 0x70
union {
struct {
@@ -618,14 +813,23 @@ __DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
/* Default definitions for macros used by domctl/sysctl. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif
#ifndef XEN_GUEST_HANDLE_64
#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
#endif
+
+#ifndef __ASSEMBLY__
+struct xenctl_cpumap {
+ XEN_GUEST_HANDLE_64(uint8) bitmap;
+ uint32_t nr_cpus;
+};
#endif
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
#endif /* __XEN_PUBLIC_XEN_H__ */
/*
diff --git a/sys/xen/interface/xenoprof.h b/sys/xen/interface/xenoprof.h
index 183078d..a0c6987 100644
--- a/sys/xen/interface/xenoprof.h
+++ b/sys/xen/interface/xenoprof.h
@@ -50,7 +50,11 @@
#define XENOPROF_shutdown 13
#define XENOPROF_get_buffer 14
#define XENOPROF_set_backtrace 15
-#define XENOPROF_last_op 15
+
+/* AMD IBS support */
+#define XENOPROF_get_ibs_caps 16
+#define XENOPROF_ibs_counter 17
+#define XENOPROF_last_op 17
#define MAX_OPROF_EVENTS 32
#define MAX_OPROF_DOMAINS 25
@@ -64,7 +68,7 @@ struct event_log {
};
/* PC value that indicates a special code */
-#define XENOPROF_ESCAPE_CODE ~0UL
+#define XENOPROF_ESCAPE_CODE (~0ULL)
/* Transient events for the xenoprof->oprofile cpu buf */
#define XENOPROF_TRACE_BEGIN 1
@@ -124,6 +128,16 @@ typedef struct xenoprof_passive {
} xenoprof_passive_t;
DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t);
+struct xenoprof_ibs_counter {
+ uint64_t op_enabled;
+ uint64_t fetch_enabled;
+ uint64_t max_cnt_fetch;
+ uint64_t max_cnt_op;
+ uint64_t rand_en;
+ uint64_t dispatched_ops;
+};
+typedef struct xenoprof_ibs_counter xenoprof_ibs_counter_t;
+DEFINE_XEN_GUEST_HANDLE(xenoprof_ibs_counter_t);
#endif /* __XEN_PUBLIC_XENOPROF_H__ */
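
As an illustrative sketch only (the field semantics follow AMD's IBS model and
are not spelled out in this header), a profiler could enable IBS fetch sampling
roughly as follows; the HYPERVISOR_xenoprof_op(cmd, arg) wrapper and the
counter values used here are assumptions:

    struct xenoprof_ibs_counter ibs;

    memset(&ibs, 0, sizeof(ibs));
    ibs.fetch_enabled = 1;          /* turn on IBS fetch sampling    */
    ibs.max_cnt_fetch = 0x10000;    /* placeholder sampling interval */
    ibs.rand_en = 1;                /* randomize the fetch counter   */

    if (HYPERVISOR_xenoprof_op(XENOPROF_ibs_counter, &ibs) != 0)
            printf("XENOPROF_ibs_counter failed\n");
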
diff --git a/sys/xen/interface/xsm/flask_op.h b/sys/xen/interface/xsm/flask_op.h
new file mode 100644
index 0000000..1a251c9
--- /dev/null
+++ b/sys/xen/interface/xsm/flask_op.h
@@ -0,0 +1,193 @@
+/*
+ * This file contains the flask_op hypercall commands and definitions.
+ *
+ * Author: George Coker, <gscoker@alpha.ncsc.mil>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __FLASK_OP_H__
+#define __FLASK_OP_H__
+
+#define XEN_FLASK_INTERFACE_VERSION 1
+
+struct xen_flask_load {
+ XEN_GUEST_HANDLE(char) buffer;
+ uint32_t size;
+};
+
+struct xen_flask_setenforce {
+ uint32_t enforcing;
+};
+
+struct xen_flask_sid_context {
+ /* IN/OUT: sid to convert to/from string */
+ uint32_t sid;
+ /* IN: size of the context buffer
+ * OUT: actual size of the output context string
+ */
+ uint32_t size;
+ XEN_GUEST_HANDLE(char) context;
+};
+
+struct xen_flask_access {
+ /* IN: access request */
+ uint32_t ssid;
+ uint32_t tsid;
+ uint32_t tclass;
+ uint32_t req;
+ /* OUT: AVC data */
+ uint32_t allowed;
+ uint32_t audit_allow;
+ uint32_t audit_deny;
+ uint32_t seqno;
+};
+
+struct xen_flask_transition {
+ /* IN: transition SIDs and class */
+ uint32_t ssid;
+ uint32_t tsid;
+ uint32_t tclass;
+ /* OUT: new SID */
+ uint32_t newsid;
+};
+
+struct xen_flask_userlist {
+ /* IN: starting SID for list */
+ uint32_t start_sid;
+ /* IN: size of user string and output buffer
+ * OUT: number of SIDs returned */
+ uint32_t size;
+ union {
+ /* IN: user to enumerate SIDs */
+ XEN_GUEST_HANDLE(char) user;
+ /* OUT: SID list */
+ XEN_GUEST_HANDLE(uint32) sids;
+ } u;
+};
+
+struct xen_flask_boolean {
+ /* IN/OUT: numeric identifier for boolean [GET/SET]
+ * If -1, name will be used and bool_id will be filled in. */
+ uint32_t bool_id;
+ /* OUT: current enforcing value of boolean [GET/SET] */
+ uint8_t enforcing;
+ /* OUT: pending value of boolean [GET/SET] */
+ uint8_t pending;
+ /* IN: new value of boolean [SET] */
+ uint8_t new_value;
+ /* IN: commit new value instead of only setting pending [SET] */
+ uint8_t commit;
+ /* IN: size of boolean name buffer [GET/SET]
+ * OUT: actual size of name [GET only] */
+ uint32_t size;
+ /* IN: if bool_id is -1, used to find boolean [GET/SET]
+ * OUT: textual name of boolean [GET only]
+ */
+ XEN_GUEST_HANDLE(char) name;
+};
+
+struct xen_flask_setavc_threshold {
+ /* IN */
+ uint32_t threshold;
+};
+
+struct xen_flask_hash_stats {
+ /* OUT */
+ uint32_t entries;
+ uint32_t buckets_used;
+ uint32_t buckets_total;
+ uint32_t max_chain_len;
+};
+
+struct xen_flask_cache_stats {
+ /* IN */
+ uint32_t cpu;
+ /* OUT */
+ uint32_t lookups;
+ uint32_t hits;
+ uint32_t misses;
+ uint32_t allocations;
+ uint32_t reclaims;
+ uint32_t frees;
+};
+
+struct xen_flask_ocontext {
+ /* IN */
+ uint32_t ocon;
+ uint32_t sid;
+ uint64_t low, high;
+};
+
+struct xen_flask_peersid {
+ /* IN */
+ evtchn_port_t evtchn;
+ /* OUT */
+ uint32_t sid;
+};
+
+struct xen_flask_op {
+ uint32_t cmd;
+#define FLASK_LOAD 1
+#define FLASK_GETENFORCE 2
+#define FLASK_SETENFORCE 3
+#define FLASK_CONTEXT_TO_SID 4
+#define FLASK_SID_TO_CONTEXT 5
+#define FLASK_ACCESS 6
+#define FLASK_CREATE 7
+#define FLASK_RELABEL 8
+#define FLASK_USER 9
+#define FLASK_POLICYVERS 10
+#define FLASK_GETBOOL 11
+#define FLASK_SETBOOL 12
+#define FLASK_COMMITBOOLS 13
+#define FLASK_MLS 14
+#define FLASK_DISABLE 15
+#define FLASK_GETAVC_THRESHOLD 16
+#define FLASK_SETAVC_THRESHOLD 17
+#define FLASK_AVC_HASHSTATS 18
+#define FLASK_AVC_CACHESTATS 19
+#define FLASK_MEMBER 20
+#define FLASK_ADD_OCONTEXT 21
+#define FLASK_DEL_OCONTEXT 22
+#define FLASK_GET_PEER_SID 23
+ uint32_t interface_version; /* XEN_FLASK_INTERFACE_VERSION */
+ union {
+ struct xen_flask_load load;
+ struct xen_flask_setenforce enforce;
+ /* FLASK_CONTEXT_TO_SID and FLASK_SID_TO_CONTEXT */
+ struct xen_flask_sid_context sid_context;
+ struct xen_flask_access access;
+ /* FLASK_CREATE, FLASK_RELABEL, FLASK_MEMBER */
+ struct xen_flask_transition transition;
+ struct xen_flask_userlist userlist;
+ /* FLASK_GETBOOL, FLASK_SETBOOL */
+ struct xen_flask_boolean boolean;
+ struct xen_flask_setavc_threshold setavc_threshold;
+ struct xen_flask_hash_stats hash_stats;
+ struct xen_flask_cache_stats cache_stats;
+ /* FLASK_ADD_OCONTEXT, FLASK_DEL_OCONTEXT */
+ struct xen_flask_ocontext ocontext;
+ struct xen_flask_peersid peersid;
+ } u;
+};
+typedef struct xen_flask_op xen_flask_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_flask_op_t);
+
+#endif
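
A usage sketch (not part of this commit) for the new FLASK interface: convert a
security context string to a SID by filling the xen_flask_op envelope. The
HYPERVISOR_xsm_op() wrapper around the xsm_op hypercall is assumed to exist in
the caller's environment, and the context string is only an example:

    struct xen_flask_op op;
    char ctx[] = "system_u:system_r:domU_t";

    memset(&op, 0, sizeof(op));
    op.cmd = FLASK_CONTEXT_TO_SID;
    op.interface_version = XEN_FLASK_INTERFACE_VERSION;
    op.u.sid_context.size = sizeof(ctx);
    set_xen_guest_handle(op.u.sid_context.context, ctx);

    if (HYPERVISOR_xsm_op(&op) == 0)
            printf("SID for context: %u\n", op.u.sid_context.sid);
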
diff --git a/sys/xen/xenstore/xenstore.c b/sys/xen/xenstore/xenstore.c
index 090de23..a07a262 100644
--- a/sys/xen/xenstore/xenstore.c
+++ b/sys/xen/xenstore/xenstore.c
@@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
#include <xen/xen_intr.h>
#include <xen/interface/hvm/params.h>
+#include <xen/hvm.h>
#include <xen/xenstore/xenstorevar.h>
#include <xen/xenstore/xenstore_internal.h>