path: root/sys/amd64/vmm/amd/svm_softc.h
/*-
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SVM_SOFTC_H_
#define _SVM_SOFTC_H_

#define SVM_IO_BITMAP_SIZE	(3 * PAGE_SIZE)
#define SVM_MSR_BITMAP_SIZE	(2 * PAGE_SIZE)

struct asid {
	uint64_t	gen;	/* range is [1, ~0UL] */
	uint32_t	num;	/* range is [1, nasid - 1] */
};

/*
 * svm_vcpu contains SVM VMCB state and vcpu register state.
 */
struct svm_vcpu {
	struct vmcb	vmcb;	 /* hardware saved vcpu context */
	struct svm_regctx swctx; /* software saved vcpu context */
	uint64_t	vmcb_pa; /* VMCB physical address */
	int		lastcpu; /* host cpu that the vcpu last ran on */
	uint32_t	dirty;	 /* state cache bits that must be cleared */
	long		eptgen;	 /* pmap->pm_eptgen when the vcpu last ran */
	struct asid	asid;
} __aligned(PAGE_SIZE);

/*
 * SVM softc, one per virtual machine.
 */
struct svm_softc {
	/*
	 * I/O permission bitmap; VMCB.ctrl.iopm_base_pa should point to this.
	 * If a bit is set, access to the corresponding I/O port is intercepted
	 * (see the illustrative sketches following this struct).
	 */
	uint8_t iopm_bitmap[SVM_IO_BITMAP_SIZE];

	/*
	 * MSR permission bitmap, VMCB.ctrl.msrpm_base_pa should point to this.
	 * Two bits are used for each MSR with the LSB used for read access
	 * and the MSB used for write access. A value of '1' indicates that
	 * the operation is intercepted.
	 */
	uint8_t	msr_bitmap[SVM_MSR_BITMAP_SIZE];

	/* APIC page, one per vcpu. */
	uint8_t	apic_page[VM_MAXCPU][PAGE_SIZE];

	/* Nested Paging */
	vm_offset_t	nptp;

	/* Virtual machine pointer. */
	struct vm	*vm;

	/* Guest VCPU h/w and s/w context. */
	struct svm_vcpu vcpu[VM_MAXCPU];
} __aligned(PAGE_SIZE);

CTASSERT((offsetof(struct svm_softc, nptp) & PAGE_MASK) == 0);
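
/*
 * Illustrative sketches, not part of the original header: the two helpers
 * below show how the permission bitmaps declared above are encoded.  The
 * helper names are hypothetical, and the MSR example only covers an MSR in
 * the low range (0x0 - 0x1FFF); the SVM architecture maps additional MSR
 * ranges into the bitmap, which a real implementation must handle.
 */

/* One bit per I/O port; a set bit means access to that port is intercepted. */
static __inline void
svm_ioport_intercept_sketch(uint8_t *iopm_bitmap, uint16_t port)
{

	iopm_bitmap[port / 8] |= 1 << (port % 8);
}

/*
 * Two bits per MSR: the low bit intercepts reads and the high bit intercepts
 * writes; a '1' means the access is intercepted.
 */
static __inline void
svm_msr_intercept_sketch(uint8_t *msr_bitmap, uint32_t msr, int rd, int wr)
{
	uint32_t bit;

	bit = msr * 2;			/* bit index of the read-intercept bit */
	if (rd)
		msr_bitmap[bit / 8] |= 1 << (bit % 8);
	if (wr)
		msr_bitmap[bit / 8] |= 1 << ((bit % 8) + 1);
}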

static __inline struct svm_vcpu *
svm_get_vcpu(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu]));
}

static __inline struct vmcb *
svm_get_vmcb(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].vmcb));
}

static __inline struct vmcb_state *
svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].vmcb.state));
}

static __inline struct vmcb_ctrl *
svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].vmcb.ctrl));
}

static __inline struct svm_regctx *
svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
{

	return (&(sc->vcpu[vcpu].swctx));
}

static __inline void
svm_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
{
	struct svm_vcpu *vcpustate;

	vcpustate = svm_get_vcpu(sc, vcpu);

	vcpustate->dirty |= dirtybits;
}
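
/*
 * Illustrative usage sketch, not part of the original header: a hypothetical
 * caller that fetches a vcpu's VMCB control area, updates its ASID and then
 * records which VMCB state-cache bits must be invalidated before the next
 * VMRUN.  The 'asid' field and the VMCB_CACHE_ASID dirty bit are assumed to
 * be defined in vmcb.h.
 */
static __inline void
svm_set_asid_sketch(struct svm_softc *sc, int vcpu, uint32_t asid)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	ctrl->asid = asid;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_ASID);
}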

#endif /* _SVM_SOFTC_H_ */