arch/mips/kvm/msa.S
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * MIPS SIMD Architecture (MSA) context handling code for KVM.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 */

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>

	.set	noreorder
	.set	noat

LEAF(__kvm_save_msa)
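	/*
	 * Save all 32 full-width (128-bit) MSA vector registers to the
	 * vcpu FP context, using the VCPU_FPR* offsets from a0.
	 */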
	st_d	0,  VCPU_FPR0,  a0
	st_d	1,  VCPU_FPR1,  a0
	st_d	2,  VCPU_FPR2,  a0
	st_d	3,  VCPU_FPR3,  a0
	st_d	4,  VCPU_FPR4,  a0
	st_d	5,  VCPU_FPR5,  a0
	st_d	6,  VCPU_FPR6,  a0
	st_d	7,  VCPU_FPR7,  a0
	st_d	8,  VCPU_FPR8,  a0
	st_d	9,  VCPU_FPR9,  a0
	st_d	10, VCPU_FPR10, a0
	st_d	11, VCPU_FPR11, a0
	st_d	12, VCPU_FPR12, a0
	st_d	13, VCPU_FPR13, a0
	st_d	14, VCPU_FPR14, a0
	st_d	15, VCPU_FPR15, a0
	st_d	16, VCPU_FPR16, a0
	st_d	17, VCPU_FPR17, a0
	st_d	18, VCPU_FPR18, a0
	st_d	19, VCPU_FPR19, a0
	st_d	20, VCPU_FPR20, a0
	st_d	21, VCPU_FPR21, a0
	st_d	22, VCPU_FPR22, a0
	st_d	23, VCPU_FPR23, a0
	st_d	24, VCPU_FPR24, a0
	st_d	25, VCPU_FPR25, a0
	st_d	26, VCPU_FPR26, a0
	st_d	27, VCPU_FPR27, a0
	st_d	28, VCPU_FPR28, a0
	st_d	29, VCPU_FPR29, a0
	st_d	30, VCPU_FPR30, a0
	st_d	31, VCPU_FPR31, a0
	jr	ra
	 nop
	END(__kvm_save_msa)

LEAF(__kvm_restore_msa)
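	/*
	 * Restore all 32 full-width (128-bit) MSA vector registers from the
	 * vcpu FP context, using the VCPU_FPR* offsets from a0.
	 */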
	ld_d	0,  VCPU_FPR0,  a0
	ld_d	1,  VCPU_FPR1,  a0
	ld_d	2,  VCPU_FPR2,  a0
	ld_d	3,  VCPU_FPR3,  a0
	ld_d	4,  VCPU_FPR4,  a0
	ld_d	5,  VCPU_FPR5,  a0
	ld_d	6,  VCPU_FPR6,  a0
	ld_d	7,  VCPU_FPR7,  a0
	ld_d	8,  VCPU_FPR8,  a0
	ld_d	9,  VCPU_FPR9,  a0
	ld_d	10, VCPU_FPR10, a0
	ld_d	11, VCPU_FPR11, a0
	ld_d	12, VCPU_FPR12, a0
	ld_d	13, VCPU_FPR13, a0
	ld_d	14, VCPU_FPR14, a0
	ld_d	15, VCPU_FPR15, a0
	ld_d	16, VCPU_FPR16, a0
	ld_d	17, VCPU_FPR17, a0
	ld_d	18, VCPU_FPR18, a0
	ld_d	19, VCPU_FPR19, a0
	ld_d	20, VCPU_FPR20, a0
	ld_d	21, VCPU_FPR21, a0
	ld_d	22, VCPU_FPR22, a0
	ld_d	23, VCPU_FPR23, a0
	ld_d	24, VCPU_FPR24, a0
	ld_d	25, VCPU_FPR25, a0
	ld_d	26, VCPU_FPR26, a0
	ld_d	27, VCPU_FPR27, a0
	ld_d	28, VCPU_FPR28, a0
	ld_d	29, VCPU_FPR29, a0
	ld_d	30, VCPU_FPR30, a0
	ld_d	31, VCPU_FPR31, a0
	jr	ra
	 nop
	END(__kvm_restore_msa)

	.macro	kvm_restore_msa_upper	wr, off, base
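	/*
	 * Restore only the upper 64 bits of MSA vector register \wr from
	 * \off(\base).  On 64-bit kernels a single load plus insert_d into
	 * doubleword element 1 suffices; on 32-bit kernels the two words are
	 * inserted into word elements 2 and 3 in an endian-dependent order
	 * to match the in-memory layout.
	 */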
	.set	push
	.set	noat
#ifdef CONFIG_64BIT
	ld	$1, \off(\base)
	insert_d \wr, 1
#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
	lw	$1, \off(\base)
	insert_w \wr, 2
	lw	$1, (\off+4)(\base)
	insert_w \wr, 3
#else /* CONFIG_CPU_BIG_ENDIAN */
	lw	$1, (\off+4)(\base)
	insert_w \wr, 2
	lw	$1, \off(\base)
	insert_w \wr, 3
#endif
	.set	pop
	.endm

LEAF(__kvm_restore_msa_upper)
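	/*
	 * Restore only the upper 64 bits of each vector register, for use
	 * when the lower 64 bits are already live in the scalar FP
	 * registers (i.e. the FP context has already been restored).
	 */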
	kvm_restore_msa_upper	0,  VCPU_FPR0 +8, a0
	kvm_restore_msa_upper	1,  VCPU_FPR1 +8, a0
	kvm_restore_msa_upper	2,  VCPU_FPR2 +8, a0
	kvm_restore_msa_upper	3,  VCPU_FPR3 +8, a0
	kvm_restore_msa_upper	4,  VCPU_FPR4 +8, a0
	kvm_restore_msa_upper	5,  VCPU_FPR5 +8, a0
	kvm_restore_msa_upper	6,  VCPU_FPR6 +8, a0
	kvm_restore_msa_upper	7,  VCPU_FPR7 +8, a0
	kvm_restore_msa_upper	8,  VCPU_FPR8 +8, a0
	kvm_restore_msa_upper	9,  VCPU_FPR9 +8, a0
	kvm_restore_msa_upper	10, VCPU_FPR10+8, a0
	kvm_restore_msa_upper	11, VCPU_FPR11+8, a0
	kvm_restore_msa_upper	12, VCPU_FPR12+8, a0
	kvm_restore_msa_upper	13, VCPU_FPR13+8, a0
	kvm_restore_msa_upper	14, VCPU_FPR14+8, a0
	kvm_restore_msa_upper	15, VCPU_FPR15+8, a0
	kvm_restore_msa_upper	16, VCPU_FPR16+8, a0
	kvm_restore_msa_upper	17, VCPU_FPR17+8, a0
	kvm_restore_msa_upper	18, VCPU_FPR18+8, a0
	kvm_restore_msa_upper	19, VCPU_FPR19+8, a0
	kvm_restore_msa_upper	20, VCPU_FPR20+8, a0
	kvm_restore_msa_upper	21, VCPU_FPR21+8, a0
	kvm_restore_msa_upper	22, VCPU_FPR22+8, a0
	kvm_restore_msa_upper	23, VCPU_FPR23+8, a0
	kvm_restore_msa_upper	24, VCPU_FPR24+8, a0
	kvm_restore_msa_upper	25, VCPU_FPR25+8, a0
	kvm_restore_msa_upper	26, VCPU_FPR26+8, a0
	kvm_restore_msa_upper	27, VCPU_FPR27+8, a0
	kvm_restore_msa_upper	28, VCPU_FPR28+8, a0
	kvm_restore_msa_upper	29, VCPU_FPR29+8, a0
	kvm_restore_msa_upper	30, VCPU_FPR30+8, a0
	kvm_restore_msa_upper	31, VCPU_FPR31+8, a0
	jr	ra
	 nop
	END(__kvm_restore_msa_upper)

LEAF(__kvm_restore_msacsr)
	lw	t0, VCPU_MSA_CSR(a0)
	/*
	 * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
	 * See kvm_mips_csr_die_notify(), which handles the case where the
	 * value in t0 raises an MSA FP exception.  That exception must be
	 * stepped over and ignored, since the cause bits it sets must
	 * remain in place for the guest.
	 */
	_ctcmsa	MSA_CSR, t0
	jr	ra
	 nop
	END(__kvm_restore_msacsr)