author    Paolo Bonzini <pbonzini@redhat.com>  2018-06-14 17:42:54 +0200
committer Paolo Bonzini <pbonzini@redhat.com>  2018-06-14 17:42:54 +0200
commit    09027ab73b82ec773f6ab8ae9468d7e15b748dfa (patch)
tree      dc418a7b8712b8591f0e2a5c0c81ee6e43c335b6 /arch/powerpc/include
parent    273ba45796c14b4a2b669098f13d576b9e233dd8 (diff)
parent    f61e0d3cc4aee194014074471658a5a037e311ce (diff)
Merge tag 'kvm-ppc-next-4.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/asm-prototypes.h  |  6
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h       | 37
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h    | 20
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h        | 20
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h         | 21
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h          | 17
-rw-r--r--  arch/powerpc/include/asm/reg.h              |  1
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h         |  1
8 files changed, 77 insertions, 46 deletions
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index aa9e785..7841b8a 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -134,7 +134,13 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
void pnv_power9_force_smt4_catch(void);
void pnv_power9_force_smt4_release(void);
+/* Transaction memory related */
void tm_enable(void);
void tm_disable(void);
void tm_abort(uint8_t cause);
+
+struct kvm_vcpu;
+void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index e7377b7..1f345a0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -104,6 +104,7 @@ struct kvmppc_vcore {
ulong vtb; /* virtual timebase */
ulong conferring_threads;
unsigned int halt_poll_ns;
+ atomic_t online_count;
};
struct kvmppc_vcpu_book3s {
@@ -209,6 +210,7 @@ extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
+extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
@@ -256,6 +258,21 @@ extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
+void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
+void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
+#else
+static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
+#endif
+
+void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
extern int kvm_irq_bypass;
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
@@ -274,12 +291,12 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
- vcpu->arch.gpr[num] = val;
+ vcpu->arch.regs.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
- return vcpu->arch.gpr[num];
+ return vcpu->arch.regs.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
@@ -294,42 +311,42 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
- vcpu->arch.xer = val;
+ vcpu->arch.regs.xer = val;
}
static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.xer;
+ return vcpu->arch.regs.xer;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
- vcpu->arch.ctr = val;
+ vcpu->arch.regs.ctr = val;
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.ctr;
+ return vcpu->arch.regs.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
- vcpu->arch.lr = val;
+ vcpu->arch.regs.link = val;
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.lr;
+ return vcpu->arch.regs.link;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
- vcpu->arch.pc = val;
+ vcpu->arch.regs.nip = val;
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.pc;
+ return vcpu->arch.regs.nip;
}
static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
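
The accessor changes above fold the loose pc/lr/ctr/xer/gpr fields into an embedded pt_regs (the per-field members are removed from kvm_vcpu_arch in the kvm_host.h hunk later in this diff), so only these inline helpers need to know about the new layout. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real kernel structures:

/* Standalone mock (not kernel code): callers keep using kvmppc_get_pc() /
 * kvmppc_set_pc(), so moving the program counter from a loose 'pc' field
 * into an embedded pt_regs-style struct only touches the inline helpers.
 * All type and field names here are simplified stand-ins. */
#include <stdio.h>

struct mock_pt_regs { unsigned long gpr[32], nip, link, ctr, xer; };
struct mock_vcpu_arch { struct mock_pt_regs regs; };
struct mock_vcpu { struct mock_vcpu_arch arch; };

static inline void kvmppc_set_pc(struct mock_vcpu *vcpu, unsigned long val)
{
	vcpu->arch.regs.nip = val;   /* was: vcpu->arch.pc = val; */
}

static inline unsigned long kvmppc_get_pc(struct mock_vcpu *vcpu)
{
	return vcpu->arch.regs.nip;  /* was: return vcpu->arch.pc; */
}

int main(void)
{
	struct mock_vcpu v = { 0 };

	kvmppc_set_pc(&v, 0x700);
	printf("guest nip = 0x%lx\n", kvmppc_get_pc(&v));
	return 0;
}
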
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index c424e44..dc435a5 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -483,15 +483,15 @@ static inline u64 sanitize_msr(u64 msr)
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
vcpu->arch.cr = vcpu->arch.cr_tm;
- vcpu->arch.xer = vcpu->arch.xer_tm;
- vcpu->arch.lr = vcpu->arch.lr_tm;
- vcpu->arch.ctr = vcpu->arch.ctr_tm;
+ vcpu->arch.regs.xer = vcpu->arch.xer_tm;
+ vcpu->arch.regs.link = vcpu->arch.lr_tm;
+ vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
vcpu->arch.amr = vcpu->arch.amr_tm;
vcpu->arch.ppr = vcpu->arch.ppr_tm;
vcpu->arch.dscr = vcpu->arch.dscr_tm;
vcpu->arch.tar = vcpu->arch.tar_tm;
- memcpy(vcpu->arch.gpr, vcpu->arch.gpr_tm,
- sizeof(vcpu->arch.gpr));
+ memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
+ sizeof(vcpu->arch.regs.gpr));
vcpu->arch.fp = vcpu->arch.fp_tm;
vcpu->arch.vr = vcpu->arch.vr_tm;
vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
@@ -500,15 +500,15 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
vcpu->arch.cr_tm = vcpu->arch.cr;
- vcpu->arch.xer_tm = vcpu->arch.xer;
- vcpu->arch.lr_tm = vcpu->arch.lr;
- vcpu->arch.ctr_tm = vcpu->arch.ctr;
+ vcpu->arch.xer_tm = vcpu->arch.regs.xer;
+ vcpu->arch.lr_tm = vcpu->arch.regs.link;
+ vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
vcpu->arch.amr_tm = vcpu->arch.amr;
vcpu->arch.ppr_tm = vcpu->arch.ppr;
vcpu->arch.dscr_tm = vcpu->arch.dscr;
vcpu->arch.tar_tm = vcpu->arch.tar;
- memcpy(vcpu->arch.gpr_tm, vcpu->arch.gpr,
- sizeof(vcpu->arch.gpr));
+ memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
+ sizeof(vcpu->arch.regs.gpr));
vcpu->arch.fp_tm = vcpu->arch.fp;
vcpu->arch.vr_tm = vcpu->arch.vr;
vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index bc6e29e..d513e3e 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -36,12 +36,12 @@
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
- vcpu->arch.gpr[num] = val;
+ vcpu->arch.regs.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
- return vcpu->arch.gpr[num];
+ return vcpu->arch.regs.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
@@ -56,12 +56,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
- vcpu->arch.xer = val;
+ vcpu->arch.regs.xer = val;
}
static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.xer;
+ return vcpu->arch.regs.xer;
}
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
@@ -72,32 +72,32 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
- vcpu->arch.ctr = val;
+ vcpu->arch.regs.ctr = val;
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.ctr;
+ return vcpu->arch.regs.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
- vcpu->arch.lr = val;
+ vcpu->arch.regs.link = val;
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.lr;
+ return vcpu->arch.regs.link;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
- vcpu->arch.pc = val;
+ vcpu->arch.regs.nip = val;
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.pc;
+ return vcpu->arch.regs.nip;
}
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 17498e9..fa4efa7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -269,7 +269,6 @@ struct kvm_arch {
unsigned long host_lpcr;
unsigned long sdr1;
unsigned long host_sdr1;
- int tlbie_lock;
unsigned long lpcr;
unsigned long vrma_slb_v;
int mmu_ready;
@@ -454,6 +453,12 @@ struct mmio_hpte_cache {
#define KVMPPC_VSX_COPY_WORD 1
#define KVMPPC_VSX_COPY_DWORD 2
#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3
+#define KVMPPC_VSX_COPY_WORD_LOAD_DUMP 4
+
+#define KVMPPC_VMX_COPY_BYTE 8
+#define KVMPPC_VMX_COPY_HWORD 9
+#define KVMPPC_VMX_COPY_WORD 10
+#define KVMPPC_VMX_COPY_DWORD 11
struct openpic;
@@ -486,7 +491,7 @@ struct kvm_vcpu_arch {
struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
#endif
- ulong gpr[32];
+ struct pt_regs regs;
struct thread_fp_state fp;
@@ -521,14 +526,10 @@ struct kvm_vcpu_arch {
u32 qpr[32];
#endif
- ulong pc;
- ulong ctr;
- ulong lr;
#ifdef CONFIG_PPC_BOOK3S
ulong tar;
#endif
- ulong xer;
u32 cr;
#ifdef CONFIG_PPC_BOOK3S
@@ -626,7 +627,6 @@ struct kvm_vcpu_arch {
struct thread_vr_state vr_tm;
u32 vrsave_tm; /* also USPRG0 */
-
#endif
#ifdef CONFIG_KVM_EXIT_TIMING
@@ -681,16 +681,17 @@ struct kvm_vcpu_arch {
* Number of simulations for vsx.
* If we use 2*8bytes to simulate 1*16bytes,
* then the number should be 2 and
- * mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD.
+ * mmio_copy_type=KVMPPC_VSX_COPY_DWORD.
* If we use 4*4bytes to simulate 1*16bytes,
* the number should be 4 and
* mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD.
*/
u8 mmio_vsx_copy_nums;
u8 mmio_vsx_offset;
- u8 mmio_vsx_copy_type;
u8 mmio_vsx_tx_sx_enabled;
u8 mmio_vmx_copy_nums;
+ u8 mmio_vmx_offset;
+ u8 mmio_copy_type;
u8 osi_needed;
u8 osi_enabled;
u8 papr_enabled;
@@ -772,6 +773,8 @@ struct kvm_vcpu_arch {
u64 busy_preempt;
u32 emul_inst;
+
+ u32 online;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
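
The mmio_vsx_copy_nums comment in the hunk above reduces to simple arithmetic: a 16-byte VSX access is emulated as N smaller MMIO copies, with N = 16 / element size. A self-contained worked example using the copy-type constants from this file; everything else is a stand-in, not kernel code:

/* Illustrative arithmetic only, based on the comment in the kvm_host.h
 * hunk above. The constant values match the #defines in this file. */
#include <stdio.h>

#define KVMPPC_VSX_COPY_WORD	1	/* 4-byte elements */
#define KVMPPC_VSX_COPY_DWORD	2	/* 8-byte elements */

int main(void)
{
	/* 2 * 8 bytes simulate one 16-byte access */
	unsigned int dword_copies = 16 / 8;	/* mmio_vsx_copy_nums = 2 */
	/* 4 * 4 bytes simulate one 16-byte access */
	unsigned int word_copies  = 16 / 4;	/* mmio_vsx_copy_nums = 4 */

	printf("KVMPPC_VSX_COPY_DWORD: copy_nums = %u\n", dword_copies);
	printf("KVMPPC_VSX_COPY_WORD:  copy_nums = %u\n", word_copies);
	return 0;
}
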
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index abe7032..e991821 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -52,7 +52,7 @@ enum emulation_result {
EMULATE_EXIT_USER, /* emulation requires exit to user-space */
};
-enum instruction_type {
+enum instruction_fetch_type {
INST_GENERIC,
INST_SC, /* system call */
};
@@ -81,10 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend);
-extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
- struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
-extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
- struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
+extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes, int is_default_endian);
+extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes,
int is_default_endian);
@@ -93,7 +93,7 @@ extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
int is_default_endian);
extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
- enum instruction_type type, u32 *inst);
+ enum instruction_fetch_type type, u32 *inst);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data);
@@ -265,6 +265,8 @@ union kvmppc_one_reg {
vector128 vval;
u64 vsxval[2];
u32 vsx32val[4];
+ u16 vsx16val[8];
+ u8 vsx8val[16];
struct {
u64 addr;
u64 length;
@@ -324,13 +326,14 @@ struct kvmppc_ops {
int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
unsigned long flags);
+ void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;
static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
- enum instruction_type type, u32 *inst)
+ enum instruction_fetch_type type, u32 *inst)
{
int ret = EMULATE_DONE;
u32 fetched_inst;
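
The renamed kvmppc_handle_vmx_load()/kvmppc_handle_vmx_store() prototypes above take the access width explicitly, instead of the fixed 128-bit split hard-coded by the old *_by2x64 helpers. A hypothetical, self-contained sketch of how an emulator might map that width onto the KVMPPC_VMX_COPY_* values added in kvm_host.h; the dispatch function is illustrative only and not part of this diff:

/* Standalone mock with stand-in names, not kernel code. The constant
 * values match the #defines added in kvm_host.h earlier in this diff. */
#include <stdio.h>

#define KVMPPC_VMX_COPY_BYTE	8
#define KVMPPC_VMX_COPY_HWORD	9
#define KVMPPC_VMX_COPY_WORD	10
#define KVMPPC_VMX_COPY_DWORD	11

static int vmx_copy_type_for(unsigned int bytes)
{
	switch (bytes) {
	case 1: return KVMPPC_VMX_COPY_BYTE;
	case 2: return KVMPPC_VMX_COPY_HWORD;
	case 4: return KVMPPC_VMX_COPY_WORD;
	case 8: return KVMPPC_VMX_COPY_DWORD;
	default: return -1;
	}
}

int main(void)
{
	unsigned int bytes = 2;	/* e.g. a halfword VMX element access */

	printf("bytes=%u -> mmio_copy_type=%d\n", bytes, vmx_copy_type_for(bytes));
	return 0;
}
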
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 75c5b2c..5625684 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -385,6 +385,7 @@
#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */
#define SPRN_PMCR 0x374 /* Power Management Control Register */
+#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */
/* HFSCR and FSCR bit numbers are the same */
#define FSCR_SCV_LG 12 /* Enable System Call Vectored */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 833ed9a..1b32b56 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs