Diffstat (limited to 'arch/x86/kvm')
-rw-r--r-- | arch/x86/kvm/Kconfig       |    1
-rw-r--r-- | arch/x86/kvm/emulate.c     | 1749
-rw-r--r-- | arch/x86/kvm/mmu.c         | 1226
-rw-r--r-- | arch/x86/kvm/mmu.h         |   25
-rw-r--r-- | arch/x86/kvm/mmu_audit.c   |   12
-rw-r--r-- | arch/x86/kvm/mmutrace.h    |   48
-rw-r--r-- | arch/x86/kvm/paging_tmpl.h |  258
-rw-r--r-- | arch/x86/kvm/svm.c         |    6
-rw-r--r-- | arch/x86/kvm/trace.h       |   31
-rw-r--r-- | arch/x86/kvm/vmx.c         | 2784
-rw-r--r-- | arch/x86/kvm/x86.c         |  374
-rw-r--r-- | arch/x86/kvm/x86.h         |   44
12 files changed, 4744 insertions(+), 1814 deletions(-)
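The emulate.c hunks below all follow one mechanical pattern: the fields that used to live in `struct decode_cache` (accessed through `ctxt->decode`) are folded directly into `struct x86_emulate_ctxt`, the decode-time instruction pointer is renamed to `_eip`, and helpers stop taking a separate `struct x86_emulate_ops *ops` argument because the ops table is reachable as `ctxt->ops`. The following is a minimal, self-contained sketch of that shape — not kernel code; the struct layouts are simplified and `fake_segment_base` is a made-up stand-in so the example runs on its own.

```c
#include <stdio.h>

struct x86_emulate_ctxt;

struct x86_emulate_ops {
	unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt,
						 int seg);
};

/* After the conversion: former decode_cache fields sit in the context. */
struct x86_emulate_ctxt {
	struct x86_emulate_ops *ops;   /* callbacks reached via ctxt, not a parameter */
	int ad_bytes;                  /* former decode_cache field */
	unsigned long _eip;            /* former decode_cache "eip", renamed _eip */
};

static unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	/* Simplified: always wrap at the decoded address width. */
	ctxt->_eip = (ctxt->_eip + rel) & ad_mask(ctxt);
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	/* The ops table hangs off the context instead of being passed in. */
	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

/* Hypothetical stand-in so the sketch links and runs. */
static unsigned long fake_segment_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	(void)ctxt;
	return seg * 0x1000ul;
}

int main(void)
{
	struct x86_emulate_ops ops = {
		.get_cached_segment_base = fake_segment_base,
	};
	struct x86_emulate_ctxt ctxt = {
		.ops = &ops, .ad_bytes = 2, ._eip = 0xfff0,
	};

	jmp_rel(&ctxt, 0x20);	/* wraps within the 16-bit address mask */
	printf("eip=0x%lx base=0x%lx\n", ctxt._eip, seg_base(&ctxt, 3));
	return 0;
}
```

The real helpers (see `register_address_increment` in the diff) additionally preserve the bits above the address mask when `ad_bytes` is narrower than `unsigned long`; the sketch omits that detail for brevity.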
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 65cf823..988724b 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -31,6 +31,7 @@ config KVM select KVM_ASYNC_PF select USER_RETURN_NOTIFIER select KVM_MMIO + select TASK_DELAY_ACCT ---help--- Support hosting fully virtualized guest machines using hardware virtualization extensions. You will need a fairly recent diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index adc9867..6f08bc9 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -407,76 +407,59 @@ struct gprefix { } \ } while (0) -/* Fetch next part of the instruction being emulated. */ -#define insn_fetch(_type, _size, _eip) \ -({ unsigned long _x; \ - rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \ - if (rc != X86EMUL_CONTINUE) \ - goto done; \ - (_eip) += (_size); \ - (_type)_x; \ -}) - -#define insn_fetch_arr(_arr, _size, _eip) \ -({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \ - if (rc != X86EMUL_CONTINUE) \ - goto done; \ - (_eip) += (_size); \ -}) - static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, enum x86_intercept intercept, enum x86_intercept_stage stage) { struct x86_instruction_info info = { .intercept = intercept, - .rep_prefix = ctxt->decode.rep_prefix, - .modrm_mod = ctxt->decode.modrm_mod, - .modrm_reg = ctxt->decode.modrm_reg, - .modrm_rm = ctxt->decode.modrm_rm, - .src_val = ctxt->decode.src.val64, - .src_bytes = ctxt->decode.src.bytes, - .dst_bytes = ctxt->decode.dst.bytes, - .ad_bytes = ctxt->decode.ad_bytes, + .rep_prefix = ctxt->rep_prefix, + .modrm_mod = ctxt->modrm_mod, + .modrm_reg = ctxt->modrm_reg, + .modrm_rm = ctxt->modrm_rm, + .src_val = ctxt->src.val64, + .src_bytes = ctxt->src.bytes, + .dst_bytes = ctxt->dst.bytes, + .ad_bytes = ctxt->ad_bytes, .next_rip = ctxt->eip, }; return ctxt->ops->intercept(ctxt, &info, stage); } -static inline unsigned long ad_mask(struct decode_cache *c) +static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) { - return (1UL << (c->ad_bytes << 3)) - 1; + return (1UL << (ctxt->ad_bytes << 3)) - 1; } /* Access/update address held in a register, based on addressing mode. */ static inline unsigned long -address_mask(struct decode_cache *c, unsigned long reg) +address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) { - if (c->ad_bytes == sizeof(unsigned long)) + if (ctxt->ad_bytes == sizeof(unsigned long)) return reg; else - return reg & ad_mask(c); + return reg & ad_mask(ctxt); } static inline unsigned long -register_address(struct decode_cache *c, unsigned long reg) +register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg) { - return address_mask(c, reg); + return address_mask(ctxt, reg); } static inline void -register_address_increment(struct decode_cache *c, unsigned long *reg, int inc) +register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc) { - if (c->ad_bytes == sizeof(unsigned long)) + if (ctxt->ad_bytes == sizeof(unsigned long)) *reg += inc; else - *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c)); + *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt)); } -static inline void jmp_rel(struct decode_cache *c, int rel) +static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) { - register_address_increment(c, &c->eip, rel); + register_address_increment(ctxt, &ctxt->_eip, rel); } static u32 desc_limit_scaled(struct desc_struct *desc) @@ -486,28 +469,26 @@ static u32 desc_limit_scaled(struct desc_struct *desc) return desc->g ? 
(limit << 12) | 0xfff : limit; } -static void set_seg_override(struct decode_cache *c, int seg) +static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg) { - c->has_seg_override = true; - c->seg_override = seg; + ctxt->has_seg_override = true; + ctxt->seg_override = seg; } -static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, int seg) +static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) { if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) return 0; - return ops->get_cached_segment_base(ctxt, seg); + return ctxt->ops->get_cached_segment_base(ctxt, seg); } -static unsigned seg_override(struct x86_emulate_ctxt *ctxt, - struct decode_cache *c) +static unsigned seg_override(struct x86_emulate_ctxt *ctxt) { - if (!c->has_seg_override) + if (!ctxt->has_seg_override) return 0; - return c->seg_override; + return ctxt->seg_override; } static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, @@ -579,7 +560,6 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, unsigned size, bool write, bool fetch, ulong *linear) { - struct decode_cache *c = &ctxt->decode; struct desc_struct desc; bool usable; ulong la; @@ -587,7 +567,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, u16 sel; unsigned cpl, rpl; - la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea; + la = seg_base(ctxt, addr.seg) + addr.ea; switch (ctxt->mode) { case X86EMUL_MODE_REAL: break; @@ -637,7 +617,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, } break; } - if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : c->ad_bytes != 8) + if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8) la &= (u32)-1; *linear = la; return X86EMUL_CONTINUE; @@ -671,11 +651,10 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); } -static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, +static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, unsigned long eip, u8 *dest) { - struct fetch_cache *fc = &ctxt->decode.fetch; + struct fetch_cache *fc = &ctxt->fetch; int rc; int size, cur_size; @@ -687,8 +666,8 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, rc = __linearize(ctxt, addr, size, false, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; - rc = ops->fetch(ctxt, linear, fc->data + cur_size, - size, &ctxt->exception); + rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size, + size, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; fc->end += size; @@ -698,7 +677,6 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, } static int do_insn_fetch(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, unsigned long eip, void *dest, unsigned size) { int rc; @@ -707,13 +685,30 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt, if (eip + size - ctxt->eip > 15) return X86EMUL_UNHANDLEABLE; while (size--) { - rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++); + rc = do_insn_fetch_byte(ctxt, eip++, dest++); if (rc != X86EMUL_CONTINUE) return rc; } return X86EMUL_CONTINUE; } +/* Fetch next part of the instruction being emulated. 
*/ +#define insn_fetch(_type, _size, _eip) \ +({ unsigned long _x; \ + rc = do_insn_fetch(ctxt, (_eip), &_x, (_size)); \ + if (rc != X86EMUL_CONTINUE) \ + goto done; \ + (_eip) += (_size); \ + (_type)_x; \ +}) + +#define insn_fetch_arr(_arr, _size, _eip) \ +({ rc = do_insn_fetch(ctxt, (_eip), _arr, (_size)); \ + if (rc != X86EMUL_CONTINUE) \ + goto done; \ + (_eip) += (_size); \ +}) + /* * Given the 'reg' portion of a ModRM byte, and a register block, return a * pointer into the block that addresses the relevant register. @@ -857,16 +852,15 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, static void decode_register_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, - struct decode_cache *c, int inhibit_bytereg) { - unsigned reg = c->modrm_reg; - int highbyte_regs = c->rex_prefix == 0; + unsigned reg = ctxt->modrm_reg; + int highbyte_regs = ctxt->rex_prefix == 0; - if (!(c->d & ModRM)) - reg = (c->b & 7) | ((c->rex_prefix & 1) << 3); + if (!(ctxt->d & ModRM)) + reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3); - if (c->d & Sse) { + if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = reg; @@ -875,49 +869,47 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt, } op->type = OP_REG; - if ((c->d & ByteOp) && !inhibit_bytereg) { - op->addr.reg = decode_register(reg, c->regs, highbyte_regs); + if ((ctxt->d & ByteOp) && !inhibit_bytereg) { + op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs); op->bytes = 1; } else { - op->addr.reg = decode_register(reg, c->regs, 0); - op->bytes = c->op_bytes; + op->addr.reg = decode_register(reg, ctxt->regs, 0); + op->bytes = ctxt->op_bytes; } fetch_register_operand(op); op->orig_val = op->val; } static int decode_modrm(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, struct operand *op) { - struct decode_cache *c = &ctxt->decode; u8 sib; int index_reg = 0, base_reg = 0, scale; int rc = X86EMUL_CONTINUE; ulong modrm_ea = 0; - if (c->rex_prefix) { - c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */ - index_reg = (c->rex_prefix & 2) << 2; /* REX.X */ - c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REG.B */ + if (ctxt->rex_prefix) { + ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */ + index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */ + ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */ } - c->modrm = insn_fetch(u8, 1, c->eip); - c->modrm_mod |= (c->modrm & 0xc0) >> 6; - c->modrm_reg |= (c->modrm & 0x38) >> 3; - c->modrm_rm |= (c->modrm & 0x07); - c->modrm_seg = VCPU_SREG_DS; + ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip); + ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6; + ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; + ctxt->modrm_rm |= (ctxt->modrm & 0x07); + ctxt->modrm_seg = VCPU_SREG_DS; - if (c->modrm_mod == 3) { + if (ctxt->modrm_mod == 3) { op->type = OP_REG; - op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes; - op->addr.reg = decode_register(c->modrm_rm, - c->regs, c->d & ByteOp); - if (c->d & Sse) { + op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; + op->addr.reg = decode_register(ctxt->modrm_rm, + ctxt->regs, ctxt->d & ByteOp); + if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; - op->addr.xmm = c->modrm_rm; - read_sse_reg(ctxt, &op->vec_val, c->modrm_rm); + op->addr.xmm = ctxt->modrm_rm; + read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm); return rc; } fetch_register_operand(op); @@ -926,26 +918,26 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, op->type = OP_MEM; - if (c->ad_bytes == 2) { - unsigned bx = c->regs[VCPU_REGS_RBX]; - unsigned bp = c->regs[VCPU_REGS_RBP]; - unsigned si = c->regs[VCPU_REGS_RSI]; - unsigned di = c->regs[VCPU_REGS_RDI]; + if (ctxt->ad_bytes == 2) { + unsigned bx = ctxt->regs[VCPU_REGS_RBX]; + unsigned bp = ctxt->regs[VCPU_REGS_RBP]; + unsigned si = ctxt->regs[VCPU_REGS_RSI]; + unsigned di = ctxt->regs[VCPU_REGS_RDI]; /* 16-bit ModR/M decode. */ - switch (c->modrm_mod) { + switch (ctxt->modrm_mod) { case 0: - if (c->modrm_rm == 6) - modrm_ea += insn_fetch(u16, 2, c->eip); + if (ctxt->modrm_rm == 6) + modrm_ea += insn_fetch(u16, 2, ctxt->_eip); break; case 1: - modrm_ea += insn_fetch(s8, 1, c->eip); + modrm_ea += insn_fetch(s8, 1, ctxt->_eip); break; case 2: - modrm_ea += insn_fetch(u16, 2, c->eip); + modrm_ea += insn_fetch(u16, 2, ctxt->_eip); break; } - switch (c->modrm_rm) { + switch (ctxt->modrm_rm) { case 0: modrm_ea += bx + si; break; @@ -965,46 +957,46 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, modrm_ea += di; break; case 6: - if (c->modrm_mod != 0) + if (ctxt->modrm_mod != 0) modrm_ea += bp; break; case 7: modrm_ea += bx; break; } - if (c->modrm_rm == 2 || c->modrm_rm == 3 || - (c->modrm_rm == 6 && c->modrm_mod != 0)) - c->modrm_seg = VCPU_SREG_SS; + if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 || + (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0)) + ctxt->modrm_seg = VCPU_SREG_SS; modrm_ea = (u16)modrm_ea; } else { /* 32/64-bit ModR/M decode. 
*/ - if ((c->modrm_rm & 7) == 4) { - sib = insn_fetch(u8, 1, c->eip); + if ((ctxt->modrm_rm & 7) == 4) { + sib = insn_fetch(u8, 1, ctxt->_eip); index_reg |= (sib >> 3) & 7; base_reg |= sib & 7; scale = sib >> 6; - if ((base_reg & 7) == 5 && c->modrm_mod == 0) - modrm_ea += insn_fetch(s32, 4, c->eip); + if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0) + modrm_ea += insn_fetch(s32, 4, ctxt->_eip); else - modrm_ea += c->regs[base_reg]; + modrm_ea += ctxt->regs[base_reg]; if (index_reg != 4) - modrm_ea += c->regs[index_reg] << scale; - } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) { + modrm_ea += ctxt->regs[index_reg] << scale; + } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) { if (ctxt->mode == X86EMUL_MODE_PROT64) - c->rip_relative = 1; + ctxt->rip_relative = 1; } else - modrm_ea += c->regs[c->modrm_rm]; - switch (c->modrm_mod) { + modrm_ea += ctxt->regs[ctxt->modrm_rm]; + switch (ctxt->modrm_mod) { case 0: - if (c->modrm_rm == 5) - modrm_ea += insn_fetch(s32, 4, c->eip); + if (ctxt->modrm_rm == 5) + modrm_ea += insn_fetch(s32, 4, ctxt->_eip); break; case 1: - modrm_ea += insn_fetch(s8, 1, c->eip); + modrm_ea += insn_fetch(s8, 1, ctxt->_eip); break; case 2: - modrm_ea += insn_fetch(s32, 4, c->eip); + modrm_ea += insn_fetch(s32, 4, ctxt->_eip); break; } } @@ -1014,53 +1006,50 @@ done: } static int decode_abs(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, struct operand *op) { - struct decode_cache *c = &ctxt->decode; int rc = X86EMUL_CONTINUE; op->type = OP_MEM; - switch (c->ad_bytes) { + switch (ctxt->ad_bytes) { case 2: - op->addr.mem.ea = insn_fetch(u16, 2, c->eip); + op->addr.mem.ea = insn_fetch(u16, 2, ctxt->_eip); break; case 4: - op->addr.mem.ea = insn_fetch(u32, 4, c->eip); + op->addr.mem.ea = insn_fetch(u32, 4, ctxt->_eip); break; case 8: - op->addr.mem.ea = insn_fetch(u64, 8, c->eip); + op->addr.mem.ea = insn_fetch(u64, 8, ctxt->_eip); break; } done: return rc; } -static void fetch_bit_operand(struct decode_cache *c) +static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) { long sv = 0, mask; - if (c->dst.type == OP_MEM && c->src.type == OP_REG) { - mask = ~(c->dst.bytes * 8 - 1); + if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { + mask = ~(ctxt->dst.bytes * 8 - 1); - if (c->src.bytes == 2) - sv = (s16)c->src.val & (s16)mask; - else if (c->src.bytes == 4) - sv = (s32)c->src.val & (s32)mask; + if (ctxt->src.bytes == 2) + sv = (s16)ctxt->src.val & (s16)mask; + else if (ctxt->src.bytes == 4) + sv = (s32)ctxt->src.val & (s32)mask; - c->dst.addr.mem.ea += (sv >> 3); + ctxt->dst.addr.mem.ea += (sv >> 3); } /* only subword offset */ - c->src.val &= (c->dst.bytes << 3) - 1; + ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; } static int read_emulated(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, unsigned long addr, void *dest, unsigned size) { int rc; - struct read_cache *mc = &ctxt->decode.mem_read; + struct read_cache *mc = &ctxt->mem_read; while (size) { int n = min(size, 8u); @@ -1068,8 +1057,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt, if (mc->pos < mc->end) goto read_cached; - rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n, - &ctxt->exception); + rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n, + &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; mc->end += n; @@ -1094,7 +1083,7 @@ static int segmented_read(struct x86_emulate_ctxt *ctxt, rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; - return read_emulated(ctxt, ctxt->ops, 
linear, data, size); + return read_emulated(ctxt, linear, data, size); } static int segmented_write(struct x86_emulate_ctxt *ctxt, @@ -1128,26 +1117,24 @@ static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, } static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, unsigned int size, unsigned short port, void *dest) { - struct read_cache *rc = &ctxt->decode.io_read; + struct read_cache *rc = &ctxt->io_read; if (rc->pos == rc->end) { /* refill pio read ahead */ - struct decode_cache *c = &ctxt->decode; unsigned int in_page, n; - unsigned int count = c->rep_prefix ? - address_mask(c, c->regs[VCPU_REGS_RCX]) : 1; + unsigned int count = ctxt->rep_prefix ? + address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1; in_page = (ctxt->eflags & EFLG_DF) ? - offset_in_page(c->regs[VCPU_REGS_RDI]) : - PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]); + offset_in_page(ctxt->regs[VCPU_REGS_RDI]) : + PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]); n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size, count); if (n == 0) n = 1; rc->pos = rc->end = 0; - if (!ops->pio_in_emulated(ctxt, size, port, rc->data, n)) + if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n)) return 0; rc->end = n * size; } @@ -1158,9 +1145,10 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, u16 selector, struct desc_ptr *dt) { + struct x86_emulate_ops *ops = ctxt->ops; + if (selector & 1 << 2) { struct desc_struct desc; u16 sel; @@ -1177,48 +1165,42 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, /* allowed just for 8 bytes segments */ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, u16 selector, struct desc_struct *desc) { struct desc_ptr dt; u16 index = selector >> 3; - int ret; ulong addr; - get_descriptor_table_ptr(ctxt, ops, selector, &dt); + get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); - addr = dt.address + index * 8; - ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); - return ret; + addr = dt.address + index * 8; + return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, + &ctxt->exception); } /* allowed just for 8 bytes segments */ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, u16 selector, struct desc_struct *desc) { struct desc_ptr dt; u16 index = selector >> 3; ulong addr; - int ret; - get_descriptor_table_ptr(ctxt, ops, selector, &dt); + get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); addr = dt.address + index * 8; - ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); - - return ret; + return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, + &ctxt->exception); } /* Does not support long mode */ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, u16 selector, int seg) { struct desc_struct seg_desc; @@ -1253,7 +1235,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (null_selector) /* for NULL selector skip all following checks */ goto load; - ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc); + ret = read_segment_descriptor(ctxt, selector, &seg_desc); if (ret != X86EMUL_CONTINUE) return ret; @@ -1271,7 +1253,7 @@ static int load_segment_descriptor(struct 
x86_emulate_ctxt *ctxt, rpl = selector & 3; dpl = seg_desc.dpl; - cpl = ops->cpl(ctxt); + cpl = ctxt->ops->cpl(ctxt); switch (seg) { case VCPU_SREG_SS: @@ -1322,12 +1304,12 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (seg_desc.s) { /* mark segment as accessed */ seg_desc.type |= 1; - ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc); + ret = write_segment_descriptor(ctxt, selector, &seg_desc); if (ret != X86EMUL_CONTINUE) return ret; } load: - ops->set_segment(ctxt, selector, &seg_desc, 0, seg); + ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg); return X86EMUL_CONTINUE; exception: emulate_exception(ctxt, err_vec, err_code, true); @@ -1356,29 +1338,28 @@ static void write_register_operand(struct operand *op) static int writeback(struct x86_emulate_ctxt *ctxt) { int rc; - struct decode_cache *c = &ctxt->decode; - switch (c->dst.type) { + switch (ctxt->dst.type) { case OP_REG: - write_register_operand(&c->dst); + write_register_operand(&ctxt->dst); break; case OP_MEM: - if (c->lock_prefix) + if (ctxt->lock_prefix) rc = segmented_cmpxchg(ctxt, - c->dst.addr.mem, - &c->dst.orig_val, - &c->dst.val, - c->dst.bytes); + ctxt->dst.addr.mem, + &ctxt->dst.orig_val, + &ctxt->dst.val, + ctxt->dst.bytes); else rc = segmented_write(ctxt, - c->dst.addr.mem, - &c->dst.val, - c->dst.bytes); + ctxt->dst.addr.mem, + &ctxt->dst.val, + ctxt->dst.bytes); if (rc != X86EMUL_CONTINUE) return rc; break; case OP_XMM: - write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm); + write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm); break; case OP_NONE: /* no writeback */ @@ -1391,50 +1372,45 @@ static int writeback(struct x86_emulate_ctxt *ctxt) static int em_push(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; struct segmented_address addr; - register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes); - addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]); + register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes); + addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); addr.seg = VCPU_SREG_SS; /* Disable writeback. 
*/ - c->dst.type = OP_NONE; - return segmented_write(ctxt, addr, &c->src.val, c->op_bytes); + ctxt->dst.type = OP_NONE; + return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes); } static int emulate_pop(struct x86_emulate_ctxt *ctxt, void *dest, int len) { - struct decode_cache *c = &ctxt->decode; int rc; struct segmented_address addr; - addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]); + addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); addr.seg = VCPU_SREG_SS; rc = segmented_read(ctxt, addr, dest, len); if (rc != X86EMUL_CONTINUE) return rc; - register_address_increment(c, &c->regs[VCPU_REGS_RSP], len); + register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len); return rc; } static int em_pop(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - return emulate_pop(ctxt, &c->dst.val, c->op_bytes); + return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int emulate_popf(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, - void *dest, int len) + void *dest, int len) { int rc; unsigned long val, change_mask; int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; - int cpl = ops->cpl(ctxt); + int cpl = ctxt->ops->cpl(ctxt); rc = emulate_pop(ctxt, &val, len); if (rc != X86EMUL_CONTINUE) @@ -1470,49 +1446,41 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, static int em_popf(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - c->dst.type = OP_REG; - c->dst.addr.reg = &ctxt->eflags; - c->dst.bytes = c->op_bytes; - return emulate_popf(ctxt, ctxt->ops, &c->dst.val, c->op_bytes); + ctxt->dst.type = OP_REG; + ctxt->dst.addr.reg = &ctxt->eflags; + ctxt->dst.bytes = ctxt->op_bytes; + return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); } -static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, int seg) +static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg) { - struct decode_cache *c = &ctxt->decode; - - c->src.val = get_segment_selector(ctxt, seg); + ctxt->src.val = get_segment_selector(ctxt, seg); return em_push(ctxt); } -static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, int seg) +static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg) { - struct decode_cache *c = &ctxt->decode; unsigned long selector; int rc; - rc = emulate_pop(ctxt, &selector, c->op_bytes); + rc = emulate_pop(ctxt, &selector, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; - rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg); + rc = load_segment_descriptor(ctxt, (u16)selector, seg); return rc; } static int em_pusha(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - unsigned long old_esp = c->regs[VCPU_REGS_RSP]; + unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP]; int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RAX; while (reg <= VCPU_REGS_RDI) { (reg == VCPU_REGS_RSP) ? 
- (c->src.val = old_esp) : (c->src.val = c->regs[reg]); + (ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) @@ -1526,26 +1494,23 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt) static int em_pushf(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - c->src.val = (unsigned long)ctxt->eflags; + ctxt->src.val = (unsigned long)ctxt->eflags; return em_push(ctxt); } static int em_popa(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RDI; while (reg >= VCPU_REGS_RAX) { if (reg == VCPU_REGS_RSP) { - register_address_increment(c, &c->regs[VCPU_REGS_RSP], - c->op_bytes); + register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], + ctxt->op_bytes); --reg; } - rc = emulate_pop(ctxt, &c->regs[reg], c->op_bytes); + rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) break; --reg; @@ -1553,10 +1518,9 @@ static int em_popa(struct x86_emulate_ctxt *ctxt) return rc; } -int emulate_int_real(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, int irq) +int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { - struct decode_cache *c = &ctxt->decode; + struct x86_emulate_ops *ops = ctxt->ops; int rc; struct desc_ptr dt; gva_t cs_addr; @@ -1564,19 +1528,19 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt, u16 cs, eip; /* TODO: Add limit checks */ - c->src.val = ctxt->eflags; + ctxt->src.val = ctxt->eflags; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); - c->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); + ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; - c->src.val = c->eip; + ctxt->src.val = ctxt->_eip; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; @@ -1594,21 +1558,20 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt, if (rc != X86EMUL_CONTINUE) return rc; - rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS); + rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; - c->eip = eip; + ctxt->_eip = eip; return rc; } -static int emulate_int(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, int irq) +static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: - return emulate_int_real(ctxt, ops, irq); + return emulate_int_real(ctxt, irq); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: @@ -1619,10 +1582,8 @@ static int emulate_int(struct x86_emulate_ctxt *ctxt, } } -static int emulate_iret_real(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops) +static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; int rc = X86EMUL_CONTINUE; unsigned long temp_eip = 0; unsigned long temp_eflags = 0; @@ -1634,7 +1595,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt, /* TODO: Add stack limit check */ - rc = emulate_pop(ctxt, &temp_eip, c->op_bytes); + rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; @@ -1642,27 +1603,27 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt, if (temp_eip & ~0xffff) return emulate_gp(ctxt, 0); - rc = emulate_pop(ctxt, &cs, c->op_bytes); + rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; - rc = emulate_pop(ctxt, &temp_eflags, c->op_bytes); + rc 
= emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; - rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS); + rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; - c->eip = temp_eip; + ctxt->_eip = temp_eip; - if (c->op_bytes == 4) + if (ctxt->op_bytes == 4) ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); - else if (c->op_bytes == 2) { + else if (ctxt->op_bytes == 2) { ctxt->eflags &= ~0xffff; ctxt->eflags |= temp_eflags; } @@ -1673,12 +1634,11 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt, return rc; } -static inline int emulate_iret(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops* ops) +static int em_iret(struct x86_emulate_ctxt *ctxt) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: - return emulate_iret_real(ctxt, ops); + return emulate_iret_real(ctxt); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: @@ -1691,53 +1651,49 @@ static inline int emulate_iret(struct x86_emulate_ctxt *ctxt, static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; int rc; unsigned short sel; - memcpy(&sel, c->src.valptr + c->op_bytes, 2); + memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); - rc = load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS); + rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; - c->eip = 0; - memcpy(&c->eip, c->src.valptr, c->op_bytes); + ctxt->_eip = 0; + memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); return X86EMUL_CONTINUE; } static int em_grp1a(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - return emulate_pop(ctxt, &c->dst.val, c->dst.bytes); + return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes); } static int em_grp2(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - switch (c->modrm_reg) { + switch (ctxt->modrm_reg) { case 0: /* rol */ - emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcB("rol", ctxt->src, ctxt->dst, ctxt->eflags); break; case 1: /* ror */ - emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcB("ror", ctxt->src, ctxt->dst, ctxt->eflags); break; case 2: /* rcl */ - emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcB("rcl", ctxt->src, ctxt->dst, ctxt->eflags); break; case 3: /* rcr */ - emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcB("rcr", ctxt->src, ctxt->dst, ctxt->eflags); break; case 4: /* sal/shl */ case 6: /* sal/shl */ - emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcB("sal", ctxt->src, ctxt->dst, ctxt->eflags); break; case 5: /* shr */ - emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcB("shr", ctxt->src, ctxt->dst, ctxt->eflags); break; case 7: /* sar */ - emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcB("sar", ctxt->src, ctxt->dst, ctxt->eflags); break; } return X86EMUL_CONTINUE; @@ -1745,33 +1701,32 @@ static int em_grp2(struct x86_emulate_ctxt *ctxt) static int em_grp3(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - unsigned long *rax = &c->regs[VCPU_REGS_RAX]; - unsigned long *rdx = &c->regs[VCPU_REGS_RDX]; + unsigned long *rax = &ctxt->regs[VCPU_REGS_RAX]; + unsigned long *rdx = &ctxt->regs[VCPU_REGS_RDX]; u8 de = 0; - switch (c->modrm_reg) { + switch (ctxt->modrm_reg) { case 0 ... 
1: /* test */ - emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags); break; case 2: /* not */ - c->dst.val = ~c->dst.val; + ctxt->dst.val = ~ctxt->dst.val; break; case 3: /* neg */ - emulate_1op("neg", c->dst, ctxt->eflags); + emulate_1op("neg", ctxt->dst, ctxt->eflags); break; case 4: /* mul */ - emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags); + emulate_1op_rax_rdx("mul", ctxt->src, *rax, *rdx, ctxt->eflags); break; case 5: /* imul */ - emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags); + emulate_1op_rax_rdx("imul", ctxt->src, *rax, *rdx, ctxt->eflags); break; case 6: /* div */ - emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx, + emulate_1op_rax_rdx_ex("div", ctxt->src, *rax, *rdx, ctxt->eflags, de); break; case 7: /* idiv */ - emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx, + emulate_1op_rax_rdx_ex("idiv", ctxt->src, *rax, *rdx, ctxt->eflags, de); break; default: @@ -1784,26 +1739,25 @@ static int em_grp3(struct x86_emulate_ctxt *ctxt) static int em_grp45(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; int rc = X86EMUL_CONTINUE; - switch (c->modrm_reg) { + switch (ctxt->modrm_reg) { case 0: /* inc */ - emulate_1op("inc", c->dst, ctxt->eflags); + emulate_1op("inc", ctxt->dst, ctxt->eflags); break; case 1: /* dec */ - emulate_1op("dec", c->dst, ctxt->eflags); + emulate_1op("dec", ctxt->dst, ctxt->eflags); break; case 2: /* call near abs */ { long int old_eip; - old_eip = c->eip; - c->eip = c->src.val; - c->src.val = old_eip; + old_eip = ctxt->_eip; + ctxt->_eip = ctxt->src.val; + ctxt->src.val = old_eip; rc = em_push(ctxt); break; } case 4: /* jmp abs */ - c->eip = c->src.val; + ctxt->_eip = ctxt->src.val; break; case 5: /* jmp far */ rc = em_jmp_far(ctxt); @@ -1817,68 +1771,70 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt) static int em_grp9(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - u64 old = c->dst.orig_val64; + u64 old = ctxt->dst.orig_val64; - if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || - ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) { - c->regs[VCPU_REGS_RAX] = (u32) (old >> 0); - c->regs[VCPU_REGS_RDX] = (u32) (old >> 32); + if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) || + ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) { + ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0); + ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32); ctxt->eflags &= ~EFLG_ZF; } else { - c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) | - (u32) c->regs[VCPU_REGS_RBX]; + ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) | + (u32) ctxt->regs[VCPU_REGS_RBX]; ctxt->eflags |= EFLG_ZF; } return X86EMUL_CONTINUE; } -static int emulate_ret_far(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops) +static int em_ret(struct x86_emulate_ctxt *ctxt) +{ + ctxt->dst.type = OP_REG; + ctxt->dst.addr.reg = &ctxt->_eip; + ctxt->dst.bytes = ctxt->op_bytes; + return em_pop(ctxt); +} + +static int em_ret_far(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; int rc; unsigned long cs; - rc = emulate_pop(ctxt, &c->eip, c->op_bytes); + rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; - if (c->op_bytes == 4) - c->eip = (u32)c->eip; - rc = emulate_pop(ctxt, &cs, c->op_bytes); + if (ctxt->op_bytes == 4) + ctxt->_eip = (u32)ctxt->_eip; + rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; - rc = load_segment_descriptor(ctxt, 
ops, (u16)cs, VCPU_SREG_CS); + rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); return rc; } -static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, int seg) +static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg) { - struct decode_cache *c = &ctxt->decode; unsigned short sel; int rc; - memcpy(&sel, c->src.valptr + c->op_bytes, 2); + memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); - rc = load_segment_descriptor(ctxt, ops, sel, seg); + rc = load_segment_descriptor(ctxt, sel, seg); if (rc != X86EMUL_CONTINUE) return rc; - c->dst.val = c->src.val; + ctxt->dst.val = ctxt->src.val; return rc; } -static inline void +static void setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, struct desc_struct *cs, - struct desc_struct *ss) + struct desc_struct *cs, struct desc_struct *ss) { u16 selector; memset(cs, 0, sizeof(struct desc_struct)); - ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS); + ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS); memset(ss, 0, sizeof(struct desc_struct)); cs->l = 0; /* will be adjusted later */ @@ -1901,10 +1857,9 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, ss->p = 1; } -static int -emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) +static int em_syscall(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; + struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; @@ -1916,7 +1871,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_EFER, &efer); - setup_syscalls_segments(ctxt, ops, &cs, &ss); + setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_STAR, &msr_data); msr_data >>= 32; @@ -1930,15 +1885,15 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); - c->regs[VCPU_REGS_RCX] = c->eip; + ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip; if (efer & EFER_LMA) { #ifdef CONFIG_X86_64 - c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF; + ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF; ops->get_msr(ctxt, ctxt->mode == X86EMUL_MODE_PROT64 ? 
MSR_LSTAR : MSR_CSTAR, &msr_data); - c->eip = msr_data; + ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); ctxt->eflags &= ~(msr_data | EFLG_RF); @@ -1946,7 +1901,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) } else { /* legacy mode */ ops->get_msr(ctxt, MSR_STAR, &msr_data); - c->eip = (u32)msr_data; + ctxt->_eip = (u32)msr_data; ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); } @@ -1954,16 +1909,15 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) return X86EMUL_CONTINUE; } -static int -emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) +static int em_sysenter(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; + struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; - ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); + ops->get_msr(ctxt, MSR_EFER, &efer); /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); @@ -1974,7 +1928,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) if (ctxt->mode == X86EMUL_MODE_PROT64) return emulate_ud(ctxt); - setup_syscalls_segments(ctxt, ops, &cs, &ss); + setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (ctxt->mode) { @@ -2002,31 +1956,30 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); - c->eip = msr_data; + ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); - c->regs[VCPU_REGS_RSP] = msr_data; + ctxt->regs[VCPU_REGS_RSP] = msr_data; return X86EMUL_CONTINUE; } -static int -emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) +static int em_sysexit(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; + struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; int usermode; - u16 cs_sel, ss_sel; + u16 cs_sel = 0, ss_sel = 0; /* inject #GP if in real mode or Virtual 8086 mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_gp(ctxt, 0); - setup_syscalls_segments(ctxt, ops, &cs, &ss); + setup_syscalls_segments(ctxt, &cs, &ss); - if ((c->rex_prefix & 0x8) != 0x0) + if ((ctxt->rex_prefix & 0x8) != 0x0) usermode = X86EMUL_MODE_PROT64; else usermode = X86EMUL_MODE_PROT32; @@ -2056,14 +2009,13 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); - c->eip = c->regs[VCPU_REGS_RDX]; - c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX]; + ctxt->_eip = ctxt->regs[VCPU_REGS_RDX]; + ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX]; return X86EMUL_CONTINUE; } -static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops) +static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) { int iopl; if (ctxt->mode == X86EMUL_MODE_REAL) @@ -2071,13 +2023,13 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt, if (ctxt->mode == X86EMUL_MODE_VM86) return true; iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; - return ops->cpl(ctxt) > iopl; + return ctxt->ops->cpl(ctxt) > iopl; } static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, u16 port, u16 len) { + struct 
x86_emulate_ops *ops = ctxt->ops; struct desc_struct tr_seg; u32 base3; int r; @@ -2108,14 +2060,13 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, } static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, u16 port, u16 len) { if (ctxt->perm_ok) return true; - if (emulator_bad_iopl(ctxt, ops)) - if (!emulator_io_port_access_allowed(ctxt, ops, port, len)) + if (emulator_bad_iopl(ctxt)) + if (!emulator_io_port_access_allowed(ctxt, port, len)) return false; ctxt->perm_ok = true; @@ -2124,21 +2075,18 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, } static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, struct tss_segment_16 *tss) { - struct decode_cache *c = &ctxt->decode; - - tss->ip = c->eip; + tss->ip = ctxt->_eip; tss->flag = ctxt->eflags; - tss->ax = c->regs[VCPU_REGS_RAX]; - tss->cx = c->regs[VCPU_REGS_RCX]; - tss->dx = c->regs[VCPU_REGS_RDX]; - tss->bx = c->regs[VCPU_REGS_RBX]; - tss->sp = c->regs[VCPU_REGS_RSP]; - tss->bp = c->regs[VCPU_REGS_RBP]; - tss->si = c->regs[VCPU_REGS_RSI]; - tss->di = c->regs[VCPU_REGS_RDI]; + tss->ax = ctxt->regs[VCPU_REGS_RAX]; + tss->cx = ctxt->regs[VCPU_REGS_RCX]; + tss->dx = ctxt->regs[VCPU_REGS_RDX]; + tss->bx = ctxt->regs[VCPU_REGS_RBX]; + tss->sp = ctxt->regs[VCPU_REGS_RSP]; + tss->bp = ctxt->regs[VCPU_REGS_RBP]; + tss->si = ctxt->regs[VCPU_REGS_RSI]; + tss->di = ctxt->regs[VCPU_REGS_RDI]; tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); @@ -2148,22 +2096,20 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, } static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, struct tss_segment_16 *tss) { - struct decode_cache *c = &ctxt->decode; int ret; - c->eip = tss->ip; + ctxt->_eip = tss->ip; ctxt->eflags = tss->flag | 2; - c->regs[VCPU_REGS_RAX] = tss->ax; - c->regs[VCPU_REGS_RCX] = tss->cx; - c->regs[VCPU_REGS_RDX] = tss->dx; - c->regs[VCPU_REGS_RBX] = tss->bx; - c->regs[VCPU_REGS_RSP] = tss->sp; - c->regs[VCPU_REGS_RBP] = tss->bp; - c->regs[VCPU_REGS_RSI] = tss->si; - c->regs[VCPU_REGS_RDI] = tss->di; + ctxt->regs[VCPU_REGS_RAX] = tss->ax; + ctxt->regs[VCPU_REGS_RCX] = tss->cx; + ctxt->regs[VCPU_REGS_RDX] = tss->dx; + ctxt->regs[VCPU_REGS_RBX] = tss->bx; + ctxt->regs[VCPU_REGS_RSP] = tss->sp; + ctxt->regs[VCPU_REGS_RBP] = tss->bp; + ctxt->regs[VCPU_REGS_RSI] = tss->si; + ctxt->regs[VCPU_REGS_RDI] = tss->di; /* * SDM says that segment selectors are loaded before segment @@ -2179,19 +2125,19 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, * Now load segment descriptors. 
If fault happenes at this stage * it is handled in a context of new task */ - ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR); + ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES); + ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS); + ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS); + ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS); + ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS); if (ret != X86EMUL_CONTINUE) return ret; @@ -2199,10 +2145,10 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, } static int task_switch_16(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { + struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_16 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); @@ -2213,7 +2159,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, /* FIXME: need to provide precise fault address */ return ret; - save_state_to_tss16(ctxt, ops, &tss_seg); + save_state_to_tss16(ctxt, &tss_seg); ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); @@ -2239,26 +2185,23 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, return ret; } - return load_state_from_tss16(ctxt, ops, &tss_seg); + return load_state_from_tss16(ctxt, &tss_seg); } static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, struct tss_segment_32 *tss) { - struct decode_cache *c = &ctxt->decode; - - tss->cr3 = ops->get_cr(ctxt, 3); - tss->eip = c->eip; + tss->cr3 = ctxt->ops->get_cr(ctxt, 3); + tss->eip = ctxt->_eip; tss->eflags = ctxt->eflags; - tss->eax = c->regs[VCPU_REGS_RAX]; - tss->ecx = c->regs[VCPU_REGS_RCX]; - tss->edx = c->regs[VCPU_REGS_RDX]; - tss->ebx = c->regs[VCPU_REGS_RBX]; - tss->esp = c->regs[VCPU_REGS_RSP]; - tss->ebp = c->regs[VCPU_REGS_RBP]; - tss->esi = c->regs[VCPU_REGS_RSI]; - tss->edi = c->regs[VCPU_REGS_RDI]; + tss->eax = ctxt->regs[VCPU_REGS_RAX]; + tss->ecx = ctxt->regs[VCPU_REGS_RCX]; + tss->edx = ctxt->regs[VCPU_REGS_RDX]; + tss->ebx = ctxt->regs[VCPU_REGS_RBX]; + tss->esp = ctxt->regs[VCPU_REGS_RSP]; + tss->ebp = ctxt->regs[VCPU_REGS_RBP]; + tss->esi = ctxt->regs[VCPU_REGS_RSI]; + tss->edi = ctxt->regs[VCPU_REGS_RDI]; tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); @@ -2270,24 +2213,22 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, } static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, struct tss_segment_32 *tss) { - struct decode_cache *c = &ctxt->decode; int ret; - if (ops->set_cr(ctxt, 3, tss->cr3)) + if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) return emulate_gp(ctxt, 0); - c->eip = tss->eip; + ctxt->_eip = tss->eip; ctxt->eflags = tss->eflags | 2; - c->regs[VCPU_REGS_RAX] = tss->eax; - c->regs[VCPU_REGS_RCX] = tss->ecx; - c->regs[VCPU_REGS_RDX] = tss->edx; - c->regs[VCPU_REGS_RBX] = tss->ebx; - c->regs[VCPU_REGS_RSP] = tss->esp; - c->regs[VCPU_REGS_RBP] = tss->ebp; - 
c->regs[VCPU_REGS_RSI] = tss->esi; - c->regs[VCPU_REGS_RDI] = tss->edi; + ctxt->regs[VCPU_REGS_RAX] = tss->eax; + ctxt->regs[VCPU_REGS_RCX] = tss->ecx; + ctxt->regs[VCPU_REGS_RDX] = tss->edx; + ctxt->regs[VCPU_REGS_RBX] = tss->ebx; + ctxt->regs[VCPU_REGS_RSP] = tss->esp; + ctxt->regs[VCPU_REGS_RBP] = tss->ebp; + ctxt->regs[VCPU_REGS_RSI] = tss->esi; + ctxt->regs[VCPU_REGS_RDI] = tss->edi; /* * SDM says that segment selectors are loaded before segment @@ -2305,25 +2246,25 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, * Now load segment descriptors. If fault happenes at this stage * it is handled in a context of new task */ - ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR); + ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES); + ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS); + ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS); + ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS); + ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS); + ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS); + ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS); if (ret != X86EMUL_CONTINUE) return ret; @@ -2331,10 +2272,10 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, } static int task_switch_32(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { + struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_32 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); @@ -2345,7 +2286,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, /* FIXME: need to provide precise fault address */ return ret; - save_state_to_tss32(ctxt, ops, &tss_seg); + save_state_to_tss32(ctxt, &tss_seg); ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); @@ -2371,14 +2312,14 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, return ret; } - return load_state_from_tss32(ctxt, ops, &tss_seg); + return load_state_from_tss32(ctxt, &tss_seg); } static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops, u16 tss_selector, int reason, bool has_error_code, u32 error_code) { + struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct curr_tss_desc, next_tss_desc; int ret; u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); @@ -2388,10 +2329,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, /* FIXME: old_tss_base == ~0 ? 
*/ - ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc); + ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc); if (ret != X86EMUL_CONTINUE) return ret; - ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc); + ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); if (ret != X86EMUL_CONTINUE) return ret; @@ -2413,8 +2354,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ - write_segment_descriptor(ctxt, ops, old_tss_sel, - &curr_tss_desc); + write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); } if (reason == TASK_SWITCH_IRET) @@ -2426,10 +2366,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, old_tss_sel = 0xffff; if (next_tss_desc.type & 8) - ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel, + ret = task_switch_32(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); else - ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel, + ret = task_switch_16(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); if (ret != X86EMUL_CONTINUE) return ret; @@ -2439,19 +2379,16 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, if (reason != TASK_SWITCH_IRET) { next_tss_desc.type |= (1 << 1); /* set busy flag */ - write_segment_descriptor(ctxt, ops, tss_selector, - &next_tss_desc); + write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); } ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); if (has_error_code) { - struct decode_cache *c = &ctxt->decode; - - c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; - c->lock_prefix = 0; - c->src.val = (unsigned long) error_code; + ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; + ctxt->lock_prefix = 0; + ctxt->src.val = (unsigned long) error_code; ret = em_push(ctxt); } @@ -2462,18 +2399,16 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int reason, bool has_error_code, u32 error_code) { - struct x86_emulate_ops *ops = ctxt->ops; - struct decode_cache *c = &ctxt->decode; int rc; - c->eip = ctxt->eip; - c->dst.type = OP_NONE; + ctxt->_eip = ctxt->eip; + ctxt->dst.type = OP_NONE; - rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason, + rc = emulator_do_task_switch(ctxt, tss_selector, reason, has_error_code, error_code); if (rc == X86EMUL_CONTINUE) - ctxt->eip = c->eip; + ctxt->eip = ctxt->_eip; return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; } @@ -2481,22 +2416,20 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg, int reg, struct operand *op) { - struct decode_cache *c = &ctxt->decode; int df = (ctxt->eflags & EFLG_DF) ? 
-1 : 1; - register_address_increment(c, &c->regs[reg], df * op->bytes); - op->addr.mem.ea = register_address(c, c->regs[reg]); + register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes); + op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]); op->addr.mem.seg = seg; } static int em_das(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; u8 al, old_al; bool af, cf, old_cf; cf = ctxt->eflags & X86_EFLAGS_CF; - al = c->dst.val; + al = ctxt->dst.val; old_al = al; old_cf = cf; @@ -2514,12 +2447,12 @@ static int em_das(struct x86_emulate_ctxt *ctxt) cf = true; } - c->dst.val = al; + ctxt->dst.val = al; /* Set PF, ZF, SF */ - c->src.type = OP_IMM; - c->src.val = 0; - c->src.bytes = 1; - emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags); + ctxt->src.type = OP_IMM; + ctxt->src.val = 0; + ctxt->src.bytes = 1; + emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags); ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); if (cf) ctxt->eflags |= X86_EFLAGS_CF; @@ -2530,175 +2463,189 @@ static int em_das(struct x86_emulate_ctxt *ctxt) static int em_call_far(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; u16 sel, old_cs; ulong old_eip; int rc; old_cs = get_segment_selector(ctxt, VCPU_SREG_CS); - old_eip = c->eip; + old_eip = ctxt->_eip; - memcpy(&sel, c->src.valptr + c->op_bytes, 2); - if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS)) + memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); + if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS)) return X86EMUL_CONTINUE; - c->eip = 0; - memcpy(&c->eip, c->src.valptr, c->op_bytes); + ctxt->_eip = 0; + memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); - c->src.val = old_cs; + ctxt->src.val = old_cs; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; - c->src.val = old_eip; + ctxt->src.val = old_eip; return em_push(ctxt); } static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; int rc; - c->dst.type = OP_REG; - c->dst.addr.reg = &c->eip; - c->dst.bytes = c->op_bytes; - rc = emulate_pop(ctxt, &c->dst.val, c->op_bytes); + ctxt->dst.type = OP_REG; + ctxt->dst.addr.reg = &ctxt->_eip; + ctxt->dst.bytes = ctxt->op_bytes; + rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; - register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val); + register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val); return X86EMUL_CONTINUE; } static int em_add(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags); return X86EMUL_CONTINUE; } static int em_or(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags); return X86EMUL_CONTINUE; } static int em_adc(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV("adc", ctxt->src, ctxt->dst, ctxt->eflags); return X86EMUL_CONTINUE; } static int em_sbb(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV("sbb", ctxt->src, ctxt->dst, ctxt->eflags); return X86EMUL_CONTINUE; } static int em_and(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; 
- - emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV("and", ctxt->src, ctxt->dst, ctxt->eflags); return X86EMUL_CONTINUE; } static int em_sub(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV("sub", ctxt->src, ctxt->dst, ctxt->eflags); return X86EMUL_CONTINUE; } static int em_xor(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV("xor", ctxt->src, ctxt->dst, ctxt->eflags); return X86EMUL_CONTINUE; } static int em_cmp(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags); /* Disable writeback. */ - c->dst.type = OP_NONE; + ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } -static int em_imul(struct x86_emulate_ctxt *ctxt) +static int em_test(struct x86_emulate_ctxt *ctxt) +{ + emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags); + return X86EMUL_CONTINUE; +} + +static int em_xchg(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; + /* Write back the register source. */ + ctxt->src.val = ctxt->dst.val; + write_register_operand(&ctxt->src); - emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags); + /* Write back the memory destination with implicit LOCK prefix. */ + ctxt->dst.val = ctxt->src.orig_val; + ctxt->lock_prefix = 1; return X86EMUL_CONTINUE; } -static int em_imul_3op(struct x86_emulate_ctxt *ctxt) +static int em_imul(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; + emulate_2op_SrcV_nobyte("imul", ctxt->src, ctxt->dst, ctxt->eflags); + return X86EMUL_CONTINUE; +} - c->dst.val = c->src2.val; +static int em_imul_3op(struct x86_emulate_ctxt *ctxt) +{ + ctxt->dst.val = ctxt->src2.val; return em_imul(ctxt); } static int em_cwd(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - c->dst.type = OP_REG; - c->dst.bytes = c->src.bytes; - c->dst.addr.reg = &c->regs[VCPU_REGS_RDX]; - c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1); + ctxt->dst.type = OP_REG; + ctxt->dst.bytes = ctxt->src.bytes; + ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX]; + ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); return X86EMUL_CONTINUE; } static int em_rdtsc(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; u64 tsc = 0; ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); - c->regs[VCPU_REGS_RAX] = (u32)tsc; - c->regs[VCPU_REGS_RDX] = tsc >> 32; + ctxt->regs[VCPU_REGS_RAX] = (u32)tsc; + ctxt->regs[VCPU_REGS_RDX] = tsc >> 32; return X86EMUL_CONTINUE; } static int em_mov(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - c->dst.val = c->src.val; + ctxt->dst.val = ctxt->src.val; return X86EMUL_CONTINUE; } +static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) +{ + if (ctxt->modrm_reg > VCPU_SREG_GS) + return emulate_ud(ctxt); + + ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg); + return X86EMUL_CONTINUE; +} + +static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) +{ + u16 sel = ctxt->src.val; + + if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) + return emulate_ud(ctxt); + + if (ctxt->modrm_reg == VCPU_SREG_SS) + ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; + + /* Disable writeback. 
*/ + ctxt->dst.type = OP_NONE; + return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); +} + static int em_movdqu(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes); + memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes); return X86EMUL_CONTINUE; } static int em_invlpg(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; int rc; ulong linear; - rc = linearize(ctxt, c->src.addr.mem, 1, false, &linear); + rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); if (rc == X86EMUL_CONTINUE) ctxt->ops->invlpg(ctxt, linear); /* Disable writeback. */ - c->dst.type = OP_NONE; + ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } @@ -2714,10 +2661,9 @@ static int em_clts(struct x86_emulate_ctxt *ctxt) static int em_vmcall(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; int rc; - if (c->modrm_mod != 3 || c->modrm_rm != 1) + if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1) return X86EMUL_UNHANDLEABLE; rc = ctxt->ops->fix_hypercall(ctxt); @@ -2725,73 +2671,104 @@ static int em_vmcall(struct x86_emulate_ctxt *ctxt) return rc; /* Let the processor re-execute the fixed hypercall */ - c->eip = ctxt->eip; + ctxt->_eip = ctxt->eip; /* Disable writeback. */ - c->dst.type = OP_NONE; + ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_lgdt(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; struct desc_ptr desc_ptr; int rc; - rc = read_descriptor(ctxt, c->src.addr.mem, + rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, - c->op_bytes); + ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; ctxt->ops->set_gdt(ctxt, &desc_ptr); /* Disable writeback. */ - c->dst.type = OP_NONE; + ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_vmmcall(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; int rc; rc = ctxt->ops->fix_hypercall(ctxt); /* Disable writeback. */ - c->dst.type = OP_NONE; + ctxt->dst.type = OP_NONE; return rc; } static int em_lidt(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; struct desc_ptr desc_ptr; int rc; - rc = read_descriptor(ctxt, c->src.addr.mem, + rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, - c->op_bytes); + ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; ctxt->ops->set_idt(ctxt, &desc_ptr); /* Disable writeback. 
*/ - c->dst.type = OP_NONE; + ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_smsw(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - c->dst.bytes = 2; - c->dst.val = ctxt->ops->get_cr(ctxt, 0); + ctxt->dst.bytes = 2; + ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); return X86EMUL_CONTINUE; } static int em_lmsw(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) - | (c->src.val & 0x0f)); - c->dst.type = OP_NONE; + | (ctxt->src.val & 0x0f)); + ctxt->dst.type = OP_NONE; + return X86EMUL_CONTINUE; +} + +static int em_loop(struct x86_emulate_ctxt *ctxt) +{ + register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1); + if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) && + (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) + jmp_rel(ctxt, ctxt->src.val); + + return X86EMUL_CONTINUE; +} + +static int em_jcxz(struct x86_emulate_ctxt *ctxt) +{ + if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) + jmp_rel(ctxt, ctxt->src.val); + + return X86EMUL_CONTINUE; +} + +static int em_cli(struct x86_emulate_ctxt *ctxt) +{ + if (emulator_bad_iopl(ctxt)) + return emulate_gp(ctxt, 0); + + ctxt->eflags &= ~X86_EFLAGS_IF; + return X86EMUL_CONTINUE; +} + +static int em_sti(struct x86_emulate_ctxt *ctxt) +{ + if (emulator_bad_iopl(ctxt)) + return emulate_gp(ctxt, 0); + + ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; + ctxt->eflags |= X86_EFLAGS_IF; return X86EMUL_CONTINUE; } @@ -2809,9 +2786,7 @@ static bool valid_cr(int nr) static int check_cr_read(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - if (!valid_cr(c->modrm_reg)) + if (!valid_cr(ctxt->modrm_reg)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; @@ -2819,9 +2794,8 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt) static int check_cr_write(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - u64 new_val = c->src.val64; - int cr = c->modrm_reg; + u64 new_val = ctxt->src.val64; + int cr = ctxt->modrm_reg; u64 efer = 0; static u64 cr_reserved_bits[] = { @@ -2898,8 +2872,7 @@ static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) static int check_dr_read(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - int dr = c->modrm_reg; + int dr = ctxt->modrm_reg; u64 cr4; if (dr > 7) @@ -2917,9 +2890,8 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt) static int check_dr_write(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - u64 new_val = c->src.val64; - int dr = c->modrm_reg; + u64 new_val = ctxt->src.val64; + int dr = ctxt->modrm_reg; if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) return emulate_gp(ctxt, 0); @@ -2941,7 +2913,7 @@ static int check_svme(struct x86_emulate_ctxt *ctxt) static int check_svme_pa(struct x86_emulate_ctxt *ctxt) { - u64 rax = ctxt->decode.regs[VCPU_REGS_RAX]; + u64 rax = ctxt->regs[VCPU_REGS_RAX]; /* Valid physical address? 
*/ if (rax & 0xffff000000000000ULL) @@ -2963,7 +2935,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt) static int check_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); - u64 rcx = ctxt->decode.regs[VCPU_REGS_RCX]; + u64 rcx = ctxt->regs[VCPU_REGS_RCX]; if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || (rcx > 3)) @@ -2974,10 +2946,8 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt) static int check_perm_in(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - c->dst.bytes = min(c->dst.bytes, 4u); - if (!emulator_io_permited(ctxt, ctxt->ops, c->src.val, c->dst.bytes)) + ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); + if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; @@ -2985,10 +2955,8 @@ static int check_perm_in(struct x86_emulate_ctxt *ctxt) static int check_perm_out(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - - c->src.bytes = min(c->src.bytes, 4u); - if (!emulator_io_permited(ctxt, ctxt->ops, c->dst.val, c->src.bytes)) + ctxt->src.bytes = min(ctxt->src.bytes, 4u); + if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; @@ -3165,12 +3133,15 @@ static struct opcode opcode_table[256] = { G(DstMem | SrcImm | ModRM | Group, group1), G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1), G(DstMem | SrcImmByte | ModRM | Group, group1), - D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock), + I2bv(DstMem | SrcReg | ModRM, em_test), + I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg), /* 0x88 - 0x8F */ I2bv(DstMem | SrcReg | ModRM | Mov, em_mov), I2bv(DstReg | SrcMem | ModRM | Mov, em_mov), - D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg), - D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A), + I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg), + D(ModRM | SrcMem | NoAccess | DstReg), + I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm), + G(0, group1A), /* 0x90 - 0x97 */ DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), /* 0x98 - 0x9F */ @@ -3184,7 +3155,7 @@ static struct opcode opcode_table[256] = { I2bv(SrcSI | DstDI | Mov | String, em_mov), I2bv(SrcSI | DstDI | String, em_cmp), /* 0xA8 - 0xAF */ - D2bv(DstAcc | SrcImm), + I2bv(DstAcc | SrcImm, em_test), I2bv(SrcAcc | DstDI | Mov | String, em_mov), I2bv(SrcSI | DstAcc | Mov | String, em_mov), I2bv(SrcAcc | DstDI | String, em_cmp), @@ -3195,25 +3166,26 @@ static struct opcode opcode_table[256] = { /* 0xC0 - 0xC7 */ D2bv(DstMem | SrcImmByte | ModRM), I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm), - D(ImplicitOps | Stack), + I(ImplicitOps | Stack, em_ret), D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64), G(ByteOp, group11), G(0, group11), /* 0xC8 - 0xCF */ - N, N, N, D(ImplicitOps | Stack), + N, N, N, I(ImplicitOps | Stack, em_ret_far), D(ImplicitOps), DI(SrcImmByte, intn), - D(ImplicitOps | No64), DI(ImplicitOps, iret), + D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), /* 0xD0 - 0xD7 */ D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM), N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ - X4(D(SrcImmByte)), + X3(I(SrcImmByte, em_loop)), + I(SrcImmByte, em_jcxz), D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in), D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out), /* 0xE8 - 0xEF */ D(SrcImm | Stack), D(SrcImm | ImplicitOps), - D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps), + I(SrcImmFAddr | No64, 
em_jmp_far), D(SrcImmByte | ImplicitOps), D2bvIP(SrcDX | DstAcc, in, check_perm_in), D2bvIP(SrcAcc | DstDX, out, check_perm_out), /* 0xF0 - 0xF7 */ @@ -3221,14 +3193,16 @@ static struct opcode opcode_table[256] = { DI(ImplicitOps | Priv, hlt), D(ImplicitOps), G(ByteOp, group3), G(0, group3), /* 0xF8 - 0xFF */ - D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), + D(ImplicitOps), D(ImplicitOps), + I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), }; static struct opcode twobyte_table[256] = { /* 0x00 - 0x0F */ G(0, group6), GD(0, &group7), N, N, - N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N, + N, I(ImplicitOps | VendorSpecific, em_syscall), + II(ImplicitOps | Priv, em_clts, clts), N, DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, N, D(ImplicitOps | ModRM), N, N, /* 0x10 - 0x1F */ @@ -3245,7 +3219,8 @@ static struct opcode twobyte_table[256] = { IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), DI(ImplicitOps | Priv, rdmsr), DIP(ImplicitOps | Priv, rdpmc, check_rdpmc), - D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific), + I(ImplicitOps | VendorSpecific, em_sysenter), + I(ImplicitOps | Priv | VendorSpecific, em_sysexit), N, N, N, N, N, N, N, N, N, N, /* 0x40 - 0x4F */ @@ -3313,11 +3288,11 @@ static struct opcode twobyte_table[256] = { #undef I2bv #undef I6ALU -static unsigned imm_size(struct decode_cache *c) +static unsigned imm_size(struct x86_emulate_ctxt *ctxt) { unsigned size; - size = (c->d & ByteOp) ? 1 : c->op_bytes; + size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; if (size == 8) size = 4; return size; @@ -3326,23 +3301,21 @@ static unsigned imm_size(struct decode_cache *c) static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned size, bool sign_extension) { - struct decode_cache *c = &ctxt->decode; - struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; op->type = OP_IMM; op->bytes = size; - op->addr.mem.ea = c->eip; + op->addr.mem.ea = ctxt->_eip; /* NB. Immediates are sign-extended as necessary. 
*/ switch (op->bytes) { case 1: - op->val = insn_fetch(s8, 1, c->eip); + op->val = insn_fetch(s8, 1, ctxt->_eip); break; case 2: - op->val = insn_fetch(s16, 2, c->eip); + op->val = insn_fetch(s16, 2, ctxt->_eip); break; case 4: - op->val = insn_fetch(s32, 4, c->eip); + op->val = insn_fetch(s32, 4, ctxt->_eip); break; } if (!sign_extension) { @@ -3362,11 +3335,8 @@ done: return rc; } -int -x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) +int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) { - struct x86_emulate_ops *ops = ctxt->ops; - struct decode_cache *c = &ctxt->decode; int rc = X86EMUL_CONTINUE; int mode = ctxt->mode; int def_op_bytes, def_ad_bytes, goffset, simd_prefix; @@ -3374,11 +3344,11 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) struct opcode opcode; struct operand memop = { .type = OP_NONE }, *memopp = NULL; - c->eip = ctxt->eip; - c->fetch.start = c->eip; - c->fetch.end = c->fetch.start + insn_len; + ctxt->_eip = ctxt->eip; + ctxt->fetch.start = ctxt->_eip; + ctxt->fetch.end = ctxt->fetch.start + insn_len; if (insn_len > 0) - memcpy(c->fetch.data, insn, insn_len); + memcpy(ctxt->fetch.data, insn, insn_len); switch (mode) { case X86EMUL_MODE_REAL: @@ -3399,46 +3369,46 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) return -1; } - c->op_bytes = def_op_bytes; - c->ad_bytes = def_ad_bytes; + ctxt->op_bytes = def_op_bytes; + ctxt->ad_bytes = def_ad_bytes; /* Legacy prefixes. */ for (;;) { - switch (c->b = insn_fetch(u8, 1, c->eip)) { + switch (ctxt->b = insn_fetch(u8, 1, ctxt->_eip)) { case 0x66: /* operand-size override */ op_prefix = true; /* switch between 2/4 bytes */ - c->op_bytes = def_op_bytes ^ 6; + ctxt->op_bytes = def_op_bytes ^ 6; break; case 0x67: /* address-size override */ if (mode == X86EMUL_MODE_PROT64) /* switch between 4/8 bytes */ - c->ad_bytes = def_ad_bytes ^ 12; + ctxt->ad_bytes = def_ad_bytes ^ 12; else /* switch between 2/4 bytes */ - c->ad_bytes = def_ad_bytes ^ 6; + ctxt->ad_bytes = def_ad_bytes ^ 6; break; case 0x26: /* ES override */ case 0x2e: /* CS override */ case 0x36: /* SS override */ case 0x3e: /* DS override */ - set_seg_override(c, (c->b >> 3) & 3); + set_seg_override(ctxt, (ctxt->b >> 3) & 3); break; case 0x64: /* FS override */ case 0x65: /* GS override */ - set_seg_override(c, c->b & 7); + set_seg_override(ctxt, ctxt->b & 7); break; case 0x40 ... 0x4f: /* REX */ if (mode != X86EMUL_MODE_PROT64) goto done_prefixes; - c->rex_prefix = c->b; + ctxt->rex_prefix = ctxt->b; continue; case 0xf0: /* LOCK */ - c->lock_prefix = 1; + ctxt->lock_prefix = 1; break; case 0xf2: /* REPNE/REPNZ */ case 0xf3: /* REP/REPE/REPZ */ - c->rep_prefix = c->b; + ctxt->rep_prefix = ctxt->b; break; default: goto done_prefixes; @@ -3446,50 +3416,50 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) /* Any legacy prefix after a REX prefix nullifies its effect. */ - c->rex_prefix = 0; + ctxt->rex_prefix = 0; } done_prefixes: /* REX prefix. */ - if (c->rex_prefix & 8) - c->op_bytes = 8; /* REX.W */ + if (ctxt->rex_prefix & 8) + ctxt->op_bytes = 8; /* REX.W */ /* Opcode byte(s). */ - opcode = opcode_table[c->b]; + opcode = opcode_table[ctxt->b]; /* Two-byte opcode? 
*/ - if (c->b == 0x0f) { - c->twobyte = 1; - c->b = insn_fetch(u8, 1, c->eip); - opcode = twobyte_table[c->b]; + if (ctxt->b == 0x0f) { + ctxt->twobyte = 1; + ctxt->b = insn_fetch(u8, 1, ctxt->_eip); + opcode = twobyte_table[ctxt->b]; } - c->d = opcode.flags; + ctxt->d = opcode.flags; - while (c->d & GroupMask) { - switch (c->d & GroupMask) { + while (ctxt->d & GroupMask) { + switch (ctxt->d & GroupMask) { case Group: - c->modrm = insn_fetch(u8, 1, c->eip); - --c->eip; - goffset = (c->modrm >> 3) & 7; + ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip); + --ctxt->_eip; + goffset = (ctxt->modrm >> 3) & 7; opcode = opcode.u.group[goffset]; break; case GroupDual: - c->modrm = insn_fetch(u8, 1, c->eip); - --c->eip; - goffset = (c->modrm >> 3) & 7; - if ((c->modrm >> 6) == 3) + ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip); + --ctxt->_eip; + goffset = (ctxt->modrm >> 3) & 7; + if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.gdual->mod3[goffset]; else opcode = opcode.u.gdual->mod012[goffset]; break; case RMExt: - goffset = c->modrm & 7; + goffset = ctxt->modrm & 7; opcode = opcode.u.group[goffset]; break; case Prefix: - if (c->rep_prefix && op_prefix) + if (ctxt->rep_prefix && op_prefix) return X86EMUL_UNHANDLEABLE; - simd_prefix = op_prefix ? 0x66 : c->rep_prefix; + simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; switch (simd_prefix) { case 0x00: opcode = opcode.u.gprefix->pfx_no; break; case 0x66: opcode = opcode.u.gprefix->pfx_66; break; @@ -3501,61 +3471,61 @@ done_prefixes: return X86EMUL_UNHANDLEABLE; } - c->d &= ~GroupMask; - c->d |= opcode.flags; + ctxt->d &= ~GroupMask; + ctxt->d |= opcode.flags; } - c->execute = opcode.u.execute; - c->check_perm = opcode.check_perm; - c->intercept = opcode.intercept; + ctxt->execute = opcode.u.execute; + ctxt->check_perm = opcode.check_perm; + ctxt->intercept = opcode.intercept; /* Unrecognised? */ - if (c->d == 0 || (c->d & Undefined)) + if (ctxt->d == 0 || (ctxt->d & Undefined)) return -1; - if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn) + if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn) return -1; - if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack)) - c->op_bytes = 8; + if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack)) + ctxt->op_bytes = 8; - if (c->d & Op3264) { + if (ctxt->d & Op3264) { if (mode == X86EMUL_MODE_PROT64) - c->op_bytes = 8; + ctxt->op_bytes = 8; else - c->op_bytes = 4; + ctxt->op_bytes = 4; } - if (c->d & Sse) - c->op_bytes = 16; + if (ctxt->d & Sse) + ctxt->op_bytes = 16; /* ModRM and SIB bytes. */ - if (c->d & ModRM) { - rc = decode_modrm(ctxt, ops, &memop); - if (!c->has_seg_override) - set_seg_override(c, c->modrm_seg); - } else if (c->d & MemAbs) - rc = decode_abs(ctxt, ops, &memop); + if (ctxt->d & ModRM) { + rc = decode_modrm(ctxt, &memop); + if (!ctxt->has_seg_override) + set_seg_override(ctxt, ctxt->modrm_seg); + } else if (ctxt->d & MemAbs) + rc = decode_abs(ctxt, &memop); if (rc != X86EMUL_CONTINUE) goto done; - if (!c->has_seg_override) - set_seg_override(c, VCPU_SREG_DS); + if (!ctxt->has_seg_override) + set_seg_override(ctxt, VCPU_SREG_DS); - memop.addr.mem.seg = seg_override(ctxt, c); + memop.addr.mem.seg = seg_override(ctxt); - if (memop.type == OP_MEM && c->ad_bytes != 8) + if (memop.type == OP_MEM && ctxt->ad_bytes != 8) memop.addr.mem.ea = (u32)memop.addr.mem.ea; /* * Decode and fetch the source operand: register, memory * or immediate. 
*/ - switch (c->d & SrcMask) { + switch (ctxt->d & SrcMask) { case SrcNone: break; case SrcReg: - decode_register_operand(ctxt, &c->src, c, 0); + decode_register_operand(ctxt, &ctxt->src, 0); break; case SrcMem16: memop.bytes = 2; @@ -3564,60 +3534,60 @@ done_prefixes: memop.bytes = 4; goto srcmem_common; case SrcMem: - memop.bytes = (c->d & ByteOp) ? 1 : - c->op_bytes; + memop.bytes = (ctxt->d & ByteOp) ? 1 : + ctxt->op_bytes; srcmem_common: - c->src = memop; - memopp = &c->src; + ctxt->src = memop; + memopp = &ctxt->src; break; case SrcImmU16: - rc = decode_imm(ctxt, &c->src, 2, false); + rc = decode_imm(ctxt, &ctxt->src, 2, false); break; case SrcImm: - rc = decode_imm(ctxt, &c->src, imm_size(c), true); + rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), true); break; case SrcImmU: - rc = decode_imm(ctxt, &c->src, imm_size(c), false); + rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), false); break; case SrcImmByte: - rc = decode_imm(ctxt, &c->src, 1, true); + rc = decode_imm(ctxt, &ctxt->src, 1, true); break; case SrcImmUByte: - rc = decode_imm(ctxt, &c->src, 1, false); + rc = decode_imm(ctxt, &ctxt->src, 1, false); break; case SrcAcc: - c->src.type = OP_REG; - c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; - c->src.addr.reg = &c->regs[VCPU_REGS_RAX]; - fetch_register_operand(&c->src); + ctxt->src.type = OP_REG; + ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; + ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RAX]; + fetch_register_operand(&ctxt->src); break; case SrcOne: - c->src.bytes = 1; - c->src.val = 1; + ctxt->src.bytes = 1; + ctxt->src.val = 1; break; case SrcSI: - c->src.type = OP_MEM; - c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; - c->src.addr.mem.ea = - register_address(c, c->regs[VCPU_REGS_RSI]); - c->src.addr.mem.seg = seg_override(ctxt, c); - c->src.val = 0; + ctxt->src.type = OP_MEM; + ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; + ctxt->src.addr.mem.ea = + register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]); + ctxt->src.addr.mem.seg = seg_override(ctxt); + ctxt->src.val = 0; break; case SrcImmFAddr: - c->src.type = OP_IMM; - c->src.addr.mem.ea = c->eip; - c->src.bytes = c->op_bytes + 2; - insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip); + ctxt->src.type = OP_IMM; + ctxt->src.addr.mem.ea = ctxt->_eip; + ctxt->src.bytes = ctxt->op_bytes + 2; + insn_fetch_arr(ctxt->src.valptr, ctxt->src.bytes, ctxt->_eip); break; case SrcMemFAddr: - memop.bytes = c->op_bytes + 2; + memop.bytes = ctxt->op_bytes + 2; goto srcmem_common; break; case SrcDX: - c->src.type = OP_REG; - c->src.bytes = 2; - c->src.addr.reg = &c->regs[VCPU_REGS_RDX]; - fetch_register_operand(&c->src); + ctxt->src.type = OP_REG; + ctxt->src.bytes = 2; + ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RDX]; + fetch_register_operand(&ctxt->src); break; } @@ -3628,22 +3598,22 @@ done_prefixes: * Decode and fetch the second source operand: register, memory * or immediate. 
*/ - switch (c->d & Src2Mask) { + switch (ctxt->d & Src2Mask) { case Src2None: break; case Src2CL: - c->src2.bytes = 1; - c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8; + ctxt->src2.bytes = 1; + ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0x8; break; case Src2ImmByte: - rc = decode_imm(ctxt, &c->src2, 1, true); + rc = decode_imm(ctxt, &ctxt->src2, 1, true); break; case Src2One: - c->src2.bytes = 1; - c->src2.val = 1; + ctxt->src2.bytes = 1; + ctxt->src2.val = 1; break; case Src2Imm: - rc = decode_imm(ctxt, &c->src2, imm_size(c), true); + rc = decode_imm(ctxt, &ctxt->src2, imm_size(ctxt), true); break; } @@ -3651,68 +3621,66 @@ done_prefixes: goto done; /* Decode and fetch the destination operand: register or memory. */ - switch (c->d & DstMask) { + switch (ctxt->d & DstMask) { case DstReg: - decode_register_operand(ctxt, &c->dst, c, - c->twobyte && (c->b == 0xb6 || c->b == 0xb7)); + decode_register_operand(ctxt, &ctxt->dst, + ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7)); break; case DstImmUByte: - c->dst.type = OP_IMM; - c->dst.addr.mem.ea = c->eip; - c->dst.bytes = 1; - c->dst.val = insn_fetch(u8, 1, c->eip); + ctxt->dst.type = OP_IMM; + ctxt->dst.addr.mem.ea = ctxt->_eip; + ctxt->dst.bytes = 1; + ctxt->dst.val = insn_fetch(u8, 1, ctxt->_eip); break; case DstMem: case DstMem64: - c->dst = memop; - memopp = &c->dst; - if ((c->d & DstMask) == DstMem64) - c->dst.bytes = 8; + ctxt->dst = memop; + memopp = &ctxt->dst; + if ((ctxt->d & DstMask) == DstMem64) + ctxt->dst.bytes = 8; else - c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; - if (c->d & BitOp) - fetch_bit_operand(c); - c->dst.orig_val = c->dst.val; + ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; + if (ctxt->d & BitOp) + fetch_bit_operand(ctxt); + ctxt->dst.orig_val = ctxt->dst.val; break; case DstAcc: - c->dst.type = OP_REG; - c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; - c->dst.addr.reg = &c->regs[VCPU_REGS_RAX]; - fetch_register_operand(&c->dst); - c->dst.orig_val = c->dst.val; + ctxt->dst.type = OP_REG; + ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; + ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RAX]; + fetch_register_operand(&ctxt->dst); + ctxt->dst.orig_val = ctxt->dst.val; break; case DstDI: - c->dst.type = OP_MEM; - c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; - c->dst.addr.mem.ea = - register_address(c, c->regs[VCPU_REGS_RDI]); - c->dst.addr.mem.seg = VCPU_SREG_ES; - c->dst.val = 0; + ctxt->dst.type = OP_MEM; + ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; + ctxt->dst.addr.mem.ea = + register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]); + ctxt->dst.addr.mem.seg = VCPU_SREG_ES; + ctxt->dst.val = 0; break; case DstDX: - c->dst.type = OP_REG; - c->dst.bytes = 2; - c->dst.addr.reg = &c->regs[VCPU_REGS_RDX]; - fetch_register_operand(&c->dst); + ctxt->dst.type = OP_REG; + ctxt->dst.bytes = 2; + ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX]; + fetch_register_operand(&ctxt->dst); break; case ImplicitOps: /* Special instructions do their own operand decoding. */ default: - c->dst.type = OP_NONE; /* Disable writeback. */ + ctxt->dst.type = OP_NONE; /* Disable writeback. */ break; } done: - if (memopp && memopp->type == OP_MEM && c->rip_relative) - memopp->addr.mem.ea += c->eip; + if (memopp && memopp->type == OP_MEM && ctxt->rip_relative) + memopp->addr.mem.ea += ctxt->_eip; return (rc == X86EMUL_UNHANDLEABLE) ? 
EMULATION_FAILED : EMULATION_OK; } static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) { - struct decode_cache *c = &ctxt->decode; - /* The second termination condition only applies for REPE * and REPNE. Test if the repeat string operation prefix is * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the @@ -3720,304 +3688,232 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) * - if REPE/REPZ and ZF = 0 then done * - if REPNE/REPNZ and ZF = 1 then done */ - if (((c->b == 0xa6) || (c->b == 0xa7) || - (c->b == 0xae) || (c->b == 0xaf)) - && (((c->rep_prefix == REPE_PREFIX) && + if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || + (ctxt->b == 0xae) || (ctxt->b == 0xaf)) + && (((ctxt->rep_prefix == REPE_PREFIX) && ((ctxt->eflags & EFLG_ZF) == 0)) - || ((c->rep_prefix == REPNE_PREFIX) && + || ((ctxt->rep_prefix == REPNE_PREFIX) && ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)))) return true; return false; } -int -x86_emulate_insn(struct x86_emulate_ctxt *ctxt) +int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) { struct x86_emulate_ops *ops = ctxt->ops; u64 msr_data; - struct decode_cache *c = &ctxt->decode; int rc = X86EMUL_CONTINUE; - int saved_dst_type = c->dst.type; - int irq; /* Used for int 3, int, and into */ + int saved_dst_type = ctxt->dst.type; - ctxt->decode.mem_read.pos = 0; + ctxt->mem_read.pos = 0; - if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) { + if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) { rc = emulate_ud(ctxt); goto done; } /* LOCK prefix is allowed only with some instructions */ - if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) { + if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { rc = emulate_ud(ctxt); goto done; } - if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) { + if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { rc = emulate_ud(ctxt); goto done; } - if ((c->d & Sse) + if ((ctxt->d & Sse) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM) || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { rc = emulate_ud(ctxt); goto done; } - if ((c->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { + if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { rc = emulate_nm(ctxt); goto done; } - if (unlikely(ctxt->guest_mode) && c->intercept) { - rc = emulator_check_intercept(ctxt, c->intercept, + if (unlikely(ctxt->guest_mode) && ctxt->intercept) { + rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } /* Privileged instruction can be executed only in CPL=0 */ - if ((c->d & Priv) && ops->cpl(ctxt)) { + if ((ctxt->d & Priv) && ops->cpl(ctxt)) { rc = emulate_gp(ctxt, 0); goto done; } /* Instruction can only be executed in protected mode */ - if ((c->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) { + if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) { rc = emulate_ud(ctxt); goto done; } /* Do instruction specific permission checks */ - if (c->check_perm) { - rc = c->check_perm(ctxt); + if (ctxt->check_perm) { + rc = ctxt->check_perm(ctxt); if (rc != X86EMUL_CONTINUE) goto done; } - if (unlikely(ctxt->guest_mode) && c->intercept) { - rc = emulator_check_intercept(ctxt, c->intercept, + if (unlikely(ctxt->guest_mode) && ctxt->intercept) { + rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } - if (c->rep_prefix && (c->d & String)) { + if (ctxt->rep_prefix && (ctxt->d & String)) { /* All REP prefixes have the same first termination condition 
*/ - if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) { - ctxt->eip = c->eip; + if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) { + ctxt->eip = ctxt->_eip; goto done; } } - if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) { - rc = segmented_read(ctxt, c->src.addr.mem, - c->src.valptr, c->src.bytes); + if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { + rc = segmented_read(ctxt, ctxt->src.addr.mem, + ctxt->src.valptr, ctxt->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; - c->src.orig_val64 = c->src.val64; + ctxt->src.orig_val64 = ctxt->src.val64; } - if (c->src2.type == OP_MEM) { - rc = segmented_read(ctxt, c->src2.addr.mem, - &c->src2.val, c->src2.bytes); + if (ctxt->src2.type == OP_MEM) { + rc = segmented_read(ctxt, ctxt->src2.addr.mem, + &ctxt->src2.val, ctxt->src2.bytes); if (rc != X86EMUL_CONTINUE) goto done; } - if ((c->d & DstMask) == ImplicitOps) + if ((ctxt->d & DstMask) == ImplicitOps) goto special_insn; - if ((c->dst.type == OP_MEM) && !(c->d & Mov)) { + if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { /* optimisation - avoid slow emulated read if Mov */ - rc = segmented_read(ctxt, c->dst.addr.mem, - &c->dst.val, c->dst.bytes); + rc = segmented_read(ctxt, ctxt->dst.addr.mem, + &ctxt->dst.val, ctxt->dst.bytes); if (rc != X86EMUL_CONTINUE) goto done; } - c->dst.orig_val = c->dst.val; + ctxt->dst.orig_val = ctxt->dst.val; special_insn: - if (unlikely(ctxt->guest_mode) && c->intercept) { - rc = emulator_check_intercept(ctxt, c->intercept, + if (unlikely(ctxt->guest_mode) && ctxt->intercept) { + rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) goto done; } - if (c->execute) { - rc = c->execute(ctxt); + if (ctxt->execute) { + rc = ctxt->execute(ctxt); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } - if (c->twobyte) + if (ctxt->twobyte) goto twobyte_insn; - switch (c->b) { + switch (ctxt->b) { case 0x06: /* push es */ - rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES); + rc = emulate_push_sreg(ctxt, VCPU_SREG_ES); break; case 0x07: /* pop es */ - rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES); + rc = emulate_pop_sreg(ctxt, VCPU_SREG_ES); break; case 0x0e: /* push cs */ - rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS); + rc = emulate_push_sreg(ctxt, VCPU_SREG_CS); break; case 0x16: /* push ss */ - rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS); + rc = emulate_push_sreg(ctxt, VCPU_SREG_SS); break; case 0x17: /* pop ss */ - rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS); + rc = emulate_pop_sreg(ctxt, VCPU_SREG_SS); break; case 0x1e: /* push ds */ - rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS); + rc = emulate_push_sreg(ctxt, VCPU_SREG_DS); break; case 0x1f: /* pop ds */ - rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS); + rc = emulate_pop_sreg(ctxt, VCPU_SREG_DS); break; case 0x40 ... 0x47: /* inc r16/r32 */ - emulate_1op("inc", c->dst, ctxt->eflags); + emulate_1op("inc", ctxt->dst, ctxt->eflags); break; case 0x48 ... 
0x4f: /* dec r16/r32 */ - emulate_1op("dec", c->dst, ctxt->eflags); + emulate_1op("dec", ctxt->dst, ctxt->eflags); break; case 0x63: /* movsxd */ if (ctxt->mode != X86EMUL_MODE_PROT64) goto cannot_emulate; - c->dst.val = (s32) c->src.val; + ctxt->dst.val = (s32) ctxt->src.val; break; case 0x6c: /* insb */ case 0x6d: /* insw/insd */ - c->src.val = c->regs[VCPU_REGS_RDX]; + ctxt->src.val = ctxt->regs[VCPU_REGS_RDX]; goto do_io_in; case 0x6e: /* outsb */ case 0x6f: /* outsw/outsd */ - c->dst.val = c->regs[VCPU_REGS_RDX]; + ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX]; goto do_io_out; break; case 0x70 ... 0x7f: /* jcc (short) */ - if (test_cc(c->b, ctxt->eflags)) - jmp_rel(c, c->src.val); - break; - case 0x84 ... 0x85: - test: - emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags); - break; - case 0x86 ... 0x87: /* xchg */ - xchg: - /* Write back the register source. */ - c->src.val = c->dst.val; - write_register_operand(&c->src); - /* - * Write back the memory destination with implicit LOCK - * prefix. - */ - c->dst.val = c->src.orig_val; - c->lock_prefix = 1; - break; - case 0x8c: /* mov r/m, sreg */ - if (c->modrm_reg > VCPU_SREG_GS) { - rc = emulate_ud(ctxt); - goto done; - } - c->dst.val = get_segment_selector(ctxt, c->modrm_reg); + if (test_cc(ctxt->b, ctxt->eflags)) + jmp_rel(ctxt, ctxt->src.val); break; case 0x8d: /* lea r16/r32, m */ - c->dst.val = c->src.addr.mem.ea; + ctxt->dst.val = ctxt->src.addr.mem.ea; break; - case 0x8e: { /* mov seg, r/m16 */ - uint16_t sel; - - sel = c->src.val; - - if (c->modrm_reg == VCPU_SREG_CS || - c->modrm_reg > VCPU_SREG_GS) { - rc = emulate_ud(ctxt); - goto done; - } - - if (c->modrm_reg == VCPU_SREG_SS) - ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; - - rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg); - - c->dst.type = OP_NONE; /* Disable writeback. */ - break; - } case 0x8f: /* pop (sole member of Grp1a) */ rc = em_grp1a(ctxt); break; case 0x90 ... 0x97: /* nop / xchg reg, rax */ - if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX]) + if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX]) break; - goto xchg; + rc = em_xchg(ctxt); + break; case 0x98: /* cbw/cwde/cdqe */ - switch (c->op_bytes) { - case 2: c->dst.val = (s8)c->dst.val; break; - case 4: c->dst.val = (s16)c->dst.val; break; - case 8: c->dst.val = (s32)c->dst.val; break; + switch (ctxt->op_bytes) { + case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; + case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; + case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; } break; - case 0xa8 ... 0xa9: /* test ax, imm */ - goto test; case 0xc0 ... 
0xc1: rc = em_grp2(ctxt); break; - case 0xc3: /* ret */ - c->dst.type = OP_REG; - c->dst.addr.reg = &c->eip; - c->dst.bytes = c->op_bytes; - rc = em_pop(ctxt); - break; case 0xc4: /* les */ - rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES); + rc = emulate_load_segment(ctxt, VCPU_SREG_ES); break; case 0xc5: /* lds */ - rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS); - break; - case 0xcb: /* ret far */ - rc = emulate_ret_far(ctxt, ops); + rc = emulate_load_segment(ctxt, VCPU_SREG_DS); break; case 0xcc: /* int3 */ - irq = 3; - goto do_interrupt; + rc = emulate_int(ctxt, 3); + break; case 0xcd: /* int n */ - irq = c->src.val; - do_interrupt: - rc = emulate_int(ctxt, ops, irq); + rc = emulate_int(ctxt, ctxt->src.val); break; case 0xce: /* into */ - if (ctxt->eflags & EFLG_OF) { - irq = 4; - goto do_interrupt; - } - break; - case 0xcf: /* iret */ - rc = emulate_iret(ctxt, ops); + if (ctxt->eflags & EFLG_OF) + rc = emulate_int(ctxt, 4); break; case 0xd0 ... 0xd1: /* Grp2 */ rc = em_grp2(ctxt); break; case 0xd2 ... 0xd3: /* Grp2 */ - c->src.val = c->regs[VCPU_REGS_RCX]; + ctxt->src.val = ctxt->regs[VCPU_REGS_RCX]; rc = em_grp2(ctxt); break; - case 0xe0 ... 0xe2: /* loop/loopz/loopnz */ - register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1); - if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 && - (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags))) - jmp_rel(c, c->src.val); - break; - case 0xe3: /* jcxz/jecxz/jrcxz */ - if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) - jmp_rel(c, c->src.val); - break; case 0xe4: /* inb */ case 0xe5: /* in */ goto do_io_in; @@ -4025,35 +3921,30 @@ special_insn: case 0xe7: /* out */ goto do_io_out; case 0xe8: /* call (near) */ { - long int rel = c->src.val; - c->src.val = (unsigned long) c->eip; - jmp_rel(c, rel); + long int rel = ctxt->src.val; + ctxt->src.val = (unsigned long) ctxt->_eip; + jmp_rel(ctxt, rel); rc = em_push(ctxt); break; } case 0xe9: /* jmp rel */ - goto jmp; - case 0xea: /* jmp far */ - rc = em_jmp_far(ctxt); - break; - case 0xeb: - jmp: /* jmp rel short */ - jmp_rel(c, c->src.val); - c->dst.type = OP_NONE; /* Disable writeback. */ + case 0xeb: /* jmp rel short */ + jmp_rel(ctxt, ctxt->src.val); + ctxt->dst.type = OP_NONE; /* Disable writeback. */ break; case 0xec: /* in al,dx */ case 0xed: /* in (e/r)ax,dx */ do_io_in: - if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, - &c->dst.val)) + if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, + &ctxt->dst.val)) goto done; /* IO is needed */ break; case 0xee: /* out dx,al */ case 0xef: /* out dx,(e/r)ax */ do_io_out: - ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val, - &c->src.val, 1); - c->dst.type = OP_NONE; /* Disable writeback. */ + ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, + &ctxt->src.val, 1); + ctxt->dst.type = OP_NONE; /* Disable writeback. 
*/ break; case 0xf4: /* hlt */ ctxt->ops->halt(ctxt); @@ -4071,22 +3962,6 @@ special_insn: case 0xf9: /* stc */ ctxt->eflags |= EFLG_CF; break; - case 0xfa: /* cli */ - if (emulator_bad_iopl(ctxt, ops)) { - rc = emulate_gp(ctxt, 0); - goto done; - } else - ctxt->eflags &= ~X86_EFLAGS_IF; - break; - case 0xfb: /* sti */ - if (emulator_bad_iopl(ctxt, ops)) { - rc = emulate_gp(ctxt, 0); - goto done; - } else { - ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; - ctxt->eflags |= X86_EFLAGS_IF; - } - break; case 0xfc: /* cld */ ctxt->eflags &= ~EFLG_DF; break; @@ -4115,40 +3990,40 @@ writeback: * restore dst type in case the decoding will be reused * (happens for string instruction ) */ - c->dst.type = saved_dst_type; + ctxt->dst.type = saved_dst_type; - if ((c->d & SrcMask) == SrcSI) - string_addr_inc(ctxt, seg_override(ctxt, c), - VCPU_REGS_RSI, &c->src); + if ((ctxt->d & SrcMask) == SrcSI) + string_addr_inc(ctxt, seg_override(ctxt), + VCPU_REGS_RSI, &ctxt->src); - if ((c->d & DstMask) == DstDI) + if ((ctxt->d & DstMask) == DstDI) string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI, - &c->dst); + &ctxt->dst); - if (c->rep_prefix && (c->d & String)) { - struct read_cache *r = &ctxt->decode.io_read; - register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1); + if (ctxt->rep_prefix && (ctxt->d & String)) { + struct read_cache *r = &ctxt->io_read; + register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1); if (!string_insn_completed(ctxt)) { /* * Re-enter guest when pio read ahead buffer is empty * or, if it is not used, after each 1024 iteration. */ - if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) && + if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) && (r->end == 0 || r->end != r->pos)) { /* * Reset read cache. Usually happens before * decode, but since instruction is restarted * we have to do it here. */ - ctxt->decode.mem_read.end = 0; + ctxt->mem_read.end = 0; return EMULATION_RESTART; } goto done; /* skip rip writeback */ } } - ctxt->eip = c->eip; + ctxt->eip = ctxt->_eip; done: if (rc == X86EMUL_PROPAGATE_FAULT) @@ -4159,13 +4034,7 @@ done: return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; twobyte_insn: - switch (c->b) { - case 0x05: /* syscall */ - rc = emulate_syscall(ctxt, ops); - break; - case 0x06: - rc = em_clts(ctxt); - break; + switch (ctxt->b) { case 0x09: /* wbinvd */ (ctxt->ops->wbinvd)(ctxt); break; @@ -4174,21 +4043,21 @@ twobyte_insn: case 0x18: /* Grp16 (prefetch/nop) */ break; case 0x20: /* mov cr, reg */ - c->dst.val = ops->get_cr(ctxt, c->modrm_reg); + ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); break; case 0x21: /* mov from dr to reg */ - ops->get_dr(ctxt, c->modrm_reg, &c->dst.val); + ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); break; case 0x22: /* mov reg, cr */ - if (ops->set_cr(ctxt, c->modrm_reg, c->src.val)) { + if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) { emulate_gp(ctxt, 0); rc = X86EMUL_PROPAGATE_FAULT; goto done; } - c->dst.type = OP_NONE; + ctxt->dst.type = OP_NONE; break; case 0x23: /* mov from reg to dr */ - if (ops->set_dr(ctxt, c->modrm_reg, c->src.val & + if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val & ((ctxt->mode == X86EMUL_MODE_PROT64) ? 
~0ULL : ~0U)) < 0) { /* #UD condition is already handled by the code above */ @@ -4197,13 +4066,13 @@ twobyte_insn: goto done; } - c->dst.type = OP_NONE; /* no writeback */ + ctxt->dst.type = OP_NONE; /* no writeback */ break; case 0x30: /* wrmsr */ - msr_data = (u32)c->regs[VCPU_REGS_RAX] - | ((u64)c->regs[VCPU_REGS_RDX] << 32); - if (ops->set_msr(ctxt, c->regs[VCPU_REGS_RCX], msr_data)) { + msr_data = (u32)ctxt->regs[VCPU_REGS_RAX] + | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32); + if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) { emulate_gp(ctxt, 0); rc = X86EMUL_PROPAGATE_FAULT; goto done; @@ -4212,64 +4081,58 @@ twobyte_insn: break; case 0x32: /* rdmsr */ - if (ops->get_msr(ctxt, c->regs[VCPU_REGS_RCX], &msr_data)) { + if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) { emulate_gp(ctxt, 0); rc = X86EMUL_PROPAGATE_FAULT; goto done; } else { - c->regs[VCPU_REGS_RAX] = (u32)msr_data; - c->regs[VCPU_REGS_RDX] = msr_data >> 32; + ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data; + ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32; } rc = X86EMUL_CONTINUE; break; - case 0x34: /* sysenter */ - rc = emulate_sysenter(ctxt, ops); - break; - case 0x35: /* sysexit */ - rc = emulate_sysexit(ctxt, ops); - break; case 0x40 ... 0x4f: /* cmov */ - c->dst.val = c->dst.orig_val = c->src.val; - if (!test_cc(c->b, ctxt->eflags)) - c->dst.type = OP_NONE; /* no writeback */ + ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val; + if (!test_cc(ctxt->b, ctxt->eflags)) + ctxt->dst.type = OP_NONE; /* no writeback */ break; case 0x80 ... 0x8f: /* jnz rel, etc*/ - if (test_cc(c->b, ctxt->eflags)) - jmp_rel(c, c->src.val); + if (test_cc(ctxt->b, ctxt->eflags)) + jmp_rel(ctxt, ctxt->src.val); break; case 0x90 ... 0x9f: /* setcc r/m8 */ - c->dst.val = test_cc(c->b, ctxt->eflags); + ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); break; case 0xa0: /* push fs */ - rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_FS); + rc = emulate_push_sreg(ctxt, VCPU_SREG_FS); break; case 0xa1: /* pop fs */ - rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS); + rc = emulate_pop_sreg(ctxt, VCPU_SREG_FS); break; case 0xa3: bt: /* bt */ - c->dst.type = OP_NONE; + ctxt->dst.type = OP_NONE; /* only subword offset */ - c->src.val &= (c->dst.bytes << 3) - 1; - emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags); + ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; + emulate_2op_SrcV_nobyte("bt", ctxt->src, ctxt->dst, ctxt->eflags); break; case 0xa4: /* shld imm8, r, r/m */ case 0xa5: /* shld cl, r, r/m */ - emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags); + emulate_2op_cl("shld", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags); break; case 0xa8: /* push gs */ - rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_GS); + rc = emulate_push_sreg(ctxt, VCPU_SREG_GS); break; case 0xa9: /* pop gs */ - rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS); + rc = emulate_pop_sreg(ctxt, VCPU_SREG_GS); break; case 0xab: bts: /* bts */ - emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV_nobyte("bts", ctxt->src, ctxt->dst, ctxt->eflags); break; case 0xac: /* shrd imm8, r, r/m */ case 0xad: /* shrd cl, r, r/m */ - emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags); + emulate_2op_cl("shrd", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags); break; case 0xae: /* clflush */ break; @@ -4278,38 +4141,38 @@ twobyte_insn: * Save real source value, then compare EAX against * destination. 
*/ - c->src.orig_val = c->src.val; - c->src.val = c->regs[VCPU_REGS_RAX]; - emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags); + ctxt->src.orig_val = ctxt->src.val; + ctxt->src.val = ctxt->regs[VCPU_REGS_RAX]; + emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags); if (ctxt->eflags & EFLG_ZF) { /* Success: write back to memory. */ - c->dst.val = c->src.orig_val; + ctxt->dst.val = ctxt->src.orig_val; } else { /* Failure: write the value we saw to EAX. */ - c->dst.type = OP_REG; - c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX]; + ctxt->dst.type = OP_REG; + ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX]; } break; case 0xb2: /* lss */ - rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS); + rc = emulate_load_segment(ctxt, VCPU_SREG_SS); break; case 0xb3: btr: /* btr */ - emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV_nobyte("btr", ctxt->src, ctxt->dst, ctxt->eflags); break; case 0xb4: /* lfs */ - rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS); + rc = emulate_load_segment(ctxt, VCPU_SREG_FS); break; case 0xb5: /* lgs */ - rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS); + rc = emulate_load_segment(ctxt, VCPU_SREG_GS); break; case 0xb6 ... 0xb7: /* movzx */ - c->dst.bytes = c->op_bytes; - c->dst.val = (c->d & ByteOp) ? (u8) c->src.val - : (u16) c->src.val; + ctxt->dst.bytes = ctxt->op_bytes; + ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val + : (u16) ctxt->src.val; break; case 0xba: /* Grp8 */ - switch (c->modrm_reg & 3) { + switch (ctxt->modrm_reg & 3) { case 0: goto bt; case 1: @@ -4322,47 +4185,47 @@ twobyte_insn: break; case 0xbb: btc: /* btc */ - emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV_nobyte("btc", ctxt->src, ctxt->dst, ctxt->eflags); break; case 0xbc: { /* bsf */ u8 zf; __asm__ ("bsf %2, %0; setz %1" - : "=r"(c->dst.val), "=q"(zf) - : "r"(c->src.val)); + : "=r"(ctxt->dst.val), "=q"(zf) + : "r"(ctxt->src.val)); ctxt->eflags &= ~X86_EFLAGS_ZF; if (zf) { ctxt->eflags |= X86_EFLAGS_ZF; - c->dst.type = OP_NONE; /* Disable writeback. */ + ctxt->dst.type = OP_NONE; /* Disable writeback. */ } break; } case 0xbd: { /* bsr */ u8 zf; __asm__ ("bsr %2, %0; setz %1" - : "=r"(c->dst.val), "=q"(zf) - : "r"(c->src.val)); + : "=r"(ctxt->dst.val), "=q"(zf) + : "r"(ctxt->src.val)); ctxt->eflags &= ~X86_EFLAGS_ZF; if (zf) { ctxt->eflags |= X86_EFLAGS_ZF; - c->dst.type = OP_NONE; /* Disable writeback. */ + ctxt->dst.type = OP_NONE; /* Disable writeback. */ } break; } case 0xbe ... 0xbf: /* movsx */ - c->dst.bytes = c->op_bytes; - c->dst.val = (c->d & ByteOp) ? (s8) c->src.val : - (s16) c->src.val; + ctxt->dst.bytes = ctxt->op_bytes; + ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val : + (s16) ctxt->src.val; break; case 0xc0 ... 0xc1: /* xadd */ - emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); + emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags); /* Write back the register source. */ - c->src.val = c->dst.orig_val; - write_register_operand(&c->src); + ctxt->src.val = ctxt->dst.orig_val; + write_register_operand(&ctxt->src); break; case 0xc3: /* movnti */ - c->dst.bytes = c->op_bytes; - c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val : - (u64) c->src.val; + ctxt->dst.bytes = ctxt->op_bytes; + ctxt->dst.val = (ctxt->op_bytes == 4) ? 
(u32) ctxt->src.val : + (u64) ctxt->src.val; break; case 0xc7: /* Grp9 (cmpxchg8b) */ rc = em_grp9(ctxt); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index aee3862..9335e1b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -148,7 +148,7 @@ module_param(oos_shadow, bool, 0644); #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \ | PT64_NX_MASK) -#define RMAP_EXT 4 +#define PTE_LIST_EXT 4 #define ACC_EXEC_MASK 1 #define ACC_WRITE_MASK PT_WRITABLE_MASK @@ -164,16 +164,16 @@ module_param(oos_shadow, bool, 0644); #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) -struct kvm_rmap_desc { - u64 *sptes[RMAP_EXT]; - struct kvm_rmap_desc *more; +struct pte_list_desc { + u64 *sptes[PTE_LIST_EXT]; + struct pte_list_desc *more; }; struct kvm_shadow_walk_iterator { u64 addr; hpa_t shadow_addr; - int level; u64 *sptep; + int level; unsigned index; }; @@ -182,32 +182,68 @@ struct kvm_shadow_walk_iterator { shadow_walk_okay(&(_walker)); \ shadow_walk_next(&(_walker))) -typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte); +#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ + for (shadow_walk_init(&(_walker), _vcpu, _addr); \ + shadow_walk_okay(&(_walker)) && \ + ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \ + __shadow_walk_next(&(_walker), spte)) -static struct kmem_cache *pte_chain_cache; -static struct kmem_cache *rmap_desc_cache; +static struct kmem_cache *pte_list_desc_cache; static struct kmem_cache *mmu_page_header_cache; static struct percpu_counter kvm_total_used_mmu_pages; -static u64 __read_mostly shadow_trap_nonpresent_pte; -static u64 __read_mostly shadow_notrap_nonpresent_pte; static u64 __read_mostly shadow_nx_mask; static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ static u64 __read_mostly shadow_user_mask; static u64 __read_mostly shadow_accessed_mask; static u64 __read_mostly shadow_dirty_mask; +static u64 __read_mostly shadow_mmio_mask; -static inline u64 rsvd_bits(int s, int e) +static void mmu_spte_set(u64 *sptep, u64 spte); + +void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask) { - return ((1ULL << (e - s + 1)) - 1) << s; + shadow_mmio_mask = mmio_mask; +} +EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); + +static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access) +{ + access &= ACC_WRITE_MASK | ACC_USER_MASK; + + trace_mark_mmio_spte(sptep, gfn, access); + mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT); } -void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte) +static bool is_mmio_spte(u64 spte) { - shadow_trap_nonpresent_pte = trap_pte; - shadow_notrap_nonpresent_pte = notrap_pte; + return (spte & shadow_mmio_mask) == shadow_mmio_mask; +} + +static gfn_t get_mmio_spte_gfn(u64 spte) +{ + return (spte & ~shadow_mmio_mask) >> PAGE_SHIFT; +} + +static unsigned get_mmio_spte_access(u64 spte) +{ + return (spte & ~shadow_mmio_mask) & ~PAGE_MASK; +} + +static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access) +{ + if (unlikely(is_noslot_pfn(pfn))) { + mark_mmio_spte(sptep, gfn, access); + return true; + } + + return false; +} + +static inline u64 rsvd_bits(int s, int e) +{ + return ((1ULL << (e - s + 1)) - 1) << s; } -EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes); void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, u64 dirty_mask, u64 nx_mask, u64 x_mask) @@ -220,11 +256,6 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, } EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); -static bool 
is_write_protection(struct kvm_vcpu *vcpu) -{ - return kvm_read_cr0_bits(vcpu, X86_CR0_WP); -} - static int is_cpuid_PSE36(void) { return 1; @@ -237,8 +268,7 @@ static int is_nx(struct kvm_vcpu *vcpu) static int is_shadow_present_pte(u64 pte) { - return pte != shadow_trap_nonpresent_pte - && pte != shadow_notrap_nonpresent_pte; + return pte & PT_PRESENT_MASK && !is_mmio_spte(pte); } static int is_large_pte(u64 pte) @@ -246,11 +276,6 @@ static int is_large_pte(u64 pte) return pte & PT_PAGE_SIZE_MASK; } -static int is_writable_pte(unsigned long pte) -{ - return pte & PT_WRITABLE_MASK; -} - static int is_dirty_gpte(unsigned long pte) { return pte & PT_DIRTY_MASK; @@ -282,26 +307,154 @@ static gfn_t pse36_gfn_delta(u32 gpte) return (gpte & PT32_DIR_PSE36_MASK) << shift; } +#ifdef CONFIG_X86_64 static void __set_spte(u64 *sptep, u64 spte) { - set_64bit(sptep, spte); + *sptep = spte; } -static u64 __xchg_spte(u64 *sptep, u64 new_spte) +static void __update_clear_spte_fast(u64 *sptep, u64 spte) { -#ifdef CONFIG_X86_64 - return xchg(sptep, new_spte); + *sptep = spte; +} + +static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) +{ + return xchg(sptep, spte); +} + +static u64 __get_spte_lockless(u64 *sptep) +{ + return ACCESS_ONCE(*sptep); +} + +static bool __check_direct_spte_mmio_pf(u64 spte) +{ + /* It is valid if the spte is zapped. */ + return spte == 0ull; +} #else - u64 old_spte; +union split_spte { + struct { + u32 spte_low; + u32 spte_high; + }; + u64 spte; +}; - do { - old_spte = *sptep; - } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte); +static void count_spte_clear(u64 *sptep, u64 spte) +{ + struct kvm_mmu_page *sp = page_header(__pa(sptep)); - return old_spte; -#endif + if (is_shadow_present_pte(spte)) + return; + + /* Ensure the spte is completely set before we increase the count */ + smp_wmb(); + sp->clear_spte_count++; +} + +static void __set_spte(u64 *sptep, u64 spte) +{ + union split_spte *ssptep, sspte; + + ssptep = (union split_spte *)sptep; + sspte = (union split_spte)spte; + + ssptep->spte_high = sspte.spte_high; + + /* + * If we map the spte from nonpresent to present, We should store + * the high bits firstly, then set present bit, so cpu can not + * fetch this spte while we are setting the spte. + */ + smp_wmb(); + + ssptep->spte_low = sspte.spte_low; } +static void __update_clear_spte_fast(u64 *sptep, u64 spte) +{ + union split_spte *ssptep, sspte; + + ssptep = (union split_spte *)sptep; + sspte = (union split_spte)spte; + + ssptep->spte_low = sspte.spte_low; + + /* + * If we map the spte from present to nonpresent, we should clear + * present bit firstly to avoid vcpu fetch the old high bits. + */ + smp_wmb(); + + ssptep->spte_high = sspte.spte_high; + count_spte_clear(sptep, spte); +} + +static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) +{ + union split_spte *ssptep, sspte, orig; + + ssptep = (union split_spte *)sptep; + sspte = (union split_spte)spte; + + /* xchg acts as a barrier before the setting of the high bits */ + orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low); + orig.spte_high = ssptep->spte_high = sspte.spte_high; + count_spte_clear(sptep, spte); + + return orig.spte; +} + +/* + * The idea using the light way get the spte on x86_32 guest is from + * gup_get_pte(arch/x86/mm/gup.c). + * The difference is we can not catch the spte tlb flush if we leave + * guest mode, so we emulate it by increase clear_spte_count when spte + * is cleared. 
+ */ +static u64 __get_spte_lockless(u64 *sptep) +{ + struct kvm_mmu_page *sp = page_header(__pa(sptep)); + union split_spte spte, *orig = (union split_spte *)sptep; + int count; + +retry: + count = sp->clear_spte_count; + smp_rmb(); + + spte.spte_low = orig->spte_low; + smp_rmb(); + + spte.spte_high = orig->spte_high; + smp_rmb(); + + if (unlikely(spte.spte_low != orig->spte_low || + count != sp->clear_spte_count)) + goto retry; + + return spte.spte; +} + +static bool __check_direct_spte_mmio_pf(u64 spte) +{ + union split_spte sspte = (union split_spte)spte; + u32 high_mmio_mask = shadow_mmio_mask >> 32; + + /* It is valid if the spte is zapped. */ + if (spte == 0ull) + return true; + + /* It is valid if the spte is being zapped. */ + if (sspte.spte_low == 0ull && + (sspte.spte_high & high_mmio_mask) == high_mmio_mask) + return true; + + return false; +} +#endif + static bool spte_has_volatile_bits(u64 spte) { if (!shadow_accessed_mask) @@ -322,12 +475,30 @@ static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask) return (old_spte & bit_mask) && !(new_spte & bit_mask); } -static void update_spte(u64 *sptep, u64 new_spte) +/* Rules for using mmu_spte_set: + * Set the sptep from nonpresent to present. + * Note: the sptep being assigned *must* be either not present + * or in a state where the hardware will not attempt to update + * the spte. + */ +static void mmu_spte_set(u64 *sptep, u64 new_spte) +{ + WARN_ON(is_shadow_present_pte(*sptep)); + __set_spte(sptep, new_spte); +} + +/* Rules for using mmu_spte_update: + * Update the state bits, it means the mapped pfn is not changged. + */ +static void mmu_spte_update(u64 *sptep, u64 new_spte) { u64 mask, old_spte = *sptep; WARN_ON(!is_rmap_spte(new_spte)); + if (!is_shadow_present_pte(old_spte)) + return mmu_spte_set(sptep, new_spte); + new_spte |= old_spte & shadow_dirty_mask; mask = shadow_accessed_mask; @@ -335,9 +506,9 @@ static void update_spte(u64 *sptep, u64 new_spte) mask |= shadow_dirty_mask; if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask) - __set_spte(sptep, new_spte); + __update_clear_spte_fast(sptep, new_spte); else - old_spte = __xchg_spte(sptep, new_spte); + old_spte = __update_clear_spte_slow(sptep, new_spte); if (!shadow_accessed_mask) return; @@ -348,6 +519,64 @@ static void update_spte(u64 *sptep, u64 new_spte) kvm_set_pfn_dirty(spte_to_pfn(old_spte)); } +/* + * Rules for using mmu_spte_clear_track_bits: + * It sets the sptep from present to nonpresent, and track the + * state bits, it is used to clear the last level sptep. + */ +static int mmu_spte_clear_track_bits(u64 *sptep) +{ + pfn_t pfn; + u64 old_spte = *sptep; + + if (!spte_has_volatile_bits(old_spte)) + __update_clear_spte_fast(sptep, 0ull); + else + old_spte = __update_clear_spte_slow(sptep, 0ull); + + if (!is_rmap_spte(old_spte)) + return 0; + + pfn = spte_to_pfn(old_spte); + if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) + kvm_set_pfn_accessed(pfn); + if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask)) + kvm_set_pfn_dirty(pfn); + return 1; +} + +/* + * Rules for using mmu_spte_clear_no_track: + * Directly clear spte without caring the state bits of sptep, + * it is used to set the upper level spte. 
+ */ +static void mmu_spte_clear_no_track(u64 *sptep) +{ + __update_clear_spte_fast(sptep, 0ull); +} + +static u64 mmu_spte_get_lockless(u64 *sptep) +{ + return __get_spte_lockless(sptep); +} + +static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) +{ + rcu_read_lock(); + atomic_inc(&vcpu->kvm->arch.reader_counter); + + /* Increase the counter before walking shadow page table */ + smp_mb__after_atomic_inc(); +} + +static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) +{ + /* Decrease the counter after walking shadow page table finished */ + smp_mb__before_atomic_dec(); + atomic_dec(&vcpu->kvm->arch.reader_counter); + rcu_read_unlock(); +} + static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, struct kmem_cache *base_cache, int min) { @@ -397,12 +626,8 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) { int r; - r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache, - pte_chain_cache, 4); - if (r) - goto out; - r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, - rmap_desc_cache, 4 + PTE_PREFETCH_NUM); + r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, + pte_list_desc_cache, 8 + PTE_PREFETCH_NUM); if (r) goto out; r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); @@ -416,8 +641,8 @@ out: static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) { - mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache); - mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache); + mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, + pte_list_desc_cache); mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, mmu_page_header_cache); @@ -433,26 +658,15 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc, return p; } -static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu) -{ - return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache, - sizeof(struct kvm_pte_chain)); -} - -static void mmu_free_pte_chain(struct kvm_pte_chain *pc) +static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu) { - kmem_cache_free(pte_chain_cache, pc); + return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache, + sizeof(struct pte_list_desc)); } -static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu) +static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc) { - return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache, - sizeof(struct kvm_rmap_desc)); -} - -static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd) -{ - kmem_cache_free(rmap_desc_cache, rd); + kmem_cache_free(pte_list_desc_cache, pte_list_desc); } static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) @@ -498,6 +712,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn) linfo = lpage_info_slot(gfn, slot, i); linfo->write_count += 1; } + kvm->arch.indirect_shadow_pages++; } static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) @@ -513,6 +728,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) linfo->write_count -= 1; WARN_ON(linfo->write_count < 0); } + kvm->arch.indirect_shadow_pages--; } static int has_wrprotected_page(struct kvm *kvm, @@ -588,67 +804,42 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) } /* - * Take gfn and return the reverse mapping to it. 
- */ - -static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) -{ - struct kvm_memory_slot *slot; - struct kvm_lpage_info *linfo; - - slot = gfn_to_memslot(kvm, gfn); - if (likely(level == PT_PAGE_TABLE_LEVEL)) - return &slot->rmap[gfn - slot->base_gfn]; - - linfo = lpage_info_slot(gfn, slot, level); - - return &linfo->rmap_pde; -} - -/* - * Reverse mapping data structures: + * Pte mapping structures: * - * If rmapp bit zero is zero, then rmapp point to the shadw page table entry - * that points to page_address(page). + * If pte_list bit zero is zero, then pte_list point to the spte. * - * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc - * containing more mappings. + * If pte_list bit zero is one, (then pte_list & ~1) points to a struct + * pte_list_desc containing more mappings. * - * Returns the number of rmap entries before the spte was added or zero if + * Returns the number of pte entries before the spte was added or zero if * the spte was not added. * */ -static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) +static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, + unsigned long *pte_list) { - struct kvm_mmu_page *sp; - struct kvm_rmap_desc *desc; - unsigned long *rmapp; + struct pte_list_desc *desc; int i, count = 0; - if (!is_rmap_spte(*spte)) - return count; - sp = page_header(__pa(spte)); - kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); - rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); - if (!*rmapp) { - rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte); - *rmapp = (unsigned long)spte; - } else if (!(*rmapp & 1)) { - rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte); - desc = mmu_alloc_rmap_desc(vcpu); - desc->sptes[0] = (u64 *)*rmapp; + if (!*pte_list) { + rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); + *pte_list = (unsigned long)spte; + } else if (!(*pte_list & 1)) { + rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); + desc = mmu_alloc_pte_list_desc(vcpu); + desc->sptes[0] = (u64 *)*pte_list; desc->sptes[1] = spte; - *rmapp = (unsigned long)desc | 1; + *pte_list = (unsigned long)desc | 1; ++count; } else { - rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); - desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); - while (desc->sptes[RMAP_EXT-1] && desc->more) { + rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); + desc = (struct pte_list_desc *)(*pte_list & ~1ul); + while (desc->sptes[PTE_LIST_EXT-1] && desc->more) { desc = desc->more; - count += RMAP_EXT; + count += PTE_LIST_EXT; } - if (desc->sptes[RMAP_EXT-1]) { - desc->more = mmu_alloc_rmap_desc(vcpu); + if (desc->sptes[PTE_LIST_EXT-1]) { + desc->more = mmu_alloc_pte_list_desc(vcpu); desc = desc->more; } for (i = 0; desc->sptes[i]; ++i) @@ -658,59 +849,78 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) return count; } -static void rmap_desc_remove_entry(unsigned long *rmapp, - struct kvm_rmap_desc *desc, - int i, - struct kvm_rmap_desc *prev_desc) +static u64 *pte_list_next(unsigned long *pte_list, u64 *spte) +{ + struct pte_list_desc *desc; + u64 *prev_spte; + int i; + + if (!*pte_list) + return NULL; + else if (!(*pte_list & 1)) { + if (!spte) + return (u64 *)*pte_list; + return NULL; + } + desc = (struct pte_list_desc *)(*pte_list & ~1ul); + prev_spte = NULL; + while (desc) { + for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) { + if (prev_spte == spte) + return desc->sptes[i]; + prev_spte = desc->sptes[i]; + } + desc = desc->more; + } + return NULL; +} + +static void 
+pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc, + int i, struct pte_list_desc *prev_desc) { int j; - for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j) + for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j) ; desc->sptes[i] = desc->sptes[j]; desc->sptes[j] = NULL; if (j != 0) return; if (!prev_desc && !desc->more) - *rmapp = (unsigned long)desc->sptes[0]; + *pte_list = (unsigned long)desc->sptes[0]; else if (prev_desc) prev_desc->more = desc->more; else - *rmapp = (unsigned long)desc->more | 1; - mmu_free_rmap_desc(desc); + *pte_list = (unsigned long)desc->more | 1; + mmu_free_pte_list_desc(desc); } -static void rmap_remove(struct kvm *kvm, u64 *spte) +static void pte_list_remove(u64 *spte, unsigned long *pte_list) { - struct kvm_rmap_desc *desc; - struct kvm_rmap_desc *prev_desc; - struct kvm_mmu_page *sp; - gfn_t gfn; - unsigned long *rmapp; + struct pte_list_desc *desc; + struct pte_list_desc *prev_desc; int i; - sp = page_header(__pa(spte)); - gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); - rmapp = gfn_to_rmap(kvm, gfn, sp->role.level); - if (!*rmapp) { - printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte); + if (!*pte_list) { + printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte); BUG(); - } else if (!(*rmapp & 1)) { - rmap_printk("rmap_remove: %p 1->0\n", spte); - if ((u64 *)*rmapp != spte) { - printk(KERN_ERR "rmap_remove: %p 1->BUG\n", spte); + } else if (!(*pte_list & 1)) { + rmap_printk("pte_list_remove: %p 1->0\n", spte); + if ((u64 *)*pte_list != spte) { + printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte); BUG(); } - *rmapp = 0; + *pte_list = 0; } else { - rmap_printk("rmap_remove: %p many->many\n", spte); - desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); + rmap_printk("pte_list_remove: %p many->many\n", spte); + desc = (struct pte_list_desc *)(*pte_list & ~1ul); prev_desc = NULL; while (desc) { - for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) + for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) if (desc->sptes[i] == spte) { - rmap_desc_remove_entry(rmapp, + pte_list_desc_remove_entry(pte_list, desc, i, prev_desc); return; @@ -718,62 +928,80 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) prev_desc = desc; desc = desc->more; } - pr_err("rmap_remove: %p many->many\n", spte); + pr_err("pte_list_remove: %p many->many\n", spte); BUG(); } } -static int set_spte_track_bits(u64 *sptep, u64 new_spte) +typedef void (*pte_list_walk_fn) (u64 *spte); +static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn) { - pfn_t pfn; - u64 old_spte = *sptep; + struct pte_list_desc *desc; + int i; - if (!spte_has_volatile_bits(old_spte)) - __set_spte(sptep, new_spte); - else - old_spte = __xchg_spte(sptep, new_spte); + if (!*pte_list) + return; - if (!is_rmap_spte(old_spte)) - return 0; + if (!(*pte_list & 1)) + return fn((u64 *)*pte_list); - pfn = spte_to_pfn(old_spte); - if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) - kvm_set_pfn_accessed(pfn); - if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask)) - kvm_set_pfn_dirty(pfn); - return 1; + desc = (struct pte_list_desc *)(*pte_list & ~1ul); + while (desc) { + for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) + fn(desc->sptes[i]); + desc = desc->more; + } } -static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte) +/* + * Take gfn and return the reverse mapping to it. 
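As an aside (not part of the patch): pte_list_add()/pte_list_remove() above keep the whole reverse-mapping head in a single unsigned long. Bit 0 clear means the word stores one spte pointer directly; bit 0 set means it points to a pte_list_desc holding several entries plus a "more" chain. A compact userspace sketch of that encoding, simplified (no removal, allocation failures ignored, LIST_EXT arbitrary):

#include <stdint.h>
#include <stdlib.h>

#define LIST_EXT 4			/* PTE_LIST_EXT plays this role */

struct list_desc {
	uint64_t *entries[LIST_EXT];
	struct list_desc *more;
};

static void list_add(unsigned long *head, uint64_t *entry)
{
	struct list_desc *desc;
	int i;

	if (!*head) {				/* empty: store the pointer itself */
		*head = (unsigned long)entry;
		return;
	}
	if (!(*head & 1)) {			/* one entry: switch to a descriptor */
		desc = calloc(1, sizeof(*desc));
		desc->entries[0] = (uint64_t *)*head;
		desc->entries[1] = entry;
		*head = (unsigned long)desc | 1;
		return;
	}
	desc = (struct list_desc *)(*head & ~1ul);	/* many: append */
	while (desc->more)
		desc = desc->more;
	for (i = 0; i < LIST_EXT; i++)
		if (!desc->entries[i]) {
			desc->entries[i] = entry;
			return;
		}
	desc->more = calloc(1, sizeof(*desc));
	desc->more->entries[0] = entry;
}

Both the per-gfn rmap slots and the new parent_ptes field of struct kvm_mmu_page reuse this one encoding, which is why the separate kvm_pte_chain and kvm_rmap_desc machinery can be deleted in the hunks that follow.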
+ */ +static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) { - if (set_spte_track_bits(sptep, new_spte)) - rmap_remove(kvm, sptep); + struct kvm_memory_slot *slot; + struct kvm_lpage_info *linfo; + + slot = gfn_to_memslot(kvm, gfn); + if (likely(level == PT_PAGE_TABLE_LEVEL)) + return &slot->rmap[gfn - slot->base_gfn]; + + linfo = lpage_info_slot(gfn, slot, level); + + return &linfo->rmap_pde; +} + +static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) +{ + struct kvm_mmu_page *sp; + unsigned long *rmapp; + + sp = page_header(__pa(spte)); + kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); + rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); + return pte_list_add(vcpu, spte, rmapp); } static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte) { - struct kvm_rmap_desc *desc; - u64 *prev_spte; - int i; + return pte_list_next(rmapp, spte); +} - if (!*rmapp) - return NULL; - else if (!(*rmapp & 1)) { - if (!spte) - return (u64 *)*rmapp; - return NULL; - } - desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); - prev_spte = NULL; - while (desc) { - for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) { - if (prev_spte == spte) - return desc->sptes[i]; - prev_spte = desc->sptes[i]; - } - desc = desc->more; - } - return NULL; +static void rmap_remove(struct kvm *kvm, u64 *spte) +{ + struct kvm_mmu_page *sp; + gfn_t gfn; + unsigned long *rmapp; + + sp = page_header(__pa(spte)); + gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); + rmapp = gfn_to_rmap(kvm, gfn, sp->role.level); + pte_list_remove(spte, rmapp); +} + +static void drop_spte(struct kvm *kvm, u64 *sptep) +{ + if (mmu_spte_clear_track_bits(sptep)) + rmap_remove(kvm, sptep); } static int rmap_write_protect(struct kvm *kvm, u64 gfn) @@ -790,7 +1018,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn) BUG_ON(!(*spte & PT_PRESENT_MASK)); rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); if (is_writable_pte(*spte)) { - update_spte(spte, *spte & ~PT_WRITABLE_MASK); + mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK); write_protected = 1; } spte = rmap_next(kvm, rmapp, spte); @@ -807,8 +1035,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn) BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)); pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn); if (is_writable_pte(*spte)) { - drop_spte(kvm, spte, - shadow_trap_nonpresent_pte); + drop_spte(kvm, spte); --kvm->stat.lpages; spte = NULL; write_protected = 1; @@ -829,7 +1056,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, while ((spte = rmap_next(kvm, rmapp, NULL))) { BUG_ON(!(*spte & PT_PRESENT_MASK)); rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte); - drop_spte(kvm, spte, shadow_trap_nonpresent_pte); + drop_spte(kvm, spte); need_tlb_flush = 1; } return need_tlb_flush; @@ -851,7 +1078,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte); need_flush = 1; if (pte_write(*ptep)) { - drop_spte(kvm, spte, shadow_trap_nonpresent_pte); + drop_spte(kvm, spte); spte = rmap_next(kvm, rmapp, NULL); } else { new_spte = *spte &~ (PT64_BASE_ADDR_MASK); @@ -860,7 +1087,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, new_spte &= ~PT_WRITABLE_MASK; new_spte &= ~SPTE_HOST_WRITEABLE; new_spte &= ~shadow_accessed_mask; - set_spte_track_bits(spte, new_spte); + mmu_spte_clear_track_bits(spte); + mmu_spte_set(spte, new_spte); spte = rmap_next(kvm, rmapp, 
spte); } } @@ -1032,151 +1260,89 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr) percpu_counter_add(&kvm_total_used_mmu_pages, nr); } -static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp) +/* + * Remove the sp from shadow page cache, after call it, + * we can not find this sp from the cache, and the shadow + * page table is still valid. + * It should be under the protection of mmu lock. + */ +static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp) { ASSERT(is_empty_shadow_page(sp->spt)); hlist_del(&sp->hash_link); - list_del(&sp->link); - free_page((unsigned long)sp->spt); if (!sp->role.direct) free_page((unsigned long)sp->gfns); - kmem_cache_free(mmu_page_header_cache, sp); - kvm_mod_used_mmu_pages(kvm, -1); } -static unsigned kvm_page_table_hashfn(gfn_t gfn) +/* + * Free the shadow page table and the sp, we can do it + * out of the protection of mmu lock. + */ +static void kvm_mmu_free_page(struct kvm_mmu_page *sp) { - return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1); + list_del(&sp->link); + free_page((unsigned long)sp->spt); + kmem_cache_free(mmu_page_header_cache, sp); } -static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, - u64 *parent_pte, int direct) +static unsigned kvm_page_table_hashfn(gfn_t gfn) { - struct kvm_mmu_page *sp; - - sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp); - sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE); - if (!direct) - sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, - PAGE_SIZE); - set_page_private(virt_to_page(sp->spt), (unsigned long)sp); - list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); - bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); - sp->multimapped = 0; - sp->parent_pte = parent_pte; - kvm_mod_used_mmu_pages(vcpu->kvm, +1); - return sp; + return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1); } static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *parent_pte) { - struct kvm_pte_chain *pte_chain; - struct hlist_node *node; - int i; - if (!parent_pte) return; - if (!sp->multimapped) { - u64 *old = sp->parent_pte; - if (!old) { - sp->parent_pte = parent_pte; - return; - } - sp->multimapped = 1; - pte_chain = mmu_alloc_pte_chain(vcpu); - INIT_HLIST_HEAD(&sp->parent_ptes); - hlist_add_head(&pte_chain->link, &sp->parent_ptes); - pte_chain->parent_ptes[0] = old; - } - hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) { - if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1]) - continue; - for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) - if (!pte_chain->parent_ptes[i]) { - pte_chain->parent_ptes[i] = parent_pte; - return; - } - } - pte_chain = mmu_alloc_pte_chain(vcpu); - BUG_ON(!pte_chain); - hlist_add_head(&pte_chain->link, &sp->parent_ptes); - pte_chain->parent_ptes[0] = parent_pte; + pte_list_add(vcpu, parent_pte, &sp->parent_ptes); } static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, u64 *parent_pte) { - struct kvm_pte_chain *pte_chain; - struct hlist_node *node; - int i; - - if (!sp->multimapped) { - BUG_ON(sp->parent_pte != parent_pte); - sp->parent_pte = NULL; - return; - } - hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) - for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) { - if (!pte_chain->parent_ptes[i]) - break; - if (pte_chain->parent_ptes[i] != parent_pte) - continue; - while (i + 1 < NR_PTE_CHAIN_ENTRIES - && pte_chain->parent_ptes[i + 1]) { - pte_chain->parent_ptes[i] - = pte_chain->parent_ptes[i + 1]; - ++i; - } - 
pte_chain->parent_ptes[i] = NULL; - if (i == 0) { - hlist_del(&pte_chain->link); - mmu_free_pte_chain(pte_chain); - if (hlist_empty(&sp->parent_ptes)) { - sp->multimapped = 0; - sp->parent_pte = NULL; - } - } - return; - } - BUG(); + pte_list_remove(parent_pte, &sp->parent_ptes); } -static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn) +static void drop_parent_pte(struct kvm_mmu_page *sp, + u64 *parent_pte) { - struct kvm_pte_chain *pte_chain; - struct hlist_node *node; - struct kvm_mmu_page *parent_sp; - int i; - - if (!sp->multimapped && sp->parent_pte) { - parent_sp = page_header(__pa(sp->parent_pte)); - fn(parent_sp, sp->parent_pte); - return; - } - - hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) - for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) { - u64 *spte = pte_chain->parent_ptes[i]; + mmu_page_remove_parent_pte(sp, parent_pte); + mmu_spte_clear_no_track(parent_pte); +} - if (!spte) - break; - parent_sp = page_header(__pa(spte)); - fn(parent_sp, spte); - } +static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, + u64 *parent_pte, int direct) +{ + struct kvm_mmu_page *sp; + sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, + sizeof *sp); + sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE); + if (!direct) + sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, + PAGE_SIZE); + set_page_private(virt_to_page(sp->spt), (unsigned long)sp); + list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); + bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); + sp->parent_ptes = 0; + mmu_page_add_parent_pte(vcpu, sp, parent_pte); + kvm_mod_used_mmu_pages(vcpu->kvm, +1); + return sp; } -static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte); +static void mark_unsync(u64 *spte); static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) { - mmu_parent_walk(sp, mark_unsync); + pte_list_walk(&sp->parent_ptes, mark_unsync); } -static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte) +static void mark_unsync(u64 *spte) { + struct kvm_mmu_page *sp; unsigned int index; + sp = page_header(__pa(spte)); index = spte - sp->spt; if (__test_and_set_bit(index, sp->unsync_child_bitmap)) return; @@ -1185,15 +1351,6 @@ static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte) kvm_mmu_mark_parents_unsync(sp); } -static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp) -{ - int i; - - for (i = 0; i < PT64_ENT_PER_PAGE; ++i) - sp->spt[i] = shadow_trap_nonpresent_pte; -} - static int nonpaging_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { @@ -1475,6 +1632,14 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, } } +static void init_shadow_page_table(struct kvm_mmu_page *sp) +{ + int i; + + for (i = 0; i < PT64_ENT_PER_PAGE; ++i) + sp->spt[i] = 0ull; +} + static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gaddr, @@ -1537,10 +1702,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, account_shadowed(vcpu->kvm, gfn); } - if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte) - vcpu->arch.mmu.prefetch_page(vcpu, sp); - else - nonpaging_prefetch_page(vcpu, sp); + init_shadow_page_table(sp); trace_kvm_mmu_get_page(sp, true); return sp; } @@ -1572,21 +1734,28 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) if (iterator->level < PT_PAGE_TABLE_LEVEL) return false; - if (iterator->level == PT_PAGE_TABLE_LEVEL) - if (is_large_pte(*iterator->sptep)) - return false; - 
iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; return true; } -static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) +static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, + u64 spte) { - iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK; + if (is_last_spte(spte, iterator->level)) { + iterator->level = 0; + return; + } + + iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; --iterator->level; } +static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) +{ + return __shadow_walk_next(iterator, *iterator->sptep); +} + static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) { u64 spte; @@ -1594,13 +1763,13 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; - __set_spte(sptep, spte); + mmu_spte_set(sptep, spte); } static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) { if (is_large_pte(*sptep)) { - drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte); + drop_spte(vcpu->kvm, sptep); kvm_flush_remote_tlbs(vcpu->kvm); } } @@ -1622,38 +1791,39 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (child->role.access == direct_access) return; - mmu_page_remove_parent_pte(child, sptep); - __set_spte(sptep, shadow_trap_nonpresent_pte); + drop_parent_pte(child, sptep); kvm_flush_remote_tlbs(vcpu->kvm); } } +static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, + u64 *spte) +{ + u64 pte; + struct kvm_mmu_page *child; + + pte = *spte; + if (is_shadow_present_pte(pte)) { + if (is_last_spte(pte, sp->role.level)) + drop_spte(kvm, spte); + else { + child = page_header(pte & PT64_BASE_ADDR_MASK); + drop_parent_pte(child, spte); + } + } else if (is_mmio_spte(pte)) + mmu_spte_clear_no_track(spte); + + if (is_large_pte(pte)) + --kvm->stat.lpages; +} + static void kvm_mmu_page_unlink_children(struct kvm *kvm, struct kvm_mmu_page *sp) { unsigned i; - u64 *pt; - u64 ent; - - pt = sp->spt; - - for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { - ent = pt[i]; - - if (is_shadow_present_pte(ent)) { - if (!is_last_spte(ent, sp->role.level)) { - ent &= PT64_BASE_ADDR_MASK; - mmu_page_remove_parent_pte(page_header(ent), - &pt[i]); - } else { - if (is_large_pte(ent)) - --kvm->stat.lpages; - drop_spte(kvm, &pt[i], - shadow_trap_nonpresent_pte); - } - } - pt[i] = shadow_trap_nonpresent_pte; - } + + for (i = 0; i < PT64_ENT_PER_PAGE; ++i) + mmu_page_zap_pte(kvm, sp, sp->spt + i); } static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte) @@ -1674,20 +1844,8 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) { u64 *parent_pte; - while (sp->multimapped || sp->parent_pte) { - if (!sp->multimapped) - parent_pte = sp->parent_pte; - else { - struct kvm_pte_chain *chain; - - chain = container_of(sp->parent_ptes.first, - struct kvm_pte_chain, link); - parent_pte = chain->parent_ptes[0]; - } - BUG_ON(!parent_pte); - kvm_mmu_put_page(sp, parent_pte); - __set_spte(parent_pte, shadow_trap_nonpresent_pte); - } + while ((parent_pte = pte_list_next(&sp->parent_ptes, NULL))) + drop_parent_pte(sp, parent_pte); } static int mmu_zap_unsync_children(struct kvm *kvm, @@ -1734,6 +1892,7 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, /* Count self */ ret++; list_move(&sp->link, invalid_list); + kvm_mod_used_mmu_pages(kvm, -1); } else { list_move(&sp->link, 
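Editor's aside: __shadow_walk_next() above steps the iterator one paging level down, and iterator->index is just the 9-bit slice of the address for the current level (standard x86-64 4-level layout above the 12-bit page offset). A tiny standalone sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define LEVEL_SHIFT(level)	(12 + 9 * ((level) - 1))
#define LEVEL_INDEX(addr, level) (((addr) >> LEVEL_SHIFT(level)) & 0x1ff)

int main(void)
{
	uint64_t addr = 0x7f1234567000ull;
	int level;

	for (level = 4; level >= 1; level--)
		printf("level %d index %llu\n", level,
		       (unsigned long long)LEVEL_INDEX(addr, level));
	return 0;
}

The walk stops early once is_last_spte() reports a leaf, which is what the new __shadow_walk_next() encodes by setting iterator->level to 0.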
&kvm->arch.active_mmu_pages); kvm_reload_remote_mmus(kvm); @@ -1744,6 +1903,30 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, return ret; } +static void kvm_mmu_isolate_pages(struct list_head *invalid_list) +{ + struct kvm_mmu_page *sp; + + list_for_each_entry(sp, invalid_list, link) + kvm_mmu_isolate_page(sp); +} + +static void free_pages_rcu(struct rcu_head *head) +{ + struct kvm_mmu_page *next, *sp; + + sp = container_of(head, struct kvm_mmu_page, rcu); + while (sp) { + if (!list_empty(&sp->link)) + next = list_first_entry(&sp->link, + struct kvm_mmu_page, link); + else + next = NULL; + kvm_mmu_free_page(sp); + sp = next; + } +} + static void kvm_mmu_commit_zap_page(struct kvm *kvm, struct list_head *invalid_list) { @@ -1754,10 +1937,21 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm, kvm_flush_remote_tlbs(kvm); + if (atomic_read(&kvm->arch.reader_counter)) { + kvm_mmu_isolate_pages(invalid_list); + sp = list_first_entry(invalid_list, struct kvm_mmu_page, link); + list_del_init(invalid_list); + + trace_kvm_mmu_delay_free_pages(sp); + call_rcu(&sp->rcu, free_pages_rcu); + return; + } + do { sp = list_first_entry(invalid_list, struct kvm_mmu_page, link); WARN_ON(!sp->role.invalid || sp->root_count); - kvm_mmu_free_page(kvm, sp); + kvm_mmu_isolate_page(sp); + kvm_mmu_free_page(sp); } while (!list_empty(invalid_list)); } @@ -1783,8 +1977,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) page = container_of(kvm->arch.active_mmu_pages.prev, struct kvm_mmu_page, link); kvm_mmu_prepare_zap_page(kvm, page, &invalid_list); - kvm_mmu_commit_zap_page(kvm, &invalid_list); } + kvm_mmu_commit_zap_page(kvm, &invalid_list); goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; } @@ -1833,20 +2027,6 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) __set_bit(slot, sp->slot_bitmap); } -static void mmu_convert_notrap(struct kvm_mmu_page *sp) -{ - int i; - u64 *pt = sp->spt; - - if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte) - return; - - for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { - if (pt[i] == shadow_notrap_nonpresent_pte) - __set_spte(&pt[i], shadow_trap_nonpresent_pte); - } -} - /* * The function is based on mtrr_type_lookup() in * arch/x86/kernel/cpu/mtrr/generic.c @@ -1959,7 +2139,6 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) sp->unsync = 1; kvm_mmu_mark_parents_unsync(sp); - mmu_convert_notrap(sp); } static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) @@ -2002,13 +2181,16 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, int user_fault, - int write_fault, int dirty, int level, + int write_fault, int level, gfn_t gfn, pfn_t pfn, bool speculative, bool can_unsync, bool host_writable) { u64 spte, entry = *sptep; int ret = 0; + if (set_mmio_spte(sptep, gfn, pfn, pte_access)) + return 0; + /* * We don't set the accessed bit, since we sometimes want to see * whether the guest actually used the pte (in order to detect @@ -2017,8 +2199,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, spte = PT_PRESENT_MASK; if (!speculative) spte |= shadow_accessed_mask; - if (!dirty) - pte_access &= ~ACC_WRITE_MASK; + if (pte_access & ACC_EXEC_MASK) spte |= shadow_x_mask; else @@ -2045,15 +2226,24 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (level > PT_PAGE_TABLE_LEVEL && has_wrprotected_page(vcpu->kvm, gfn, level)) { ret = 1; - 
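Editor's aside: kvm_mmu_commit_zap_page() above now checks kvm->arch.reader_counter. If any lockless walker is between walk_shadow_page_lockless_begin() and _end(), the zapped pages are first unhashed via kvm_mmu_isolate_pages() and then handed to call_rcu() instead of being freed on the spot. A minimal model of the gating idea (userspace sketch; the RCU grace-period part is elided and the names are invented):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int reader_counter;

static void walk_begin(void)	{ atomic_fetch_add(&reader_counter, 1); }
static void walk_end(void)	{ atomic_fetch_sub(&reader_counter, 1); }

/* The free path may release zapped shadow pages immediately only when
 * no walker can still be dereferencing them; otherwise it must defer
 * (the patch defers through the page's rcu head). */
static bool can_free_now(void)
{
	return atomic_load(&reader_counter) == 0;
}

Because the pages are removed from the hash before being queued, only walks that were already in flight matter, and those are covered by the RCU grace period.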
drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte); + drop_spte(vcpu->kvm, sptep); goto done; } spte |= PT_WRITABLE_MASK; if (!vcpu->arch.mmu.direct_map - && !(pte_access & ACC_WRITE_MASK)) + && !(pte_access & ACC_WRITE_MASK)) { spte &= ~PT_USER_MASK; + /* + * If we converted a user page to a kernel page, + * so that the kernel can write to it when cr0.wp=0, + * then we should prevent the kernel from executing it + * if SMEP is enabled. + */ + if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)) + spte |= PT64_NX_MASK; + } /* * Optimization: for pte sync, if spte was writable the hash @@ -2078,7 +2268,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, mark_page_dirty(vcpu->kvm, gfn); set_pte: - update_spte(sptep, spte); + mmu_spte_update(sptep, spte); /* * If we overwrite a writable spte with a read-only one we * should flush remote TLBs. Otherwise rmap_write_protect @@ -2093,8 +2283,8 @@ done: static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pt_access, unsigned pte_access, - int user_fault, int write_fault, int dirty, - int *ptwrite, int level, gfn_t gfn, + int user_fault, int write_fault, + int *emulate, int level, gfn_t gfn, pfn_t pfn, bool speculative, bool host_writable) { @@ -2117,26 +2307,28 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 pte = *sptep; child = page_header(pte & PT64_BASE_ADDR_MASK); - mmu_page_remove_parent_pte(child, sptep); - __set_spte(sptep, shadow_trap_nonpresent_pte); + drop_parent_pte(child, sptep); kvm_flush_remote_tlbs(vcpu->kvm); } else if (pfn != spte_to_pfn(*sptep)) { pgprintk("hfn old %llx new %llx\n", spte_to_pfn(*sptep), pfn); - drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte); + drop_spte(vcpu->kvm, sptep); kvm_flush_remote_tlbs(vcpu->kvm); } else was_rmapped = 1; } if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, - dirty, level, gfn, pfn, speculative, true, + level, gfn, pfn, speculative, true, host_writable)) { if (write_fault) - *ptwrite = 1; + *emulate = 1; kvm_mmu_flush_tlb(vcpu); } + if (unlikely(is_mmio_spte(*sptep) && emulate)) + *emulate = 1; + pgprintk("%s: setting spte %llx\n", __func__, *sptep); pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n", is_large_pte(*sptep)? 
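Editor's aside on the set_spte() hunk above: when CR0.WP=0 lets the guest kernel write through a gpte that is not writable, KVM maps the page writable but clears the USER bit so guest user mode still faults; with CR4.SMEP set, that now supervisor-only page must also become non-executable, hence the added PT64_NX_MASK. The same adjustment, pulled out as a standalone helper for clarity (bit positions as on x86; sketch only):

#include <stdint.h>
#include <stdbool.h>

#define PT_USER_MASK	(1ull << 2)
#define PT64_NX_MASK	(1ull << 63)

static uint64_t wp0_user_to_kernel(uint64_t spte, bool smep)
{
	spte &= ~PT_USER_MASK;		/* page becomes supervisor-only */
	if (smep)
		spte |= PT64_NX_MASK;	/* SMEP: supervisor may not execute it */
	return spte;
}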
"2MB" : "4kB", @@ -2145,11 +2337,13 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, if (!was_rmapped && is_large_pte(*sptep)) ++vcpu->kvm->stat.lpages; - page_header_update_slot(vcpu->kvm, sptep, gfn); - if (!was_rmapped) { - rmap_count = rmap_add(vcpu, sptep, gfn); - if (rmap_count > RMAP_RECYCLE_THRESHOLD) - rmap_recycle(vcpu, sptep, gfn); + if (is_shadow_present_pte(*sptep)) { + page_header_update_slot(vcpu->kvm, sptep, gfn); + if (!was_rmapped) { + rmap_count = rmap_add(vcpu, sptep, gfn); + if (rmap_count > RMAP_RECYCLE_THRESHOLD) + rmap_recycle(vcpu, sptep, gfn); + } } kvm_release_pfn_clean(pfn); if (speculative) { @@ -2170,8 +2364,8 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); if (!slot) { - get_page(bad_page); - return page_to_pfn(bad_page); + get_page(fault_page); + return page_to_pfn(fault_page); } hva = gfn_to_hva_memslot(slot, gfn); @@ -2198,7 +2392,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, for (i = 0; i < ret; i++, gfn++, start++) mmu_set_spte(vcpu, start, ACC_ALL, - access, 0, 0, 1, NULL, + access, 0, 0, NULL, sp->role.level, gfn, page_to_pfn(pages[i]), true, true); @@ -2217,7 +2411,7 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, spte = sp->spt + i; for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { - if (*spte != shadow_trap_nonpresent_pte || spte == sptep) { + if (is_shadow_present_pte(*spte) || spte == sptep) { if (!start) continue; if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) @@ -2254,7 +2448,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, { struct kvm_shadow_walk_iterator iterator; struct kvm_mmu_page *sp; - int pt_write = 0; + int emulate = 0; gfn_t pseudo_gfn; for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { @@ -2262,14 +2456,14 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, unsigned pte_access = ACC_ALL; mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access, - 0, write, 1, &pt_write, + 0, write, &emulate, level, gfn, pfn, prefault, map_writable); direct_pte_prefetch(vcpu, iterator.sptep); ++vcpu->stat.pf_fixed; break; } - if (*iterator.sptep == shadow_trap_nonpresent_pte) { + if (!is_shadow_present_pte(*iterator.sptep)) { u64 base_addr = iterator.addr; base_addr &= PT64_LVL_ADDR_MASK(iterator.level); @@ -2283,14 +2477,14 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, return -ENOMEM; } - __set_spte(iterator.sptep, - __pa(sp->spt) - | PT_PRESENT_MASK | PT_WRITABLE_MASK - | shadow_user_mask | shadow_x_mask - | shadow_accessed_mask); + mmu_spte_set(iterator.sptep, + __pa(sp->spt) + | PT_PRESENT_MASK | PT_WRITABLE_MASK + | shadow_user_mask | shadow_x_mask + | shadow_accessed_mask); } } - return pt_write; + return emulate; } static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) @@ -2306,16 +2500,15 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct * send_sig_info(SIGBUS, &info, tsk); } -static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn) +static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) { kvm_release_pfn_clean(pfn); if (is_hwpoison_pfn(pfn)) { - kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current); + kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current); return 0; - } else if (is_fault_pfn(pfn)) - return -EFAULT; + } - return 1; + return -EFAULT; } static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, @@ -2360,6 
+2553,30 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, } } +static bool mmu_invalid_pfn(pfn_t pfn) +{ + return unlikely(is_invalid_pfn(pfn)); +} + +static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, + pfn_t pfn, unsigned access, int *ret_val) +{ + bool ret = true; + + /* The pfn is invalid, report the error! */ + if (unlikely(is_invalid_pfn(pfn))) { + *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); + goto exit; + } + + if (unlikely(is_noslot_pfn(pfn))) + vcpu_cache_mmio_info(vcpu, gva, gfn, access); + + ret = false; +exit: + return ret; +} + static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, gva_t gva, pfn_t *pfn, bool write, bool *writable); @@ -2394,9 +2611,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn, if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) return 0; - /* mmio */ - if (is_error_pfn(pfn)) - return kvm_handle_bad_page(vcpu->kvm, gfn, pfn); + if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) + return r; spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu, mmu_seq)) @@ -2623,6 +2839,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu) if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; + vcpu_clear_mmio_info(vcpu, ~0ul); trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { hpa_t root = vcpu->arch.mmu.root_hpa; @@ -2667,6 +2884,94 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access); } +static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct) +{ + if (direct) + return vcpu_match_mmio_gpa(vcpu, addr); + + return vcpu_match_mmio_gva(vcpu, addr); +} + + +/* + * On direct hosts, the last spte is only allows two states + * for mmio page fault: + * - It is the mmio spte + * - It is zapped or it is being zapped. + * + * This function completely checks the spte when the last spte + * is not the mmio spte. + */ +static bool check_direct_spte_mmio_pf(u64 spte) +{ + return __check_direct_spte_mmio_pf(spte); +} + +static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr) +{ + struct kvm_shadow_walk_iterator iterator; + u64 spte = 0ull; + + walk_shadow_page_lockless_begin(vcpu); + for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) + if (!is_shadow_present_pte(spte)) + break; + walk_shadow_page_lockless_end(vcpu); + + return spte; +} + +/* + * If it is a real mmio page fault, return 1 and emulat the instruction + * directly, return 0 to let CPU fault again on the address, -1 is + * returned if bug is detected. + */ +int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct) +{ + u64 spte; + + if (quickly_check_mmio_pf(vcpu, addr, direct)) + return 1; + + spte = walk_shadow_page_get_mmio_spte(vcpu, addr); + + if (is_mmio_spte(spte)) { + gfn_t gfn = get_mmio_spte_gfn(spte); + unsigned access = get_mmio_spte_access(spte); + + if (direct) + addr = 0; + + trace_handle_mmio_page_fault(addr, gfn, access); + vcpu_cache_mmio_info(vcpu, addr, gfn, access); + return 1; + } + + /* + * It's ok if the gva is remapped by other cpus on shadow guest, + * it's a BUG if the gfn is not a mmio page. + */ + if (direct && !check_direct_spte_mmio_pf(spte)) + return -1; + + /* + * If the page table is zapped by other cpus, let CPU fault again on + * the address. 
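Editor's aside: handle_mmio_page_fault_common() above relies on MMIO sptes that pack the gfn and access bits into an spte tagged with a dedicated mask, so a reserved-bit #PF can be resolved without walking the memslots again. A rough standalone sketch of that encoding; the real mask is installed by kvm_mmu_set_mmio_spte_mask(), and the mask value and access layout below are assumptions made for illustration only:

#include <stdint.h>

#define PAGE_SHIFT	12
#define ACC_ALL		7u			/* exec | write | user */
#define MMIO_SPTE_MASK	(3ull << 52)		/* assumed marker bits */

static uint64_t mark_mmio(uint64_t gfn, unsigned access)
{
	return MMIO_SPTE_MASK | (access & ACC_ALL) | (gfn << PAGE_SHIFT);
}

static int is_mmio(uint64_t spte)
{
	return (spte & MMIO_SPTE_MASK) == MMIO_SPTE_MASK;
}

static uint64_t mmio_gfn(uint64_t spte)
{
	return (spte & ~MMIO_SPTE_MASK) >> PAGE_SHIFT;
}

static unsigned mmio_access(uint64_t spte)
{
	return (unsigned)(spte & ACC_ALL);
}

On a fault with PFERR_RSVD_MASK set, the handler only has to find such an spte (or the cached gva/gpa) to know the access is MMIO and can be emulated directly.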
+ */ + return 0; +} +EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common); + +static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, + u32 error_code, bool direct) +{ + int ret; + + ret = handle_mmio_page_fault_common(vcpu, addr, direct); + WARN_ON(ret < 0); + return ret; +} + static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code, bool prefault) { @@ -2674,6 +2979,10 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int r; pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); + + if (unlikely(error_code & PFERR_RSVD_MASK)) + return handle_mmio_page_fault(vcpu, gva, error_code, true); + r = mmu_topup_memory_caches(vcpu); if (r) return r; @@ -2750,6 +3059,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, ASSERT(vcpu); ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); + if (unlikely(error_code & PFERR_RSVD_MASK)) + return handle_mmio_page_fault(vcpu, gpa, error_code, true); + r = mmu_topup_memory_caches(vcpu); if (r) return r; @@ -2767,9 +3079,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) return 0; - /* mmio */ - if (is_error_pfn(pfn)) - return kvm_handle_bad_page(vcpu->kvm, gfn, pfn); + if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) + return r; + spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu, mmu_seq)) goto out_unlock; @@ -2800,7 +3112,6 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu, context->page_fault = nonpaging_page_fault; context->gva_to_gpa = nonpaging_gva_to_gpa; context->free = nonpaging_free; - context->prefetch_page = nonpaging_prefetch_page; context->sync_page = nonpaging_sync_page; context->invlpg = nonpaging_invlpg; context->update_pte = nonpaging_update_pte; @@ -2848,6 +3159,23 @@ static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0; } +static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access, + int *nr_present) +{ + if (unlikely(is_mmio_spte(*sptep))) { + if (gfn != get_mmio_spte_gfn(*sptep)) { + mmu_spte_clear_no_track(sptep); + return true; + } + + (*nr_present)++; + mark_mmio_spte(sptep, gfn, access); + return true; + } + + return false; +} + #define PTTYPE 64 #include "paging_tmpl.h" #undef PTTYPE @@ -2930,7 +3258,6 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, context->new_cr3 = paging_new_cr3; context->page_fault = paging64_page_fault; context->gva_to_gpa = paging64_gva_to_gpa; - context->prefetch_page = paging64_prefetch_page; context->sync_page = paging64_sync_page; context->invlpg = paging64_invlpg; context->update_pte = paging64_update_pte; @@ -2959,7 +3286,6 @@ static int paging32_init_context(struct kvm_vcpu *vcpu, context->page_fault = paging32_page_fault; context->gva_to_gpa = paging32_gva_to_gpa; context->free = paging_free; - context->prefetch_page = paging32_prefetch_page; context->sync_page = paging32_sync_page; context->invlpg = paging32_invlpg; context->update_pte = paging32_update_pte; @@ -2984,7 +3310,6 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) context->new_cr3 = nonpaging_new_cr3; context->page_fault = tdp_page_fault; context->free = nonpaging_free; - context->prefetch_page = nonpaging_prefetch_page; context->sync_page = nonpaging_sync_page; context->invlpg = nonpaging_invlpg; context->update_pte = nonpaging_update_pte; @@ -3023,6 +3348,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) int 
kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { int r; + bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); ASSERT(vcpu); ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); @@ -3037,6 +3363,8 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context) vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu); vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu); + vcpu->arch.mmu.base_role.smep_andnot_wp + = smep && !is_write_protection(vcpu); return r; } @@ -3141,27 +3469,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_mmu_unload); -static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp, - u64 *spte) -{ - u64 pte; - struct kvm_mmu_page *child; - - pte = *spte; - if (is_shadow_present_pte(pte)) { - if (is_last_spte(pte, sp->role.level)) - drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte); - else { - child = page_header(pte & PT64_BASE_ADDR_MASK); - mmu_page_remove_parent_pte(child, spte); - } - } - __set_spte(spte, shadow_trap_nonpresent_pte); - if (is_large_pte(pte)) - --vcpu->kvm->stat.lpages; -} - static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *new) @@ -3233,6 +3540,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, int level, npte, invlpg_counter, r, flooded = 0; bool remote_flush, local_flush, zap_page; + /* + * If we don't have indirect shadow pages, it means no page is + * write-protected, so we can exit simply. + */ + if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) + return; + zap_page = remote_flush = local_flush = false; offset = offset_in_page(gpa); @@ -3336,7 +3650,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, spte = &sp->spt[page_offset / sizeof(*spte)]; while (npte--) { entry = *spte; - mmu_pte_write_zap_pte(vcpu, sp, spte); + mmu_page_zap_pte(vcpu->kvm, sp, spte); if (gentry && !((sp->role.word ^ vcpu->arch.mmu.base_role.word) & mask.word)) @@ -3380,9 +3694,9 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev, struct kvm_mmu_page, link); kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); - kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); ++vcpu->kvm->stat.mmu_recycled; } + kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); } int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, @@ -3506,15 +3820,15 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) continue; if (is_large_pte(pt[i])) { - drop_spte(kvm, &pt[i], - shadow_trap_nonpresent_pte); + drop_spte(kvm, &pt[i]); --kvm->stat.lpages; continue; } /* avoid RMW */ if (is_writable_pte(pt[i])) - update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK); + mmu_spte_update(&pt[i], + pt[i] & ~PT_WRITABLE_MASK); } } kvm_flush_remote_tlbs(kvm); @@ -3590,25 +3904,18 @@ static struct shrinker mmu_shrinker = { static void mmu_destroy_caches(void) { - if (pte_chain_cache) - kmem_cache_destroy(pte_chain_cache); - if (rmap_desc_cache) - kmem_cache_destroy(rmap_desc_cache); + if (pte_list_desc_cache) + kmem_cache_destroy(pte_list_desc_cache); if (mmu_page_header_cache) kmem_cache_destroy(mmu_page_header_cache); } int kvm_mmu_module_init(void) { - pte_chain_cache = kmem_cache_create("kvm_pte_chain", - sizeof(struct kvm_pte_chain), - 0, 0, NULL); - if (!pte_chain_cache) - goto nomem; - rmap_desc_cache = kmem_cache_create("kvm_rmap_desc", - sizeof(struct kvm_rmap_desc), + pte_list_desc_cache = kmem_cache_create("pte_list_desc", + sizeof(struct pte_list_desc), 0, 0, NULL); - 
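Editor's aside: kvm_mmu_pte_write() above gains an early return when kvm->arch.indirect_shadow_pages is zero: with no shadowed (write-protected) guest page tables there is nothing to track, which is the common case under TDP. The counter is maintained by account_shadowed()/unaccount_shadowed() earlier in the patch. A tiny model of the gate (userspace sketch; the kernel relies on mmu_lock plus ACCESS_ONCE rather than atomics):

#include <stdatomic.h>

static _Atomic long indirect_shadow_pages;

static void account_shadowed(void)	{ atomic_fetch_add(&indirect_shadow_pages, 1); }
static void unaccount_shadowed(void)	{ atomic_fetch_sub(&indirect_shadow_pages, 1); }

/* Emulated guest writes can skip the pte-write tracking entirely
 * while the counter is zero. */
static int need_track_write(void)
{
	return atomic_load(&indirect_shadow_pages) != 0;
}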
if (!rmap_desc_cache) + if (!pte_list_desc_cache) goto nomem; mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", @@ -3775,16 +4082,17 @@ out: int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) { struct kvm_shadow_walk_iterator iterator; + u64 spte; int nr_sptes = 0; - spin_lock(&vcpu->kvm->mmu_lock); - for_each_shadow_entry(vcpu, addr, iterator) { - sptes[iterator.level-1] = *iterator.sptep; + walk_shadow_page_lockless_begin(vcpu); + for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { + sptes[iterator.level-1] = spte; nr_sptes++; - if (!is_shadow_present_pte(*iterator.sptep)) + if (!is_shadow_present_pte(spte)) break; } - spin_unlock(&vcpu->kvm->mmu_lock); + walk_shadow_page_lockless_end(vcpu); return nr_sptes; } diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 7086ca8..e374db9 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -49,6 +49,8 @@ #define PFERR_FETCH_MASK (1U << 4) int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); +void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask); +int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct); int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context); static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) @@ -76,4 +78,27 @@ static inline int is_present_gpte(unsigned long pte) return pte & PT_PRESENT_MASK; } +static inline int is_writable_pte(unsigned long pte) +{ + return pte & PT_WRITABLE_MASK; +} + +static inline bool is_write_protection(struct kvm_vcpu *vcpu) +{ + return kvm_read_cr0_bits(vcpu, X86_CR0_WP); +} + +static inline bool check_write_user_access(struct kvm_vcpu *vcpu, + bool write_fault, bool user_fault, + unsigned long pte) +{ + if (unlikely(write_fault && !is_writable_pte(pte) + && (user_fault || is_write_protection(vcpu)))) + return false; + + if (unlikely(user_fault && !(pte & PT_USER_MASK))) + return false; + + return true; +} #endif diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c index 5f6223b..2460a26 100644 --- a/arch/x86/kvm/mmu_audit.c +++ b/arch/x86/kvm/mmu_audit.c @@ -99,18 +99,6 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level) "level = %d\n", sp, level); return; } - - if (*sptep == shadow_notrap_nonpresent_pte) { - audit_printk(vcpu->kvm, "notrap spte in unsync " - "sp: %p\n", sp); - return; - } - } - - if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) { - audit_printk(vcpu->kvm, "notrap spte in direct sp: %p\n", - sp); - return; } if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level)) diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index b60b4fd..eed67f3 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -196,6 +196,54 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page, TP_ARGS(sp) ); +DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_delay_free_pages, + TP_PROTO(struct kvm_mmu_page *sp), + + TP_ARGS(sp) +); + +TRACE_EVENT( + mark_mmio_spte, + TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access), + TP_ARGS(sptep, gfn, access), + + TP_STRUCT__entry( + __field(void *, sptep) + __field(gfn_t, gfn) + __field(unsigned, access) + ), + + TP_fast_assign( + __entry->sptep = sptep; + __entry->gfn = gfn; + __entry->access = access; + ), + + TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn, + __entry->access) +); + +TRACE_EVENT( + handle_mmio_page_fault, + TP_PROTO(u64 addr, gfn_t gfn, unsigned access), + TP_ARGS(addr, gfn, access), + + TP_STRUCT__entry( + __field(u64, 
addr) + __field(gfn_t, gfn) + __field(unsigned, access) + ), + + TP_fast_assign( + __entry->addr = addr; + __entry->gfn = gfn; + __entry->access = access; + ), + + TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn, + __entry->access) +); + TRACE_EVENT( kvm_mmu_audit, TP_PROTO(struct kvm_vcpu *vcpu, int audit_point), diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 9d03ad4..507e2b8 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -101,11 +101,15 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, return (ret != orig_pte); } -static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte) +static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte, + bool last) { unsigned access; access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK; + if (last && !is_dirty_gpte(gpte)) + access &= ~ACC_WRITE_MASK; + #if PTTYPE == 64 if (vcpu->arch.mmu.nx) access &= ~(gpte >> PT64_NX_SHIFT); @@ -113,6 +117,24 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte) return access; } +static bool FNAME(is_last_gpte)(struct guest_walker *walker, + struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + pt_element_t gpte) +{ + if (walker->level == PT_PAGE_TABLE_LEVEL) + return true; + + if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) && + (PTTYPE == 64 || is_pse(vcpu))) + return true; + + if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) && + (mmu->root_level == PT64_ROOT_LEVEL)) + return true; + + return false; +} + /* * Fetch a guest pte for a guest virtual address */ @@ -125,18 +147,17 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, gfn_t table_gfn; unsigned index, pt_access, uninitialized_var(pte_access); gpa_t pte_gpa; - bool eperm, present, rsvd_fault; - int offset, write_fault, user_fault, fetch_fault; - - write_fault = access & PFERR_WRITE_MASK; - user_fault = access & PFERR_USER_MASK; - fetch_fault = access & PFERR_FETCH_MASK; + bool eperm; + int offset; + const int write_fault = access & PFERR_WRITE_MASK; + const int user_fault = access & PFERR_USER_MASK; + const int fetch_fault = access & PFERR_FETCH_MASK; + u16 errcode = 0; trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault, fetch_fault); -walk: - present = true; - eperm = rsvd_fault = false; +retry_walk: + eperm = false; walker->level = mmu->root_level; pte = mmu->get_cr3(vcpu); @@ -144,10 +165,8 @@ walk: if (walker->level == PT32E_ROOT_LEVEL) { pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3); trace_kvm_mmu_paging_element(pte, walker->level); - if (!is_present_gpte(pte)) { - present = false; + if (!is_present_gpte(pte)) goto error; - } --walker->level; } #endif @@ -170,42 +189,31 @@ walk: real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), PFERR_USER_MASK|PFERR_WRITE_MASK); - if (unlikely(real_gfn == UNMAPPED_GVA)) { - present = false; - break; - } + if (unlikely(real_gfn == UNMAPPED_GVA)) + goto error; real_gfn = gpa_to_gfn(real_gfn); host_addr = gfn_to_hva(vcpu->kvm, real_gfn); - if (unlikely(kvm_is_error_hva(host_addr))) { - present = false; - break; - } + if (unlikely(kvm_is_error_hva(host_addr))) + goto error; ptep_user = (pt_element_t __user *)((void *)host_addr + offset); - if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) { - present = false; - break; - } + if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) + goto error; trace_kvm_mmu_paging_element(pte, walker->level); - if 
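Editor's aside: the new gpte_access(..., last) above folds the guest dirty-bit check into the access computation: on a last-level gpte with the dirty bit clear, write permission is dropped so the first guest write still faults and lets KVM set the dirty bit. Simplified sketch (NX handling omitted; mask values as on x86):

#include <stdint.h>
#include <stdbool.h>

#define PT_WRITABLE_MASK	(1ull << 1)
#define PT_USER_MASK		(1ull << 2)
#define PT_DIRTY_MASK		(1ull << 6)

#define ACC_EXEC_MASK	1u
#define ACC_WRITE_MASK	2u
#define ACC_USER_MASK	4u

static unsigned gpte_access_sketch(uint64_t gpte, bool last)
{
	unsigned access = ACC_EXEC_MASK;

	if (gpte & PT_WRITABLE_MASK)
		access |= ACC_WRITE_MASK;
	if (gpte & PT_USER_MASK)
		access |= ACC_USER_MASK;
	if (last && !(gpte & PT_DIRTY_MASK))
		access &= ~ACC_WRITE_MASK;	/* not dirty yet: trap writes */
	return access;
}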
(unlikely(!is_present_gpte(pte))) { - present = false; - break; - } + if (unlikely(!is_present_gpte(pte))) + goto error; if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte, walker->level))) { - rsvd_fault = true; - break; + errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK; + goto error; } - if (unlikely(write_fault && !is_writable_pte(pte) - && (user_fault || is_write_protection(vcpu)))) - eperm = true; - - if (unlikely(user_fault && !(pte & PT_USER_MASK))) + if (!check_write_user_access(vcpu, write_fault, user_fault, + pte)) eperm = true; #if PTTYPE == 64 @@ -213,39 +221,35 @@ walk: eperm = true; #endif - if (!eperm && !rsvd_fault - && unlikely(!(pte & PT_ACCESSED_MASK))) { + if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) { int ret; trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte)); ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, pte, pte|PT_ACCESSED_MASK); - if (unlikely(ret < 0)) { - present = false; - break; - } else if (ret) - goto walk; + if (unlikely(ret < 0)) + goto error; + else if (ret) + goto retry_walk; mark_page_dirty(vcpu->kvm, table_gfn); pte |= PT_ACCESSED_MASK; } - pte_access = pt_access & FNAME(gpte_access)(vcpu, pte); - walker->ptes[walker->level - 1] = pte; - if ((walker->level == PT_PAGE_TABLE_LEVEL) || - ((walker->level == PT_DIRECTORY_LEVEL) && - is_large_pte(pte) && - (PTTYPE == 64 || is_pse(vcpu))) || - ((walker->level == PT_PDPE_LEVEL) && - is_large_pte(pte) && - mmu->root_level == PT64_ROOT_LEVEL)) { + if (FNAME(is_last_gpte)(walker, vcpu, mmu, pte)) { int lvl = walker->level; gpa_t real_gpa; gfn_t gfn; u32 ac; + /* check if the kernel is fetching from user page */ + if (unlikely(pte_access & PT_USER_MASK) && + kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)) + if (fetch_fault && !user_fault) + eperm = true; + gfn = gpte_to_gfn_lvl(pte, lvl); gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT; @@ -266,12 +270,14 @@ walk: break; } - pt_access = pte_access; + pt_access &= FNAME(gpte_access)(vcpu, pte, false); --walker->level; } - if (unlikely(!present || eperm || rsvd_fault)) + if (unlikely(eperm)) { + errcode |= PFERR_PRESENT_MASK; goto error; + } if (write_fault && unlikely(!is_dirty_gpte(pte))) { int ret; @@ -279,17 +285,17 @@ walk: trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, pte, pte|PT_DIRTY_MASK); - if (unlikely(ret < 0)) { - present = false; + if (unlikely(ret < 0)) goto error; - } else if (ret) - goto walk; + else if (ret) + goto retry_walk; mark_page_dirty(vcpu->kvm, table_gfn); pte |= PT_DIRTY_MASK; walker->ptes[walker->level - 1] = pte; } + pte_access = pt_access & FNAME(gpte_access)(vcpu, pte, true); walker->pt_access = pt_access; walker->pte_access = pte_access; pgprintk("%s: pte %llx pte_access %x pt_access %x\n", @@ -297,19 +303,14 @@ walk: return 1; error: + errcode |= write_fault | user_fault; + if (fetch_fault && (mmu->nx || + kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))) + errcode |= PFERR_FETCH_MASK; + walker->fault.vector = PF_VECTOR; walker->fault.error_code_valid = true; - walker->fault.error_code = 0; - if (present) - walker->fault.error_code |= PFERR_PRESENT_MASK; - - walker->fault.error_code |= write_fault | user_fault; - - if (fetch_fault && mmu->nx) - walker->fault.error_code |= PFERR_FETCH_MASK; - if (rsvd_fault) - walker->fault.error_code |= PFERR_RSVD_MASK; - + walker->fault.error_code = errcode; walker->fault.address = addr; walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; @@ -336,16 +337,11 @@ static bool FNAME(prefetch_invalid_gpte)(struct 
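Editor's aside: walk_addr_generic() above now accumulates the #PF error code in errcode as it walks, instead of carrying separate present/eperm/rsvd_fault booleans to the error path. The resulting bit layout, condensed into one helper (PFERR_* values as in mmu.h; the fetch condition mirrors the nx-or-SMEP test in the hunk above):

#include <stdint.h>
#include <stdbool.h>

#define PFERR_PRESENT_MASK	(1u << 0)
#define PFERR_WRITE_MASK	(1u << 1)
#define PFERR_USER_MASK		(1u << 2)
#define PFERR_RSVD_MASK		(1u << 3)
#define PFERR_FETCH_MASK	(1u << 4)

static uint16_t walker_errcode(bool write, bool user, bool fetch,
			       bool rsvd, bool eperm, bool nx_or_smep)
{
	uint16_t errcode = 0;

	if (rsvd)
		errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
	if (eperm)
		errcode |= PFERR_PRESENT_MASK;	/* permission fault on a present pte */
	if (write)
		errcode |= PFERR_WRITE_MASK;
	if (user)
		errcode |= PFERR_USER_MASK;
	if (fetch && nx_or_smep)
		errcode |= PFERR_FETCH_MASK;
	return errcode;
}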
kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, pt_element_t gpte) { - u64 nonpresent = shadow_trap_nonpresent_pte; - if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) goto no_present; - if (!is_present_gpte(gpte)) { - if (!sp->unsync) - nonpresent = shadow_notrap_nonpresent_pte; + if (!is_present_gpte(gpte)) goto no_present; - } if (!(gpte & PT_ACCESSED_MASK)) goto no_present; @@ -353,7 +349,7 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, return false; no_present: - drop_spte(vcpu->kvm, spte, nonpresent); + drop_spte(vcpu->kvm, spte); return true; } @@ -369,9 +365,9 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, return; pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); - pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); + pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true); pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte)); - if (is_error_pfn(pfn)) { + if (mmu_invalid_pfn(pfn)) { kvm_release_pfn_clean(pfn); return; } @@ -381,7 +377,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1). */ mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0, - is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL, + NULL, PT_PAGE_TABLE_LEVEL, gpte_to_gfn(gpte), pfn, true, true); } @@ -432,12 +428,11 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, unsigned pte_access; gfn_t gfn; pfn_t pfn; - bool dirty; if (spte == sptep) continue; - if (*spte != shadow_trap_nonpresent_pte) + if (is_shadow_present_pte(*spte)) continue; gpte = gptep[i]; @@ -445,18 +440,18 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) continue; - pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); + pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, + true); gfn = gpte_to_gfn(gpte); - dirty = is_dirty_gpte(gpte); pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, - (pte_access & ACC_WRITE_MASK) && dirty); - if (is_error_pfn(pfn)) { + pte_access & ACC_WRITE_MASK); + if (mmu_invalid_pfn(pfn)) { kvm_release_pfn_clean(pfn); break; } mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0, - dirty, NULL, PT_PAGE_TABLE_LEVEL, gfn, + NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true); } } @@ -467,12 +462,11 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, struct guest_walker *gw, int user_fault, int write_fault, int hlevel, - int *ptwrite, pfn_t pfn, bool map_writable, + int *emulate, pfn_t pfn, bool map_writable, bool prefault) { unsigned access = gw->pt_access; struct kvm_mmu_page *sp = NULL; - bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]); int top_level; unsigned direct_access; struct kvm_shadow_walk_iterator it; @@ -480,9 +474,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, if (!is_present_gpte(gw->ptes[gw->level - 1])) return NULL; - direct_access = gw->pt_access & gw->pte_access; - if (!dirty) - direct_access &= ~ACC_WRITE_MASK; + direct_access = gw->pte_access; top_level = vcpu->arch.mmu.root_level; if (top_level == PT32E_ROOT_LEVEL) @@ -540,8 +532,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, link_shadow_page(it.sptep, sp); } - mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access, - user_fault, write_fault, dirty, ptwrite, it.level, + 
mmu_set_spte(vcpu, it.sptep, access, gw->pte_access, + user_fault, write_fault, emulate, it.level, gw->gfn, pfn, prefault, map_writable); FNAME(pte_prefetch)(vcpu, gw, it.sptep); @@ -575,7 +567,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, int user_fault = error_code & PFERR_USER_MASK; struct guest_walker walker; u64 *sptep; - int write_pt = 0; + int emulate = 0; int r; pfn_t pfn; int level = PT_PAGE_TABLE_LEVEL; @@ -585,6 +577,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); + if (unlikely(error_code & PFERR_RSVD_MASK)) + return handle_mmio_page_fault(vcpu, addr, error_code, + mmu_is_nested(vcpu)); + r = mmu_topup_memory_caches(vcpu); if (r) return r; @@ -623,9 +619,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, &map_writable)) return 0; - /* mmio */ - if (is_error_pfn(pfn)) - return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn); + if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr, + walker.gfn, pfn, walker.pte_access, &r)) + return r; spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu, mmu_seq)) @@ -636,19 +632,19 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, if (!force_pt_level) transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault, - level, &write_pt, pfn, map_writable, prefault); + level, &emulate, pfn, map_writable, prefault); (void)sptep; - pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__, - sptep, *sptep, write_pt); + pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__, + sptep, *sptep, emulate); - if (!write_pt) + if (!emulate) vcpu->arch.last_pt_write_count = 0; /* reset fork detector */ ++vcpu->stat.pf_fixed; trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); spin_unlock(&vcpu->kvm->mmu_lock); - return write_pt; + return emulate; out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); @@ -665,6 +661,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) u64 *sptep; int need_flush = 0; + vcpu_clear_mmio_info(vcpu, gva); + spin_lock(&vcpu->kvm->mmu_lock); for_each_shadow_entry(vcpu, gva, iterator) { @@ -688,11 +686,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) if (is_shadow_present_pte(*sptep)) { if (is_large_pte(*sptep)) --vcpu->kvm->stat.lpages; - drop_spte(vcpu->kvm, sptep, - shadow_trap_nonpresent_pte); + drop_spte(vcpu->kvm, sptep); need_flush = 1; - } else - __set_spte(sptep, shadow_trap_nonpresent_pte); + } else if (is_mmio_spte(*sptep)) + mmu_spte_clear_no_track(sptep); + break; } @@ -752,36 +750,6 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, return gpa; } -static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp) -{ - int i, j, offset, r; - pt_element_t pt[256 / sizeof(pt_element_t)]; - gpa_t pte_gpa; - - if (sp->role.direct - || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) { - nonpaging_prefetch_page(vcpu, sp); - return; - } - - pte_gpa = gfn_to_gpa(sp->gfn); - if (PTTYPE == 32) { - offset = sp->role.quadrant << PT64_LEVEL_BITS; - pte_gpa += offset * sizeof(pt_element_t); - } - - for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) { - r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt); - pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t); - for (j = 0; j < ARRAY_SIZE(pt); ++j) - if (r || is_present_gpte(pt[j])) - sp->spt[i+j] = shadow_trap_nonpresent_pte; 
- else - sp->spt[i+j] = shadow_notrap_nonpresent_pte; - } -} - /* * Using the cached information from sp->gfns is safe because: * - The spte has a reference to the struct page, so the pfn for a given gfn @@ -817,7 +785,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) gpa_t pte_gpa; gfn_t gfn; - if (!is_shadow_present_pte(sp->spt[i])) + if (!sp->spt[i]) continue; pte_gpa = first_pte_gpa + i * sizeof(pt_element_t); @@ -826,26 +794,30 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) sizeof(pt_element_t))) return -EINVAL; - gfn = gpte_to_gfn(gpte); - if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { vcpu->kvm->tlbs_dirty++; continue; } + gfn = gpte_to_gfn(gpte); + pte_access = sp->role.access; + pte_access &= FNAME(gpte_access)(vcpu, gpte, true); + + if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present)) + continue; + if (gfn != sp->gfns[i]) { - drop_spte(vcpu->kvm, &sp->spt[i], - shadow_trap_nonpresent_pte); + drop_spte(vcpu->kvm, &sp->spt[i]); vcpu->kvm->tlbs_dirty++; continue; } nr_present++; - pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); + host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE; set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, - is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, + PT_PAGE_TABLE_LEVEL, gfn, spte_to_pfn(sp->spt[i]), true, false, host_writable); } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 506e4fe..475d1c9 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1496,11 +1496,14 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) update_cr0_intercept(svm); } -static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE; unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4; + if (cr4 & X86_CR4_VMXE) + return 1; + if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) svm_flush_tlb(vcpu); @@ -1510,6 +1513,7 @@ static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) cr4 |= host_cr4_mce; to_svm(vcpu)->vmcb->save.cr4 = cr4; mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); + return 0; } static void svm_set_segment(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index db93276..3ff898c 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -675,12 +675,12 @@ TRACE_EVENT(kvm_emulate_insn, ), TP_fast_assign( - __entry->rip = vcpu->arch.emulate_ctxt.decode.fetch.start; + __entry->rip = vcpu->arch.emulate_ctxt.fetch.start; __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS); - __entry->len = vcpu->arch.emulate_ctxt.decode.eip - - vcpu->arch.emulate_ctxt.decode.fetch.start; + __entry->len = vcpu->arch.emulate_ctxt._eip + - vcpu->arch.emulate_ctxt.fetch.start; memcpy(__entry->insn, - vcpu->arch.emulate_ctxt.decode.fetch.data, + vcpu->arch.emulate_ctxt.fetch.data, 15); __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode); __entry->failed = failed; @@ -698,6 +698,29 @@ TRACE_EVENT(kvm_emulate_insn, #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0) #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1) +TRACE_EVENT( + vcpu_match_mmio, + TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match), + TP_ARGS(gva, gpa, write, gpa_match), + + TP_STRUCT__entry( + __field(gva_t, gva) + __field(gpa_t, gpa) + __field(bool, write) + __field(bool, gpa_match) + ), + + TP_fast_assign( + __entry->gva = gva; + __entry->gpa = gpa; + 
__entry->write = write; + __entry->gpa_match = gpa_match + ), + + TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa, + __entry->write ? "Write" : "Read", + __entry->gpa_match ? "GPA" : "GVA") +); #endif /* _TRACE_KVM_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d48ec60..e65a158 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -43,13 +43,12 @@ #include "trace.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) +#define __ex_clear(x, reg) \ + ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); -static int __read_mostly bypass_guest_pf = 1; -module_param(bypass_guest_pf, bool, S_IRUGO); - static int __read_mostly enable_vpid = 1; module_param_named(vpid, enable_vpid, bool, 0444); @@ -72,6 +71,14 @@ module_param(vmm_exclusive, bool, S_IRUGO); static int __read_mostly yield_on_hlt = 1; module_param(yield_on_hlt, bool, S_IRUGO); +/* + * If nested=1, nested virtualization is supported, i.e., guests may use + * VMX and be a hypervisor for its own guests. If nested=0, guests may not + * use VMX instructions. + */ +static int __read_mostly nested = 0; +module_param(nested, bool, S_IRUGO); + #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \ (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD) #define KVM_GUEST_CR0_MASK \ @@ -109,6 +116,7 @@ static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; module_param(ple_window, int, S_IRUGO); #define NR_AUTOLOAD_MSRS 1 +#define VMCS02_POOL_SIZE 1 struct vmcs { u32 revision_id; @@ -116,17 +124,237 @@ struct vmcs { char data[0]; }; +/* + * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also + * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs + * loaded on this CPU (so we can clear them if the CPU goes down). + */ +struct loaded_vmcs { + struct vmcs *vmcs; + int cpu; + int launched; + struct list_head loaded_vmcss_on_cpu_link; +}; + struct shared_msr_entry { unsigned index; u64 data; u64 mask; }; +/* + * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a + * single nested guest (L2), hence the name vmcs12. Any VMX implementation has + * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is + * stored in guest memory specified by VMPTRLD, but is opaque to the guest, + * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. + * More than one of these structures may exist, if L1 runs multiple L2 guests. + * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the + * underlying hardware which will be used to run L2. + * This structure is packed to ensure that its layout is identical across + * machines (necessary for live migration). + * If there are changes in this struct, VMCS12_REVISION must be changed. + */ +typedef u64 natural_width; +struct __packed vmcs12 { + /* According to the Intel spec, a VMCS region must start with the + * following two fields. Then follow implementation-specific data. 
+ */ + u32 revision_id; + u32 abort; + + u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */ + u32 padding[7]; /* room for future expansion */ + + u64 io_bitmap_a; + u64 io_bitmap_b; + u64 msr_bitmap; + u64 vm_exit_msr_store_addr; + u64 vm_exit_msr_load_addr; + u64 vm_entry_msr_load_addr; + u64 tsc_offset; + u64 virtual_apic_page_addr; + u64 apic_access_addr; + u64 ept_pointer; + u64 guest_physical_address; + u64 vmcs_link_pointer; + u64 guest_ia32_debugctl; + u64 guest_ia32_pat; + u64 guest_ia32_efer; + u64 guest_ia32_perf_global_ctrl; + u64 guest_pdptr0; + u64 guest_pdptr1; + u64 guest_pdptr2; + u64 guest_pdptr3; + u64 host_ia32_pat; + u64 host_ia32_efer; + u64 host_ia32_perf_global_ctrl; + u64 padding64[8]; /* room for future expansion */ + /* + * To allow migration of L1 (complete with its L2 guests) between + * machines of different natural widths (32 or 64 bit), we cannot have + * unsigned long fields with no explict size. We use u64 (aliased + * natural_width) instead. Luckily, x86 is little-endian. + */ + natural_width cr0_guest_host_mask; + natural_width cr4_guest_host_mask; + natural_width cr0_read_shadow; + natural_width cr4_read_shadow; + natural_width cr3_target_value0; + natural_width cr3_target_value1; + natural_width cr3_target_value2; + natural_width cr3_target_value3; + natural_width exit_qualification; + natural_width guest_linear_address; + natural_width guest_cr0; + natural_width guest_cr3; + natural_width guest_cr4; + natural_width guest_es_base; + natural_width guest_cs_base; + natural_width guest_ss_base; + natural_width guest_ds_base; + natural_width guest_fs_base; + natural_width guest_gs_base; + natural_width guest_ldtr_base; + natural_width guest_tr_base; + natural_width guest_gdtr_base; + natural_width guest_idtr_base; + natural_width guest_dr7; + natural_width guest_rsp; + natural_width guest_rip; + natural_width guest_rflags; + natural_width guest_pending_dbg_exceptions; + natural_width guest_sysenter_esp; + natural_width guest_sysenter_eip; + natural_width host_cr0; + natural_width host_cr3; + natural_width host_cr4; + natural_width host_fs_base; + natural_width host_gs_base; + natural_width host_tr_base; + natural_width host_gdtr_base; + natural_width host_idtr_base; + natural_width host_ia32_sysenter_esp; + natural_width host_ia32_sysenter_eip; + natural_width host_rsp; + natural_width host_rip; + natural_width paddingl[8]; /* room for future expansion */ + u32 pin_based_vm_exec_control; + u32 cpu_based_vm_exec_control; + u32 exception_bitmap; + u32 page_fault_error_code_mask; + u32 page_fault_error_code_match; + u32 cr3_target_count; + u32 vm_exit_controls; + u32 vm_exit_msr_store_count; + u32 vm_exit_msr_load_count; + u32 vm_entry_controls; + u32 vm_entry_msr_load_count; + u32 vm_entry_intr_info_field; + u32 vm_entry_exception_error_code; + u32 vm_entry_instruction_len; + u32 tpr_threshold; + u32 secondary_vm_exec_control; + u32 vm_instruction_error; + u32 vm_exit_reason; + u32 vm_exit_intr_info; + u32 vm_exit_intr_error_code; + u32 idt_vectoring_info_field; + u32 idt_vectoring_error_code; + u32 vm_exit_instruction_len; + u32 vmx_instruction_info; + u32 guest_es_limit; + u32 guest_cs_limit; + u32 guest_ss_limit; + u32 guest_ds_limit; + u32 guest_fs_limit; + u32 guest_gs_limit; + u32 guest_ldtr_limit; + u32 guest_tr_limit; + u32 guest_gdtr_limit; + u32 guest_idtr_limit; + u32 guest_es_ar_bytes; + u32 guest_cs_ar_bytes; + u32 guest_ss_ar_bytes; + u32 guest_ds_ar_bytes; + u32 guest_fs_ar_bytes; + u32 guest_gs_ar_bytes; + u32 guest_ldtr_ar_bytes; + 
u32 guest_tr_ar_bytes; + u32 guest_interruptibility_info; + u32 guest_activity_state; + u32 guest_sysenter_cs; + u32 host_ia32_sysenter_cs; + u32 padding32[8]; /* room for future expansion */ + u16 virtual_processor_id; + u16 guest_es_selector; + u16 guest_cs_selector; + u16 guest_ss_selector; + u16 guest_ds_selector; + u16 guest_fs_selector; + u16 guest_gs_selector; + u16 guest_ldtr_selector; + u16 guest_tr_selector; + u16 host_es_selector; + u16 host_cs_selector; + u16 host_ss_selector; + u16 host_ds_selector; + u16 host_fs_selector; + u16 host_gs_selector; + u16 host_tr_selector; +}; + +/* + * VMCS12_REVISION is an arbitrary id that should be changed if the content or + * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and + * VMPTRLD verifies that the VMCS region that L1 is loading contains this id. + */ +#define VMCS12_REVISION 0x11e57ed0 + +/* + * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region + * and any VMCS region. Although only sizeof(struct vmcs12) are used by the + * current implementation, 4K are reserved to avoid future complications. + */ +#define VMCS12_SIZE 0x1000 + +/* Used to remember the last vmcs02 used for some recently used vmcs12s */ +struct vmcs02_list { + struct list_head list; + gpa_t vmptr; + struct loaded_vmcs vmcs02; +}; + +/* + * The nested_vmx structure is part of vcpu_vmx, and holds information we need + * for correct emulation of VMX (i.e., nested VMX) on this vcpu. + */ +struct nested_vmx { + /* Has the level1 guest done vmxon? */ + bool vmxon; + + /* The guest-physical address of the current VMCS L1 keeps for L2 */ + gpa_t current_vmptr; + /* The host-usable pointer to the above */ + struct page *current_vmcs12_page; + struct vmcs12 *current_vmcs12; + + /* vmcs02_list cache of VMCSs recently used to run L2 guests */ + struct list_head vmcs02_pool; + int vmcs02_num; + u64 vmcs01_tsc_offset; + /* L2 must run next, and mustn't decide to exit to L1. */ + bool nested_run_pending; + /* + * Guest pages referred to in vmcs02 with host-physical pointers, so + * we must keep them pinned while L2 runs. + */ + struct page *apic_access_page; +}; + struct vcpu_vmx { struct kvm_vcpu vcpu; - struct list_head local_vcpus_link; unsigned long host_rsp; - int launched; u8 fail; u8 cpl; bool nmi_known_unmasked; @@ -140,7 +368,14 @@ struct vcpu_vmx { u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif - struct vmcs *vmcs; + /* + * loaded_vmcs points to the VMCS currently used in this vcpu. For a + * non-nested (L1) guest, it always points to vmcs01. For a nested + * guest (L2), it points to a different VMCS. 
+ */ + struct loaded_vmcs vmcs01; + struct loaded_vmcs *loaded_vmcs; + bool __launched; /* temporary, used in vmx_vcpu_run */ struct msr_autoload { unsigned nr; struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS]; @@ -176,6 +411,9 @@ struct vcpu_vmx { u32 exit_reason; bool rdtscp_enabled; + + /* Support for a guest hypervisor (nested VMX) */ + struct nested_vmx nested; }; enum segment_cache_field { @@ -192,6 +430,174 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) return container_of(vcpu, struct vcpu_vmx, vcpu); } +#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x) +#define FIELD(number, name) [number] = VMCS12_OFFSET(name) +#define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \ + [number##_HIGH] = VMCS12_OFFSET(name)+4 + +static unsigned short vmcs_field_to_offset_table[] = { + FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id), + FIELD(GUEST_ES_SELECTOR, guest_es_selector), + FIELD(GUEST_CS_SELECTOR, guest_cs_selector), + FIELD(GUEST_SS_SELECTOR, guest_ss_selector), + FIELD(GUEST_DS_SELECTOR, guest_ds_selector), + FIELD(GUEST_FS_SELECTOR, guest_fs_selector), + FIELD(GUEST_GS_SELECTOR, guest_gs_selector), + FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector), + FIELD(GUEST_TR_SELECTOR, guest_tr_selector), + FIELD(HOST_ES_SELECTOR, host_es_selector), + FIELD(HOST_CS_SELECTOR, host_cs_selector), + FIELD(HOST_SS_SELECTOR, host_ss_selector), + FIELD(HOST_DS_SELECTOR, host_ds_selector), + FIELD(HOST_FS_SELECTOR, host_fs_selector), + FIELD(HOST_GS_SELECTOR, host_gs_selector), + FIELD(HOST_TR_SELECTOR, host_tr_selector), + FIELD64(IO_BITMAP_A, io_bitmap_a), + FIELD64(IO_BITMAP_B, io_bitmap_b), + FIELD64(MSR_BITMAP, msr_bitmap), + FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr), + FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr), + FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr), + FIELD64(TSC_OFFSET, tsc_offset), + FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr), + FIELD64(APIC_ACCESS_ADDR, apic_access_addr), + FIELD64(EPT_POINTER, ept_pointer), + FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address), + FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer), + FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl), + FIELD64(GUEST_IA32_PAT, guest_ia32_pat), + FIELD64(GUEST_IA32_EFER, guest_ia32_efer), + FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl), + FIELD64(GUEST_PDPTR0, guest_pdptr0), + FIELD64(GUEST_PDPTR1, guest_pdptr1), + FIELD64(GUEST_PDPTR2, guest_pdptr2), + FIELD64(GUEST_PDPTR3, guest_pdptr3), + FIELD64(HOST_IA32_PAT, host_ia32_pat), + FIELD64(HOST_IA32_EFER, host_ia32_efer), + FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl), + FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control), + FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control), + FIELD(EXCEPTION_BITMAP, exception_bitmap), + FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask), + FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match), + FIELD(CR3_TARGET_COUNT, cr3_target_count), + FIELD(VM_EXIT_CONTROLS, vm_exit_controls), + FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count), + FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count), + FIELD(VM_ENTRY_CONTROLS, vm_entry_controls), + FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count), + FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field), + FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code), + FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len), + FIELD(TPR_THRESHOLD, tpr_threshold), + FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control), + 
FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error), + FIELD(VM_EXIT_REASON, vm_exit_reason), + FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info), + FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code), + FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field), + FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code), + FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len), + FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info), + FIELD(GUEST_ES_LIMIT, guest_es_limit), + FIELD(GUEST_CS_LIMIT, guest_cs_limit), + FIELD(GUEST_SS_LIMIT, guest_ss_limit), + FIELD(GUEST_DS_LIMIT, guest_ds_limit), + FIELD(GUEST_FS_LIMIT, guest_fs_limit), + FIELD(GUEST_GS_LIMIT, guest_gs_limit), + FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit), + FIELD(GUEST_TR_LIMIT, guest_tr_limit), + FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit), + FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit), + FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes), + FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes), + FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes), + FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes), + FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes), + FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes), + FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes), + FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes), + FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info), + FIELD(GUEST_ACTIVITY_STATE, guest_activity_state), + FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs), + FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs), + FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask), + FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask), + FIELD(CR0_READ_SHADOW, cr0_read_shadow), + FIELD(CR4_READ_SHADOW, cr4_read_shadow), + FIELD(CR3_TARGET_VALUE0, cr3_target_value0), + FIELD(CR3_TARGET_VALUE1, cr3_target_value1), + FIELD(CR3_TARGET_VALUE2, cr3_target_value2), + FIELD(CR3_TARGET_VALUE3, cr3_target_value3), + FIELD(EXIT_QUALIFICATION, exit_qualification), + FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address), + FIELD(GUEST_CR0, guest_cr0), + FIELD(GUEST_CR3, guest_cr3), + FIELD(GUEST_CR4, guest_cr4), + FIELD(GUEST_ES_BASE, guest_es_base), + FIELD(GUEST_CS_BASE, guest_cs_base), + FIELD(GUEST_SS_BASE, guest_ss_base), + FIELD(GUEST_DS_BASE, guest_ds_base), + FIELD(GUEST_FS_BASE, guest_fs_base), + FIELD(GUEST_GS_BASE, guest_gs_base), + FIELD(GUEST_LDTR_BASE, guest_ldtr_base), + FIELD(GUEST_TR_BASE, guest_tr_base), + FIELD(GUEST_GDTR_BASE, guest_gdtr_base), + FIELD(GUEST_IDTR_BASE, guest_idtr_base), + FIELD(GUEST_DR7, guest_dr7), + FIELD(GUEST_RSP, guest_rsp), + FIELD(GUEST_RIP, guest_rip), + FIELD(GUEST_RFLAGS, guest_rflags), + FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions), + FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp), + FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip), + FIELD(HOST_CR0, host_cr0), + FIELD(HOST_CR3, host_cr3), + FIELD(HOST_CR4, host_cr4), + FIELD(HOST_FS_BASE, host_fs_base), + FIELD(HOST_GS_BASE, host_gs_base), + FIELD(HOST_TR_BASE, host_tr_base), + FIELD(HOST_GDTR_BASE, host_gdtr_base), + FIELD(HOST_IDTR_BASE, host_idtr_base), + FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp), + FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip), + FIELD(HOST_RSP, host_rsp), + FIELD(HOST_RIP, host_rip), +}; +static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table); + +static inline short vmcs_field_to_offset(unsigned long field) +{ + if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0) + return -1; + return vmcs_field_to_offset_table[field]; +} + +static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) +{ + return 
to_vmx(vcpu)->nested.current_vmcs12; +} + +static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr) +{ + struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT); + if (is_error_page(page)) { + kvm_release_page_clean(page); + return NULL; + } + return page; +} + +static void nested_release_page(struct page *page) +{ + kvm_release_page_dirty(page); +} + +static void nested_release_page_clean(struct page *page) +{ + kvm_release_page_clean(page); +} + static u64 construct_eptp(unsigned long root_hpa); static void kvm_cpu_vmxon(u64 addr); static void kvm_cpu_vmxoff(void); @@ -200,7 +606,11 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); -static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu); +/* + * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed + * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it. + */ +static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu); static DEFINE_PER_CPU(struct desc_ptr, host_gdt); static unsigned long *vmx_io_bitmap_a; @@ -442,6 +852,35 @@ static inline bool report_flexpriority(void) return flexpriority_enabled; } +static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) +{ + return vmcs12->cpu_based_vm_exec_control & bit; +} + +static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) +{ + return (vmcs12->cpu_based_vm_exec_control & + CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && + (vmcs12->secondary_vm_exec_control & bit); +} + +static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12, + struct kvm_vcpu *vcpu) +{ + return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; +} + +static inline bool is_exception(u32 intr_info) +{ + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) + == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK); +} + +static void nested_vmx_vmexit(struct kvm_vcpu *vcpu); +static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12, + u32 reason, unsigned long qualification); + static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) { int i; @@ -501,6 +940,13 @@ static void vmcs_clear(struct vmcs *vmcs) vmcs, phys_addr); } +static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) +{ + vmcs_clear(loaded_vmcs->vmcs); + loaded_vmcs->cpu = -1; + loaded_vmcs->launched = 0; +} + static void vmcs_load(struct vmcs *vmcs) { u64 phys_addr = __pa(vmcs); @@ -510,29 +956,28 @@ static void vmcs_load(struct vmcs *vmcs) : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc", "memory"); if (error) - printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n", + printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n", vmcs, phys_addr); } -static void __vcpu_clear(void *arg) +static void __loaded_vmcs_clear(void *arg) { - struct vcpu_vmx *vmx = arg; + struct loaded_vmcs *loaded_vmcs = arg; int cpu = raw_smp_processor_id(); - if (vmx->vcpu.cpu == cpu) - vmcs_clear(vmx->vmcs); - if (per_cpu(current_vmcs, cpu) == vmx->vmcs) + if (loaded_vmcs->cpu != cpu) + return; /* vcpu migration can race with cpu offline */ + if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) per_cpu(current_vmcs, cpu) = NULL; - list_del(&vmx->local_vcpus_link); - vmx->vcpu.cpu = -1; - vmx->launched = 0; + list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); + loaded_vmcs_init(loaded_vmcs); } -static void vcpu_clear(struct vcpu_vmx *vmx) +static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) { - if (vmx->vcpu.cpu == -1) 
- return; - smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1); + if (loaded_vmcs->cpu != -1) + smp_call_function_single( + loaded_vmcs->cpu, __loaded_vmcs_clear, loaded_vmcs, 1); } static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx) @@ -585,26 +1030,26 @@ static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa) } } -static unsigned long vmcs_readl(unsigned long field) +static __always_inline unsigned long vmcs_readl(unsigned long field) { - unsigned long value = 0; + unsigned long value; - asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX) - : "+a"(value) : "d"(field) : "cc"); + asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0") + : "=a"(value) : "d"(field) : "cc"); return value; } -static u16 vmcs_read16(unsigned long field) +static __always_inline u16 vmcs_read16(unsigned long field) { return vmcs_readl(field); } -static u32 vmcs_read32(unsigned long field) +static __always_inline u32 vmcs_read32(unsigned long field) { return vmcs_readl(field); } -static u64 vmcs_read64(unsigned long field) +static __always_inline u64 vmcs_read64(unsigned long field) { #ifdef CONFIG_X86_64 return vmcs_readl(field); @@ -731,6 +1176,15 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ if (vcpu->fpu_active) eb &= ~(1u << NM_VECTOR); + + /* When we are running a nested L2 guest and L1 specified for it a + * certain exception bitmap, we must trap the same exceptions and pass + * them to L1. When running L2, we will only handle the exceptions + * specified above if L1 did not want them. + */ + if (is_guest_mode(vcpu)) + eb |= get_vmcs12(vcpu)->exception_bitmap; + vmcs_write32(EXCEPTION_BITMAP, eb); } @@ -971,22 +1425,22 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (!vmm_exclusive) kvm_cpu_vmxon(phys_addr); - else if (vcpu->cpu != cpu) - vcpu_clear(vmx); + else if (vmx->loaded_vmcs->cpu != cpu) + loaded_vmcs_clear(vmx->loaded_vmcs); - if (per_cpu(current_vmcs, cpu) != vmx->vmcs) { - per_cpu(current_vmcs, cpu) = vmx->vmcs; - vmcs_load(vmx->vmcs); + if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { + per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; + vmcs_load(vmx->loaded_vmcs->vmcs); } - if (vcpu->cpu != cpu) { + if (vmx->loaded_vmcs->cpu != cpu) { struct desc_ptr *gdt = &__get_cpu_var(host_gdt); unsigned long sysenter_esp; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); local_irq_disable(); - list_add(&vmx->local_vcpus_link, - &per_cpu(vcpus_on_cpu, cpu)); + list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, + &per_cpu(loaded_vmcss_on_cpu, cpu)); local_irq_enable(); /* @@ -998,6 +1452,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ + vmx->loaded_vmcs->cpu = cpu; } } @@ -1005,7 +1460,8 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu) { __vmx_load_host_state(to_vmx(vcpu)); if (!vmm_exclusive) { - __vcpu_clear(to_vmx(vcpu)); + __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs); + vcpu->cpu = -1; kvm_cpu_vmxoff(); } } @@ -1023,19 +1479,55 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_CR0, cr0); update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; + if (is_guest_mode(vcpu)) + vcpu->arch.cr0_guest_owned_bits &= + ~get_vmcs12(vcpu)->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); } static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); +/* + * Return the cr0 value that a nested 
guest would read. This is a combination + * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by + * its hypervisor (cr0_read_shadow). + */ +static inline unsigned long nested_read_cr0(struct vmcs12 *fields) +{ + return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) | + (fields->cr0_read_shadow & fields->cr0_guest_host_mask); +} +static inline unsigned long nested_read_cr4(struct vmcs12 *fields) +{ + return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) | + (fields->cr4_read_shadow & fields->cr4_guest_host_mask); +} + static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) { + /* Note that there is no vcpu->fpu_active = 0 here. The caller must + * set this *before* calling this function. + */ vmx_decache_cr0_guest_bits(vcpu); vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP); update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits = 0; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); - vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); + if (is_guest_mode(vcpu)) { + /* + * L1's specified read shadow might not contain the TS bit, + * so now that we turned on shadowing of this bit, we need to + * set this bit of the shadow. Like in nested_vmx_run we need + * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet + * up-to-date here because we just decached cr0.TS (and we'll + * only update vmcs12->guest_cr0 on nested exit). + */ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) | + (vcpu->arch.cr0 & X86_CR0_TS); + vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); + } else + vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); } static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) @@ -1119,6 +1611,25 @@ static void vmx_clear_hlt(struct kvm_vcpu *vcpu) vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); } +/* + * KVM wants to inject page-faults which it got to the guest. This function + * checks whether in a nested guest, we need to inject them to L1 or L2. + * This function assumes it is called with the exit reason in vmcs02 being + * a #PF exception (this is the only case in which KVM injects a #PF when L2 + * is running). + */ +static int nested_pf_handled(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */ + if (!(vmcs12->exception_bitmap & PF_VECTOR)) + return 0; + + nested_vmx_vmexit(vcpu); + return 1; +} + static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error_code, u32 error_code, bool reinject) @@ -1126,6 +1637,10 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, struct vcpu_vmx *vmx = to_vmx(vcpu); u32 intr_info = nr | INTR_INFO_VALID_MASK; + if (nr == PF_VECTOR && is_guest_mode(vcpu) && + nested_pf_handled(vcpu)) + return; + if (has_error_code) { vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); intr_info |= INTR_INFO_DELIVER_CODE_MASK; @@ -1248,12 +1763,24 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { vmcs_write64(TSC_OFFSET, offset); + if (is_guest_mode(vcpu)) + /* + * We're here if L1 chose not to trap the TSC MSR. Since + * prepare_vmcs12() does not copy tsc_offset, we need to also + * set the vmcs12 field here. 
+ */ + get_vmcs12(vcpu)->tsc_offset = offset - + to_vmx(vcpu)->nested.vmcs01_tsc_offset; } static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment) { u64 offset = vmcs_read64(TSC_OFFSET); vmcs_write64(TSC_OFFSET, offset + adjustment); + if (is_guest_mode(vcpu)) { + /* Even when running L2, the adjustment needs to apply to L1 */ + to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment; + } } static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) @@ -1261,6 +1788,236 @@ static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) return target_tsc - native_read_tsc(); } +static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0); + return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31))); +} + +/* + * nested_vmx_allowed() checks whether a guest should be allowed to use VMX + * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for + * all guests if the "nested" module option is off, and can also be disabled + * for a single guest by disabling its VMX cpuid bit. + */ +static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) +{ + return nested && guest_cpuid_has_vmx(vcpu); +} + +/* + * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be + * returned for the various VMX controls MSRs when nested VMX is enabled. + * The same values should also be used to verify that vmcs12 control fields are + * valid during nested entry from L1 to L2. + * Each of these control msrs has a low and high 32-bit half: A low bit is on + * if the corresponding bit in the (32-bit) control field *must* be on, and a + * bit in the high half is on if the corresponding bit in the control field + * may be on. See also vmx_control_verify(). + * TODO: allow these variables to be modified (downgraded) by module options + * or other means. + */ +static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high; +static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high; +static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high; +static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high; +static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high; +static __init void nested_vmx_setup_ctls_msrs(void) +{ + /* + * Note that as a general rule, the high half of the MSRs (bits in + * the control fields which may be 1) should be initialized by the + * intersection of the underlying hardware's MSR (i.e., features which + * can be supported) and the list of features we want to expose - + * because they are known to be properly supported in our code. + * Also, usually, the low half of the MSRs (bits which must be 1) can + * be set to 0, meaning that L1 may turn off any of these bits. The + * reason is that if one of these bits is necessary, it will appear + * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control + * fields of vmcs01 and vmcs02, will turn these bits off - and + * nested_vmx_exit_handled() will not pass related exits to L1. + * These rules have exceptions below. + */ + + /* pin-based controls */ + /* + * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is + * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR. 
+ */ + nested_vmx_pinbased_ctls_low = 0x16 ; + nested_vmx_pinbased_ctls_high = 0x16 | + PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | + PIN_BASED_VIRTUAL_NMIS; + + /* exit controls */ + nested_vmx_exit_ctls_low = 0; + /* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */ +#ifdef CONFIG_X86_64 + nested_vmx_exit_ctls_high = VM_EXIT_HOST_ADDR_SPACE_SIZE; +#else + nested_vmx_exit_ctls_high = 0; +#endif + + /* entry controls */ + rdmsr(MSR_IA32_VMX_ENTRY_CTLS, + nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high); + nested_vmx_entry_ctls_low = 0; + nested_vmx_entry_ctls_high &= + VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_IA32E_MODE; + + /* cpu-based controls */ + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, + nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high); + nested_vmx_procbased_ctls_low = 0; + nested_vmx_procbased_ctls_high &= + CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING | + CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | + CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING | +#ifdef CONFIG_X86_64 + CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | +#endif + CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | + CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING | + CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; + /* + * We can allow some features even when not supported by the + * hardware. For example, L1 can specify an MSR bitmap - and we + * can use it to avoid exits to L1 - even when L0 runs L2 + * without MSR bitmaps. + */ + nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS; + + /* secondary cpu-based controls */ + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, + nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high); + nested_vmx_secondary_ctls_low = 0; + nested_vmx_secondary_ctls_high &= + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; +} + +static inline bool vmx_control_verify(u32 control, u32 low, u32 high) +{ + /* + * Bits 0 in high must be 0, and bits 1 in low must be 1. + */ + return ((control & high) | low) == control; +} + +static inline u64 vmx_control_msr(u32 low, u32 high) +{ + return low | ((u64)high << 32); +} + +/* + * If we allow our guest to use VMX instructions (i.e., nested VMX), we should + * also let it use VMX-specific MSRs. + * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a + * VMX-specific MSR, or 0 when we haven't (and the caller should handle it + * like all other MSRs). + */ +static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) +{ + if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC && + msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) { + /* + * According to the spec, processors which do not support VMX + * should throw a #GP(0) when VMX capability MSRs are read. + */ + kvm_queue_exception_e(vcpu, GP_VECTOR, 0); + return 1; + } + + switch (msr_index) { + case MSR_IA32_FEATURE_CONTROL: + *pdata = 0; + break; + case MSR_IA32_VMX_BASIC: + /* + * This MSR reports some information about VMX support. We + * should return information about the VMX we emulate for the + * guest, and the VMCS structure we give it - not about the + * VMX support of the underlying hardware. 
+ */ + *pdata = VMCS12_REVISION | + ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | + (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); + break; + case MSR_IA32_VMX_TRUE_PINBASED_CTLS: + case MSR_IA32_VMX_PINBASED_CTLS: + *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low, + nested_vmx_pinbased_ctls_high); + break; + case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: + case MSR_IA32_VMX_PROCBASED_CTLS: + *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low, + nested_vmx_procbased_ctls_high); + break; + case MSR_IA32_VMX_TRUE_EXIT_CTLS: + case MSR_IA32_VMX_EXIT_CTLS: + *pdata = vmx_control_msr(nested_vmx_exit_ctls_low, + nested_vmx_exit_ctls_high); + break; + case MSR_IA32_VMX_TRUE_ENTRY_CTLS: + case MSR_IA32_VMX_ENTRY_CTLS: + *pdata = vmx_control_msr(nested_vmx_entry_ctls_low, + nested_vmx_entry_ctls_high); + break; + case MSR_IA32_VMX_MISC: + *pdata = 0; + break; + /* + * These MSRs specify bits which the guest must keep fixed (on or off) + * while L1 is in VMXON mode (in L1's root mode, or running an L2). + * We picked the standard core2 setting. + */ +#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) +#define VMXON_CR4_ALWAYSON X86_CR4_VMXE + case MSR_IA32_VMX_CR0_FIXED0: + *pdata = VMXON_CR0_ALWAYSON; + break; + case MSR_IA32_VMX_CR0_FIXED1: + *pdata = -1ULL; + break; + case MSR_IA32_VMX_CR4_FIXED0: + *pdata = VMXON_CR4_ALWAYSON; + break; + case MSR_IA32_VMX_CR4_FIXED1: + *pdata = -1ULL; + break; + case MSR_IA32_VMX_VMCS_ENUM: + *pdata = 0x1f; + break; + case MSR_IA32_VMX_PROCBASED_CTLS2: + *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low, + nested_vmx_secondary_ctls_high); + break; + case MSR_IA32_VMX_EPT_VPID_CAP: + /* Currently, no nested ept or nested vpid */ + *pdata = 0; + break; + default: + return 0; + } + + return 1; +} + +static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) +{ + if (!nested_vmx_allowed(vcpu)) + return 0; + + if (msr_index == MSR_IA32_FEATURE_CONTROL) + /* TODO: the right thing. */ + return 1; + /* + * No need to treat VMX capability MSRs specially: If we don't handle + * them, handle_wrmsr will #GP(0), which is correct (they are readonly) + */ + return 0; +} + /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. 
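
Editor's aside (not part of the patch): the low/high split used by these VMX capability MSRs — the low 32 bits collect the must-be-1 control bits, the high 32 bits the may-be-1 bits — can be exercised with a small standalone C sketch. The helpers below merely mirror the vmx_control_verify()/vmx_control_msr() logic added above; the mask values are illustrative stand-ins, not the real VMX control definitions.

#include <stdio.h>
#include <stdint.h>

/* Mirror of vmx_control_verify(): bits set in 'low' must be 1 in 'control',
 * and bits clear in 'high' must be 0 in 'control'. */
static int control_verify(uint32_t control, uint32_t low, uint32_t high)
{
	return ((control & high) | low) == control;
}

/* Mirror of vmx_control_msr(): pack the two halves into one 64-bit MSR value. */
static uint64_t control_msr(uint32_t low, uint32_t high)
{
	return low | ((uint64_t)high << 32);
}

int main(void)
{
	/* Illustrative masks only (loosely modelled on the pin-based controls). */
	uint32_t low  = 0x16;				/* bits 1, 2, 4 must be 1 */
	uint32_t high = 0x16 | 0x01 | 0x08 | 0x20;	/* those plus three optional bits */

	printf("capability MSR value: %#llx\n",
	       (unsigned long long)control_msr(low, high));
	printf("0x16 -> %d\n", control_verify(0x16, low, high)); /* minimal legal value: ok */
	printf("0x17 -> %d\n", control_verify(0x17, low, high)); /* adds an allowed bit: ok */
	printf("0x14 -> %d\n", control_verify(0x14, low, high)); /* drops a required bit: rejected */
	printf("0x56 -> %d\n", control_verify(0x56, low, high)); /* sets a disallowed bit: rejected */
	return 0;
}

With these example masks, 0x16 and 0x17 pass verification, while 0x14 (a required bit cleared) and 0x56 (a bit outside the allowed mask set) are rejected — the same check the nested-VMX code applies to vmcs12 control fields on entry from L1 to L2.
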
@@ -1309,6 +2066,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) /* Otherwise falls through */ default: vmx_load_host_state(to_vmx(vcpu)); + if (vmx_get_vmx_msr(vcpu, msr_index, pdata)) + return 0; msr = find_msr_entry(to_vmx(vcpu), msr_index); if (msr) { vmx_load_host_state(to_vmx(vcpu)); @@ -1380,6 +2139,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) return 1; /* Otherwise falls through */ default: + if (vmx_set_vmx_msr(vcpu, msr_index, data)) + break; msr = find_msr_entry(vmx, msr_index); if (msr) { vmx_load_host_state(vmx); @@ -1469,7 +2230,7 @@ static int hardware_enable(void *garbage) if (read_cr4() & X86_CR4_VMXE) return -EBUSY; - INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); + INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); rdmsrl(MSR_IA32_FEATURE_CONTROL, old); test_bits = FEATURE_CONTROL_LOCKED; @@ -1493,14 +2254,14 @@ static int hardware_enable(void *garbage) return 0; } -static void vmclear_local_vcpus(void) +static void vmclear_local_loaded_vmcss(void) { int cpu = raw_smp_processor_id(); - struct vcpu_vmx *vmx, *n; + struct loaded_vmcs *v, *n; - list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu), - local_vcpus_link) - __vcpu_clear(vmx); + list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), + loaded_vmcss_on_cpu_link) + __loaded_vmcs_clear(v); } @@ -1515,7 +2276,7 @@ static void kvm_cpu_vmxoff(void) static void hardware_disable(void *garbage) { if (vmm_exclusive) { - vmclear_local_vcpus(); + vmclear_local_loaded_vmcss(); kvm_cpu_vmxoff(); } write_cr4(read_cr4() & ~X86_CR4_VMXE); @@ -1696,6 +2457,18 @@ static void free_vmcs(struct vmcs *vmcs) free_pages((unsigned long)vmcs, vmcs_config.order); } +/* + * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded + */ +static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) +{ + if (!loaded_vmcs->vmcs) + return; + loaded_vmcs_clear(loaded_vmcs); + free_vmcs(loaded_vmcs->vmcs); + loaded_vmcs->vmcs = NULL; +} + static void free_kvm_area(void) { int cpu; @@ -1756,6 +2529,9 @@ static __init int hardware_setup(void) if (!cpu_has_vmx_ple()) ple_gap = 0; + if (nested) + nested_vmx_setup_ctls_msrs(); + return alloc_kvm_area(); } @@ -2041,7 +2817,7 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu) (unsigned long *)&vcpu->arch.regs_dirty); } -static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); +static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, unsigned long cr0, @@ -2139,11 +2915,23 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) vmcs_writel(GUEST_CR3, guest_cr3); } -static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ? KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); + if (cr4 & X86_CR4_VMXE) { + /* + * To use VMXON (and later other VMX instructions), a guest + * must first be able to turn on cr4.VMXE (see handle_vmon()). + * So basically the check on whether to allow nested VMX + * is here. 
+ */ + if (!nested_vmx_allowed(vcpu)) + return 1; + } else if (to_vmx(vcpu)->nested.vmxon) + return 1; + vcpu->arch.cr4 = cr4; if (enable_ept) { if (!is_paging(vcpu)) { @@ -2156,6 +2944,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) vmcs_writel(CR4_READ_SHADOW, cr4); vmcs_writel(GUEST_CR4, hw_cr4); + return 0; } static void vmx_get_segment(struct kvm_vcpu *vcpu, @@ -2721,18 +3510,110 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) } /* + * Set up the vmcs's constant host-state fields, i.e., host-state fields that + * will not change in the lifetime of the guest. + * Note that host-state that does change is set elsewhere. E.g., host-state + * that is set differently for each CPU is set in vmx_vcpu_load(), not here. + */ +static void vmx_set_constant_host_state(void) +{ + u32 low32, high32; + unsigned long tmpl; + struct desc_ptr dt; + + vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */ + vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ + + vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ + vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ + vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ + vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ + vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ + + native_store_idt(&dt); + vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ + + asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl)); + vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */ + + rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); + vmcs_write32(HOST_IA32_SYSENTER_CS, low32); + rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); + vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ + + if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { + rdmsr(MSR_IA32_CR_PAT, low32, high32); + vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); + } +} + +static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) +{ + vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; + if (enable_ept) + vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; + if (is_guest_mode(&vmx->vcpu)) + vmx->vcpu.arch.cr4_guest_owned_bits &= + ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; + vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); +} + +static u32 vmx_exec_control(struct vcpu_vmx *vmx) +{ + u32 exec_control = vmcs_config.cpu_based_exec_ctrl; + if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { + exec_control &= ~CPU_BASED_TPR_SHADOW; +#ifdef CONFIG_X86_64 + exec_control |= CPU_BASED_CR8_STORE_EXITING | + CPU_BASED_CR8_LOAD_EXITING; +#endif + } + if (!enable_ept) + exec_control |= CPU_BASED_CR3_STORE_EXITING | + CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_INVLPG_EXITING; + return exec_control; +} + +static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) +{ + u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; + if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) + exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + if (vmx->vpid == 0) + exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; + if (!enable_ept) { + exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; + enable_unrestricted_guest = 0; + } + if (!enable_unrestricted_guest) + exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; + if (!ple_gap) + exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; + return exec_control; +} + +static void ept_set_mmio_spte_mask(void) +{ + /* + * EPT Misconfigurations can be generated if the value of bits 2:0 + * of an EPT paging-structure entry is 110b 
(write/execute). + * Also, magic bits (0xffull << 49) is set to quickly identify mmio + * spte. + */ + kvm_mmu_set_mmio_spte_mask(0xffull << 49 | 0x6ull); +} + +/* * Sets up the vmcs for emulated real mode. */ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) { - u32 host_sysenter_cs, msr_low, msr_high; - u32 junk; - u64 host_pat; +#ifdef CONFIG_X86_64 unsigned long a; - struct desc_ptr dt; +#endif int i; - unsigned long kvm_vmx_return; - u32 exec_control; /* I/O */ vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a)); @@ -2747,36 +3628,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmcs_config.pin_based_exec_ctrl); - exec_control = vmcs_config.cpu_based_exec_ctrl; - if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { - exec_control &= ~CPU_BASED_TPR_SHADOW; -#ifdef CONFIG_X86_64 - exec_control |= CPU_BASED_CR8_STORE_EXITING | - CPU_BASED_CR8_LOAD_EXITING; -#endif - } - if (!enable_ept) - exec_control |= CPU_BASED_CR3_STORE_EXITING | - CPU_BASED_CR3_LOAD_EXITING | - CPU_BASED_INVLPG_EXITING; - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); if (cpu_has_secondary_exec_ctrls()) { - exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; - if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) - exec_control &= - ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; - if (vmx->vpid == 0) - exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; - if (!enable_ept) { - exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; - enable_unrestricted_guest = 0; - } - if (!enable_unrestricted_guest) - exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; - if (!ple_gap) - exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; - vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, + vmx_secondary_exec_control(vmx)); } if (ple_gap) { @@ -2784,20 +3640,13 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write32(PLE_WINDOW, ple_window); } - vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf); - vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf); + vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); + vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ - vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */ - vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ - vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ - - vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ - vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ - vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ - vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ + vmx_set_constant_host_state(); #ifdef CONFIG_X86_64 rdmsrl(MSR_FS_BASE, a); vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ @@ -2808,32 +3657,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ #endif - vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ - - native_store_idt(&dt); - vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ - - asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); - 
rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk); - vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs); - rdmsrl(MSR_IA32_SYSENTER_ESP, a); - vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */ - rdmsrl(MSR_IA32_SYSENTER_EIP, a); - vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ - - if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { - rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); - host_pat = msr_low | ((u64) msr_high << 32); - vmcs_write64(HOST_IA32_PAT, host_pat); - } if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { + u32 msr_low, msr_high; + u64 host_pat; rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); host_pat = msr_low | ((u64) msr_high << 32); /* Write the default value follow host pat */ @@ -2863,10 +3695,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl); vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); - vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; - if (enable_ept) - vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; - vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); + set_cr4_guest_host_mask(vmx); kvm_write_tsc(&vmx->vcpu, 0); @@ -2990,9 +3819,25 @@ out: return ret; } +/* + * In nested virtualization, check if L1 asked to exit on external interrupts. + * For most existing hypervisors, this will always return true. + */ +static bool nested_exit_on_intr(struct kvm_vcpu *vcpu) +{ + return get_vmcs12(vcpu)->pin_based_vm_exec_control & + PIN_BASED_EXT_INTR_MASK; +} + static void enable_irq_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; + if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) + /* We can get here when nested_run_pending caused + * vmx_interrupt_allowed() to return false. In this case, do + * nothing - the interrupt will be injected later. + */ + return; cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; @@ -3049,6 +3894,9 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); + if (is_guest_mode(vcpu)) + return; + if (!cpu_has_virtual_nmis()) { /* * Tracking the NMI-blocked state in software is built upon @@ -3115,6 +3963,17 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) { + if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) { + struct vmcs12 *vmcs12; + if (to_vmx(vcpu)->nested.nested_run_pending) + return 0; + nested_vmx_vmexit(vcpu); + vmcs12 = get_vmcs12(vcpu); + vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT; + vmcs12->vm_exit_intr_info = 0; + /* fall through to normal code, but now in L1, not L2 */ + } + return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); @@ -3356,6 +4215,58 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) hypercall[2] = 0xc1; } +/* called to set cr0 as approriate for a mov-to-cr0 exit. */ +static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) +{ + if (to_vmx(vcpu)->nested.vmxon && + ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON)) + return 1; + + if (is_guest_mode(vcpu)) { + /* + * We get here when L2 changed cr0 in a way that did not change + * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), + * but did change L0 shadowed bits. This can currently happen + * with the TS bit: L0 may want to leave TS on (for lazy fpu + * loading) while pretending to allow the guest to change it. 
+ */ + if (kvm_set_cr0(vcpu, (val & vcpu->arch.cr0_guest_owned_bits) | + (vcpu->arch.cr0 & ~vcpu->arch.cr0_guest_owned_bits))) + return 1; + vmcs_writel(CR0_READ_SHADOW, val); + return 0; + } else + return kvm_set_cr0(vcpu, val); +} + +static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) +{ + if (is_guest_mode(vcpu)) { + if (kvm_set_cr4(vcpu, (val & vcpu->arch.cr4_guest_owned_bits) | + (vcpu->arch.cr4 & ~vcpu->arch.cr4_guest_owned_bits))) + return 1; + vmcs_writel(CR4_READ_SHADOW, val); + return 0; + } else + return kvm_set_cr4(vcpu, val); +} + +/* called to set cr0 as approriate for clts instruction exit. */ +static void handle_clts(struct kvm_vcpu *vcpu) +{ + if (is_guest_mode(vcpu)) { + /* + * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS + * but we did (!fpu_active). We need to keep GUEST_CR0.TS on, + * just pretend it's off (also in arch.cr0 for fpu_activate). + */ + vmcs_writel(CR0_READ_SHADOW, + vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS); + vcpu->arch.cr0 &= ~X86_CR0_TS; + } else + vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); +} + static int handle_cr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification, val; @@ -3372,7 +4283,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) trace_kvm_cr_write(cr, val); switch (cr) { case 0: - err = kvm_set_cr0(vcpu, val); + err = handle_set_cr0(vcpu, val); kvm_complete_insn_gp(vcpu, err); return 1; case 3: @@ -3380,7 +4291,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) kvm_complete_insn_gp(vcpu, err); return 1; case 4: - err = kvm_set_cr4(vcpu, val); + err = handle_set_cr4(vcpu, val); kvm_complete_insn_gp(vcpu, err); return 1; case 8: { @@ -3398,7 +4309,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) }; break; case 2: /* clts */ - vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); + handle_clts(vcpu); trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); skip_emulated_instruction(vcpu); vmx_fpu_activate(vcpu); @@ -3574,12 +4485,6 @@ static int handle_vmcall(struct kvm_vcpu *vcpu) return 1; } -static int handle_vmx_insn(struct kvm_vcpu *vcpu) -{ - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; -} - static int handle_invd(struct kvm_vcpu *vcpu) { return emulate_instruction(vcpu, 0) == EMULATE_DONE; @@ -3777,11 +4682,19 @@ static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte, static int handle_ept_misconfig(struct kvm_vcpu *vcpu) { u64 sptes[4]; - int nr_sptes, i; + int nr_sptes, i, ret; gpa_t gpa; gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); + ret = handle_mmio_page_fault_common(vcpu, gpa, true); + if (likely(ret == 1)) + return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) == + EMULATE_DONE; + if (unlikely(!ret)) + return 1; + + /* It is the real ept misconfig */ printk(KERN_ERR "EPT: Misconfiguration.\n"); printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa); @@ -3866,6 +4779,639 @@ static int handle_invalid_op(struct kvm_vcpu *vcpu) } /* + * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12. + * We could reuse a single VMCS for all the L2 guests, but we also want the + * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this + * allows keeping them loaded on the processor, and in the future will allow + * optimizations where prepare_vmcs02 doesn't need to set all the fields on + * every entry if they never change. + * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE + * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first. + * + * The following functions allocate and free a vmcs02 in this pool. 
+ */ + +/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */ +static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) +{ + struct vmcs02_list *item; + list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) + if (item->vmptr == vmx->nested.current_vmptr) { + list_move(&item->list, &vmx->nested.vmcs02_pool); + return &item->vmcs02; + } + + if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { + /* Recycle the least recently used VMCS. */ + item = list_entry(vmx->nested.vmcs02_pool.prev, + struct vmcs02_list, list); + item->vmptr = vmx->nested.current_vmptr; + list_move(&item->list, &vmx->nested.vmcs02_pool); + return &item->vmcs02; + } + + /* Create a new VMCS */ + item = (struct vmcs02_list *) + kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL); + if (!item) + return NULL; + item->vmcs02.vmcs = alloc_vmcs(); + if (!item->vmcs02.vmcs) { + kfree(item); + return NULL; + } + loaded_vmcs_init(&item->vmcs02); + item->vmptr = vmx->nested.current_vmptr; + list_add(&(item->list), &(vmx->nested.vmcs02_pool)); + vmx->nested.vmcs02_num++; + return &item->vmcs02; +} + +/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */ +static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) +{ + struct vmcs02_list *item; + list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) + if (item->vmptr == vmptr) { + free_loaded_vmcs(&item->vmcs02); + list_del(&item->list); + kfree(item); + vmx->nested.vmcs02_num--; + return; + } +} + +/* + * Free all VMCSs saved for this vcpu, except the one pointed by + * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one + * currently used, if running L2), and vmcs01 when running L2. + */ +static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) +{ + struct vmcs02_list *item, *n; + list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { + if (vmx->loaded_vmcs != &item->vmcs02) + free_loaded_vmcs(&item->vmcs02); + list_del(&item->list); + kfree(item); + } + vmx->nested.vmcs02_num = 0; + + if (vmx->loaded_vmcs != &vmx->vmcs01) + free_loaded_vmcs(&vmx->vmcs01); +} + +/* + * Emulate the VMXON instruction. + * Currently, we just remember that VMX is active, and do not save or even + * inspect the argument to VMXON (the so-called "VMXON pointer") because we + * do not currently need to store anything in that guest-allocated memory + * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their + * argument is different from the VMXON pointer (which the spec says they do). + */ +static int handle_vmon(struct kvm_vcpu *vcpu) +{ + struct kvm_segment cs; + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* The Intel VMX Instruction Reference lists a bunch of bits that + * are prerequisite to running VMXON, most notably cr4.VMXE must be + * set to 1 (see vmx_set_cr4() for when we allow the guest to set this). + * Otherwise, we should fail with #UD. 
We test these now: + */ + if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) || + !kvm_read_cr0_bits(vcpu, X86_CR0_PE) || + (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + + vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); + if (is_long_mode(vcpu) && !cs.l) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + + if (vmx_get_cpl(vcpu)) { + kvm_inject_gp(vcpu, 0); + return 1; + } + + INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); + vmx->nested.vmcs02_num = 0; + + vmx->nested.vmxon = true; + + skip_emulated_instruction(vcpu); + return 1; +} + +/* + * Intel's VMX Instruction Reference specifies a common set of prerequisites + * for running VMX instructions (except VMXON, whose prerequisites are + * slightly different). It also specifies what exception to inject otherwise. + */ +static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) +{ + struct kvm_segment cs; + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!vmx->nested.vmxon) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 0; + } + + vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); + if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) || + (is_long_mode(vcpu) && !cs.l)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 0; + } + + if (vmx_get_cpl(vcpu)) { + kvm_inject_gp(vcpu, 0); + return 0; + } + + return 1; +} + +/* + * Free whatever needs to be freed from vmx->nested when L1 goes down, or + * just stops using VMX. + */ +static void free_nested(struct vcpu_vmx *vmx) +{ + if (!vmx->nested.vmxon) + return; + vmx->nested.vmxon = false; + if (vmx->nested.current_vmptr != -1ull) { + kunmap(vmx->nested.current_vmcs12_page); + nested_release_page(vmx->nested.current_vmcs12_page); + vmx->nested.current_vmptr = -1ull; + vmx->nested.current_vmcs12 = NULL; + } + /* Unpin physical memory we referred to in current vmcs02 */ + if (vmx->nested.apic_access_page) { + nested_release_page(vmx->nested.apic_access_page); + vmx->nested.apic_access_page = 0; + } + + nested_free_all_saved_vmcss(vmx); +} + +/* Emulate the VMXOFF instruction */ +static int handle_vmoff(struct kvm_vcpu *vcpu) +{ + if (!nested_vmx_check_permission(vcpu)) + return 1; + free_nested(to_vmx(vcpu)); + skip_emulated_instruction(vcpu); + return 1; +} + +/* + * Decode the memory-address operand of a vmx instruction, as recorded on an + * exit caused by such an instruction (run by a guest hypervisor). + * On success, returns 0. When the operand is invalid, returns 1 and throws + * #UD or #GP. + */ +static int get_vmx_mem_address(struct kvm_vcpu *vcpu, + unsigned long exit_qualification, + u32 vmx_instruction_info, gva_t *ret) +{ + /* + * According to Vol. 3B, "Information for VM Exits Due to Instruction + * Execution", on an exit, vmx_instruction_info holds most of the + * addressing components of the operand. Only the displacement part + * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). + * For how an actual address is calculated from all these components, + * refer to Vol. 1, "Operand Addressing". 
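For reference, here is the complete bit layout of VMX_INSTRUCTION_INFO as this file consumes it, including the two register fields that handle_vmread()/handle_vmwrite() extract further down. This is an illustrative, self-contained decoder mirroring the shifts and masks used below; the struct and field names are not part of the patch:

#include <stdbool.h>
#include <stdint.h>

struct vmx_insn_info {
	unsigned scaling;	/* bits  1:0  - index is scaled by 1 << scaling */
	unsigned reg1;		/* bits  6:3  - data register for VMREAD/VMWRITE */
	unsigned addr_size;	/* bits  9:7  - address size; 1 means 32-bit below */
	bool     is_reg;	/* bit   10   - operand is a register, not memory */
	unsigned seg_reg;	/* bits 17:15 - segment register */
	unsigned index_reg;	/* bits 21:18 */
	bool     index_valid;	/* bit   22 is the *invalid* flag */
	unsigned base_reg;	/* bits 26:23 */
	bool     base_valid;	/* bit   27 is the *invalid* flag */
	unsigned reg2;		/* bits 31:28 - register holding the VMCS field encoding */
};

static struct vmx_insn_info decode_vmx_insn_info(uint32_t info)
{
	struct vmx_insn_info d = {
		.scaling     = info & 3,
		.reg1        = (info >> 3) & 0xf,
		.addr_size   = (info >> 7) & 7,
		.is_reg      = info & (1u << 10),
		.seg_reg     = (info >> 15) & 7,
		.index_reg   = (info >> 18) & 0xf,
		.index_valid = !(info & (1u << 22)),
		.base_reg    = (info >> 23) & 0xf,
		.base_valid  = !(info & (1u << 27)),
		.reg2        = (info >> 28) & 0xf,
	};
	return d;
}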
+ */ + int scaling = vmx_instruction_info & 3; + int addr_size = (vmx_instruction_info >> 7) & 7; + bool is_reg = vmx_instruction_info & (1u << 10); + int seg_reg = (vmx_instruction_info >> 15) & 7; + int index_reg = (vmx_instruction_info >> 18) & 0xf; + bool index_is_valid = !(vmx_instruction_info & (1u << 22)); + int base_reg = (vmx_instruction_info >> 23) & 0xf; + bool base_is_valid = !(vmx_instruction_info & (1u << 27)); + + if (is_reg) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + + /* Addr = segment_base + offset */ + /* offset = base + [index * scale] + displacement */ + *ret = vmx_get_segment_base(vcpu, seg_reg); + if (base_is_valid) + *ret += kvm_register_read(vcpu, base_reg); + if (index_is_valid) + *ret += kvm_register_read(vcpu, index_reg)<<scaling; + *ret += exit_qualification; /* holds the displacement */ + + if (addr_size == 1) /* 32 bit */ + *ret &= 0xffffffff; + + /* + * TODO: throw #GP (and return 1) in various cases that the VM* + * instructions require it - e.g., offset beyond segment limit, + * unusable or unreadable/unwritable segment, non-canonical 64-bit + * address, and so on. Currently these are not checked. + */ + return 0; +} + +/* + * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), + * set the success or error code of an emulated VMX instruction, as specified + * by Vol 2B, VMX Instruction Reference, "Conventions". + */ +static void nested_vmx_succeed(struct kvm_vcpu *vcpu) +{ + vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) + & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | + X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); +} + +static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu) +{ + vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) + & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | + X86_EFLAGS_SF | X86_EFLAGS_OF)) + | X86_EFLAGS_CF); +} + +static void nested_vmx_failValid(struct kvm_vcpu *vcpu, + u32 vm_instruction_error) +{ + if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { + /* + * failValid writes the error number to the current VMCS, which + * can't be done there isn't a current VMCS. + */ + nested_vmx_failInvalid(vcpu); + return; + } + vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) + & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | + X86_EFLAGS_SF | X86_EFLAGS_OF)) + | X86_EFLAGS_ZF); + get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; +} + +/* Emulate the VMCLEAR instruction */ +static int handle_vmclear(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + gva_t gva; + gpa_t vmptr; + struct vmcs12 *vmcs12; + struct page *page; + struct x86_exception e; + + if (!nested_vmx_check_permission(vcpu)) + return 1; + + if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), + vmcs_read32(VMX_INSTRUCTION_INFO), &gva)) + return 1; + + if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, + sizeof(vmptr), &e)) { + kvm_inject_page_fault(vcpu, &e); + return 1; + } + + if (!IS_ALIGNED(vmptr, PAGE_SIZE)) { + nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); + skip_emulated_instruction(vcpu); + return 1; + } + + if (vmptr == vmx->nested.current_vmptr) { + kunmap(vmx->nested.current_vmcs12_page); + nested_release_page(vmx->nested.current_vmcs12_page); + vmx->nested.current_vmptr = -1ull; + vmx->nested.current_vmcs12 = NULL; + } + + page = nested_get_page(vcpu, vmptr); + if (page == NULL) { + /* + * For accurate processor emulation, VMCLEAR beyond available + * physical memory should do nothing at all. 
However, it is + * possible that a nested vmx bug, not a guest hypervisor bug, + * resulted in this case, so let's shut down before doing any + * more damage: + */ + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + return 1; + } + vmcs12 = kmap(page); + vmcs12->launch_state = 0; + kunmap(page); + nested_release_page(page); + + nested_free_vmcs02(vmx, vmptr); + + skip_emulated_instruction(vcpu); + nested_vmx_succeed(vcpu); + return 1; +} + +static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); + +/* Emulate the VMLAUNCH instruction */ +static int handle_vmlaunch(struct kvm_vcpu *vcpu) +{ + return nested_vmx_run(vcpu, true); +} + +/* Emulate the VMRESUME instruction */ +static int handle_vmresume(struct kvm_vcpu *vcpu) +{ + + return nested_vmx_run(vcpu, false); +} + +enum vmcs_field_type { + VMCS_FIELD_TYPE_U16 = 0, + VMCS_FIELD_TYPE_U64 = 1, + VMCS_FIELD_TYPE_U32 = 2, + VMCS_FIELD_TYPE_NATURAL_WIDTH = 3 +}; + +static inline int vmcs_field_type(unsigned long field) +{ + if (0x1 & field) /* the *_HIGH fields are all 32 bit */ + return VMCS_FIELD_TYPE_U32; + return (field >> 13) & 0x3 ; +} + +static inline int vmcs_field_readonly(unsigned long field) +{ + return (((field >> 10) & 0x3) == 1); +} + +/* + * Read a vmcs12 field. Since these can have varying lengths and we return + * one type, we chose the biggest type (u64) and zero-extend the return value + * to that size. Note that the caller, handle_vmread, might need to use only + * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of + * 64-bit fields are to be returned). + */ +static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu, + unsigned long field, u64 *ret) +{ + short offset = vmcs_field_to_offset(field); + char *p; + + if (offset < 0) + return 0; + + p = ((char *)(get_vmcs12(vcpu))) + offset; + + switch (vmcs_field_type(field)) { + case VMCS_FIELD_TYPE_NATURAL_WIDTH: + *ret = *((natural_width *)p); + return 1; + case VMCS_FIELD_TYPE_U16: + *ret = *((u16 *)p); + return 1; + case VMCS_FIELD_TYPE_U32: + *ret = *((u32 *)p); + return 1; + case VMCS_FIELD_TYPE_U64: + *ret = *((u64 *)p); + return 1; + default: + return 0; /* can never happen. */ + } +} + +/* + * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was + * used before) all generate the same failure when it is missing. + */ +static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + if (vmx->nested.current_vmptr == -1ull) { + nested_vmx_failInvalid(vcpu); + skip_emulated_instruction(vcpu); + return 0; + } + return 1; +} + +static int handle_vmread(struct kvm_vcpu *vcpu) +{ + unsigned long field; + u64 field_value; + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + gva_t gva = 0; + + if (!nested_vmx_check_permission(vcpu) || + !nested_vmx_check_vmcs12(vcpu)) + return 1; + + /* Decode instruction info and find the field to read */ + field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); + /* Read the field, zero-extended to a u64 field_value */ + if (!vmcs12_read_any(vcpu, field, &field_value)) { + nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); + skip_emulated_instruction(vcpu); + return 1; + } + /* + * Now copy part of this value to register or memory, as requested. + * Note that the number of bits actually copied is 32 or 64 depending + * on the guest's mode (32 or 64 bit), not on the given field's length. 
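The field-encoding helpers above (vmcs_field_type(), vmcs_field_readonly()) rely on properties packed into the field number itself: bit 0 marks the *_HIGH access to the upper half of a 64-bit field, bits 14:13 select the width, and bits 11:10 equal to 01 mark the read-only exit-information fields. A standalone sketch of that decoding; the byte-size helper at the end is an illustrative addition, not part of the patch:

#include <stddef.h>

enum field_type {
	FIELD_TYPE_U16 = 0,
	FIELD_TYPE_U64 = 1,
	FIELD_TYPE_U32 = 2,
	FIELD_TYPE_NATURAL_WIDTH = 3,
};

/* Width selector lives in bits 14:13; bit 0 set means this is the *_HIGH
 * alias for the upper 32 bits of a 64-bit field, so it is accessed as u32. */
static int field_type(unsigned long field)
{
	if (field & 1)
		return FIELD_TYPE_U32;
	return (field >> 13) & 0x3;
}

/* Read-only (exit information) fields encode 01 in bits 11:10. */
static int field_readonly(unsigned long field)
{
	return ((field >> 10) & 0x3) == 1;
}

/* How many bytes a read/write of this field moves into the vmcs12 structure;
 * natural width is sizeof(unsigned long), i.e. 8 bytes on x86-64. */
static size_t field_bytes(unsigned long field)
{
	switch (field_type(field)) {
	case FIELD_TYPE_U16:	return 2;
	case FIELD_TYPE_U32:	return 4;
	case FIELD_TYPE_U64:	return 8;
	default:		return sizeof(unsigned long);
	}
}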
+ */ + if (vmx_instruction_info & (1u << 10)) { + kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf), + field_value); + } else { + if (get_vmx_mem_address(vcpu, exit_qualification, + vmx_instruction_info, &gva)) + return 1; + /* _system ok, as nested_vmx_check_permission verified cpl=0 */ + kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, + &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL); + } + + nested_vmx_succeed(vcpu); + skip_emulated_instruction(vcpu); + return 1; +} + + +static int handle_vmwrite(struct kvm_vcpu *vcpu) +{ + unsigned long field; + gva_t gva; + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + char *p; + short offset; + /* The value to write might be 32 or 64 bits, depending on L1's long + * mode, and eventually we need to write that into a field of several + * possible lengths. The code below first zero-extends the value to 64 + * bit (field_value), and then copies only the approriate number of + * bits into the vmcs12 field. + */ + u64 field_value = 0; + struct x86_exception e; + + if (!nested_vmx_check_permission(vcpu) || + !nested_vmx_check_vmcs12(vcpu)) + return 1; + + if (vmx_instruction_info & (1u << 10)) + field_value = kvm_register_read(vcpu, + (((vmx_instruction_info) >> 3) & 0xf)); + else { + if (get_vmx_mem_address(vcpu, exit_qualification, + vmx_instruction_info, &gva)) + return 1; + if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, + &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) { + kvm_inject_page_fault(vcpu, &e); + return 1; + } + } + + + field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); + if (vmcs_field_readonly(field)) { + nested_vmx_failValid(vcpu, + VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); + skip_emulated_instruction(vcpu); + return 1; + } + + offset = vmcs_field_to_offset(field); + if (offset < 0) { + nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); + skip_emulated_instruction(vcpu); + return 1; + } + p = ((char *) get_vmcs12(vcpu)) + offset; + + switch (vmcs_field_type(field)) { + case VMCS_FIELD_TYPE_U16: + *(u16 *)p = field_value; + break; + case VMCS_FIELD_TYPE_U32: + *(u32 *)p = field_value; + break; + case VMCS_FIELD_TYPE_U64: + *(u64 *)p = field_value; + break; + case VMCS_FIELD_TYPE_NATURAL_WIDTH: + *(natural_width *)p = field_value; + break; + default: + nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); + skip_emulated_instruction(vcpu); + return 1; + } + + nested_vmx_succeed(vcpu); + skip_emulated_instruction(vcpu); + return 1; +} + +/* Emulate the VMPTRLD instruction */ +static int handle_vmptrld(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + gva_t gva; + gpa_t vmptr; + struct x86_exception e; + + if (!nested_vmx_check_permission(vcpu)) + return 1; + + if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), + vmcs_read32(VMX_INSTRUCTION_INFO), &gva)) + return 1; + + if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, + sizeof(vmptr), &e)) { + kvm_inject_page_fault(vcpu, &e); + return 1; + } + + if (!IS_ALIGNED(vmptr, PAGE_SIZE)) { + nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); + skip_emulated_instruction(vcpu); + return 1; + } + + if (vmx->nested.current_vmptr != vmptr) { + struct vmcs12 *new_vmcs12; + struct page *page; + page = nested_get_page(vcpu, vmptr); + if (page == NULL) { + nested_vmx_failInvalid(vcpu); + skip_emulated_instruction(vcpu); + return 1; + } + new_vmcs12 = kmap(page); + if (new_vmcs12->revision_id != 
VMCS12_REVISION) { + kunmap(page); + nested_release_page_clean(page); + nested_vmx_failValid(vcpu, + VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); + skip_emulated_instruction(vcpu); + return 1; + } + if (vmx->nested.current_vmptr != -1ull) { + kunmap(vmx->nested.current_vmcs12_page); + nested_release_page(vmx->nested.current_vmcs12_page); + } + + vmx->nested.current_vmptr = vmptr; + vmx->nested.current_vmcs12 = new_vmcs12; + vmx->nested.current_vmcs12_page = page; + } + + nested_vmx_succeed(vcpu); + skip_emulated_instruction(vcpu); + return 1; +} + +/* Emulate the VMPTRST instruction */ +static int handle_vmptrst(struct kvm_vcpu *vcpu) +{ + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + gva_t vmcs_gva; + struct x86_exception e; + + if (!nested_vmx_check_permission(vcpu)) + return 1; + + if (get_vmx_mem_address(vcpu, exit_qualification, + vmx_instruction_info, &vmcs_gva)) + return 1; + /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */ + if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, + (void *)&to_vmx(vcpu)->nested.current_vmptr, + sizeof(u64), &e)) { + kvm_inject_page_fault(vcpu, &e); + return 1; + } + nested_vmx_succeed(vcpu); + skip_emulated_instruction(vcpu); + return 1; +} + +/* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs * to be done to userspace and return 0. @@ -3886,15 +5432,15 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_INVD] = handle_invd, [EXIT_REASON_INVLPG] = handle_invlpg, [EXIT_REASON_VMCALL] = handle_vmcall, - [EXIT_REASON_VMCLEAR] = handle_vmx_insn, - [EXIT_REASON_VMLAUNCH] = handle_vmx_insn, - [EXIT_REASON_VMPTRLD] = handle_vmx_insn, - [EXIT_REASON_VMPTRST] = handle_vmx_insn, - [EXIT_REASON_VMREAD] = handle_vmx_insn, - [EXIT_REASON_VMRESUME] = handle_vmx_insn, - [EXIT_REASON_VMWRITE] = handle_vmx_insn, - [EXIT_REASON_VMOFF] = handle_vmx_insn, - [EXIT_REASON_VMON] = handle_vmx_insn, + [EXIT_REASON_VMCLEAR] = handle_vmclear, + [EXIT_REASON_VMLAUNCH] = handle_vmlaunch, + [EXIT_REASON_VMPTRLD] = handle_vmptrld, + [EXIT_REASON_VMPTRST] = handle_vmptrst, + [EXIT_REASON_VMREAD] = handle_vmread, + [EXIT_REASON_VMRESUME] = handle_vmresume, + [EXIT_REASON_VMWRITE] = handle_vmwrite, + [EXIT_REASON_VMOFF] = handle_vmoff, + [EXIT_REASON_VMON] = handle_vmon, [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, [EXIT_REASON_APIC_ACCESS] = handle_apic_access, [EXIT_REASON_WBINVD] = handle_wbinvd, @@ -3911,6 +5457,229 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { static const int kvm_vmx_max_exit_handlers = ARRAY_SIZE(kvm_vmx_exit_handlers); +/* + * Return 1 if we should exit from L2 to L1 to handle an MSR access access, + * rather than handle it ourselves in L0. I.e., check whether L1 expressed + * disinterest in the current event (read or write a specific MSR) by using an + * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. + */ +static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12, u32 exit_reason) +{ + u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; + gpa_t bitmap; + + if (!nested_cpu_has(get_vmcs12(vcpu), CPU_BASED_USE_MSR_BITMAPS)) + return 1; + + /* + * The MSR_BITMAP page is divided into four 1024-byte bitmaps, + * for the four combinations of read/write and low/high MSR numbers. 
+ * First we need to figure out which of the four to use: + */ + bitmap = vmcs12->msr_bitmap; + if (exit_reason == EXIT_REASON_MSR_WRITE) + bitmap += 2048; + if (msr_index >= 0xc0000000) { + msr_index -= 0xc0000000; + bitmap += 1024; + } + + /* Then read the msr_index'th bit from this bitmap: */ + if (msr_index < 1024*8) { + unsigned char b; + kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1); + return 1 & (b >> (msr_index & 7)); + } else + return 1; /* let L1 handle the wrong parameter */ +} + +/* + * Return 1 if we should exit from L2 to L1 to handle a CR access exit, + * rather than handle it ourselves in L0. I.e., check if L1 wanted to + * intercept (via guest_host_mask etc.) the current event. + */ +static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + int cr = exit_qualification & 15; + int reg = (exit_qualification >> 8) & 15; + unsigned long val = kvm_register_read(vcpu, reg); + + switch ((exit_qualification >> 4) & 3) { + case 0: /* mov to cr */ + switch (cr) { + case 0: + if (vmcs12->cr0_guest_host_mask & + (val ^ vmcs12->cr0_read_shadow)) + return 1; + break; + case 3: + if ((vmcs12->cr3_target_count >= 1 && + vmcs12->cr3_target_value0 == val) || + (vmcs12->cr3_target_count >= 2 && + vmcs12->cr3_target_value1 == val) || + (vmcs12->cr3_target_count >= 3 && + vmcs12->cr3_target_value2 == val) || + (vmcs12->cr3_target_count >= 4 && + vmcs12->cr3_target_value3 == val)) + return 0; + if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) + return 1; + break; + case 4: + if (vmcs12->cr4_guest_host_mask & + (vmcs12->cr4_read_shadow ^ val)) + return 1; + break; + case 8: + if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) + return 1; + break; + } + break; + case 2: /* clts */ + if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && + (vmcs12->cr0_read_shadow & X86_CR0_TS)) + return 1; + break; + case 1: /* mov from cr */ + switch (cr) { + case 3: + if (vmcs12->cpu_based_vm_exec_control & + CPU_BASED_CR3_STORE_EXITING) + return 1; + break; + case 8: + if (vmcs12->cpu_based_vm_exec_control & + CPU_BASED_CR8_STORE_EXITING) + return 1; + break; + } + break; + case 3: /* lmsw */ + /* + * lmsw can change bits 1..3 of cr0, and only set bit 0 of + * cr0. Other attempted changes are ignored, with no exit. + */ + if (vmcs12->cr0_guest_host_mask & 0xe & + (val ^ vmcs12->cr0_read_shadow)) + return 1; + if ((vmcs12->cr0_guest_host_mask & 0x1) && + !(vmcs12->cr0_read_shadow & 0x1) && + (val & 0x1)) + return 1; + break; + } + return 0; +} + +/* + * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we + * should handle it ourselves in L0 (and then continue L2). Only call this + * when in is_guest_mode (L2). 
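The bitmap arithmetic in nested_vmx_exit_handled_msr() above maps every MSR access onto one bit of the 4096-byte page L1 supplied. A self-contained sketch of that lookup (the helper name is invented here for illustration):

#include <stdbool.h>
#include <stdint.h>

/*
 * Offset, within the 4096-byte MSR-bitmap page, of the bit controlling one
 * MSR access: bytes 0-1023 cover reads of MSRs 0x00000000-0x00001fff, bytes
 * 1024-2047 reads of 0xc0000000-0xc0001fff, and bytes 2048-3071 / 3072-4095
 * the corresponding writes.  Returns false for MSRs outside both ranges
 * (those always go to L1, matching the "wrong parameter" case above).
 */
static bool msr_bitmap_pos(uint32_t msr, bool is_write,
			   uint32_t *byte, uint32_t *bit)
{
	uint32_t base = is_write ? 2048 : 0;

	if (msr >= 0xc0000000) {
		msr -= 0xc0000000;
		base += 1024;
	}
	if (msr >= 1024 * 8)		/* not covered by the bitmap */
		return false;

	*byte = base + msr / 8;
	*bit  = msr & 7;
	return true;
}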
+ */ +static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) +{ + u32 exit_reason = vmcs_read32(VM_EXIT_REASON); + u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + if (vmx->nested.nested_run_pending) + return 0; + + if (unlikely(vmx->fail)) { + printk(KERN_INFO "%s failed vm entry %x\n", + __func__, vmcs_read32(VM_INSTRUCTION_ERROR)); + return 1; + } + + switch (exit_reason) { + case EXIT_REASON_EXCEPTION_NMI: + if (!is_exception(intr_info)) + return 0; + else if (is_page_fault(intr_info)) + return enable_ept; + return vmcs12->exception_bitmap & + (1u << (intr_info & INTR_INFO_VECTOR_MASK)); + case EXIT_REASON_EXTERNAL_INTERRUPT: + return 0; + case EXIT_REASON_TRIPLE_FAULT: + return 1; + case EXIT_REASON_PENDING_INTERRUPT: + case EXIT_REASON_NMI_WINDOW: + /* + * prepare_vmcs02() set the CPU_BASED_VIRTUAL_INTR_PENDING bit + * (aka Interrupt Window Exiting) only when L1 turned it on, + * so if we got a PENDING_INTERRUPT exit, this must be for L1. + * Same for NMI Window Exiting. + */ + return 1; + case EXIT_REASON_TASK_SWITCH: + return 1; + case EXIT_REASON_CPUID: + return 1; + case EXIT_REASON_HLT: + return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); + case EXIT_REASON_INVD: + return 1; + case EXIT_REASON_INVLPG: + return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); + case EXIT_REASON_RDPMC: + return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); + case EXIT_REASON_RDTSC: + return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); + case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: + case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: + case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: + case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: + case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: + /* + * VMX instructions trap unconditionally. This allows L1 to + * emulate them for its L2 guest, i.e., allows 3-level nesting! 
+ */ + return 1; + case EXIT_REASON_CR_ACCESS: + return nested_vmx_exit_handled_cr(vcpu, vmcs12); + case EXIT_REASON_DR_ACCESS: + return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); + case EXIT_REASON_IO_INSTRUCTION: + /* TODO: support IO bitmaps */ + return 1; + case EXIT_REASON_MSR_READ: + case EXIT_REASON_MSR_WRITE: + return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); + case EXIT_REASON_INVALID_STATE: + return 1; + case EXIT_REASON_MWAIT_INSTRUCTION: + return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); + case EXIT_REASON_MONITOR_INSTRUCTION: + return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); + case EXIT_REASON_PAUSE_INSTRUCTION: + return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || + nested_cpu_has2(vmcs12, + SECONDARY_EXEC_PAUSE_LOOP_EXITING); + case EXIT_REASON_MCE_DURING_VMENTRY: + return 0; + case EXIT_REASON_TPR_BELOW_THRESHOLD: + return 1; + case EXIT_REASON_APIC_ACCESS: + return nested_cpu_has2(vmcs12, + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); + case EXIT_REASON_EPT_VIOLATION: + case EXIT_REASON_EPT_MISCONFIG: + return 0; + case EXIT_REASON_WBINVD: + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); + case EXIT_REASON_XSETBV: + return 1; + default: + return 1; + } +} + static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) { *info1 = vmcs_readl(EXIT_QUALIFICATION); @@ -3933,6 +5702,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) if (vmx->emulation_required && emulate_invalid_guest_state) return handle_invalid_guest_state(vcpu); + /* + * the KVM_REQ_EVENT optimization bit is only on for one entry, and if + * we did not inject a still-pending event to L1 now because of + * nested_run_pending, we need to re-enable this bit. + */ + if (vmx->nested.nested_run_pending) + kvm_make_request(KVM_REQ_EVENT, vcpu); + + if (!is_guest_mode(vcpu) && (exit_reason == EXIT_REASON_VMLAUNCH || + exit_reason == EXIT_REASON_VMRESUME)) + vmx->nested.nested_run_pending = 1; + else + vmx->nested.nested_run_pending = 0; + + if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) { + nested_vmx_vmexit(vcpu); + return 1; + } + if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason @@ -3955,7 +5743,9 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) "(0x%x) and exit reason is 0x%x\n", __func__, vectoring_info, exit_reason); - if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) { + if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked && + !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis( + get_vmcs12(vcpu), vcpu)))) { if (vmx_interrupt_allowed(vcpu)) { vmx->soft_vnmi_blocked = 0; } else if (vmx->vnmi_blocked_time > 1000000000LL && @@ -4118,6 +5908,8 @@ static void __vmx_complete_interrupts(struct vcpu_vmx *vmx, static void vmx_complete_interrupts(struct vcpu_vmx *vmx) { + if (is_guest_mode(&vmx->vcpu)) + return; __vmx_complete_interrupts(vmx, vmx->idt_vectoring_info, VM_EXIT_INSTRUCTION_LEN, IDT_VECTORING_ERROR_CODE); @@ -4125,6 +5917,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx) static void vmx_cancel_injection(struct kvm_vcpu *vcpu) { + if (is_guest_mode(vcpu)) + return; __vmx_complete_interrupts(to_vmx(vcpu), vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), VM_ENTRY_INSTRUCTION_LEN, @@ -4145,6 +5939,21 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); + if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) { + struct vmcs12 *vmcs12 = 
get_vmcs12(vcpu); + if (vmcs12->idt_vectoring_info_field & + VECTORING_INFO_VALID_MASK) { + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, + vmcs12->idt_vectoring_info_field); + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, + vmcs12->vm_exit_instruction_len); + if (vmcs12->idt_vectoring_info_field & + VECTORING_INFO_DELIVER_CODE_MASK) + vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, + vmcs12->idt_vectoring_error_code); + } + } + /* Record the guest's net vcpu time for enforced NMI injections. */ if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) vmx->entry_time = ktime_get(); @@ -4167,6 +5976,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vmx_set_interrupt_shadow(vcpu, 0); + vmx->__launched = vmx->loaded_vmcs->launched; asm( /* Store host registers */ "push %%"R"dx; push %%"R"bp;" @@ -4237,7 +6047,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "pop %%"R"bp; pop %%"R"dx \n\t" "setbe %c[fail](%0) \n\t" : : "c"(vmx), "d"((unsigned long)HOST_RSP), - [launched]"i"(offsetof(struct vcpu_vmx, launched)), + [launched]"i"(offsetof(struct vcpu_vmx, __launched)), [fail]"i"(offsetof(struct vcpu_vmx, fail)), [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), @@ -4276,8 +6086,19 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); + if (is_guest_mode(vcpu)) { + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + vmcs12->idt_vectoring_info_field = vmx->idt_vectoring_info; + if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) { + vmcs12->idt_vectoring_error_code = + vmcs_read32(IDT_VECTORING_ERROR_CODE); + vmcs12->vm_exit_instruction_len = + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + } + } + asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); - vmx->launched = 1; + vmx->loaded_vmcs->launched = 1; vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); @@ -4289,41 +6110,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #undef R #undef Q -static void vmx_free_vmcs(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - - if (vmx->vmcs) { - vcpu_clear(vmx); - free_vmcs(vmx->vmcs); - vmx->vmcs = NULL; - } -} - static void vmx_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); free_vpid(vmx); - vmx_free_vmcs(vcpu); + free_nested(vmx); + free_loaded_vmcs(vmx->loaded_vmcs); kfree(vmx->guest_msrs); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vmx); } -static inline void vmcs_init(struct vmcs *vmcs) -{ - u64 phys_addr = __pa(per_cpu(vmxarea, raw_smp_processor_id())); - - if (!vmm_exclusive) - kvm_cpu_vmxon(phys_addr); - - vmcs_clear(vmcs); - - if (!vmm_exclusive) - kvm_cpu_vmxoff(); -} - static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) { int err; @@ -4345,11 +6143,15 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) goto uninit_vcpu; } - vmx->vmcs = alloc_vmcs(); - if (!vmx->vmcs) + vmx->loaded_vmcs = &vmx->vmcs01; + vmx->loaded_vmcs->vmcs = alloc_vmcs(); + if (!vmx->loaded_vmcs->vmcs) goto free_msrs; - - vmcs_init(vmx->vmcs); + if (!vmm_exclusive) + kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id()))); + loaded_vmcs_init(vmx->loaded_vmcs); + if (!vmm_exclusive) + kvm_cpu_vmxoff(); cpu = get_cpu(); vmx_vcpu_load(&vmx->vcpu, cpu); @@ -4375,10 +6177,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) goto free_vmcs; } + vmx->nested.current_vmptr = -1ull; + 
vmx->nested.current_vmcs12 = NULL; + return &vmx->vcpu; free_vmcs: - free_vmcs(vmx->vmcs); + free_vmcs(vmx->loaded_vmcs->vmcs); free_msrs: kfree(vmx->guest_msrs); uninit_vcpu: @@ -4512,6 +6317,650 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) { + if (func == 1 && nested) + entry->ecx |= bit(X86_FEATURE_VMX); +} + +/* + * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested + * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it + * with L0's requirements for its guest (a.k.a. vmsc01), so we can run the L2 + * guest in a way that will both be appropriate to L1's requests, and our + * needs. In addition to modifying the active vmcs (which is vmcs02), this + * function also has additional necessary side-effects, like setting various + * vcpu->arch fields. + */ +static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 exec_control; + + vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); + vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); + vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); + vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); + vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); + vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); + vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); + vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); + vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); + vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); + vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); + vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); + vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); + vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); + vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); + vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); + vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); + vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); + vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); + vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); + vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); + vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); + vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); + vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); + vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); + vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); + vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); + vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); + vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); + vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); + vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); + vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); + vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); + vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); + vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); + vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); + + vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, + vmcs12->vm_entry_intr_info_field); + vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, + vmcs12->vm_entry_exception_error_code); + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, + vmcs12->vm_entry_instruction_len); + vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, + vmcs12->guest_interruptibility_info); + 
vmcs_write32(GUEST_ACTIVITY_STATE, vmcs12->guest_activity_state); + vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); + vmcs_writel(GUEST_DR7, vmcs12->guest_dr7); + vmcs_writel(GUEST_RFLAGS, vmcs12->guest_rflags); + vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, + vmcs12->guest_pending_dbg_exceptions); + vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); + vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); + + vmcs_write64(VMCS_LINK_POINTER, -1ull); + + vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, + (vmcs_config.pin_based_exec_ctrl | + vmcs12->pin_based_vm_exec_control)); + + /* + * Whether page-faults are trapped is determined by a combination of + * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. + * If enable_ept, L0 doesn't care about page faults and we should + * set all of these to L1's desires. However, if !enable_ept, L0 does + * care about (at least some) page faults, and because it is not easy + * (if at all possible?) to merge L0 and L1's desires, we simply ask + * to exit on each and every L2 page fault. This is done by setting + * MASK=MATCH=0 and (see below) EB.PF=1. + * Note that below we don't need special code to set EB.PF beyond the + * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, + * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when + * !enable_ept, EB.PF is 1, so the "or" will always be 1. + * + * A problem with this approach (when !enable_ept) is that L1 may be + * injected with more page faults than it asked for. This could have + * caused problems, but in practice existing hypervisors don't care. + * To fix this, we will need to emulate the PFEC checking (on the L1 + * page tables), using walk_addr(), when injecting PFs to L1. + */ + vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, + enable_ept ? vmcs12->page_fault_error_code_mask : 0); + vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, + enable_ept ? vmcs12->page_fault_error_code_match : 0); + + if (cpu_has_secondary_exec_ctrls()) { + u32 exec_control = vmx_secondary_exec_control(vmx); + if (!vmx->rdtscp_enabled) + exec_control &= ~SECONDARY_EXEC_RDTSCP; + /* Take the following fields only from vmcs12 */ + exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + if (nested_cpu_has(vmcs12, + CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) + exec_control |= vmcs12->secondary_vm_exec_control; + + if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) { + /* + * Translate L1 physical address to host physical + * address for vmcs02. Keep the page pinned, so this + * physical address remains valid. We keep a reference + * to it so we can release it later. + */ + if (vmx->nested.apic_access_page) /* shouldn't happen */ + nested_release_page(vmx->nested.apic_access_page); + vmx->nested.apic_access_page = + nested_get_page(vcpu, vmcs12->apic_access_addr); + /* + * If translation failed, no matter: This feature asks + * to exit when accessing the given address, and if it + * can never be accessed, this feature won't do + * anything anyway. + */ + if (!vmx->nested.apic_access_page) + exec_control &= + ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + else + vmcs_write64(APIC_ACCESS_ADDR, + page_to_phys(vmx->nested.apic_access_page)); + } + + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); + } + + + /* + * Set host-state according to L0's settings (vmcs12 is irrelevant here) + * Some constant fields are set here by vmx_set_constant_host_state(). 
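For the page-fault filtering chosen above, the underlying hardware rule is worth spelling out. A small sketch, assuming the usual SDM semantics for PFEC_MASK/PFEC_MATCH (this is an illustration, not code from the patch):

#include <stdbool.h>
#include <stdint.h>

#define PF_VECTOR 14

/*
 * If (error_code & PFEC_MASK) == PFEC_MATCH, a page fault follows bit 14 of
 * the exception bitmap as-is; otherwise the meaning of that bit is inverted.
 */
static bool pf_causes_vmexit(uint32_t exception_bitmap,
			     uint32_t pfec_mask, uint32_t pfec_match,
			     uint32_t error_code)
{
	bool eb_pf = exception_bitmap & (1u << PF_VECTOR);
	bool match = (error_code & pfec_mask) == pfec_match;

	return eb_pf == match;
}

/*
 * With the settings used above for !enable_ept (MASK = MATCH = 0, EB.PF = 1),
 * "match" is always true, so every L2 page fault exits to L0:
 *	pf_causes_vmexit(1u << PF_VECTOR, 0, 0, any_error_code) == true
 */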
+ * Other fields are different per CPU, and will be set later when + * vmx_vcpu_load() is called, and when vmx_save_host_state() is called. + */ + vmx_set_constant_host_state(); + + /* + * HOST_RSP is normally set correctly in vmx_vcpu_run() just before + * entry, but only if the current (host) sp changed from the value + * we wrote last (vmx->host_rsp). This cache is no longer relevant + * if we switch vmcs, and rather than hold a separate cache per vmcs, + * here we just force the write to happen on entry. + */ + vmx->host_rsp = 0; + + exec_control = vmx_exec_control(vmx); /* L0's desires */ + exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; + exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; + exec_control &= ~CPU_BASED_TPR_SHADOW; + exec_control |= vmcs12->cpu_based_vm_exec_control; + /* + * Merging of IO and MSR bitmaps not currently supported. + * Rather, exit every time. + */ + exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; + exec_control &= ~CPU_BASED_USE_IO_BITMAPS; + exec_control |= CPU_BASED_UNCOND_IO_EXITING; + + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); + + /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the + * bitwise-or of what L1 wants to trap for L2, and what we want to + * trap. Note that CR0.TS also needs updating - we do this later. + */ + update_exception_bitmap(vcpu); + vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; + vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); + + /* Note: IA32_MODE, LOAD_IA32_EFER are modified by vmx_set_efer below */ + vmcs_write32(VM_EXIT_CONTROLS, + vmcs12->vm_exit_controls | vmcs_config.vmexit_ctrl); + vmcs_write32(VM_ENTRY_CONTROLS, vmcs12->vm_entry_controls | + (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); + + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) + vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); + else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) + vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); + + + set_cr4_guest_host_mask(vmx); + + vmcs_write64(TSC_OFFSET, + vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); + + if (enable_vpid) { + /* + * Trivially support vpid by letting L2s share their parent + * L1's vpid. TODO: move to a more elaborate solution, giving + * each L2 its own vpid and exposing the vpid feature to L1. + */ + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); + vmx_flush_tlb(vcpu); + } + + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) + vcpu->arch.efer = vmcs12->guest_ia32_efer; + if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) + vcpu->arch.efer |= (EFER_LMA | EFER_LME); + else + vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); + /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ + vmx_set_efer(vcpu, vcpu->arch.efer); + + /* + * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified + * TS bit (for lazy fpu) and bits which we consider mandatory enabled. + * The CR0_READ_SHADOW is what L2 should have expected to read given + * the specifications by L1; It's not enough to take + * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we + * have more bits than L1 expected. 
+ */ + vmx_set_cr0(vcpu, vmcs12->guest_cr0); + vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); + + vmx_set_cr4(vcpu, vmcs12->guest_cr4); + vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); + + /* shadow page tables on either EPT or shadow page tables */ + kvm_set_cr3(vcpu, vmcs12->guest_cr3); + kvm_mmu_reset_context(vcpu); + + kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); + kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); +} + +/* + * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 + * for running an L2 nested guest. + */ +static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) +{ + struct vmcs12 *vmcs12; + struct vcpu_vmx *vmx = to_vmx(vcpu); + int cpu; + struct loaded_vmcs *vmcs02; + + if (!nested_vmx_check_permission(vcpu) || + !nested_vmx_check_vmcs12(vcpu)) + return 1; + + skip_emulated_instruction(vcpu); + vmcs12 = get_vmcs12(vcpu); + + /* + * The nested entry process starts with enforcing various prerequisites + * on vmcs12 as required by the Intel SDM, and act appropriately when + * they fail: As the SDM explains, some conditions should cause the + * instruction to fail, while others will cause the instruction to seem + * to succeed, but return an EXIT_REASON_INVALID_STATE. + * To speed up the normal (success) code path, we should avoid checking + * for misconfigurations which will anyway be caught by the processor + * when using the merged vmcs02. + */ + if (vmcs12->launch_state == launch) { + nested_vmx_failValid(vcpu, + launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS + : VMXERR_VMRESUME_NONLAUNCHED_VMCS); + return 1; + } + + if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) && + !IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) { + /*TODO: Also verify bits beyond physical address width are 0*/ + nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + return 1; + } + + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && + !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) { + /*TODO: Also verify bits beyond physical address width are 0*/ + nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + return 1; + } + + if (vmcs12->vm_entry_msr_load_count > 0 || + vmcs12->vm_exit_msr_load_count > 0 || + vmcs12->vm_exit_msr_store_count > 0) { + if (printk_ratelimit()) + printk(KERN_WARNING + "%s: VMCS MSR_{LOAD,STORE} unsupported\n", __func__); + nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + return 1; + } + + if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, + nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) || + !vmx_control_verify(vmcs12->secondary_vm_exec_control, + nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) || + !vmx_control_verify(vmcs12->pin_based_vm_exec_control, + nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) || + !vmx_control_verify(vmcs12->vm_exit_controls, + nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) || + !vmx_control_verify(vmcs12->vm_entry_controls, + nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high)) + { + nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + return 1; + } + + if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) || + ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) { + nested_vmx_failValid(vcpu, + VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); + return 1; + } + + if (((vmcs12->guest_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) || + ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) { + 
nested_vmx_entry_failure(vcpu, vmcs12, + EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); + return 1; + } + if (vmcs12->vmcs_link_pointer != -1ull) { + nested_vmx_entry_failure(vcpu, vmcs12, + EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR); + return 1; + } + + /* + * We're finally done with prerequisite checking, and can start with + * the nested entry. + */ + + vmcs02 = nested_get_current_vmcs02(vmx); + if (!vmcs02) + return -ENOMEM; + + enter_guest_mode(vcpu); + + vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); + + cpu = get_cpu(); + vmx->loaded_vmcs = vmcs02; + vmx_vcpu_put(vcpu); + vmx_vcpu_load(vcpu, cpu); + vcpu->cpu = cpu; + put_cpu(); + + vmcs12->launch_state = 1; + + prepare_vmcs02(vcpu, vmcs12); + + /* + * Note no nested_vmx_succeed or nested_vmx_fail here. At this point + * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet + * returned as far as L1 is concerned. It will only return (and set + * the success flag) when L2 exits (see nested_vmx_vmexit()). + */ + return 1; +} + +/* + * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date + * because L2 may have changed some cr0 bits directly (CRO_GUEST_HOST_MASK). + * This function returns the new value we should put in vmcs12.guest_cr0. + * It's not enough to just return the vmcs02 GUEST_CR0. Rather, + * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now + * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 + * didn't trap the bit, because if L1 did, so would L0). + * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have + * been modified by L2, and L1 knows it. So just leave the old value of + * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 + * isn't relevant, because if L0 traps this bit it can set it to anything. + * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have + * changed these bits, and therefore they need to be updated, but L0 + * didn't necessarily allow them to be changed in GUEST_CR0 - and rather + * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. + */ +static inline unsigned long +vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +{ + return + /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | + /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | + /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | + vcpu->arch.cr0_guest_owned_bits)); +} + +static inline unsigned long +vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +{ + return + /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | + /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | + /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | + vcpu->arch.cr4_guest_owned_bits)); +} + +/* + * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits + * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), + * and this function updates it to reflect the changes to the guest state while + * L2 was running (and perhaps made some exits which were handled directly by L0 + * without going back to L1), and to reflect the exit reason. + * Note that we do not have to copy here all VMCS fields, just those that + * could have changed by the L2 guest or the exit - i.e., the guest-state and + * exit-information fields only. Other fields are modified by L1 with VMWRITE, + * which already writes to vmcs12 directly. 
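A concrete (invented) scenario may help with the three cases above: suppose L1 leaves CR0.TS untrapped but L0 shadows it for lazy FPU switching, and L2 executes CLTS. TS then falls under case 3 and must be taken from vmcs02's CR0_READ_SHADOW, which handle_clts() kept in sync with L2's intent, not from the GUEST_CR0 value that L0 controls. A small sketch of that merge with made-up masks:

#include <assert.h>

#define X86_CR0_TS 0x8UL

/* The three cases from the comment above, folded into one expression. */
static unsigned long merge_guest_cr0(unsigned long vmcs02_guest_cr0,
				     unsigned long vmcs02_read_shadow,
				     unsigned long vmcs12_guest_cr0,
				     unsigned long l1_mask,	/* cr0_guest_host_mask */
				     unsigned long guest_owned)	/* trapped by neither L0 nor L1 */
{
	return (vmcs02_guest_cr0 & guest_owned) |		/* case 1 */
	       (vmcs12_guest_cr0 & l1_mask) |			/* case 2 */
	       (vmcs02_read_shadow & ~(l1_mask | guest_owned));	/* case 3 */
}

int main(void)
{
	/* Neither mask contains TS: L1 does not shadow it, and L0 does (so it
	 * is not guest-owned).  GUEST_CR0 still has TS set for lazy FPU, but
	 * the read shadow reflects the CLTS that L2 executed. */
	unsigned long cr0 = merge_guest_cr0(/* GUEST_CR0   */ X86_CR0_TS,
					    /* READ_SHADOW */ 0,
					    /* vmcs12 cr0  */ X86_CR0_TS,
					    /* l1_mask     */ 0,
					    /* guest_owned */ 0);
	assert(!(cr0 & X86_CR0_TS));	/* L1 sees the TS value L2 set */
	return 0;
}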
+ */ +void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +{ + /* update guest state fields: */ + vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); + vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); + + kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); + vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); + vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); + vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); + + vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); + vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); + vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); + vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); + vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); + vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); + vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); + vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); + vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); + vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); + vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); + vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); + vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); + vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); + vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); + vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); + vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); + vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); + vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); + vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); + vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); + vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); + vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); + vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); + vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); + vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); + vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); + vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); + vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); + vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); + vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); + vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); + vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); + vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); + vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); + vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); + + vmcs12->guest_activity_state = vmcs_read32(GUEST_ACTIVITY_STATE); + vmcs12->guest_interruptibility_info = + vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); + vmcs12->guest_pending_dbg_exceptions = + vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); + + /* TODO: These cannot have changed unless we have MSR bitmaps and + * the relevant bit asks not to trap the change */ + vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); + if (vmcs12->vm_entry_controls & VM_EXIT_SAVE_IA32_PAT) + vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); + vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); + vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); + vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); + + /* update exit information fields: */ + + vmcs12->vm_exit_reason = vmcs_read32(VM_EXIT_REASON); + vmcs12->exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + + vmcs12->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + 
vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); + vmcs12->idt_vectoring_info_field = + vmcs_read32(IDT_VECTORING_INFO_FIELD); + vmcs12->idt_vectoring_error_code = + vmcs_read32(IDT_VECTORING_ERROR_CODE); + vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + + /* clear vm-entry fields which are to be cleared on exit */ + if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) + vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; +} + +/* + * A part of what we need to when the nested L2 guest exits and we want to + * run its L1 parent, is to reset L1's guest state to the host state specified + * in vmcs12. + * This function is to be called not only on normal nested exit, but also on + * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry + * Failures During or After Loading Guest State"). + * This function should be called when the active VMCS is L1's (vmcs01). + */ +void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +{ + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) + vcpu->arch.efer = vmcs12->host_ia32_efer; + if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) + vcpu->arch.efer |= (EFER_LMA | EFER_LME); + else + vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); + vmx_set_efer(vcpu, vcpu->arch.efer); + + kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); + kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); + /* + * Note that calling vmx_set_cr0 is important, even if cr0 hasn't + * actually changed, because it depends on the current state of + * fpu_active (which may have changed). + * Note that vmx_set_cr0 refers to efer set above. + */ + kvm_set_cr0(vcpu, vmcs12->host_cr0); + /* + * If we did fpu_activate()/fpu_deactivate() during L2's run, we need + * to apply the same changes to L1's vmcs. We just set cr0 correctly, + * but we also need to update cr0_guest_host_mask and exception_bitmap. + */ + update_exception_bitmap(vcpu); + vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0); + vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); + + /* + * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01 + * (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask(); + */ + vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); + kvm_set_cr4(vcpu, vmcs12->host_cr4); + + /* shadow page tables on either EPT or shadow page tables */ + kvm_set_cr3(vcpu, vmcs12->host_cr3); + kvm_mmu_reset_context(vcpu); + + if (enable_vpid) { + /* + * Trivially support vpid by letting L2s share their parent + * L1's vpid. TODO: move to a more elaborate solution, giving + * each L2 its own vpid and exposing the vpid feature to L1. 
+ */ + vmx_flush_tlb(vcpu); + } + + + vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); + vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); + vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); + vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); + vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); + vmcs_writel(GUEST_TR_BASE, vmcs12->host_tr_base); + vmcs_writel(GUEST_GS_BASE, vmcs12->host_gs_base); + vmcs_writel(GUEST_FS_BASE, vmcs12->host_fs_base); + vmcs_write16(GUEST_ES_SELECTOR, vmcs12->host_es_selector); + vmcs_write16(GUEST_CS_SELECTOR, vmcs12->host_cs_selector); + vmcs_write16(GUEST_SS_SELECTOR, vmcs12->host_ss_selector); + vmcs_write16(GUEST_DS_SELECTOR, vmcs12->host_ds_selector); + vmcs_write16(GUEST_FS_SELECTOR, vmcs12->host_fs_selector); + vmcs_write16(GUEST_GS_SELECTOR, vmcs12->host_gs_selector); + vmcs_write16(GUEST_TR_SELECTOR, vmcs12->host_tr_selector); + + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) + vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) + vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, + vmcs12->host_ia32_perf_global_ctrl); +} + +/* + * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 + * and modify vmcs12 to make it see what it would expect to see there if + * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) + */ +static void nested_vmx_vmexit(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int cpu; + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + leave_guest_mode(vcpu); + prepare_vmcs12(vcpu, vmcs12); + + cpu = get_cpu(); + vmx->loaded_vmcs = &vmx->vmcs01; + vmx_vcpu_put(vcpu); + vmx_vcpu_load(vcpu, cpu); + vcpu->cpu = cpu; + put_cpu(); + + /* if no vmcs02 cache requested, remove the one we used */ + if (VMCS02_POOL_SIZE == 0) + nested_free_vmcs02(vmx, vmx->nested.current_vmptr); + + load_vmcs12_host_state(vcpu, vmcs12); + + /* Update TSC_OFFSET if vmx_adjust_tsc_offset() was used while L2 ran */ + vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); + + /* This is needed for same reason as it was needed in prepare_vmcs02 */ + vmx->host_rsp = 0; + + /* Unpin physical memory we referred to in vmcs02 */ + if (vmx->nested.apic_access_page) { + nested_release_page(vmx->nested.apic_access_page); + vmx->nested.apic_access_page = 0; + } + + /* + * Exiting from L2 to L1, we're now back to L1 which thinks it just + * finished a VMLAUNCH or VMRESUME instruction, so we need to set the + * success or failure flag accordingly. + */ + if (unlikely(vmx->fail)) { + vmx->fail = 0; + nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR)); + } else + nested_vmx_succeed(vcpu); +} + +/* + * L1's failure to enter L2 is a subset of a normal exit, as explained in + * 23.7 "VM-entry failures during or after loading guest state" (this also + * lists the acceptable exit-reason and exit-qualification parameters). + * It should only be called before L2 actually succeeded to run, and when + * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss). 
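One piece of bookkeeping restored above is the TSC offset: prepare_vmcs02() programmed the single hardware offset as the sum of L0's offset for L1 and L1's offset for L2, and nested_vmx_vmexit() puts L0's value back. A small sketch of that arithmetic (function names are illustrative):

#include <stdint.h>

/* What a guest reads via RDTSC, with offsetting enabled, is host_tsc + TSC_OFFSET. */
static uint64_t guest_tsc(uint64_t host_tsc, uint64_t tsc_offset)
{
	return host_tsc + tsc_offset;
}

/* While L2 runs, one hardware offset must account for both levels, so the
 * vmcs02 value written by prepare_vmcs02() is the sum; nested_vmx_vmexit()
 * restores vmcs01_tsc_offset when L2 exits. */
static uint64_t vmcs02_tsc_offset(uint64_t vmcs01_tsc_offset,
				  uint64_t vmcs12_tsc_offset)
{
	return vmcs01_tsc_offset + vmcs12_tsc_offset;
}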
+ */ +static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12, + u32 reason, unsigned long qualification) +{ + load_vmcs12_host_state(vcpu, vmcs12); + vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY; + vmcs12->exit_qualification = qualification; + nested_vmx_succeed(vcpu); } static int vmx_check_intercept(struct kvm_vcpu *vcpu, @@ -4670,16 +7119,13 @@ static int __init vmx_init(void) vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); if (enable_ept) { - bypass_guest_pf = 0; kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, VMX_EPT_EXECUTABLE_MASK); + ept_set_mmio_spte_mask(); kvm_enable_tdp(); } else kvm_disable_tdp(); - if (bypass_guest_pf) - kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); - return 0; out3: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 77c9d86..84a28ea 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -347,6 +347,7 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) vcpu->arch.cr2 = fault->address; kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); } +EXPORT_SYMBOL_GPL(kvm_inject_page_fault); void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { @@ -579,6 +580,22 @@ static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) return best && (best->ecx & bit(X86_FEATURE_XSAVE)); } +static bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 7, 0); + return best && (best->ebx & bit(X86_FEATURE_SMEP)); +} + +static bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 7, 0); + return best && (best->ebx & bit(X86_FEATURE_FSGSBASE)); +} + static void update_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; @@ -598,14 +615,20 @@ static void update_cpuid(struct kvm_vcpu *vcpu) int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); - unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; - + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | + X86_CR4_PAE | X86_CR4_SMEP; if (cr4 & CR4_RESERVED_BITS) return 1; if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) return 1; + if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) + return 1; + + if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS)) + return 1; + if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; @@ -615,11 +638,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) kvm_read_cr3(vcpu))) return 1; - if (cr4 & X86_CR4_VMXE) + if (kvm_x86_ops->set_cr4(vcpu, cr4)) return 1; - kvm_x86_ops->set_cr4(vcpu, cr4); - if ((cr4 ^ old_cr4) & pdptr_bits) kvm_mmu_reset_context(vcpu); @@ -787,12 +808,12 @@ EXPORT_SYMBOL_GPL(kvm_get_dr); * kvm-specific. Those are put in the beginning of the list. 
*/ -#define KVM_SAVE_MSRS_BEGIN 8 +#define KVM_SAVE_MSRS_BEGIN 9 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, - HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, + HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_STAR, #ifdef CONFIG_X86_64 @@ -1388,7 +1409,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) return 1; kvm_x86_ops->patch_hypercall(vcpu, instructions); ((unsigned char *)instructions)[3] = 0xc3; /* ret */ - if (copy_to_user((void __user *)addr, instructions, 4)) + if (__copy_to_user((void __user *)addr, instructions, 4)) return 1; kvm->arch.hv_hypercall = data; break; @@ -1415,7 +1436,7 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); if (kvm_is_error_hva(addr)) return 1; - if (clear_user((void __user *)addr, PAGE_SIZE)) + if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; break; @@ -1467,6 +1488,35 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu) } } +static void accumulate_steal_time(struct kvm_vcpu *vcpu) +{ + u64 delta; + + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) + return; + + delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; + vcpu->arch.st.last_steal = current->sched_info.run_delay; + vcpu->arch.st.accum_steal = delta; +} + +static void record_steal_time(struct kvm_vcpu *vcpu) +{ + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) + return; + + if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, + &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) + return; + + vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; + vcpu->arch.st.steal.version += 2; + vcpu->arch.st.accum_steal = 0; + + kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, + &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); +} + int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { @@ -1549,6 +1599,33 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) if (kvm_pv_enable_async_pf(vcpu, data)) return 1; break; + case MSR_KVM_STEAL_TIME: + + if (unlikely(!sched_info_on())) + return 1; + + if (data & KVM_STEAL_RESERVED_MASK) + return 1; + + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, + data & KVM_STEAL_VALID_BITS)) + return 1; + + vcpu->arch.st.msr_val = data; + + if (!(data & KVM_MSR_ENABLED)) + break; + + vcpu->arch.st.last_steal = current->sched_info.run_delay; + + preempt_disable(); + accumulate_steal_time(vcpu); + preempt_enable(); + + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + + break; + case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... 
MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: @@ -1834,6 +1911,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) case MSR_KVM_ASYNC_PF_EN: data = vcpu->arch.apf.msr_val; break; + case MSR_KVM_STEAL_TIME: + data = vcpu->arch.st.msr_val; + break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: @@ -2145,6 +2225,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_migrate_timers(vcpu); vcpu->cpu = cpu; } + + accumulate_steal_time(vcpu); + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -2283,6 +2366,13 @@ static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->flags = 0; } +static bool supported_xcr0_bit(unsigned bit) +{ + u64 mask = ((u64)1 << bit); + + return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0; +} + #define F(x) bit(X86_FEATURE_##x) static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, @@ -2328,7 +2418,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, 0 /* Reserved, DCA */ | F(XMM4_1) | F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) | 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) | - F(F16C); + F(F16C) | F(RDRAND); /* cpuid 0x80000001.ecx */ const u32 kvm_supported_word6_x86_features = F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | @@ -2342,6 +2432,10 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) | F(PMM) | F(PMM_EN); + + /* cpuid 7.0.ebx */ + const u32 kvm_supported_word9_x86_features = + F(SMEP) | F(FSGSBASE) | F(ERMS); + /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); do_cpuid_1_ent(entry, function, index); @@ -2376,7 +2470,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, } break; } - /* function 4 and 0xb have additional index. */ + /* function 4 has additional index. */ case 4: { int i, cache_type; @@ -2393,6 +2487,22 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, } break; } + case 7: { + entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + /* Mask ebx against host capability word 9 */ + if (index == 0) { + entry->ebx &= kvm_supported_word9_x86_features; + cpuid_mask(&entry->ebx, 9); + } else + entry->ebx = 0; + entry->eax = 0; + entry->ecx = 0; + entry->edx = 0; + break; + } + case 9: + break; + /* function 0xb has additional index.
*/ case 0xb: { int i, level_type; @@ -2410,16 +2520,17 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, break; } case 0xd: { - int i; + int idx, i; entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; - for (i = 1; *nent < maxnent && i < 64; ++i) { - if (entry[i].eax == 0) + for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) { + do_cpuid_1_ent(&entry[i], function, idx); + if (entry[i].eax == 0 || !supported_xcr0_bit(idx)) continue; - do_cpuid_1_ent(&entry[i], function, i); entry[i].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; ++*nent; + ++i; } break; } @@ -2438,6 +2549,10 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, (1 << KVM_FEATURE_CLOCKSOURCE2) | (1 << KVM_FEATURE_ASYNC_PF) | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT); + + if (sched_info_on()) + entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); + entry->ebx = 0; entry->ecx = 0; entry->edx = 0; @@ -2451,6 +2566,24 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->ecx &= kvm_supported_word6_x86_features; cpuid_mask(&entry->ecx, 6); break; + case 0x80000008: { + unsigned g_phys_as = (entry->eax >> 16) & 0xff; + unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U); + unsigned phys_as = entry->eax & 0xff; + + if (!g_phys_as) + g_phys_as = phys_as; + entry->eax = g_phys_as | (virt_as << 8); + entry->ebx = entry->edx = 0; + break; + } + case 0x80000019: + entry->ecx = entry->edx = 0; + break; + case 0x8000001a: + break; + case 0x8000001d: + break; /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: /*Just support up to 0xC0000004 now*/ @@ -2460,10 +2593,16 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->edx &= kvm_supported_word5_x86_features; cpuid_mask(&entry->edx, 5); break; + case 3: /* Processor serial number */ + case 5: /* MONITOR/MWAIT */ + case 6: /* Thermal management */ + case 0xA: /* Architectural Performance Monitoring */ + case 0x80000007: /* Advanced power management */ case 0xC0000002: case 0xC0000003: case 0xC0000004: - /*Now nothing to do, reserved for the future*/ + default: + entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } @@ -3817,7 +3956,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, exception); } -static int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, +int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { @@ -3827,6 +3966,7 @@ static int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } +EXPORT_SYMBOL_GPL(kvm_read_guest_virt); static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, @@ -3836,7 +3976,7 @@ static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); } -static int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, +int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) @@ -3868,6 +4008,42 @@ static int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, out: return r; } +EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); + +static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, + gpa_t *gpa, struct x86_exception *exception, + bool write) +{ + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; + + if (vcpu_match_mmio_gva(vcpu, gva) && + check_write_user_access(vcpu, write, access, + vcpu->arch.access)) { + *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | + (gva & (PAGE_SIZE - 1)); + trace_vcpu_match_mmio(gva, *gpa, write, false); + return 1; + } + + if (write) + access |= PFERR_WRITE_MASK; + + *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); + + if (*gpa == UNMAPPED_GVA) + return -1; + + /* For APIC access vmexit */ + if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) + return 1; + + if (vcpu_match_mmio_gpa(vcpu, *gpa)) { + trace_vcpu_match_mmio(gva, *gpa, write, true); + return 1; + } + + return 0; +} static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, @@ -3876,8 +4052,8 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - gpa_t gpa; - int handled; + gpa_t gpa; + int handled, ret; if (vcpu->mmio_read_completed) { memcpy(val, vcpu->mmio_data, bytes); @@ -3887,13 +4063,12 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, return X86EMUL_CONTINUE; } - gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception); + ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, false); - if (gpa == UNMAPPED_GVA) + if (ret < 0) return X86EMUL_PROPAGATE_FAULT; - /* For APIC access vmexit */ - if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) + if (ret) goto mmio; if (kvm_read_guest_virt(ctxt, addr, val, bytes, exception) @@ -3944,16 +4119,16 @@ static int emulator_write_emulated_onepage(unsigned long addr, struct x86_exception *exception, struct kvm_vcpu *vcpu) { - gpa_t gpa; - int handled; + gpa_t gpa; + int handled, ret; - gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception); + ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, true); - if (gpa == UNMAPPED_GVA) + if (ret < 0) return X86EMUL_PROPAGATE_FAULT; /* For APIC access vmexit */ - if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) + if (ret) goto mmio; if (emulator_write_phys(vcpu, gpa, val, bytes)) @@ -4473,9 +4648,24 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu) kvm_queue_exception(vcpu, ctxt->exception.vector); } +static void init_decode_cache(struct x86_emulate_ctxt *ctxt, + const unsigned long *regs) +{ + memset(&ctxt->twobyte, 0, + (void *)&ctxt->regs - (void *)&ctxt->twobyte); + memcpy(ctxt->regs, regs, sizeof(ctxt->regs)); + + ctxt->fetch.start = 0; + ctxt->fetch.end = 0; + ctxt->io_read.pos = 0; + ctxt->io_read.end = 0; + ctxt->mem_read.pos = 0; + ctxt->mem_read.end = 0; +} + static void init_emulate_ctxt(struct kvm_vcpu *vcpu) { - struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; + struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int cs_db, cs_l; /* @@ -4488,40 +4678,38 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); - vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu); - vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu); - vcpu->arch.emulate_ctxt.mode = - (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : - (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM) - ? X86EMUL_MODE_VM86 : cs_l - ? X86EMUL_MODE_PROT64 : cs_db - ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; - vcpu->arch.emulate_ctxt.guest_mode = is_guest_mode(vcpu); - memset(c, 0, sizeof(struct decode_cache)); - memcpy(c->regs, vcpu->arch.regs, sizeof c->regs); + ctxt->eflags = kvm_get_rflags(vcpu); + ctxt->eip = kvm_rip_read(vcpu); + ctxt->mode = (!is_protmode(vcpu)) ? 
X86EMUL_MODE_REAL : + (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : + cs_l ? X86EMUL_MODE_PROT64 : + cs_db ? X86EMUL_MODE_PROT32 : + X86EMUL_MODE_PROT16; + ctxt->guest_mode = is_guest_mode(vcpu); + + init_decode_cache(ctxt, vcpu->arch.regs); vcpu->arch.emulate_regs_need_sync_from_vcpu = false; } int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) { - struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; + struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int ret; init_emulate_ctxt(vcpu); - vcpu->arch.emulate_ctxt.decode.op_bytes = 2; - vcpu->arch.emulate_ctxt.decode.ad_bytes = 2; - vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip + - inc_eip; - ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq); + ctxt->op_bytes = 2; + ctxt->ad_bytes = 2; + ctxt->_eip = ctxt->eip + inc_eip; + ret = emulate_int_real(ctxt, irq); if (ret != X86EMUL_CONTINUE) return EMULATE_FAIL; - vcpu->arch.emulate_ctxt.eip = c->eip; - memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); - kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); - kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); + ctxt->eip = ctxt->_eip; + memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs); + kvm_rip_write(vcpu, ctxt->eip); + kvm_set_rflags(vcpu, ctxt->eflags); if (irq == NMI_VECTOR) vcpu->arch.nmi_pending = false; @@ -4582,21 +4770,21 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, int insn_len) { int r; - struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; + struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; bool writeback = true; kvm_clear_exception_queue(vcpu); if (!(emulation_type & EMULTYPE_NO_DECODE)) { init_emulate_ctxt(vcpu); - vcpu->arch.emulate_ctxt.interruptibility = 0; - vcpu->arch.emulate_ctxt.have_exception = false; - vcpu->arch.emulate_ctxt.perm_ok = false; + ctxt->interruptibility = 0; + ctxt->have_exception = false; + ctxt->perm_ok = false; - vcpu->arch.emulate_ctxt.only_vendor_specific_insn + ctxt->only_vendor_specific_insn = emulation_type & EMULTYPE_TRAP_UD; - r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len); + r = x86_decode_insn(ctxt, insn, insn_len); trace_kvm_emulate_insn_start(vcpu); ++vcpu->stat.insn_emulation; @@ -4612,7 +4800,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, } if (emulation_type & EMULTYPE_SKIP) { - kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip); + kvm_rip_write(vcpu, ctxt->_eip); return EMULATE_DONE; } @@ -4620,11 +4808,11 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, changes registers values during IO operation */ if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { vcpu->arch.emulate_regs_need_sync_from_vcpu = false; - memcpy(c->regs, vcpu->arch.regs, sizeof c->regs); + memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs); } restart: - r = x86_emulate_insn(&vcpu->arch.emulate_ctxt); + r = x86_emulate_insn(ctxt); if (r == EMULATION_INTERCEPTED) return EMULATE_DONE; @@ -4636,7 +4824,7 @@ restart: return handle_emulation_failure(vcpu); } - if (vcpu->arch.emulate_ctxt.have_exception) { + if (ctxt->have_exception) { inject_emulated_exception(vcpu); r = EMULATE_DONE; } else if (vcpu->arch.pio.count) { @@ -4655,13 +4843,12 @@ restart: r = EMULATE_DONE; if (writeback) { - toggle_interruptibility(vcpu, - vcpu->arch.emulate_ctxt.interruptibility); - kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); + toggle_interruptibility(vcpu, ctxt->interruptibility); + kvm_set_rflags(vcpu, ctxt->eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); - memcpy(vcpu->arch.regs, c->regs, 
sizeof c->regs); + memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; - kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); + kvm_rip_write(vcpu, ctxt->eip); } else vcpu->arch.emulate_regs_need_sync_to_vcpu = true; @@ -4878,6 +5065,30 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); +static void kvm_set_mmio_spte_mask(void) +{ + u64 mask; + int maxphyaddr = boot_cpu_data.x86_phys_bits; + + /* + * Set the reserved bits and the present bit of a paging-structure + * entry to generate a page fault with PFER.RSV = 1. + */ + mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr; + mask |= 1ull; + +#ifdef CONFIG_X86_64 + /* + * If the reserved bit is not supported, clear the present bit to + * disable mmio page fault. + */ + if (maxphyaddr == 52) + mask &= ~1ull; +#endif + + kvm_mmu_set_mmio_spte_mask(mask); +} + int kvm_arch_init(void *opaque) { int r; @@ -4904,10 +5115,10 @@ int kvm_arch_init(void *opaque) if (r) goto out; + kvm_set_mmio_spte_mask(); kvm_init_msr_list(); kvm_x86_ops = ops; - kvm_mmu_set_nonpresent_ptes(0ull, 0ull); kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK, 0); @@ -5082,8 +5293,7 @@ int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) kvm_x86_ops->patch_hypercall(vcpu, instruction); - return emulator_write_emulated(&vcpu->arch.emulate_ctxt, - rip, instruction, 3, NULL); + return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); } static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) @@ -5384,6 +5594,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) r = 1; goto out; } + if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) + record_steal_time(vcpu); + } r = kvm_mmu_reload(vcpu); @@ -5671,8 +5884,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) * that usually, but some bad designed PV devices (vmware * backdoor interface) need this to work */ - struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; - memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); + struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; + memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; } regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); @@ -5801,21 +6014,20 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason, bool has_error_code, u32 error_code) { - struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode; + struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int ret; init_emulate_ctxt(vcpu); - ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, - tss_selector, reason, has_error_code, - error_code); + ret = emulator_task_switch(ctxt, tss_selector, reason, + has_error_code, error_code); if (ret) return EMULATE_FAIL; - memcpy(vcpu->arch.regs, c->regs, sizeof c->regs); - kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip); - kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); + memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs); + kvm_rip_write(vcpu, ctxt->eip); + kvm_set_rflags(vcpu, ctxt->eflags); kvm_make_request(KVM_REQ_EVENT, vcpu); return EMULATE_DONE; } @@ -6093,12 +6305,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) if (r == 0) r = kvm_mmu_setup(vcpu); vcpu_put(vcpu); - if (r < 0) - goto free_vcpu; - return 0; -free_vcpu: - kvm_x86_ops->vcpu_free(vcpu); return r; } @@ -6126,6 +6333,7 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.apf.msr_val = 0; + vcpu->arch.st.msr_val = 0; kvmclock_reset(vcpu); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index e407ed3..d36fe23 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -75,10 +75,54 @@ static inline u32 bit(int bitno) return 1 << (bitno & 31); } +static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu, + gva_t gva, gfn_t gfn, unsigned access) +{ + vcpu->arch.mmio_gva = gva & PAGE_MASK; + vcpu->arch.access = access; + vcpu->arch.mmio_gfn = gfn; +} + +/* + * Clear the mmio cache info for the given gva, + * in particular, if gva is ~0ul, we clear all mmio cache info. + */ +static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva) +{ + if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK)) + return; + + vcpu->arch.mmio_gva = 0; +} + +static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva) +{ + if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK)) + return true; + + return false; +} + +static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) +{ + if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT) + return true; + + return false; +} + void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data); +int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, + gva_t addr, void *val, unsigned int bytes, + struct x86_exception *exception); + +int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, + gva_t addr, void *val, unsigned int bytes, + struct x86_exception *exception); + #endif
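Illustrative note (not part of the patch): the mask built by kvm_set_mmio_spte_mask() above sets the reserved PTE bits [maxphyaddr, 62] plus the present bit, so an access through such an MMIO spte faults with the reserved-bit error code; when maxphyaddr is 52 there is no reserved bit left, and the present bit is cleared instead. A minimal user-space C sketch of that arithmetic, assuming a hypothetical maxphyaddr of 36:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int maxphyaddr = 36;	/* hypothetical CPU physical-address width */
	uint64_t mask;

	/* reserved bits [maxphyaddr, 62] ... */
	mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
	/* ... plus the present bit (bit 0) */
	mask |= 1ull;

	/* with maxphyaddr == 52 no reserved bit is available */
	if (maxphyaddr == 52)
		mask &= ~1ull;

	printf("mmio spte mask: %#018llx\n", (unsigned long long)mask);
	return 0;
}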