# HG changeset patch
# User cegger
# Date 1275382026 -7200
Allow paged real mode during vmrun emulation.
Emulate cr0 and cr4 when guest does not intercept them.

diff -r 93d2b0f9f955 -r b009d9b7b708 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 
 static void hvmtrace_io_assist(int is_mmio, ioreq_t *p)
 {
@@ -780,13 +781,17 @@ static int hvmemul_read_cr(
     unsigned long *val,
     struct x86_emulate_ctxt *ctxt)
 {
+    struct vcpu *v = current;
     switch ( reg )
     {
-    case 0:
     case 2:
     case 3:
+        *val = v->arch.hvm_vcpu.guest_cr[reg];
+        HVMTRACE_LONG_2D(CR_READ, reg, TRC_PAR_LONG(*val));
+        return X86EMUL_OKAY;
+    case 0:
     case 4:
-        *val = current->arch.hvm_vcpu.guest_cr[reg];
+        *val = hvm_get_cr(v, reg);
         HVMTRACE_LONG_2D(CR_READ, reg, TRC_PAR_LONG(*val));
         return X86EMUL_OKAY;
     default:
@@ -801,18 +806,32 @@ static int hvmemul_write_cr(
     unsigned long val,
     struct x86_emulate_ctxt *ctxt)
 {
+    struct vcpu *v = current;
+    bool_t vcpu_guestmode;
+
+    vcpu_guestmode = (nestedhvm_enabled(v->domain)
+        && nestedhvm_vcpu_in_guestmode(v));
+
     HVMTRACE_LONG_2D(CR_WRITE, reg, TRC_PAR_LONG(val));
     switch ( reg )
     {
     case 0:
-        return hvm_set_cr0(val);
+        if (vcpu_guestmode) {
+            VCPU_NESTEDHVM(v).nh_cr[0] = val;
+            return X86EMUL_OKAY;
+        } else
+            return hvm_set_cr0(val);
     case 2:
         current->arch.hvm_vcpu.guest_cr[2] = val;
         return X86EMUL_OKAY;
     case 3:
         return hvm_set_cr3(val);
     case 4:
-        return hvm_set_cr4(val);
+        if (vcpu_guestmode) {
+            VCPU_NESTEDHVM(v).nh_cr[4] = val;
+            return X86EMUL_OKAY;
+        } else
+            return hvm_set_cr4(val);
     default:
         break;
     }
diff -r 93d2b0f9f955 -r b009d9b7b708 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -54,6 +54,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1102,7 +1103,8 @@ int hvm_set_cr0(unsigned long value)
     /* ET is reserved and should be always be 1. */
     value |= X86_CR0_ET;
 
-    if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PG )
+    if ( !nestedhvm_emulate_vmrun(v) &&
+         (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PG )
         goto gpf;
 
     if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
@@ -1266,6 +1268,26 @@ int hvm_set_cr4(unsigned long value)
     return X86EMUL_EXCEPTION;
 }
 
+unsigned long hvm_get_cr(struct vcpu *v, unsigned int cr)
+{
+    switch (cr) {
+    case 1:
+    case 2:
+    case 3:
+        return v->arch.hvm_vcpu.guest_cr[cr];
+    case 0:
+    case 4:
+        break;
+    default:
+        BUG();
+    }
+
+    if (nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v)) {
+        return VCPU_NESTEDHVM(v).nh_cr[cr];
+    } else
+        return v->arch.hvm_vcpu.guest_cr[cr];
+}
+
 int hvm_virtual_to_linear_addr(
     enum x86_segment seg,
     struct segment_register *reg,
diff -r 93d2b0f9f955 -r b009d9b7b708 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -136,4 +136,6 @@ int hvm_set_cr4(unsigned long value);
 int hvm_msr_read_intercept(struct cpu_user_regs *regs);
 int hvm_msr_write_intercept(struct cpu_user_regs *regs);
 
+unsigned long hvm_get_cr(struct vcpu *v, unsigned int cr);
+
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */