# HG changeset patch
# User cegger
# Date 1280925500 -7200
Nested Virtualization core implementation

diff -r c19b4e3959c5 -r 917f9eb07fc3 xen/arch/x86/hvm/Makefile
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -10,6 +10,7 @@ obj-y += intercept.o
 obj-y += io.o
 obj-y += irq.o
 obj-y += mtrr.o
+obj-y += nestedhvm.o
 obj-y += pmtimer.o
 obj-y += quirks.o
 obj-y += rtc.o
diff -r c19b4e3959c5 -r 917f9eb07fc3 xen/arch/x86/hvm/nestedhvm.c
--- /dev/null
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -0,0 +1,673 @@
+/*
+ * Nested HVM
+ * Copyright (c) 2010, Advanced Micro Devices, Inc.
+ * Author: Christoph Egger
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include
+#include <asm/hvm/support.h> /* for HVM_DELIVER_NO_ERROR_CODE */
+#include
+#include
+#include <asm/event.h>       /* for local_event_delivery_(en|dis)able */
+#include <asm/paging.h>      /* for paging_mode_hap() */
+
+/* Nested HVM on/off per domain */
+bool_t
+nestedhvm_enabled(struct domain *d)
+{
+    bool_t enabled;
+
+    enabled = !!(d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM]);
+    /* sanity check */
+    BUG_ON(enabled && !is_hvm_domain(d));
+
+    if (!is_hvm_domain(d))
+        return 0;
+
+    return enabled;
+}
+
+/* Nested VM */
+bool_t
+nestedhvm_vmaddr_isvalid(struct nestedhvm *hvm, uint64_t addr)
+{
+    /* Maximum valid physical address.
+     * See AMD BKDG for HSAVE_PA MSR.
+     */
+    if (addr > hvm->nh_vmmaxaddr)
+        return 0;
+    if ((addr & ~PAGE_MASK) != 0)
+        return 0;
+    return 1;
+}
+
+/* Get VM from the guest for the nested guest. */
+enum hvm_copy_result
+nestedhvm_vm_fromguest(struct nestedhvm *hvm, uint64_t vmaddr)
+{
+    return hvm_copy_from_guest_phys(hvm->nh_vm, vmaddr,
+                                    hvm->nh_vmsize);
+}
+
+/* Put VM into the guest for the nested guest. */
+enum hvm_copy_result
+nestedhvm_vm_toguest(struct nestedhvm *hvm, uint64_t vmaddr)
+{
+    return hvm_copy_to_guest_phys(vmaddr, hvm->nh_vm,
+                                  hvm->nh_vmsize);
+}
+
+/* Nested VCPU */
+int
+nestedhvm_vcpu_state_validate(struct vcpu *v, uint64_t vmaddr)
+{
+    struct segment_register reg;
+
+    if ( !nestedhvm_enabled(v->domain) )
+        return TRAP_invalid_op;
+
+    if ( !hvm_svm_enabled(v) || hvm_guest_x86_mode(v) < 2 )
+        return TRAP_invalid_op;
+
+    /* if CPL != 0 inject #GP */
+    hvm_get_segment_register(v, x86_seg_ss, &reg);
+    if (reg.attr.fields.dpl != 0)
+        return TRAP_gp_fault;
+
+    if (!nestedhvm_vmaddr_isvalid(&VCPU_NESTEDHVM(v), vmaddr))
+        return TRAP_gp_fault;
+
+    return 0;
+}
+
+bool_t
+nestedhvm_vcpu_in_guestmode(struct vcpu *v)
+{
+    return VCPU_NESTEDHVM(v).nh_guestmode;
+}
+
+void
+nestedhvm_vcpu_reset(struct vcpu *v)
+{
+    struct nestedhvm *nh = &VCPU_NESTEDHVM(v);
+
+    nh->nh_hap_enabled = 0;
+    nh->nh_vmcb_cr3 = 0;
+    nh->nh_vmcb_hcr3 = 0;
+    nh->nh_guest_asid = 0;
+    nh->nh_flushp2m = 0;
+    nh->nh_p2m = NULL;
+
+    hvm_nestedhvm_vcpu_reset(v);
+
+    /* Always on */
+    nestedsvm_vcpu_stgi(v);
+    /* vcpu is in host mode */
+    nestedhvm_vcpu_exit_guestmode(v);
+}
+
+int
+nestedhvm_vcpu_initialise(struct vcpu *v)
+{
+    int rc;
+    struct nestedhvm *hvm = &VCPU_NESTEDHVM(v);
+
+    if (!nestedhvm_enabled(v->domain))
+        return 0;
+
+    memset(hvm, 0x0, sizeof(struct nestedhvm));
+
+    /* initialise hostsave, for example */
+    rc = hvm_nestedhvm_vcpu_initialise(v);
+    if (rc) {
+        hvm_nestedhvm_vcpu_destroy(v);
+        return rc;
+    }
+
+    nestedhvm_vcpu_reset(v);
+    return 0;
+}
+
+int
+nestedhvm_vcpu_destroy(struct vcpu *v)
+{
+    int ret = 0;
+
+    if (!nestedhvm_enabled(v->domain))
+        return 0;
+
+    if (nestedhvm_vcpu_in_guestmode(v)) {
+        nestedhvm_vcpu_exit_guestmode(v);
+        ret = -EBUSY;
+    }
+
+    /* Enable interrupts or the guest won't see any interrupts
+     * after the nested guest exited.
+     */
+    nestedsvm_vcpu_stgi(v);
+
+    hvm_nestedhvm_vcpu_destroy(v);
+    return ret;
+}
+
+int
+nestedhvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
+    uint64_t vmaddr, unsigned int inst_len)
+{
+    int ret;
+    enum nestedhvm_vmexits vmret;
+    enum hvm_copy_result rc;
+    struct nestedhvm *hvm = &VCPU_NESTEDHVM(v);
+    uint64_t exitreason;
+
+    hvm->nh_hostflags.fields.vmentry = 1;
+
+    ret = nestedhvm_vcpu_state_validate(v, vmaddr);
+    if (ret) {
+        gdprintk(XENLOG_ERR,
+            "nestedhvm_vcpu_state_validate failed, injecting 0x%x\n",
+            ret);
+        hvm_inject_exception(ret, HVM_DELIVER_NO_ERROR_CODE, 0);
+        return ret;
+    }
+
+    /* Save vmaddr. Needed for VMEXIT */
+    hvm->nh_vmaddr = vmaddr;
+
+    /* get nested vm */
+    rc = nestedhvm_vm_fromguest(hvm, vmaddr);
+    if (rc) {
+        vmret = NESTEDHVM_VMEXIT_FATALERROR;
+        gdprintk(XENLOG_ERR,
+            "nestedhvm_vm_fromguest failed, injecting #GP\n");
+        hvm_inject_exception(TRAP_gp_fault,
+            HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm->nh_hostflags.fields.vmentry = 0;
+        return TRAP_gp_fault;
+    }
+
+    ret = hvm_nestedhvm_vcpu_vmentry(v);
+    if (ret) {
+        gdprintk(XENLOG_ERR,
+            "hvm_nestedhvm_vcpu_vmentry failed, injecting #UD\n");
+        hvm_inject_exception(TRAP_invalid_op,
+            HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm->nh_hostflags.fields.vmentry = 0;
+        return ret;
+    }
+
+    /* save host state */
+    ret = hvm_nestedhvm_vcpu_hostsave(v, inst_len);
+    if (ret) {
+        gdprintk(XENLOG_ERR,
+            "hvm_nestedhvm_vcpu_hostsave failed, injecting #UD\n");
+        hvm_inject_exception(TRAP_invalid_op,
+            HVM_DELIVER_NO_ERROR_CODE, 0);
+        hvm->nh_hostflags.fields.vmentry = 0;
+        return ret;
+    }
+
+    /* Switch vcpu to guest mode. */
+    nestedhvm_vcpu_enter_guestmode(v);
+
+    /* prepare VM to run nested guest, validate VMCB/VMCS */
+    ret = hvm_nestedhvm_vm_prepare4vmentry(v, regs);
+    if (ret) {
+        exitreason = NESTEDHVM_INTERCEPT_INVALID;
+        goto err;
+    }
+
+    ret = nestedhvm_vm_toguest(hvm, vmaddr);
+    if (ret) {
+        exitreason = NESTEDHVM_INTERCEPT_SHUTDOWN;
+        goto err;
+    }
+
+    nestedsvm_vcpu_stgi(v);
+
+    hvm->nh_hostflags.fields.vmentry = 0;
+    return 0;
+
+err:
+    rc = hvm_nestedhvm_vcpu_hostrestore(v, regs);
+    if (rc)
+        exitreason = NESTEDHVM_INTERCEPT_SHUTDOWN;
+
+    hvm->nh_hostflags.fields.forcevmexit = 1;
+    hvm->nh_forcevmexit.exitcode = exitreason;
+    vmret = nestedhvm_vcpu_vmexit(v, regs, exitreason);
+    hvm->nh_hostflags.fields.forcevmexit = 0;
+    hvm->nh_hostflags.fields.vmentry = 0;
+    switch (vmret) {
+    case NESTEDHVM_VMEXIT_DONE:
+    case NESTEDHVM_VMEXIT_CONTINUE:
+    case NESTEDHVM_VMEXIT_HOST:
+        break;
+    default:
+        hvm_inject_exception(TRAP_gp_fault,
+            HVM_DELIVER_NO_ERROR_CODE, 0);
+        return TRAP_gp_fault;
+    }
+    return 0;
+}
+
+static enum nestedhvm_vmexits
+nestedhvm_vmexit_msr(unsigned long *msr_bitmap, uint32_t msr, bool_t write)
+{
+    bool_t enabled;
+    unsigned long *msr_bit = NULL;
+
+    /*
+     * See AMD64 Programmers Manual, Vol 2, Section 15.10
+     * (MSR-Bitmap Address).
+     */
+    if ( msr <= 0x1fff )
+        msr_bit = msr_bitmap + 0x0000 / BYTES_PER_LONG;
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+        msr_bit = msr_bitmap + 0x0800 / BYTES_PER_LONG;
+    else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )
+        msr_bit = msr_bitmap + 0x1000 / BYTES_PER_LONG;
+
+    if (msr_bit == NULL)
+        /* MSR not in the permission map: Let the guest handle it. */
+        return NESTEDHVM_VMEXIT_INJECT;
+
+    BUG_ON(msr_bit == NULL);
+
+    msr &= 0x1fff;
+
+    if (write)
+        /* write access */
+        enabled = test_bit(msr * 2 + 1, msr_bit);
+    else
+        /* read access */
+        enabled = test_bit(msr * 2, msr_bit);
+
+    if (!enabled)
+        return NESTEDHVM_VMEXIT_HOST;
+
+    return NESTEDHVM_VMEXIT_CONTINUE;
+}
+
+int
+nestedhvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack)
+{
+    uint64_t exitcode = NESTEDHVM_INTERCEPT_INTR;
+    uint64_t exitinfo2 = 0;
+    struct nestedhvm *hvm = &VCPU_NESTEDHVM(v);
+    ASSERT(nestedhvm_vcpu_in_guestmode(v));
+
+    if ( hvm->nh_hostflags.fields.vintrmask )
+        if ( !hvm->nh_hostflags.fields.rflagsif )
+            return NESTEDHVM_INTR_MASKED;
+
+    switch (intack.source) {
+    case hvm_intsrc_pic:
+    case hvm_intsrc_lapic:
+        exitcode = NESTEDHVM_INTERCEPT_INTR;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_nmi:
+        exitcode = NESTEDHVM_INTERCEPT_NMI;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_mce:
+        exitcode = NESTEDHVM_INTERCEPT_MCE;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_none:
+        return NESTEDHVM_INTR_NOTHANDLED;
+    default:
+        BUG();
+    }
+
+    hvm->nh_hostflags.fields.forcevmexit = 1;
+    hvm->nh_forcevmexit.exitcode = exitcode;
+    hvm->nh_forcevmexit.exitinfo1 = intack.source;
+    hvm->nh_forcevmexit.exitinfo2 = exitinfo2;
+    if ( hvm_nestedhvm_vm_intercepted_by_guest(v, exitcode) )
+        return NESTEDHVM_INTR_FORCEVMEXIT;
+
+    hvm->nh_hostflags.fields.forcevmexit = 0;
+    return NESTEDHVM_INTR_NOTINTERCEPTED;
+}
+
+static enum nestedhvm_vmexits
+nestedhvm_vmexit_intercepts(struct vcpu *v, struct cpu_user_regs *regs,
+    uint64_t exitcode)
+{
+    bool_t is_intercepted;
+    struct nestedhvm *hvm = &VCPU_NESTEDHVM(v);
+    uint64_t info1, info2;
+
+    is_intercepted = hvm_nestedhvm_vm_intercepted_by_guest(v, exitcode);
+
+    if (hvm->nh_hostflags.fields.forcevmexit) {
+        if (is_intercepted)
+            return NESTEDHVM_VMEXIT_INJECT;
+
+        gdprintk(XENLOG_ERR,
+            "forced VMEXIT can't happen as guest can't "
+            "handle the intercept\n");
+        return NESTEDHVM_VMEXIT_FATALERROR;
+    }
+
+    exitcode = hvm_nestedhvm_vm_exitcode_native2generic(v, regs, exitcode,
+        &info1, &info2);
+
+    switch (exitcode) {
+    case NESTEDHVM_INTERCEPT_INVALID:
+        if (is_intercepted)
+            return NESTEDHVM_VMEXIT_INJECT;
+        return NESTEDHVM_VMEXIT_HOST;
+
+    case NESTEDHVM_INTERCEPT_INTR:
+        return NESTEDHVM_VMEXIT_HOST;
+    case NESTEDHVM_INTERCEPT_NMI:
+        return NESTEDHVM_VMEXIT_HOST;
+
+    case NESTEDHVM_INTERCEPT_NPF:
+        if (nestedhvm_paging_mode_hap(v)) {
+            if (!is_intercepted)
+                return NESTEDHVM_VMEXIT_FATALERROR;
+            /* host nested paging + guest nested paging */
+            return NESTEDHVM_VMEXIT_HOST;
+        }
+        if (paging_mode_hap(v->domain)) {
+            if (is_intercepted)
+                return NESTEDHVM_VMEXIT_FATALERROR;
+            /* host nested paging + guest shadow paging */
+            return NESTEDHVM_VMEXIT_HOST;
+        }
+        /* host shadow paging + guest shadow paging */
+        /* Can this happen? */
+        BUG();
+        return NESTEDHVM_VMEXIT_FATALERROR;
+    case NESTEDHVM_INTERCEPT_PF:
+        if (nestedhvm_paging_mode_hap(v)) {
+            /* host nested paging + guest nested paging */
+            if (!is_intercepted)
+                /* guest does not intercept #PF */
+                return NESTEDHVM_VMEXIT_HOST;
+            /* nested guest intercepts #PF unnecessarily */
+            return NESTEDHVM_VMEXIT_INJECT;
+        }
+        if (!paging_mode_hap(v->domain)) {
+            /* host shadow paging + guest shadow paging */
+            return NESTEDHVM_VMEXIT_HOST;
+        }
+        /* host nested paging + guest shadow paging */
+        return NESTEDHVM_VMEXIT_INJECT;
+    case NESTEDHVM_INTERCEPT_MSR_READ:
+        return nestedhvm_vmexit_msr(hvm->nh_cached_msrpm, info1, 0);
+    case NESTEDHVM_INTERCEPT_MSR_WRITE:
+        return nestedhvm_vmexit_msr(hvm->nh_cached_msrpm, info1, 1);
+    case NESTEDHVM_INTERCEPT_IOIO:
+        /* Always let the guest handle IO access */
+        return NESTEDHVM_VMEXIT_INJECT;
+    default:
+        break;
+    }
+
+    if (is_intercepted)
+        return NESTEDHVM_VMEXIT_CONTINUE;
+    return NESTEDHVM_VMEXIT_HOST;
+}
+
+static enum nestedhvm_vmexits
+nestedhvm_vmexit(struct vcpu *v, struct cpu_user_regs *regs, uint64_t exitcode)
+{
+    int rc;
+    enum nestedhvm_vmexits ret;
+
+    ASSERT(nestedhvm_vcpu_in_guestmode(v));
+
+    rc = hvm_nestedhvm_vm_prepare4vmexit(v);
+    if (rc) {
+        ret = NESTEDHVM_VMEXIT_ERROR;
+        goto err1;
+    }
+
+    ret = nestedhvm_vmexit_intercepts(v, regs, exitcode);
+    switch (ret) {
+    case NESTEDHVM_VMEXIT_CONTINUE:
+    case NESTEDHVM_VMEXIT_INJECT:
+        break;
+    case NESTEDHVM_VMEXIT_ERROR:
+    case NESTEDHVM_VMEXIT_FATALERROR:
+        goto err1;
+    case NESTEDHVM_VMEXIT_HOST:
+        return ret;
+    default:
+        break;
+    }
+
+    rc = hvm_nestedhvm_vcpu_hostrestore(v, regs);
+    if (rc) {
+        ret = NESTEDHVM_VMEXIT_FATALERROR;
+        goto err0;
+    }
+
+    nestedhvm_vcpu_exit_guestmode(v);
+
+    return ret;
+
+err1:
+    rc = hvm_nestedhvm_vcpu_hostrestore(v, regs);
+    if (rc)
+        ret = NESTEDHVM_VMEXIT_FATALERROR;
+err0:
+    nestedhvm_vcpu_exit_guestmode(v);
+    return ret;
+}
+
+/* The exitcode is in native SVM/VMX format. The forced exitcode
+ * is in generic format.
+ */
+enum nestedhvm_vmexits
+nestedhvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs,
+    uint64_t exitcode)
+{
+    int rc;
+    struct nestedhvm *hvm = &VCPU_NESTEDHVM(v);
+    enum nestedhvm_vmexits ret;
+    enum hvm_copy_result hvm_rc;
+
+    hvm->nh_hostflags.fields.vmentry = 1;
+    if (nestedhvm_vcpu_in_guestmode(v)) {
+        ret = nestedhvm_vmexit(v, regs, exitcode);
+        switch (ret) {
+        case NESTEDHVM_VMEXIT_FATALERROR:
+        case NESTEDHVM_VMEXIT_HOST:
+            hvm->nh_hostflags.fields.vmentry = 0;
+            return ret;
+        case NESTEDHVM_VMEXIT_ERROR:
+            hvm->nh_hostflags.fields.forcevmexit = 1;
+            hvm->nh_forcevmexit.exitcode = NESTEDHVM_INTERCEPT_INVALID;
+            hvm->nh_forcevmexit.exitinfo1 = 0;
+            hvm->nh_forcevmexit.exitinfo2 = 0;
+            break;
+        default:
+            ASSERT(!nestedhvm_vcpu_in_guestmode(v));
+            break;
+        }
+
+        /* host state has been restored */
+    }
+
+    nestedsvm_vcpu_clgi(v);
+
+    /* Prepare for running the guest. Do some final SVM/VMX
+     * specific tweaks if necessary to make it work.
+     */
+    rc = hvm_nestedhvm_vcpu_vmexit(v, regs, exitcode);
+    hvm->nh_hostflags.fields.forcevmexit = 0;
+    if (rc) {
+        hvm->nh_hostflags.fields.vmentry = 0;
+        return NESTEDHVM_VMEXIT_FATALERROR;
+    }
+
+    ASSERT(!nestedhvm_vcpu_in_guestmode(v));
+    /* Inject VMEXIT into guest. */
+    hvm_rc = nestedhvm_vm_toguest(hvm, hvm->nh_vmaddr);
+    switch (hvm_rc) {
+    case HVMCOPY_okay:
+        ret = NESTEDHVM_VMEXIT_DONE;
+        break;
+    default:
+        hvm->nh_hostflags.fields.vmentry = 0;
+        return NESTEDHVM_VMEXIT_FATALERROR;
+    }
+
+    hvm->nh_hostflags.fields.vmentry = 0;
+    return ret;
+}
+
+/* Virtual GIF */
+int
+nestedsvm_vcpu_clgi(struct vcpu *v)
+{
+    if (!nestedhvm_enabled(v->domain)) {
+        hvm_inject_exception(TRAP_invalid_op, 0, 0);
+        return -1;
+    }
+
+    if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+        return 0;
+
+    /* clear gif flag */
+    VCPU_NESTEDHVM(v).nh_gif = 0;
+    local_event_delivery_disable(v); /* mask events for PV drivers */
+    return 0;
+}
+
+int
+nestedsvm_vcpu_stgi(struct vcpu *v)
+{
+    if (!nestedhvm_enabled(v->domain)) {
+        hvm_inject_exception(TRAP_invalid_op, 0, 0);
+        return -1;
+    }
+
+    /* Always set the GIF to make hvm_interrupt_blocked work. */
+    VCPU_NESTEDHVM(v).nh_gif = 1;
+
+    if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+        return 0;
+
+    local_event_delivery_enable(v); /* unmask events for PV drivers */
+    return 0;
+}
+
+uint64_t
+nestedhvm_exception2exitcode(unsigned int trapnr)
+{
+    switch (trapnr) {
+    case TRAP_divide_error:
+        return NESTEDHVM_INTERCEPT_DE;
+    case TRAP_debug:
+        return NESTEDHVM_INTERCEPT_DB;
+    case TRAP_nmi:
+        return NESTEDHVM_INTERCEPT_NMI;
+    case TRAP_int3:
+        return NESTEDHVM_INTERCEPT_BP;
+    case TRAP_overflow:
+        return NESTEDHVM_INTERCEPT_OF;
+    case TRAP_bounds:
+        return NESTEDHVM_INTERCEPT_BR;
+    case TRAP_invalid_op:
+        return NESTEDHVM_INTERCEPT_UD;
+    case TRAP_no_device:
+        return NESTEDHVM_INTERCEPT_NM;
+    case TRAP_double_fault:
+        return NESTEDHVM_INTERCEPT_DF;
+    case TRAP_copro_seg:
+        return NESTEDHVM_INTERCEPT_09;
+    case TRAP_invalid_tss:
+        return NESTEDHVM_INTERCEPT_TS;
+    case TRAP_no_segment:
+        return NESTEDHVM_INTERCEPT_NP;
+    case TRAP_stack_error:
+        return NESTEDHVM_INTERCEPT_SS;
+    case TRAP_gp_fault:
+        return NESTEDHVM_INTERCEPT_GP;
+    case TRAP_page_fault:
+        return NESTEDHVM_INTERCEPT_PF;
+    case TRAP_spurious_int:
+        return NESTEDHVM_INTERCEPT_15;
+    case TRAP_copro_error:
+        return NESTEDHVM_INTERCEPT_MF;
+    case TRAP_alignment_check:
+        return NESTEDHVM_INTERCEPT_AC;
+    case TRAP_machine_check:
+        return NESTEDHVM_INTERCEPT_MCE;
+    case TRAP_simd_error:
+        return NESTEDHVM_INTERCEPT_XF;
+    }
+
+    BUG();
+    return 0;
+}
+
+unsigned int
+nestedhvm_exitcode2exception(uint64_t exitcode)
+{
+    switch (exitcode) {
+    case NESTEDHVM_INTERCEPT_DE:
+        return TRAP_divide_error;
+    case NESTEDHVM_INTERCEPT_DB:
+        return TRAP_debug;
+    case NESTEDHVM_INTERCEPT_NMI:
+        return TRAP_nmi;
+    case NESTEDHVM_INTERCEPT_BP:
+        return TRAP_int3;
+    case NESTEDHVM_INTERCEPT_OF:
+        return TRAP_overflow;
+    case NESTEDHVM_INTERCEPT_BR:
+        return TRAP_bounds;
+    case NESTEDHVM_INTERCEPT_UD:
+        return TRAP_invalid_op;
+    case NESTEDHVM_INTERCEPT_NM:
+        return TRAP_no_device;
+    case NESTEDHVM_INTERCEPT_DF:
+        return TRAP_double_fault;
+    case NESTEDHVM_INTERCEPT_09:
+        return TRAP_copro_seg;
+    case NESTEDHVM_INTERCEPT_TS:
+        return TRAP_invalid_tss;
+    case NESTEDHVM_INTERCEPT_NP:
+        return TRAP_no_segment;
+    case NESTEDHVM_INTERCEPT_SS:
+        return TRAP_stack_error;
+    case NESTEDHVM_INTERCEPT_GP:
+        return TRAP_gp_fault;
+    case NESTEDHVM_INTERCEPT_PF:
+        return TRAP_page_fault;
+    case NESTEDHVM_INTERCEPT_15:
+        return TRAP_spurious_int;
+    case NESTEDHVM_INTERCEPT_MF:
+        return TRAP_copro_error;
+    case NESTEDHVM_INTERCEPT_AC:
+        return TRAP_alignment_check;
+    case NESTEDHVM_INTERCEPT_MCE:
+        return TRAP_machine_check;
+    case NESTEDHVM_INTERCEPT_XF:
+        return TRAP_simd_error;
+    }
+
+    BUG();
+    return 0;
+}
diff -r c19b4e3959c5 -r 917f9eb07fc3 xen/include/asm-x86/hvm/nestedhvm.h
--- /dev/null
+++ b/xen/include/asm-x86/hvm/nestedhvm.h
@@ -0,0 +1,139 @@
+/*
+ * Nested HVM
+ * Copyright (c) 2010, Advanced Micro Devices, Inc.
+ * Author: Christoph Egger
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _HVM_NESTEDHVM_H
+#define _HVM_NESTEDHVM_H
+
+#include <xen/types.h> /* for uintNN_t */
+#include <xen/sched.h> /* for struct vcpu, struct domain */
+#include            /* for VCPU_NESTEDHVM */
+
+enum nestedhvm_vmexits {
+    NESTEDHVM_VMEXIT_ERROR = 0,      /* inject VMEXIT w/ invalid VMCB */
+    NESTEDHVM_VMEXIT_FATALERROR = 1, /* crash first level guest */
+    NESTEDHVM_VMEXIT_HOST = 2,       /* exit handled on host level */
+    NESTEDHVM_VMEXIT_CONTINUE = 3,   /* further handling */
+    NESTEDHVM_VMEXIT_INJECT = 4,     /* inject VMEXIT */
+    NESTEDHVM_VMEXIT_DONE = 5,       /* VMEXIT handled */
+};
+
+/* Generic exit codes
+ * Note: This is not a complete list. Only maintain those which are
+ * used in the generic code. All other exit codes are represented
+ * by NESTEDHVM_INTERCEPT_LAST.
+ */
+enum nestedhvm_intercepts {
+    /* exitinfo1 and exitinfo2 undefined */
+    NESTEDHVM_INTERCEPT_INVALID = 0,   /* INVALID vmcb/vmcs */
+    NESTEDHVM_INTERCEPT_SHUTDOWN = 1,  /* kill guest */
+    NESTEDHVM_INTERCEPT_MCE = 2,       /* machine check exception */
+
+    /* exitinfo1 is hvm_intsrc_*, exitinfo2 is the vector */
+    NESTEDHVM_INTERCEPT_INTR = 3,      /* interrupt exit code */
+    NESTEDHVM_INTERCEPT_NMI = 4,       /* NMI exit code */
+
+    /* exitinfo1 is msr, exitinfo2 undefined */
+    NESTEDHVM_INTERCEPT_MSR_READ = 5,  /* MSR read access */
+    /* exitinfo1 is msr, exitinfo2 is msr value */
+    NESTEDHVM_INTERCEPT_MSR_WRITE = 6, /* MSR write access */
+
+    /* exitinfo1 not yet specified, exitinfo2: rip after instruction */
+    NESTEDHVM_INTERCEPT_IOIO = 7,      /* IO port access */
+
+    /* exitinfo1 is PF error code, exitinfo2 is PF fault address */
+    NESTEDHVM_INTERCEPT_NPF = 8,       /* nested page fault */
+    NESTEDHVM_INTERCEPT_PF = 9,        /* page fault */
+
+    /* exceptions: exitinfo1 and exitinfo2 are undefined */
+    NESTEDHVM_INTERCEPT_DE = 10,       /* divide by zero */
+    NESTEDHVM_INTERCEPT_OF = 11,       /* overflow */
+    NESTEDHVM_INTERCEPT_BR = 12,       /* bound-range */
+    NESTEDHVM_INTERCEPT_UD = 13,       /* invalid-opcode */
+    NESTEDHVM_INTERCEPT_NM = 14,       /* device-not-available */
+    NESTEDHVM_INTERCEPT_DF = 15,       /* double-fault */
+    NESTEDHVM_INTERCEPT_09 = 16,       /* unsupported (reserved) */
+    NESTEDHVM_INTERCEPT_XF = 17,       /* simd, floating-point */
+
+    /* exceptions: exitinfo1 is cs, exitinfo2 is rip */
+    NESTEDHVM_INTERCEPT_DB = 18,       /* debug */
+    NESTEDHVM_INTERCEPT_BP = 19,       /* breakpoint */
+
+    /* exceptions: exitinfo1 and exitinfo2 still need to be defined */
+    NESTEDHVM_INTERCEPT_TS = 21,       /* invalid-tss */
+
+    /* exceptions: exitinfo1 contains error code, exitinfo2 is undefined */
+    NESTEDHVM_INTERCEPT_NP = 22,       /* segment-not-present */
+    NESTEDHVM_INTERCEPT_SS = 23,       /* stack */
+    NESTEDHVM_INTERCEPT_GP = 24,       /* general-protection */
+    NESTEDHVM_INTERCEPT_15 = 25,       /* reserved */
+    NESTEDHVM_INTERCEPT_MF = 26,       /* x87 floating-point exception-pending */
+    NESTEDHVM_INTERCEPT_AC = 27,       /* alignment-check */
+
+    /* end mark */
+    NESTEDHVM_INTERCEPT_LAST,
+};
+
+/* Nested HVM on/off per domain */
+bool_t nestedhvm_enabled(struct domain *d);
+int nestedhvm_initialise(struct domain *d);
+
+/* Nested VM */
+enum hvm_copy_result
+nestedhvm_vm_fromguest(struct nestedhvm *hvm, uint64_t vmaddr);
+enum hvm_copy_result
+nestedhvm_vm_toguest(struct nestedhvm *hvm, uint64_t vmaddr);
+
+/* Nested VCPU */
+int nestedhvm_vcpu_initialise(struct vcpu *v);
+int nestedhvm_vcpu_destroy(struct vcpu *v);
+void nestedhvm_vcpu_reset(struct vcpu *v);
+bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
+#define nestedhvm_vcpu_enter_guestmode(v) VCPU_NESTEDHVM((v)).nh_guestmode = 1
+#define nestedhvm_vcpu_exit_guestmode(v) VCPU_NESTEDHVM((v)).nh_guestmode = 0
+int nestedhvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
+    uint64_t vmcbaddr, unsigned int inst_len);
+enum nestedhvm_vmexits nestedhvm_vcpu_vmexit(struct vcpu *v,
+    struct cpu_user_regs *regs, uint64_t exitcode);
+int nestedhvm_vcpu_state_validate(struct vcpu *v, uint64_t vmcbaddr);
+int nestedsvm_vcpu_clgi(struct vcpu *v);
+int nestedsvm_vcpu_stgi(struct vcpu *v);
+
+/* Interrupts */
+#define nestedsvm_gif_isset(v) (!!VCPU_NESTEDHVM((v)).nh_gif)
+#define NESTEDHVM_INTR_NOTHANDLED     3
+#define NESTEDHVM_INTR_NOTINTERCEPTED 2
+#define NESTEDHVM_INTR_FORCEVMEXIT    1
+#define NESTEDHVM_INTR_MASKED         0
+int nestedhvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack);
+
+/* Nested paging */
+#define NESTEDHVM_PAGEFAULT_DONE   0
+#define NESTEDHVM_PAGEFAULT_INJECT 1
+#define NESTEDHVM_PAGEFAULT_ERROR  2
+int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t L2_gpa);
+
+/* Misc */
+#define nestedhvm_paging_mode_hap(v) (!!VCPU_NESTEDHVM((v)).nh_hap_enabled)
+#define nestedhvm_vmentry_emulate(v) \
+    (!!VCPU_NESTEDHVM((v)).nh_hostflags.fields.vmentry)
+bool_t nestedhvm_vmaddr_isvalid(struct nestedhvm *hvm, uint64_t addr);
+uint64_t nestedhvm_exception2exitcode(unsigned int trapnr);
+unsigned int nestedhvm_exitcode2exception(uint64_t exitcode);
+
+#endif /* _HVM_NESTEDHVM_H */
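
For reviewers, a minimal sketch of how a vendor-specific VMRUN/VMLAUNCH
intercept handler might hand control to the generic entry point added
above. The wrapper name svm_vmrun_intercept and the way the nested VMCB
address reaches it are illustrative assumptions and not part of this
patch; the real callers belong in the SVM/VMX specific code.

    /* Illustrative sketch only, not part of this patch. */
    static int svm_vmrun_intercept(struct vcpu *v, struct cpu_user_regs *regs,
                                   uint64_t vmcbaddr, unsigned int inst_len)
    {
        /* nestedhvm_vcpu_vmentry() validates the vcpu state, fetches the
         * nested VMCB, saves the l1 host state and switches the vcpu into
         * nested guest mode; on failure it has already injected the
         * appropriate exception into the l1 guest. */
        return nestedhvm_vcpu_vmentry(v, regs, vmcbaddr, inst_len);
    }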