[Xen-devel] [PATCH 06/11] VMX/altp2m: add code to support EPTP switching and #VE.
Implement and hook up the code to enable VMX support of VMFUNC and #VE.
VMFUNC leaf 0 (EPTP switching) and #VE are emulated on hardware that
doesn't support them.
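For reference, a guest exercises the emulated path exactly as it would
the real instruction: VMFUNC takes the leaf in EAX (0 = EPTP switching)
and the EPTP-list index in ECX. A minimal guest-side sketch (the helper
name is illustrative and not part of this patch):

    /* Ask the hypervisor to switch to the altp2m view at 'idx'.
     * Encoded as raw bytes (0f 01 d4) for assemblers without the
     * mnemonic.  On hardware lacking VMFUNC this raises #UD, which
     * Xen intercepts and emulates in vmx_vmexit_ud_intercept(). */
    static inline void vmfunc_switch_eptp(unsigned int idx)
    {
        asm volatile ( ".byte 0x0f,0x01,0xd4"
                       : : "a" (0), "c" (idx) : "memory" );
    }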
Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
---
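For reference, vmx_vcpu_emulate_ve() below fills in the guest's #VE
information area, whose layout is hardware-defined (Intel SDM,
"Virtualization Exceptions"). A sketch of the structure the code
assumes, using the field names from this series (ve_info_t is defined
elsewhere in the series; this sketch is for review convenience only):

    typedef struct {
        u32 exit_reason;        /* always EXIT_REASON_EPT_VIOLATION here */
        u32 semaphore;          /* non-zero suppresses further #VE delivery */
        u64 exit_qualification; /* copied from the VMCS */
        u64 gla;                /* guest linear address */
        u64 gpa;                /* guest physical address */
        u16 eptp_index;         /* currently active altp2m index */
    } ve_info_t;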
xen/arch/x86/hvm/vmx/vmx.c | 138 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 138 insertions(+)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 931709b..a0a2d02 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -56,6 +56,7 @@
 #include <asm/debugger.h>
 #include <asm/apic.h>
 #include <asm/hvm/nestedhvm.h>
+#include <asm/hvm/altp2mhvm.h>
 #include <asm/event.h>
 #include <public/arch-x86/cpuid.h>
 
@@ -1718,6 +1719,91 @@ static void vmx_enable_msr_exit_interception(struct domain *d)
                                          MSR_TYPE_W);
 }
 
+static void vmx_vcpu_update_eptp(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    struct p2m_domain *p2m = altp2mhvm_active(d) ?
+        p2m_get_altp2m(v) : p2m_get_hostp2m(d);
+    struct ept_data *ept = &p2m->ept;
+
+    ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
+
+    vmx_vmcs_enter(v);
+
+    __vmwrite(EPT_POINTER, ept_get_eptp(ept));
+
+    vmx_vmcs_exit(v);
+}
+
+static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    u32 mask = SECONDARY_EXEC_ENABLE_VM_FUNCTIONS;
+
+    if ( !cpu_has_vmx_vmfunc )
+        return;
+
+    if ( cpu_has_vmx_virt_exceptions )
+        mask |= SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS;
+
+    vmx_vmcs_enter(v);
+
+    if ( !d->is_dying && altp2mhvm_active(d) )
+    {
+        v->arch.hvm_vmx.secondary_exec_control |= mask;
+        __vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
+        __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
+
+        if ( cpu_has_vmx_virt_exceptions )
+        {
+            p2m_type_t t;
+            mfn_t mfn;
+
+            mfn = get_gfn_query_unlocked(d, vcpu_altp2mhvm(v).veinfo, &t);
+            __vmwrite(VIRT_EXCEPTION_INFO, mfn_x(mfn) << PAGE_SHIFT);
+        }
+    }
+    else
+        v->arch.hvm_vmx.secondary_exec_control &= ~mask;
+
+    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+              v->arch.hvm_vmx.secondary_exec_control);
+
+    vmx_vmcs_exit(v);
+}
+
+static bool_t vmx_vcpu_emulate_ve(struct vcpu *v)
+{
+    bool_t rc = 0;
+    ve_info_t *veinfo = vcpu_altp2mhvm(v).veinfo ?
+        hvm_map_guest_frame_rw(vcpu_altp2mhvm(v).veinfo, 0) : NULL;
+
+    if ( !veinfo )
+        return 0;
+
+    if ( veinfo->semaphore != 0 )
+        goto out;
+
+    rc = 1;
+
+    veinfo->exit_reason = EXIT_REASON_EPT_VIOLATION;
+    veinfo->semaphore = ~0l;
+    veinfo->eptp_index = vcpu_altp2mhvm(v).p2midx;
+
+    vmx_vmcs_enter(v);
+    __vmread(EXIT_QUALIFICATION, &veinfo->exit_qualification);
+    __vmread(GUEST_LINEAR_ADDRESS, &veinfo->gla);
+    __vmread(GUEST_PHYSICAL_ADDRESS, &veinfo->gpa);
+    vmx_vmcs_exit(v);
+
+    hvm_inject_hw_exception(TRAP_virtualisation,
+                            HVM_DELIVER_NO_ERROR_CODE);
+
+ out:
+    hvm_unmap_guest_frame(veinfo, 0);
+    return rc;
+}
+
 static struct hvm_function_table __initdata vmx_function_table = {
     .name = "VMX",
     .cpu_up_prepare = vmx_cpu_up_prepare,
@@ -1777,6 +1863,9 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
     .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
     .enable_msr_exit_interception = vmx_enable_msr_exit_interception,
+    .ahvm_vcpu_update_eptp = vmx_vcpu_update_eptp,
+    .ahvm_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
+    .ahvm_vcpu_emulate_ve = vmx_vcpu_emulate_ve,
 };
 
 const struct hvm_function_table * __init start_vmx(void)
@@ -2551,6 +2640,17 @@ static void vmx_vmexit_ud_intercept(struct cpu_user_regs *regs)
         hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;
     case X86EMUL_EXCEPTION:
+        /* check for a VMFUNC that should be emulated */
+        if ( !cpu_has_vmx_vmfunc && altp2mhvm_active(current->domain) &&
+             ctxt.insn_buf_bytes >= 3 && ctxt.insn_buf[0] == 0x0f &&
+             ctxt.insn_buf[1] == 0x01 && ctxt.insn_buf[2] == 0xd4 &&
+             regs->eax == 0 &&
+             p2m_switch_vcpu_altp2m_by_id(current, (uint16_t)regs->ecx) )
+        {
+            regs->eip += 3;
+            return;
+        }
+
         if ( ctxt.exn_pending )
             hvm_inject_trap(&ctxt.trap);
         /* fall through */
@@ -2698,6 +2798,40 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     /* Now enable interrupts so it's safe to take locks. */
     local_irq_enable();
+
+    /*
+     * If the guest has the ability to switch EPTP without an exit,
+     * figure out whether it has done so and update the altp2m data.
+     */
+    if ( altp2mhvm_active(v->domain) &&
+         (v->arch.hvm_vmx.secondary_exec_control &
+          SECONDARY_EXEC_ENABLE_VM_FUNCTIONS) )
+    {
+        unsigned long idx;
+
+        if ( v->arch.hvm_vmx.secondary_exec_control &
+             SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
+            __vmread(EPTP_INDEX, &idx);
+        else
+        {
+            unsigned long eptp;
+
+            __vmread(EPT_POINTER, &eptp);
+
+            if ( !p2m_find_altp2m_by_eptp(v->domain, eptp, &idx) )
+            {
+                gdprintk(XENLOG_ERR, "EPTP not found in alternate p2m list\n");
+                domain_crash(v->domain);
+            }
+        }
+
+        if ( (uint16_t)idx != vcpu_altp2mhvm(v).p2midx )
+        {
+            cpumask_clear_cpu(v->vcpu_id, p2m_get_altp2m(v)->dirty_cpumask);
+            vcpu_altp2mhvm(v).p2midx = (uint16_t)idx;
+            cpumask_set_cpu(v->vcpu_id, p2m_get_altp2m(v)->dirty_cpumask);
+        }
+    }
 
     /* XXX: This looks ugly, but we need a mechanism to ensure
      * any pending vmresume has really happened
@@ -3041,6 +3175,10 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         update_guest_eip();
         break;
 
+    case EXIT_REASON_VMFUNC:
+        vmx_vmexit_ud_intercept(regs);
+        break;
+
     case EXIT_REASON_INVEPT:
         if ( nvmx_handle_invept(regs) == X86EMUL_OKAY )
             update_guest_eip();
--
1.9.1