[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v4 06/15] VMX/altp2m: add code to support EPTP switching and #VE.



On Fri, Jul 10, 2015 at 1:52 AM, Ed White <edmund.h.white@xxxxxxxxx> wrote:
> Implement and hook up the code to enable VMX support of VMFUNC and #VE.
>
> VMFUNC leaf 0 (EPTP switching) emulation is added in a later patch.
>
> Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
>
> Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Acked-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> ---
>  xen/arch/x86/hvm/vmx/vmx.c | 138 
> +++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 138 insertions(+)
>
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 07527dd..28afdaa 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -56,6 +56,7 @@
>  #include <asm/debugger.h>
>  #include <asm/apic.h>
>  #include <asm/hvm/nestedhvm.h>
> +#include <asm/hvm/altp2m.h>
>  #include <asm/event.h>
>  #include <asm/monitor.h>
>  #include <public/arch-x86/cpuid.h>
> @@ -1763,6 +1764,104 @@ static void vmx_enable_msr_exit_interception(struct 
> domain *d)
>                                           MSR_TYPE_W);
>  }
>
> +static void vmx_vcpu_update_eptp(struct vcpu *v)
> +{
> +    struct domain *d = v->domain;
> +    struct p2m_domain *p2m = NULL;
> +    struct ept_data *ept;
> +
> +    if ( altp2m_active(d) )
> +        p2m = p2m_get_altp2m(v);
> +    if ( !p2m )
> +        p2m = p2m_get_hostp2m(d);
> +
> +    ept = &p2m->ept;
> +    ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
> +
> +    vmx_vmcs_enter(v);
> +
> +    __vmwrite(EPT_POINTER, ept_get_eptp(ept));
> +
> +    if ( v->arch.hvm_vmx.secondary_exec_control &
> +        SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS )
> +        __vmwrite(EPTP_INDEX, vcpu_altp2m(v).p2midx);
> +
> +    vmx_vmcs_exit(v);
> +}
> +
> +static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
> +{
> +    struct domain *d = v->domain;
> +    u32 mask = SECONDARY_EXEC_ENABLE_VM_FUNCTIONS;
> +
> +    if ( !cpu_has_vmx_vmfunc )
> +        return;
> +
> +    if ( cpu_has_vmx_virt_exceptions )
> +        mask |= SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS;
> +
> +    vmx_vmcs_enter(v);
> +
> +    if ( !d->is_dying && altp2m_active(d) )
> +    {
> +        v->arch.hvm_vmx.secondary_exec_control |= mask;
> +        __vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
> +        __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
> +
> +        if ( cpu_has_vmx_virt_exceptions )
> +        {
> +            p2m_type_t t;
> +            mfn_t mfn;
> +
> +            mfn = get_gfn_query_unlocked(d, 
> gfn_x(vcpu_altp2m(v).veinfo_gfn), &t);
> +
> +            if ( mfn_x(mfn) != INVALID_MFN )
> +                __vmwrite(VIRT_EXCEPTION_INFO, mfn_x(mfn) << PAGE_SHIFT);
> +            else
> +                mask &= ~SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS;
> +        }
> +    }
> +    else
> +        v->arch.hvm_vmx.secondary_exec_control &= ~mask;
> +
> +    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
> +        v->arch.hvm_vmx.secondary_exec_control);
> +
> +    vmx_vmcs_exit(v);
> +}
> +
> +static bool_t vmx_vcpu_emulate_ve(struct vcpu *v)
> +{
> +    bool_t rc = 0;
> +    ve_info_t *veinfo = gfn_x(vcpu_altp2m(v).veinfo_gfn) != INVALID_GFN ?
> +        hvm_map_guest_frame_rw(gfn_x(vcpu_altp2m(v).veinfo_gfn), 0) : NULL;
> +
> +    if ( !veinfo )
> +        return 0;
> +
> +    if ( veinfo->semaphore != 0 )
> +        goto out;
> +
> +    rc = 1;
> +
> +    veinfo->exit_reason = EXIT_REASON_EPT_VIOLATION;
> +    veinfo->semaphore = ~0l;
> +    veinfo->eptp_index = vcpu_altp2m(v).p2midx;
> +
> +    vmx_vmcs_enter(v);
> +    __vmread(EXIT_QUALIFICATION, &veinfo->exit_qualification);
> +    __vmread(GUEST_LINEAR_ADDRESS, &veinfo->gla);
> +    __vmread(GUEST_PHYSICAL_ADDRESS, &veinfo->gpa);
> +    vmx_vmcs_exit(v);
> +
> +    hvm_inject_hw_exception(TRAP_virtualisation,
> +                            HVM_DELIVER_NO_ERROR_CODE);
> +
> +out:
> +    hvm_unmap_guest_frame(veinfo, 0);
> +    return rc;
> +}
> +
>  static struct hvm_function_table __initdata vmx_function_table = {
>      .name                 = "VMX",
>      .cpu_up_prepare       = vmx_cpu_up_prepare,
> @@ -1822,6 +1921,9 @@ static struct hvm_function_table __initdata 
> vmx_function_table = {
>      .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
>      .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
>      .enable_msr_exit_interception = vmx_enable_msr_exit_interception,
> +    .ap2m_vcpu_update_eptp = vmx_vcpu_update_eptp,
> +    .ap2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
> +    .ap2m_vcpu_emulate_ve = vmx_vcpu_emulate_ve,

Just a bit of feedback for future patch series: This would have been a
lot easier to review if these hooks, and the wrappers which call them,
had all been added in a single patch, rather than having the hooks &
wrappers added in the previous patch and the functions implemented in
this patch.

(I'm trying to focus on the p2m-related stuff, so I'm just skimming this one.)

 -George

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, who monitor our
servers 24x7x365 and back them with RackSpace's Fanatical Support®.