
Re: [Xen-devel] [PATCH v5 rebased 1/4] nested vmx: Use a list to store the launched vvmcs for L1 VMM



Acked-by: Eddie Dong <eddie.dong@xxxxxxxxx>

> -----Original Message-----
> From: xen-devel-bounces@xxxxxxxxxxxxx
> [mailto:xen-devel-bounces@xxxxxxxxxxxxx] On Behalf Of Dongxiao Xu
> Sent: Wednesday, January 23, 2013 10:32 PM
> To: xen-devel@xxxxxxxxxxxxxxxxxxx
> Cc: JBeulich@xxxxxxxx; Dong, Eddie; Zhang, Xiantao; Nakajima, Jun
> Subject: [Xen-devel] [PATCH v5 rebased 1/4] nested vmx: Use a list to store
> the launched vvmcs for L1 VMM
> 
> Originally we used a virtual VMCS field to store the launch state of
> a given vmcs. However, once the VMCS shadowing feature is introduced,
> this virtual VMCS must also be loadable into real hardware, and
> VMREAD/VMWRITE would then operate on an invalid field.
> 
> The new approach is to store the launch state in a list maintained for the L1 VMM.
> 
> Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
> ---
>  xen/arch/x86/hvm/vmx/vvmx.c        |   96 ++++++++++++++++++++++++++++++++----
>  xen/include/asm-x86/hvm/vmx/vmcs.h |    2 -
>  xen/include/asm-x86/hvm/vmx/vvmx.h |    6 ++
>  3 files changed, 92 insertions(+), 12 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index 90b4da9..cd8bb90 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -51,6 +51,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
>      nvmx->iobitmap[0] = NULL;
>      nvmx->iobitmap[1] = NULL;
>      nvmx->msrbitmap = NULL;
> +    INIT_LIST_HEAD(&nvmx->launched_list);
>      return 0;
>  out:
>      return -ENOMEM;
> @@ -58,7 +59,9 @@ out:
> 
>  void nvmx_vcpu_destroy(struct vcpu *v)
>  {
> +    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
>      struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
> +    struct vvmcs_list *item, *n;
> 
>      /*
>       * When destroying the vcpu, it may be running on behalf of L2 guest.
> @@ -74,6 +77,12 @@ void nvmx_vcpu_destroy(struct vcpu *v)
>          free_xenheap_page(nvcpu->nv_n2vmcx);
>          nvcpu->nv_n2vmcx = NULL;
>      }
> +
> +    list_for_each_entry_safe(item, n, &nvmx->launched_list, node)
> +    {
> +        list_del(&item->node);
> +        xfree(item);
> +    }
>  }
> 
>  void nvmx_domain_relinquish_resources(struct domain *d)
> @@ -1202,6 +1211,62 @@ int nvmx_handle_vmxoff(struct cpu_user_regs *regs)
>      return X86EMUL_OKAY;
>  }
> 
> +static bool_t vvmcs_launched(struct list_head *launched_list,
> +                             unsigned long vvmcs_mfn)
> +{
> +    struct vvmcs_list *vvmcs;
> +    struct list_head *pos;
> +    bool_t launched = 0;
> +
> +    list_for_each(pos, launched_list)
> +    {
> +        vvmcs = list_entry(pos, struct vvmcs_list, node);
> +        if ( vvmcs_mfn == vvmcs->vvmcs_mfn )
> +        {
> +            launched = 1;
> +            break;
> +        }
> +    }
> +
> +    return launched;
> +}
> +
> +static int set_vvmcs_launched(struct list_head *launched_list,
> +                              unsigned long vvmcs_mfn)
> +{
> +    struct vvmcs_list *vvmcs;
> +
> +    if ( vvmcs_launched(launched_list, vvmcs_mfn) )
> +        return 0;
> +
> +    vvmcs = xzalloc(struct vvmcs_list);
> +    if ( !vvmcs )
> +        return -ENOMEM;
> +
> +    vvmcs->vvmcs_mfn = vvmcs_mfn;
> +    list_add(&vvmcs->node, launched_list);
> +
> +    return 0;
> +}
> +
> +static void clear_vvmcs_launched(struct list_head *launched_list,
> +                                 paddr_t vvmcs_mfn)
> +{
> +    struct vvmcs_list *vvmcs;
> +    struct list_head *pos;
> +
> +    list_for_each(pos, launched_list)
> +    {
> +        vvmcs = list_entry(pos, struct vvmcs_list, node);
> +        if ( vvmcs_mfn == vvmcs->vvmcs_mfn )
> +        {
> +            list_del(&vvmcs->node);
> +            xfree(vvmcs);
> +            break;
> +        }
> +    }
> +}
> +
>  int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
>  {
>      struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
> @@ -1225,8 +1290,10 @@ int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
> 
>  int nvmx_handle_vmresume(struct cpu_user_regs *regs)
>  {
> -    int launched;
> +    bool_t launched;
>      struct vcpu *v = current;
> +    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
> +    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
> 
>      if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
>      {
> @@ -1234,8 +1301,8 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
>          return X86EMUL_OKAY;
>      }
> 
> -    launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
> -                           NVMX_LAUNCH_STATE);
> +    launched = vvmcs_launched(&nvmx->launched_list,
> +                   domain_page_map_to_mfn(nvcpu->nv_vvmcx));
>      if ( !launched ) {
>         vmreturn (regs, VMFAIL_VALID);
>         return X86EMUL_OKAY;
> @@ -1245,9 +1312,11 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
> 
>  int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
>  {
> -    int launched;
> +    bool_t launched;
>      int rc;
>      struct vcpu *v = current;
> +    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
> +    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
> 
>      if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
>      {
> @@ -1255,8 +1324,8 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
>          return X86EMUL_OKAY;
>      }
> 
> -    launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
> -                           NVMX_LAUNCH_STATE);
> +    launched = vvmcs_launched(&nvmx->launched_list,
> +                   domain_page_map_to_mfn(nvcpu->nv_vvmcx));
>      if ( launched ) {
>         vmreturn (regs, VMFAIL_VALID);
>         return X86EMUL_OKAY;
> @@ -1264,8 +1333,12 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
>      else {
>          rc = nvmx_vmresume(v,regs);
>          if ( rc == X86EMUL_OKAY )
> -            __set_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
> -                        NVMX_LAUNCH_STATE, 1);
> +        {
> +            if ( set_vvmcs_launched(&nvmx->launched_list,
> +                    domain_page_map_to_mfn(nvcpu->nv_vvmcx)) < 0 )
> +                return X86EMUL_UNHANDLEABLE;
> +        }
> +
>      }
>      return rc;
>  }
> @@ -1332,6 +1405,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
>      struct vcpu *v = current;
>      struct vmx_inst_decoded decode;
>      struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
> +    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
>      unsigned long gpa = 0;
>      void *vvmcs;
>      int rc;
> @@ -1348,7 +1422,8 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
> 
>      if ( gpa == nvcpu->nv_vvmcxaddr )
>      {
> -        __set_vvmcs(nvcpu->nv_vvmcx, NVMX_LAUNCH_STATE, 0);
> +        clear_vvmcs_launched(&nvmx->launched_list,
> +            domain_page_map_to_mfn(nvcpu->nv_vvmcx));
>          nvmx_purge_vvmcs(v);
>      }
>      else
> @@ -1356,7 +1431,8 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
>          /* Even if this VMCS isn't the current one, we must clear it. */
>          vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
>          if ( vvmcs )
> -            __set_vvmcs(vvmcs, NVMX_LAUNCH_STATE, 0);
> +            clear_vvmcs_launched(&nvmx->launched_list,
> +                domain_page_map_to_mfn(vvmcs));
>          hvm_unmap_guest_frame(vvmcs, 0);
>      }
> 
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index 51df81e..9ff741f 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -421,8 +421,6 @@ enum vmcs_field {
>      HOST_SYSENTER_EIP               = 0x00006c12,
>      HOST_RSP                        = 0x00006c14,
>      HOST_RIP                        = 0x00006c16,
> -    /* A virtual VMCS field used for nestedvmx only */
> -    NVMX_LAUNCH_STATE               = 0x00006c20,
>  };
> 
>  #define VMCS_VPID_WIDTH 16
> diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
> index 9e1dc77..89e839f 100644
> --- a/xen/include/asm-x86/hvm/vmx/vvmx.h
> +++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
> @@ -23,6 +23,11 @@
>  #ifndef __ASM_X86_HVM_VVMX_H__
>  #define __ASM_X86_HVM_VVMX_H__
> 
> +struct vvmcs_list {
> +    unsigned long vvmcs_mfn;
> +    struct list_head node;
> +};
> +
>  struct nestedvmx {
>      paddr_t    vmxon_region_pa;
>      void       *iobitmap[2];         /* map (va) of L1 guest I/O bitmap */
> @@ -38,6 +43,7 @@ struct nestedvmx {
>          uint32_t exit_qual;
>      } ept;
>      uint32_t guest_vpid;
> +    struct list_head launched_list;
>  };
> 
>  #define vcpu_2_nvmx(v)       (vcpu_nestedhvm(v).u.nvmx)
> --
> 1.7.1
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel
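
For illustration, the launch-state bookkeeping introduced by the quoted
patch can be sketched outside the Xen tree in plain C: VMLAUNCH records
the vVMCS frame in a per-vCPU list, VMRESUME only succeeds if the frame
is already on the list, and VMCLEAR removes it. The list type and the
helper names below (is_launched, set_launched, clear_launched) are
illustrative stand-ins for the patch's vvmcs_launched /
set_vvmcs_launched / clear_vvmcs_launched helpers, which use Xen's
list_head infrastructure instead.

/* Standalone sketch of the launched-vVMCS list (illustrative only). */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct launched_node {
    unsigned long vvmcs_mfn;        /* MFN of a launched vVMCS frame */
    struct launched_node *next;
};

/* Has this vVMCS frame been launched? (cf. vvmcs_launched) */
static bool is_launched(struct launched_node *head, unsigned long mfn)
{
    for ( ; head; head = head->next )
        if ( head->vvmcs_mfn == mfn )
            return true;
    return false;
}

/* Record a successful VMLAUNCH (cf. set_vvmcs_launched). */
static int set_launched(struct launched_node **head, unsigned long mfn)
{
    struct launched_node *n;

    if ( is_launched(*head, mfn) )
        return 0;
    n = calloc(1, sizeof(*n));
    if ( !n )
        return -1;                  /* -ENOMEM in the real code */
    n->vvmcs_mfn = mfn;
    n->next = *head;
    *head = n;
    return 0;
}

/* VMCLEAR forgets the frame (cf. clear_vvmcs_launched). */
static void clear_launched(struct launched_node **head, unsigned long mfn)
{
    for ( ; *head; head = &(*head)->next )
        if ( (*head)->vvmcs_mfn == mfn )
        {
            struct launched_node *victim = *head;
            *head = victim->next;
            free(victim);
            return;
        }
}

int main(void)
{
    struct launched_node *list = NULL;

    printf("launched? %d\n", is_launched(list, 0x1000)); /* 0: VMRESUME fails   */
    set_launched(&list, 0x1000);                         /* VMLAUNCH            */
    printf("launched? %d\n", is_launched(list, 0x1000)); /* 1: VMRESUME allowed */
    clear_launched(&list, 0x1000);                       /* VMCLEAR             */
    printf("launched? %d\n", is_launched(list, 0x1000)); /* 0 again             */
    return 0;
}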

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

