
Re: [Xen-devel] [PATCH 07 of 20] Emulation of guest vmptrld



At 16:57 +0800 on 02 Jun (1307033840), Eddie Dong wrote:
> diff -r 8264b01b476b -r 4dad232d7fc3 xen/arch/x86/hvm/vmx/vvmx.c
> --- a/xen/arch/x86/hvm/vmx/vvmx.c     Thu Jun 02 16:33:20 2011 +0800
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c     Thu Jun 02 16:33:20 2011 +0800
> @@ -356,6 +356,41 @@ static void vmreturn(struct cpu_user_reg
>      regs->eflags = eflags;
>  }
>  
> +static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
> +{
> +    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
> +    unsigned long gpa;
> +    unsigned long mfn;
> +    p2m_type_t p2mt;
> +
> +    if ( vmcs_reg == IO_BITMAP_A )
> +    {
> +        if (nvmx->iobitmap[0]) {
> +            unmap_domain_page_global(nvmx->iobitmap[0]);
> +        }
> +        gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, IO_BITMAP_A);
> +        mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
> +                              gpa >> PAGE_SHIFT, &p2mt));
> +        nvmx->iobitmap[0] = map_domain_page_global(mfn);

Why are these maps _global?  It might be OK to use 2 more global
mappings per VCPU, but the reason should probably go in a comment beside
the call.

Also, I don't see where these mappings get torn down on domain
destruction. 
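
Something like the following would do (purely a sketch; the hook name
nvmx_vcpu_destroy is hypothetical and would need wiring into the
vcpu/domain destruction path):

    /* Hypothetical teardown hook; the name and call site are
     * illustrative, not part of the patch under review. */
    static void nvmx_vcpu_destroy(struct vcpu *v)
    {
        struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
        int i;

        /* Drop the global mappings taken in __map_io_bitmap(). */
        for ( i = 0; i < 2; i++ )
        {
            if ( nvmx->iobitmap[i] )
            {
                unmap_domain_page_global(nvmx->iobitmap[i]);
                nvmx->iobitmap[i] = NULL;
            }
        }
    }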

(While I'm looking at this code: the function is quite ugly.  Why have
a single function if the two branches just duplicate the same body
anyway?  See the sketch below.)
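
Untested sketch, using the same calls as the patch (the _global
question above still stands):

    static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
    {
        struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
        unsigned long gpa;
        unsigned long mfn;
        p2m_type_t p2mt;
        /* IO_BITMAP_A -> slot 0, IO_BITMAP_B -> slot 1 */
        int index = (vmcs_reg == IO_BITMAP_A) ? 0 : 1;

        if ( nvmx->iobitmap[index] )
            unmap_domain_page_global(nvmx->iobitmap[index]);
        gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
        mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
                               gpa >> PAGE_SHIFT, &p2mt));
        nvmx->iobitmap[index] = map_domain_page_global(mfn);
    }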

> +    }
> +    else if ( vmcs_reg == IO_BITMAP_B )
> +    {
> +        if (nvmx->iobitmap[1]) {
> +            unmap_domain_page_global(nvmx->iobitmap[1]);
> +        }
> +        gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, IO_BITMAP_B);
> +        mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
> +                               gpa >> PAGE_SHIFT, &p2mt));
> +        nvmx->iobitmap[1] = map_domain_page_global(mfn);
> +    }
> +}
> +
> +static inline void map_io_bitmap_all(struct vcpu *v)
> +{
> +   __map_io_bitmap (v, IO_BITMAP_A);
> +   __map_io_bitmap (v, IO_BITMAP_B);
> +}
> +
>  /*
>   * VMX instructions handling
>   */
> @@ -364,6 +399,7 @@ int nvmx_handle_vmxon(struct cpu_user_re
>  {
>      struct vcpu *v=current;
>      struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
> +    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
>      struct vmx_inst_decoded decode;
>      unsigned long gpa = 0;
>      int rc;
> @@ -372,7 +408,22 @@ int nvmx_handle_vmxon(struct cpu_user_re
>      if ( rc != X86EMUL_OKAY )
>          return rc;
>  
> +    if ( nvmx->vmxon_region_pa )
> +        gdprintk(XENLOG_WARNING, 
> +                 "vmxon again: orig %lx new %lx\n",
> +                 nvmx->vmxon_region_pa, gpa);
> +
>      nvmx->vmxon_region_pa = gpa;
> +
> +    /*
> +     * `fork' the host vmcs to shadow_vmcs
> +     * vmcs_lock is not needed since we are on current
> +     */
> +    nvcpu->nv_n1vmcx = v->arch.hvm_vmx.vmcs;
> +    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
> +    memcpy(nvcpu->nv_n2vmcx, v->arch.hvm_vmx.vmcs, PAGE_SIZE);
> +    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
> +    v->arch.hvm_vmx.launched = 0;
>      vmreturn(regs, VMSUCCEED);
>  
>      return X86EMUL_OKAY;
> @@ -394,3 +445,38 @@ int nvmx_handle_vmxoff(struct cpu_user_r
>      return X86EMUL_OKAY;
>  }
>  
> +int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
> +{
> +    struct vcpu *v = current;
> +    struct vmx_inst_decoded decode;
> +    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
> +    unsigned long gpa = 0;
> +    unsigned long mfn;
> +    p2m_type_t p2mt;
> +    int rc;
> +
> +    rc = decode_vmx_inst(regs, &decode, &gpa, 0);
> +    if ( rc != X86EMUL_OKAY )
> +        return rc;
> +
> +    if ( gpa == vcpu_2_nvmx(v).vmxon_region_pa || gpa & 0xfff )
> +    {
> +        vmreturn(regs, VMFAIL_INVALID);
> +        goto out;
> +    }
> +
> +    if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
> +    {
> +        mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
> +                               gpa >> PAGE_SHIFT, &p2mt));
> +        nvcpu->nv_vvmcx = map_domain_page_global(mfn);

Again, why _global?
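
If the pointer doesn't have to stay live across contexts, a transient
mapping around each access would avoid the global slot entirely.
Again, just a hypothetical sketch:

    /* Map, use, and unmap around each access instead of keeping a
     * long-lived global mapping of the virtual VMCS. */
    void *vvmcx = map_domain_page(mfn);
    /* ... read/write the virtual VMCS fields here ... */
    unmap_domain_page(vvmcx);

Whether that's workable depends on where nv_vvmcx is consumed.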

Tim.

-- 
Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Principal Software Engineer, Xen Platform Team
Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)
