
Re: [Xen-devel] [PATCH 15/18 V2]: PVH xen: grant related changes



On Fri, Mar 15, 2013 at 06:03:15PM -0700, Mukesh Rathor wrote:
>  In this patch, we make pvh be acomodated in the newly created

accommodated

>  domain_page.c file. Also, in the grant code, we replenish the frame
>  in the EPT so we don't leave a hole in it.
> 
> Change in V2:
>    - None. The domain_page.c changes are new in this changeset.
>    - Grant changes moved here as a separate patch.
> 
> Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
> ---
>  xen/arch/x86/domain_page.c |   11 ++++++-----
>  xen/arch/x86/mm.c          |   23 +++++++++++++++++++++--
>  xen/common/grant_table.c   |    4 ++--
>  3 files changed, 29 insertions(+), 9 deletions(-)
> 
> diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
> index 7421e03..be41304 100644
> --- a/xen/arch/x86/domain_page.c
> +++ b/xen/arch/x86/domain_page.c
> @@ -34,7 +34,8 @@ static inline struct vcpu *mapcache_current_vcpu(void)
>       * then it means we are running on the idle domain's page table and must
>       * therefore use its mapcache.
>       */
> -    if ( unlikely(pagetable_is_null(v->arch.guest_table)) && !is_hvm_vcpu(v) )
> +    if ( unlikely(pagetable_is_null(v->arch.guest_table)) && 
> +                  !is_hvm_or_pvh_vcpu(v) )
>      {
>          /* If we really are idling, perform lazy context switch now. */
>          if ( (v = idle_vcpu[smp_processor_id()]) == current )
> @@ -71,7 +72,7 @@ void *map_domain_page(unsigned long mfn)
>  #endif
>  
>      v = mapcache_current_vcpu();
> -    if ( !v || is_hvm_vcpu(v) )
> +    if ( !v || is_hvm_or_pvh_vcpu(v) )
>          return mfn_to_virt(mfn);
>  
>      dcache = &v->domain->arch.pv_domain.mapcache;
> @@ -175,7 +176,7 @@ void unmap_domain_page(const void *ptr)
>      ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
>  
>      v = mapcache_current_vcpu();
> -    ASSERT(v && !is_hvm_vcpu(v));
> +    ASSERT(v && !is_hvm_or_pvh_vcpu(v));
>  
>      dcache = &v->domain->arch.pv_domain.mapcache;
>      ASSERT(dcache->inuse);
> @@ -242,7 +243,7 @@ int mapcache_domain_init(struct domain *d)
>      struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
>      unsigned int bitmap_pages;
>  
> -    if ( is_hvm_domain(d) || is_idle_domain(d) )
> +    if ( is_hvm_or_pvh_domain(d) || is_idle_domain(d) )
>          return 0;
>  
>  #ifdef NDEBUG
> @@ -273,7 +274,7 @@ int mapcache_vcpu_init(struct vcpu *v)
>      unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
>      unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
>  
> -    if ( is_hvm_vcpu(v) || !dcache->inuse )
> +    if ( is_hvm_or_pvh_vcpu(v) || !dcache->inuse )
>          return 0;
>  
>      if ( ents > dcache->entries )
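
For anyone following along: these hunks all rely on an
is_hvm_or_pvh_*() predicate family introduced earlier in the series.
A minimal sketch of what such helpers would look like (my
reconstruction from the call sites; only is_pvh_domain() actually
appears in this patch, is_pvh_vcpu() is assumed):

    /* Sketch only -- not quoted from the series. */
    #define is_hvm_or_pvh_vcpu(v)   (is_hvm_vcpu(v) || is_pvh_vcpu(v))
    #define is_hvm_or_pvh_domain(d) (is_hvm_domain(d) || is_pvh_domain(d))

The net effect in domain_page.c is that PVH vcpus bypass the PV
mapcache entirely and, as the map_domain_page() hunk shows, fall
through to mfn_to_virt(), same as HVM.
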
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index dbac811..64d0853 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -3780,16 +3780,35 @@ static int replace_grant_p2m_mapping(
>      old_mfn = get_gfn(d, gfn, &type);
>      if ( !p2m_is_grant(type) || mfn_x(old_mfn) != frame )
>      {
> -        put_gfn(d, gfn);
>          gdprintk(XENLOG_WARNING,
>                  "replace_grant_p2m_mapping: old mapping invalid (type %d, mfn %lx, frame %lx)\n",
>                   type, mfn_x(old_mfn), frame);
> -        return GNTST_general_error;
> +        goto out_err;
>      }
>      guest_physmap_remove_page(d, gfn, frame, PAGE_ORDER_4K);
>  
> +    /* PVH: Because we free the existing mfn in XENMEM_add_to_physmap during
> +     * map, we undo that here so the guest P2M (EPT/NPT) is consistent */
> +    if ( is_pvh_domain(d) ) {
> +        struct page_info *page = alloc_domheap_page(d, 0);
> +
> +        if ( page == NULL ) {
> +            gdprintk(XENLOG_ERR, "domid:%d Unable to alloc domheap page\n",
> +                     d->domain_id);
> +            goto out_err;
> +        }
> +        if ( guest_physmap_add_page(d, gfn, page_to_mfn(page), 0) != 0 ) {
> +            gdprintk(XENLOG_ERR, "Unable to add mfn to replace grant\n");
> +            goto out_err;
> +        }
> +    }
> +
>      put_gfn(d, gfn);
>      return GNTST_okay;
> +
> +out_err:
> +    put_gfn(d, gfn);
> +    return GNTST_general_error;
>  }
>  
>  int replace_grant_host_mapping(
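
To make the "no hole in the EPT" point concrete, here is a hedged
sketch of the gfn's lifecycle at unmap time, built only from the calls
visible in the hunk above plus free_domheap_page():

    /* Sketch, not the patch text.  At grant-map time the frame that
     * originally backed gfn was freed (XENMEM_add_to_physmap), so gfn
     * now refers to the granter's frame.  At unmap time that mapping
     * is removed and a fresh domheap page is plugged back in, so the
     * guest P2M (EPT/NPT) is never left with an unbacked gfn. */
    struct page_info *page;

    guest_physmap_remove_page(d, gfn, frame, PAGE_ORDER_4K);
    if ( (page = alloc_domheap_page(d, 0)) == NULL )
        goto out_err;
    if ( guest_physmap_add_page(d, gfn, page_to_mfn(page), 0) != 0 )
    {
        free_domheap_page(page);  /* not in the hunk above, which
                                   * appears to leak the page here */
        goto out_err;
    }
    put_gfn(d, gfn);
    return GNTST_okay;
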
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index 3f97328..84ce267 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -721,7 +721,7 @@ __gnttab_map_grant_ref(
>  
>      double_gt_lock(lgt, rgt);
>  
> -    if ( !is_hvm_domain(ld) && need_iommu(ld) )
> +    if ( !is_hvm_or_pvh_domain(ld) && need_iommu(ld) )
>      {
>          unsigned int wrc, rdc;
>          int err = 0;
> @@ -932,7 +932,7 @@ __gnttab_unmap_common(
>              act->pin -= GNTPIN_hstw_inc;
>      }
>  
> -    if ( !is_hvm_domain(ld) && need_iommu(ld) )
> +    if ( !is_hvm_or_pvh_domain(ld) && need_iommu(ld) )
>      {
>          unsigned int wrc, rdc;
>          int err = 0;
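
For context on the two grant_table.c hunks: the guard now skips the
manual IOMMU bookkeeping for PVH as well as HVM. A hedged reading of
why (my gloss, not from the patch):

    if ( !is_hvm_or_pvh_domain(ld) && need_iommu(ld) )
    {
        /* PV only: a PV guest's pseudophysical address space is not
         * what the IOMMU sees, so grant map/unmap must adjust the
         * IOMMU mappings by hand (the wrc/rdc recount above decides
         * read-only vs read-write). */
    }
    /* HVM and PVH are skipped here: their IOMMU mappings track the
     * P2M, which the grant code already updates. */
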
> -- 
> 1.7.2.3

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel