xen-devel

[Xen-devel] Re: [PATCH 17/18] Nested Virtualization: p2m infrastructure

To: Christoph Egger <Christoph.Egger@xxxxxxx>
Subject: [Xen-devel] Re: [PATCH 17/18] Nested Virtualization: p2m infrastructure
From: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Date: Fri, 16 Apr 2010 15:27:08 +0100
Cc: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Delivery-date: Sun, 18 Apr 2010 08:50:07 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <201004151443.06415.Christoph.Egger@xxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <201004151443.06415.Christoph.Egger@xxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.5.18 (2008-05-17)
This was mostly OK, with a few oddities (below), and assuming that we
can agree that later patches make it necessary. 

If you resubmit, please send it as two patches, one of which does _only_
the change from passing struct domains to passing p2m structs and
nothing else - no comment changes, no format tidying, nothing.  I don't
want to have to comb through another 4000-line patch looking for hidden
surprises.


> @@ -1391,8 +1390,9 @@ out:
>      return rv;
>  }
> 
> +/* Read p2m table (through the linear mapping). */

That's exactly wrong.  The gfn_to_mfn_current() function that you
removed was the one that used the linear mapping.  This one maps and
unmaps pages as it goes. 
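
Something along the lines of

    /* Read the p2m table, mapping and unmapping pages as we go. */

would describe what it actually does.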

>  static mfn_t
> -p2m_gfn_to_mfn(struct domain *d, unsigned long gfn, p2m_type_t *t,
> +p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
>                 p2m_query_t q)
>  {
>      mfn_t mfn;

...

> @@ -1531,8 +1531,7 @@ pod_retry_l1:
>      return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : _mfn(INVALID_MFN);
>  }
>  
> -/* Init the datastructures for later use by the p2m code */
> -int p2m_init(struct domain *d)
> +static int p2m_allocp2m(struct p2m_domain **_p2m)

Since the only error this routine can return is -ENOMEM, why not just
return the p2m pointer or NULL?
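
Something like this (just a sketch, keeping the name from your patch)
would be simpler for the callers:

    static struct p2m_domain *p2m_allocp2m(void)
    {
        return xmalloc(struct p2m_domain);
    }

and then at the call site:

    struct p2m_domain *p2m = p2m_allocp2m();
    if ( p2m == NULL )
        return -ENOMEM;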

>  {
>      struct p2m_domain *p2m;
>  
> @@ -1540,39 +1539,68 @@ int p2m_init(struct domain *d)
>      if ( p2m == NULL )
>          return -ENOMEM;
>  
> -    d->arch.p2m = p2m;
> -
> +    *_p2m = p2m;
> +    return 0;
> +}
> +
> +/* Init the datastructures for later use by the p2m code */
> +static void p2m_initialise(struct domain *d, struct p2m_domain *p2m,
> +                          bool_t preserve_allocfree)
> +{
> +    void *alloc, *free;

These are function pointers; there's no need to hack them into void
pointers.
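
Either spell out the function-pointer types, or just use typeof() on
the struct fields so you don't have to, e.g. (sketch):

    typeof(p2m->alloc_page) alloc = NULL;
    typeof(p2m->free_page) free = NULL;

    if ( preserve_allocfree )
    {
        alloc = p2m->alloc_page;
        free = p2m->free_page;
    }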

> +    alloc = free = NULL;
> +    if (preserve_allocfree) {
> +        alloc = p2m->alloc_page;
> +        free = p2m->free_page;
> +    }
>      memset(p2m, 0, sizeof(*p2m));
>      p2m_lock_init(p2m);
>      INIT_PAGE_LIST_HEAD(&p2m->pages);
>      INIT_PAGE_LIST_HEAD(&p2m->pod.super);
>      INIT_PAGE_LIST_HEAD(&p2m->pod.single);
>  
> +    p2m->domain = d;
>      p2m->set_entry = p2m_set_entry;
>      p2m->get_entry = p2m_gfn_to_mfn;
>      p2m->change_entry_type_global = p2m_change_type_global;
> +    if (preserve_allocfree) {
> +        p2m->alloc_page = alloc;
> +        p2m->free_page = free;
> +    }
>  
>      if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled &&
>           (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
>          ept_p2m_init(d);
>  
> +    return;
> +}

...

>  void p2m_final_teardown(struct domain *d)
>  {
> +    /* Iterate over all p2m tables per domain */

Even when the matching code arrives, this comment isn't really helpful.

>      xfree(d->arch.p2m);
>      d->arch.p2m = NULL;
>  }
>  
>  #if P2M_AUDIT
> -static void audit_p2m(struct domain *d)
> +static void audit_p2m(struct p2m_domain *p2m)
>  {
>      struct page_info *page;
>      struct domain *od;
> @@ -1735,6 +1761,7 @@ static void audit_p2m(struct domain *d)
>      unsigned long orphans_d = 0, orphans_i = 0, mpbad = 0, pmbad = 0;
>      int test_linear;
>      p2m_type_t type;
> +    struct domain *d = p2m->domain;
>  
>      if ( !paging_mode_translate(d) )
>          return;
> @@ -1789,7 +1816,7 @@ static void audit_p2m(struct domain *d)
>              continue;
>          }
>  
> -        p2mfn = gfn_to_mfn_type_foreign(d, gfn, &type, p2m_query);
> +        p2mfn = gfn_to_mfn_type_p2m(p2m, gfn, &type, p2m_query);
>          if ( mfn_x(p2mfn) != mfn )
>          {
>              mpbad++;
> @@ -1805,9 +1832,9 @@ static void audit_p2m(struct domain *d)
>              set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
>          }
>  
> -        if ( test_linear && (gfn <= d->arch.p2m->max_mapped_pfn) )
> +        if ( test_linear && (gfn <= p2m->max_mapped_pfn) )
>          {
> -            lp2mfn = mfn_x(gfn_to_mfn_query(d, gfn, &type));
> +            lp2mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &type));
>              if ( lp2mfn != mfn_x(p2mfn) )
>              {
>                  P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
> @@ -1822,21 +1849,19 @@ static void audit_p2m(struct domain *d)
>      spin_unlock(&d->page_alloc_lock);
>  
>      /* Audit part two: walk the domain's p2m table, checking the entries. */
> -    if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d)) != 0 )
> +    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
>      {
> +        l3_pgentry_t *l3e;
>          l2_pgentry_t *l2e;
>          l1_pgentry_t *l1e;
> -        int i1, i2;
> +        int i1, i2, i3;

Why are you moving these definitions around?  You don't change the use
of the variables anywhere.

>  #if CONFIG_PAGING_LEVELS == 4
>          l4_pgentry_t *l4e;
> -        l3_pgentry_t *l3e;
> -        int i3, i4;
> -        l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
> +        int i4;
> +        l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
>  #else /* CONFIG_PAGING_LEVELS == 3 */
> -        l3_pgentry_t *l3e;
> -        int i3;

Ditto. 

> -        l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
> +        l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
>  #endif
>  
>          gfn = 0;

...

> diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/common/grant_table.c
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -109,7 +109,7 @@ static unsigned inline int max_nr_maptra
>  #define gfn_to_mfn_private(_d, _gfn) ({                     \
>      p2m_type_t __p2mt;                                      \
>      unsigned long __x;                                      \
> -    __x = mfn_x(gfn_to_mfn_unshare(_d, _gfn, &__p2mt, 1));  \
> +    __x = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(_d), _gfn, &__p2mt, 1));  \
>      if ( !p2m_is_valid(__p2mt) )                            \
>          __x = INVALID_MFN;                                  \
>      __x; })
> @@ -1059,7 +1059,7 @@ gnttab_unpopulate_status_frames(struct d
>  
>      for ( i = 0; i < nr_status_frames(gt); i++ )
>      {
> -        page_set_owner(virt_to_page(gt->status[i]), dom_xen);
> +        page_set_owner(virt_to_page(gt->status[i]), p2m_get_hostp2m(dom_xen));

That seems very unlikely to be correct.  Did you get carried away?

>          free_xenheap_page(gt->status[i]);
>          gt->status[i] = NULL;
>      }
> @@ -1498,7 +1498,7 @@ gnttab_transfer(
>          if ( unlikely(e->tot_pages++ == 0) )
>              get_knownalive_domain(e);
>          page_list_add_tail(page, &e->page_list);
> -        page_set_owner(page, e);
> +        page_set_owner(page, p2m_get_hostp2m(e));

Likewise. 

>  
>          spin_unlock(&e->page_alloc_lock);
>  

...

> diff -r f96f6f2cd19a -r 3f48b73b0a30 xen/include/asm-x86/mem_sharing.h
> --- a/xen/include/asm-x86/mem_sharing.h
> +++ b/xen/include/asm-x86/mem_sharing.h
> @@ -23,22 +23,22 @@
>  #define __MEM_SHARING_H__
>  
>  #define sharing_supported(_d) \
> -    (is_hvm_domain(_d) && (_d)->arch.hvm_domain.hap_enabled) 
> +    (is_hvm_domain(_d) && paging_mode_hap(_d)) 
>  

Okay, but it really doesn't belong in this patch.

Cheers,

Tim.


-- 
Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Principal Software Engineer, XenServer Engineering
Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
