
Re: [Xen-devel] [PATCH] add page_get_owner_and_reference() related ASSERT()s



At 09:50 +0100 on 13 Aug (1439459451), Ian Campbell wrote:
> On Fri, 2015-07-24 at 06:37 -0600, Jan Beulich wrote:
> > The function shouldn't return NULL after having obtained a reference,
> > or else the caller won't know to drop it.
> 
> > Also its result shouldn't be ignored - if calling code is certain that
> > a page already has a non-zero refcount, it had better ASSERT() so.
> 
> How is page_get_owner_and_reference sure the reference count was not zero
> on entry? (WRT the call to page_get_owner in page_get_owner_and_reference
> itself) Or have I misunderstood the assertion being made here?

That path is fine -- if the count was zero, it will return NULL before
trying to get the owner.  But I'm not convinced that the assertion is
correct -- are there really no pages anywhere that have a refcount but
no owner?  What about, e.g. shadow pool pages or other uses of
MEMF_no_owner?  If a guest can cause Xen to try to get_page() one of
those, then we'd hit the new assertion.
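
For reference, the zero-count check sits ahead of the owner lookup; a
condensed sketch of the loop (not verbatim from the tree -- the exact
overflow handling may differ slightly):

struct domain *page_get_owner_and_reference(struct page_info *page)
{
    unsigned long x, y = page->count_info;

    do {
        x = y;
        /* Free page (count 0), or count about to overflow: bail out
         * _before_ taking the reference, so a NULL return here never
         * leaks a ref. */
        if ( unlikely((x & PGC_count_mask) == 0) ||
             unlikely(((x + 1) & PGC_count_mask) == 0) )
            return NULL;
    } while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );

    /* Only now, with the reference held, is the owner looked up. */
    return page_get_owner(page);
}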

How about an unlikely(!owner) path that drops the just-taken reference
instead?
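I.e. the tail of the function would become something like this
(illustrative only, not a tested patch):

    owner = page_get_owner(page);
    if ( unlikely(!owner) )
    {
        /* Referenced but ownerless page (e.g. MEMF_no_owner):
         * give back the reference we just took instead of
         * asserting. */
        put_page(page);
        return NULL;
    }

    return owner;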

It'd be great to add a comment explaining the semantics of the call,
since the uninformed reader might expect it to take a ref in all
cases.
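Rough wording, for illustration:

/*
 * Take a general reference on @page and return its owner.  Returns
 * NULL, with *no* reference held, if the page is free or its count
 * would overflow; callers must put_page() only on a non-NULL return.
 */
struct domain *page_get_owner_and_reference(struct page_info *page);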

Cheers,

Tim.

> Likewise where does the preexisting reference in the grant table case come
> from? Perhaps a comment alongside the ASSERT would make it clearer to
> future readers what the assert is doing ("/* Reference count must
> already have been >=1 due to XXX, so this cannot have failed */") or
> some such.
> 
> > Finally this as well as get_page() and put_page() are required to be
> > available on all architectures - move the declarations to xen/mm.h.
> > 
> > Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> > 
> > --- a/xen/arch/arm/guestcopy.c
> > +++ b/xen/arch/arm/guestcopy.c
> > @@ -1,10 +1,8 @@
> > -#include <xen/config.h>
> >  #include <xen/lib.h>
> >  #include <xen/domain_page.h>
> > +#include <xen/mm.h>
> >  #include <xen/sched.h>
> >  #include <asm/current.h>
> > -
> > -#include <asm/mm.h>
> >  #include <asm/guest_access.h>
> >  
> >  static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
> > --- a/xen/arch/arm/mm.c
> > +++ b/xen/arch/arm/mm.c
> > @@ -1170,6 +1170,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
> >  struct domain *page_get_owner_and_reference(struct page_info *page)
> >  {
> >      unsigned long x, y = page->count_info;
> > +    struct domain *owner;
> >  
> >      do {
> >          x = y;
> > @@ -1182,7 +1183,10 @@ struct domain *page_get_owner_and_refere
> >      }
> >      while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
> >  
> > -    return page_get_owner(page);
> > +    owner = page_get_owner(page);
> > +    ASSERT(owner);
> > +
> > +    return owner;
> >  }
> >  
> >  void put_page(struct page_info *page)
> > --- a/xen/arch/x86/mm.c
> > +++ b/xen/arch/x86/mm.c
> > @@ -2039,6 +2039,7 @@ void put_page(struct page_info *page)
> >  struct domain *page_get_owner_and_reference(struct page_info *page)
> >  {
> >      unsigned long x, y = page->count_info;
> > +    struct domain *owner;
> >  
> >      do {
> >          x = y;
> > @@ -2052,7 +2053,10 @@ struct domain *page_get_owner_and_refere
> >      }
> >      while ( (y = cmpxchg(&page->count_info, x, x + 1)) != x );
> >  
> > -    return page_get_owner(page);
> > +    owner = page_get_owner(page);
> > +    ASSERT(owner);
> > +
> > +    return owner;
> >  }
> >  
> >  
> > --- a/xen/common/grant_table.c
> > +++ b/xen/common/grant_table.c
> > @@ -2245,7 +2245,8 @@ __acquire_grant_for_copy(
> >      {
> >          ASSERT(mfn_valid(act->frame));
> >          *page = mfn_to_page(act->frame);
> > -        (void)page_get_owner_and_reference(*page);
> > +        td = page_get_owner_and_reference(*page);
> > +        ASSERT(td);
> >      }
> >  
> >      act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;
> > --- a/xen/include/asm-arm/mm.h
> > +++ b/xen/include/asm-arm/mm.h
> > @@ -275,10 +275,6 @@ static inline void *page_to_virt(const s
> >      return mfn_to_virt(page_to_mfn(pg));
> >  }
> >  
> > -struct domain *page_get_owner_and_reference(struct page_info *page);
> > -void put_page(struct page_info *page);
> > -int  get_page(struct page_info *page, struct domain *domain);
> > -
> >  struct page_info *get_page_from_gva(struct domain *d, vaddr_t va,
> >                                      unsigned long flags);
> >  
> > --- a/xen/include/asm-x86/mm.h
> > +++ b/xen/include/asm-x86/mm.h
> > @@ -352,9 +352,6 @@ const unsigned long *get_platform_badpag
> >  int page_lock(struct page_info *page);
> >  void page_unlock(struct page_info *page);
> >  
> > -struct domain *page_get_owner_and_reference(struct page_info *page);
> > -void put_page(struct page_info *page);
> > -int  get_page(struct page_info *page, struct domain *domain);
> >  void put_page_type(struct page_info *page);
> >  int  get_page_type(struct page_info *page, unsigned long type);
> >  int  put_page_type_preemptible(struct page_info *page);
> > --- a/xen/include/xen/mm.h
> > +++ b/xen/include/xen/mm.h
> > @@ -45,6 +45,7 @@
> >  #ifndef __XEN_MM_H__
> >  #define __XEN_MM_H__
> >  
> > +#include <xen/compiler.h>
> >  #include <xen/types.h>
> >  #include <xen/list.h>
> >  #include <xen/spinlock.h>
> > @@ -77,9 +78,12 @@ TYPE_SAFE(unsigned long, pfn);
> >  #undef pfn_t
> >  #endif
> >  
> > -struct domain;
> >  struct page_info;
> >  
> > +struct domain *__must_check page_get_owner_and_reference(struct page_info *);
> > +void put_page(struct page_info *);
> > +int get_page(struct page_info *, struct domain *);
> > +
> >  /* Boot-time allocator. Turns into generic allocator after bootstrap. */
> >  void init_boot_pages(paddr_t ps, paddr_t pe);
> >  unsigned long alloc_boot_pages(
> > 
> > 
