
Re: [Xen-devel] [PATCH 2 of 3] Make p2m lookups fully synchronized wrt modifications

To: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Subject: Re: [Xen-devel] [PATCH 2 of 3] Make p2m lookups fully synchronized wrt modifications
From: Tim Deegan <tim@xxxxxxx>
Date: Thu, 10 Nov 2011 12:53:42 +0000
Cc: olaf@xxxxxxxxx, xen-devel@xxxxxxxxxxxxxxxxxxx, George.Dunlap@xxxxxxxxxxxxx, andres@xxxxxxxxxxxxxx, keir.xen@xxxxxxxxx, adin@xxxxxxxxxxxxxx
Delivery-date: Thu, 10 Nov 2011 04:54:14 -0800
In-reply-to: <6203a0549d8a1a21753b.1320788545@xxxxxxxxxxxxxxxxxxx>
References: <patchbomb.1320788543@xxxxxxxxxxxxxxxxxxx> <6203a0549d8a1a21753b.1320788545@xxxxxxxxxxxxxxxxxxx>
At 16:42 -0500 on 08 Nov (1320770545), Andres Lagar-Cavilla wrote:
>  xen/arch/x86/mm/mm-locks.h |  13 +++++++------
>  xen/arch/x86/mm/p2m.c      |  18 +++++++++++++++++-
>  xen/include/asm-x86/p2m.h  |  39 ++++++++++++++++++++++++---------------
>  3 files changed, 48 insertions(+), 22 deletions(-)
> 
> 
> We achieve this by locking/unlocking the global p2m_lock in get/put_gfn.
> This brings about a few consequences for the p2m_lock:
> 
>  - it is not ordered anymore in mm-locks.h: unshare does get_gfn -> shr_lock,
>    and there are code paths that do paging_lock -> get_gfn. Either of these
>    would cause mm-locks.h to panic.

In that case there's a potential deadlock in the sharing code, and
turning off the safety catches is not an acceptable response to that. :)
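
To spell out the failure mode: once two code paths take the same pair of
locks in opposite orders, an unlucky interleaving blocks both sides
forever.  A standalone illustration in plain pthreads (not Xen code; the
mutex names are just stand-ins for the locks discussed above):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t p2m = PTHREAD_MUTEX_INITIALIZER; /* stand-in for p2m_lock */
    static pthread_mutex_t shr = PTHREAD_MUTEX_INITIALIZER; /* stand-in for shr_lock */

    static void *unshare_path(void *arg)   /* get_gfn -> shr_lock */
    {
        (void)arg;
        pthread_mutex_lock(&p2m);
        usleep(1000);                      /* widen the race window */
        pthread_mutex_lock(&shr);          /* blocks if the other side holds shr */
        pthread_mutex_unlock(&shr);
        pthread_mutex_unlock(&p2m);
        return NULL;
    }

    static void *sharing_path(void *arg)   /* shr_lock -> ... -> p2m_lock */
    {
        (void)arg;
        pthread_mutex_lock(&shr);
        usleep(1000);
        pthread_mutex_lock(&p2m);          /* blocks if the other side holds p2m */
        pthread_mutex_unlock(&p2m);
        pthread_mutex_unlock(&shr);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;
        pthread_create(&a, NULL, unshare_path, NULL);
        pthread_create(&b, NULL, sharing_path, NULL);
        pthread_join(a, NULL);             /* with unlucky timing, never returns */
        pthread_join(b, NULL);
        puts("got lucky this run");
        return 0;
    }

The ordering checks exist to turn that class of bug into a noisy panic
during development rather than a rare silent hang in the field.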

ISTR you had a plan to get rid of the shr_lock entirely, or am
I misremembering?

Tim.

>  - the lock is always taken recursively, as there are many paths that
>    do get_gfn -> p2m_lock
> 
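
(That part is reasonable on its own: a recursive lock simply lets the
current holder re-take it and nest.  A standalone sketch with a pthreads
recursive mutex, again not Xen code, with the function names as stand-ins:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t p2m;

    static void set_entry(void)        /* stands in for set_p2m_entry() */
    {
        pthread_mutex_lock(&p2m);      /* nested acquisition by the holder: OK */
        /* ... modify the table ... */
        pthread_mutex_unlock(&p2m);
    }

    static void fault_path(void)       /* stands in for get_gfn -> update -> put_gfn */
    {
        pthread_mutex_lock(&p2m);      /* get_gfn takes the lock */
        set_entry();                   /* re-takes it recursively, no self-deadlock */
        pthread_mutex_unlock(&p2m);    /* put_gfn releases it */
    }

    int main(void)
    {
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&p2m, &attr);
        fault_path();
        puts("nested lock/unlock completed");
        return 0;
    }

The objection above is to losing the ordering checks, not to the
recursion itself.)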
> Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
> 
> diff -r a0c55cc5d696 -r 6203a0549d8a xen/arch/x86/mm/mm-locks.h
> --- a/xen/arch/x86/mm/mm-locks.h
> +++ b/xen/arch/x86/mm/mm-locks.h
> @@ -165,14 +165,15 @@ declare_mm_lock(nestedp2m)
>  
>  /* P2M lock (per-p2m-table)
>   * 
> - * This protects all updates to the p2m table.  Updates are expected to
> - * be safe against concurrent reads, which do *not* require the lock. */
> + * This protects all queries and updates to the p2m table. Because queries
> + * can be interleaved with acquisitions of the other locks in this file,
> + * we do not enforce ordering, and make all locking recursive. */
>  
> -declare_mm_lock(p2m)
> -#define p2m_lock(p)           mm_lock(p2m, &(p)->lock)
> -#define p2m_lock_recursive(p) mm_lock_recursive(p2m, &(p)->lock)
> -#define p2m_unlock(p)         mm_unlock(&(p)->lock)
> +#define p2m_lock(p)           spin_lock_recursive(&(p)->lock.lock)
> +#define p2m_lock_recursive(p) spin_lock_recursive(&(p)->lock.lock)
> +#define p2m_unlock(p)         spin_unlock_recursive(&(p)->lock.lock)
>  #define p2m_locked_by_me(p)   mm_locked_by_me(&(p)->lock)
> +#define gfn_locked_by_me(p,g) mm_locked_by_me(&(p)->lock)
>  
>  /* Page alloc lock (per-domain)
>   *
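
For context, what the mm_lock() wrappers being dropped here add on top of
the recursive spinlock is exactly the ordering bookkeeping.  In outline (a
simplified sketch from memory, not the actual mm-locks.h code; the real
thing also records the holder and restores the level on unlock):

    /* Each mm lock is declared at a level; taking a lock at or below the
     * current per-CPU level is an ordering bug, unless we are re-taking
     * a lock we already hold recursively. */
    static DEFINE_PER_CPU(int, mm_lock_level);

    static inline void _mm_lock(mm_lock_t *l, int level, int rec)
    {
        if ( !(mm_locked_by_me(l) && rec) &&
             this_cpu(mm_lock_level) > level )
            panic("mm locking order violation");
        spin_lock_recursive(&l->lock);
        this_cpu(mm_lock_level) = level;
    }

Going straight to spin_lock_recursive() skips all of that, which is why
the out-of-order paths stop panicking, and also why they stop being
caught.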
> diff -r a0c55cc5d696 -r 6203a0549d8a xen/arch/x86/mm/p2m.c
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -158,6 +158,9 @@ mfn_t gfn_to_mfn_type_p2m(struct p2m_dom
>          return _mfn(gfn);
>      }
>  
> +    /* Grab the lock here, don't release until put_gfn */
> +    p2m_lock(p2m);
> +
>      mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
>  
>  #ifdef __x86_64__
> @@ -182,6 +185,19 @@ mfn_t gfn_to_mfn_type_p2m(struct p2m_dom
>      return mfn;
>  }
>  
> +void put_gfn(struct domain *d, unsigned long gfn)
> +{
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +
> +    if ( !p2m || !paging_mode_translate(d) )
> +        /* Nothing to do in this case */
> +        return;
> +
> +    ASSERT(p2m_locked_by_me(p2m));
> +
> +    p2m_unlock(p2m);
> +}
> +
>  int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
>                    unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
>  {
> @@ -190,7 +206,7 @@ int set_p2m_entry(struct p2m_domain *p2m
>      unsigned int order;
>      int rc = 1;
>  
> -    ASSERT(p2m_locked_by_me(p2m));
> +    ASSERT(gfn_locked_by_me(p2m, gfn));
>  
>      while ( todo )
>      {
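
As an aside for anyone converting callers: the discipline the new put_gfn
imposes is that every get_gfn* call is bracketed by a put_gfn on every
exit path.  A hypothetical caller (illustrative only, not part of this
patch; frob_gfn is a made-up name):

    /* Hypothetical: shows the get_gfn/put_gfn pairing, including the
     * error path. */
    static int frob_gfn(struct domain *d, unsigned long gfn)
    {
        p2m_type_t t;
        mfn_t mfn = get_gfn(d, gfn, &t);   /* takes the p2m lock */

        if ( !p2m_is_ram(t) )
        {
            put_gfn(d, gfn);               /* must release on error paths too */
            return -EINVAL;
        }

        /* mfn and t are stable against concurrent p2m updates here. */

        put_gfn(d, gfn);                   /* drops the p2m lock */
        return 0;
    }

Missing a put_gfn on any path now leaves the p2m lock held.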
> diff -r a0c55cc5d696 -r 6203a0549d8a xen/include/asm-x86/p2m.h
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -305,9 +305,10 @@ struct p2m_domain *p2m_get_p2m(struct vc
>  
>  #define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
>  
> -/**** p2m query accessors. After calling any of the variants below, you
> - * need to call put_gfn(domain, gfn). If you don't, you'll lock the
> - * hypervisor. ****/
> +/**** p2m query accessors. They lock p2m_lock, and thus serialize
> + * lookups wrt modifications. They _do not_ release the lock on exit.
> + * After calling any of the variants below, the caller must call
> + * put_gfn. ****/
>  
>  /* Read a particular P2M table, mapping pages as we go.  Most callers
>   * should _not_ call this directly; use the other get_gfn* functions
> @@ -349,19 +350,27 @@ static inline unsigned long get_gfn_unty
>      return INVALID_MFN;
>  }
>  
> -/* This is a noop for now. */
> -static inline void put_gfn(struct domain *d, unsigned long gfn)
> -{
> -}
> +/* Will release the p2m_lock and put the page behind this mapping. */
> +void put_gfn(struct domain *d, unsigned long gfn);
>  
> -/* These are identical for now. The intent is to have the caller not worry 
> - * about put_gfn. To only be used in printk's, crash situations, or to 
> - * peek at a type. You're not holding the p2m entry exclusively after calling
> - * this. */
> -#define get_gfn_unlocked(d, g, t)         get_gfn_type((d), (g), (t), p2m_alloc)
> -#define get_gfn_query_unlocked(d, g, t)   get_gfn_type((d), (g), (t), p2m_query)
> -#define get_gfn_guest_unlocked(d, g, t)   get_gfn_type((d), (g), (t), p2m_guest)
> -#define get_gfn_unshare_unlocked(d, g, t) get_gfn_type((d), (g), (t), p2m_unshare)
> +/* These accessors relieve the caller of worrying about put_gfn. They 
> + * apply to very specific situations: debug printk's, dumps during a domain 
> + * crash, or peeking at a p2m entry/type. The caller is not holding the p2m 
> + * entry exclusively after calling this. */
> +#define build_unlocked_accessor(name)                                   \
> +    static inline mfn_t get_gfn ## name ## _unlocked(struct domain *d,  \
> +                                                unsigned long gfn,      \
> +                                                p2m_type_t *t)          \
> +    {                                                                   \
> +        mfn_t mfn = get_gfn ##name (d, gfn, t);                         \
> +        put_gfn(d, gfn);                                                \
> +        return mfn;                                                     \
> +    }
> +
> +build_unlocked_accessor()
> +build_unlocked_accessor(_query)
> +build_unlocked_accessor(_guest)
> +build_unlocked_accessor(_unshare)
>  
>  /* General conversion function from mfn to gfn */
>  static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
> 
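
For reference, the invocation build_unlocked_accessor(_query) above
expands to roughly this (modulo whitespace):

    static inline mfn_t get_gfn_query_unlocked(struct domain *d,
                                               unsigned long gfn,
                                               p2m_type_t *t)
    {
        mfn_t mfn = get_gfn_query(d, gfn, t);  /* takes the lock ... */
        put_gfn(d, gfn);                       /* ... and drops it immediately */
        return mfn;
    }

So the result can already be stale by the time the caller inspects it,
which is fine for the printk/crash-dump uses the comment describes.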

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel