To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 07 of 17] x86/mm: Fix memory-sharing code's locking discipline
From: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Date: Thu, 2 Jun 2011 13:20:17 +0100
Delivery-date: Thu, 02 Jun 2011 05:31:57 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1307017210@xxxxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <patchbomb.1307017210@xxxxxxxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mercurial-patchbomb/1.8.3
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1307017012 -3600
# Node ID 51518c502d1b23a1cc4b325f81d17027a9d31a74
# Parent  39603c2f0dab39bf399910e180aa60deca7db680
x86/mm: Fix memory-sharing code's locking discipline.

mem_sharing_audit() is sometimes called with the shr_lock held.  Make it
so for every call.

Move the unsharing loop in p2m_teardown out of the p2m_lock to avoid
deadlocks.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
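[A minimal sketch of the audit-locking discipline described above, in
portable C with pthreads rather than Xen's own lock primitives; the
scaffolding (mutex, owner tracking) is hypothetical, with shr_lock(),
shr_unlock(), shr_locked_by_me() and mem_sharing_audit() modelled
loosely on the names in the patch.  The point is that the audit asserts
its caller already holds the lock instead of taking it itself, so it can
be called from paths that hold shr_lock without re-acquiring it.]

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t shr_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t shr_owner;              /* valid only while locked */
static int shr_locked;

static void shr_lock(void)
{
    pthread_mutex_lock(&shr_mutex);
    shr_owner = pthread_self();
    shr_locked = 1;
}

static void shr_unlock(void)
{
    shr_locked = 0;
    pthread_mutex_unlock(&shr_mutex);
}

static int shr_locked_by_me(void)
{
    return shr_locked && pthread_equal(shr_owner, pthread_self());
}

static void mem_sharing_audit(void)
{
    assert(shr_locked_by_me());          /* caller must hold the lock */
    /* ... walk the hash buckets under the caller's lock ... */
}

/* Call sites bracket the audit with the lock, as the patch does: */
static void example_caller(void)
{
    shr_lock();
    mem_sharing_audit();
    shr_unlock();
}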

diff -r 39603c2f0dab -r 51518c502d1b xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/mem_sharing.c     Thu Jun 02 13:16:52 2011 +0100
@@ -225,7 +225,7 @@ static void mem_sharing_audit(void)
     int bucket;
     struct page_info *pg;
 
-    shr_lock();
+    ASSERT(shr_locked_by_me());
 
     for(bucket=0; bucket < SHR_HASH_LENGTH; bucket++)
     {
@@ -285,8 +285,6 @@ static void mem_sharing_audit(void)
             e = e->next;
         }
     }
-
-    shr_unlock();
 }
 #endif
 
@@ -632,10 +630,10 @@ int mem_sharing_unshare_page(struct doma
     shr_handle_t handle;
     struct list_head *le;
 
+    shr_lock();
     mem_sharing_audit();
+    
     /* Remove the gfn_info from the list */
-    shr_lock();
-    
     mfn = gfn_to_mfn(d, gfn, &p2mt);
     
     /* Has someone already unshared it? */
@@ -739,7 +737,6 @@ int mem_sharing_domctl(struct domain *d,
         case XEN_DOMCTL_MEM_SHARING_OP_CONTROL:
         {
             d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable;
-            mem_sharing_audit();
             rc = 0;
         }
         break;
@@ -752,7 +749,6 @@ int mem_sharing_domctl(struct domain *d,
                 return -EINVAL;
             rc = mem_sharing_nominate_page(d, gfn, 0, &handle);
             mec->u.nominate.handle = handle;
-            mem_sharing_audit();
         }
         break;
 
@@ -768,7 +764,6 @@ int mem_sharing_domctl(struct domain *d,
                 return -EINVAL;
             rc = mem_sharing_nominate_page(d, gfn, 3, &handle);
             mec->u.nominate.handle = handle;
-            mem_sharing_audit();
         }
         break;
 
@@ -777,7 +772,6 @@ int mem_sharing_domctl(struct domain *d,
             shr_handle_t sh = mec->u.share.source_handle;
             shr_handle_t ch = mec->u.share.client_handle;
             rc = mem_sharing_share_pages(sh, ch); 
-            mem_sharing_audit();
         }
         break;
 
@@ -785,7 +779,6 @@ int mem_sharing_domctl(struct domain *d,
         {
             if(!mem_sharing_enabled(d))
                 return -EINVAL;
-            mem_sharing_audit();
             rc = mem_sharing_sharing_resume(d);
         }
         break;
@@ -794,7 +787,6 @@ int mem_sharing_domctl(struct domain *d,
         {
             unsigned long gfn = mec->u.debug.u.gfn;
             rc = mem_sharing_debug_gfn(d, gfn);
-            mem_sharing_audit();
         }
         break;
 
@@ -802,7 +794,6 @@ int mem_sharing_domctl(struct domain *d,
         {
             unsigned long mfn = mec->u.debug.u.mfn;
             rc = mem_sharing_debug_mfn(mfn);
-            mem_sharing_audit();
         }
         break;
 
@@ -810,7 +801,6 @@ int mem_sharing_domctl(struct domain *d,
         {
             grant_ref_t gref = mec->u.debug.u.gref;
             rc = mem_sharing_debug_gref(d, gref);
-            mem_sharing_audit();
         }
         break;
 
@@ -819,6 +809,10 @@ int mem_sharing_domctl(struct domain *d,
             break;
     }
 
+    shr_lock();
+    mem_sharing_audit();
+    shr_unlock();
+
     return rc;
 }
 
diff -r 39603c2f0dab -r 51518c502d1b xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Thu Jun 02 13:16:52 2011 +0100
@@ -296,12 +296,10 @@ void p2m_teardown(struct p2m_domain *p2m
     if (p2m == NULL)
         return;
 
-    p2m_lock(p2m);
-
 #ifdef __x86_64__
     for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
     {
-        mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query);
+        mfn = gfn_to_mfn_type_p2m(p2m, gfn, &t, &a, p2m_query);
         if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
         {
             ASSERT(!p2m_is_nestedp2m(p2m));
@@ -311,6 +309,8 @@ void p2m_teardown(struct p2m_domain *p2m
     }
 #endif
 
+    p2m_lock(p2m);
+
     p2m->phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )

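[For the p2m_teardown change, the hazard is a lock-order inversion:
the unshare loop can end up taking the sharing lock, so running it with
the p2m lock held risks deadlock against paths that take the locks the
other way round (or against a re-acquisition of the p2m lock itself).
A self-contained sketch with plain pthreads; lock_a stands in for
shr_lock and lock_b for p2m_lock, and all names and orderings here are
illustrative assumptions, not Xen's actual code.]

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* A sharing-side path that follows the established order: A, then B. */
static void share_path(void)
{
    pthread_mutex_lock(&lock_a);
    pthread_mutex_lock(&lock_b);
    /* ... update shared-page and p2m state ... */
    pthread_mutex_unlock(&lock_b);
    pthread_mutex_unlock(&lock_a);
}

/* The unshare pass: needs A on its own. */
static void unshare_all(void)
{
    pthread_mutex_lock(&lock_a);
    /* ... break sharing for every shared gfn ... */
    pthread_mutex_unlock(&lock_a);
}

/* Broken teardown: holds B while acquiring A, i.e. B-then-A.  Run
 * concurrently with share_path() (A-then-B) this is a classic ABBA
 * deadlock -- the shape the patch removes from p2m_teardown. */
static void teardown_broken(void)
{
    pthread_mutex_lock(&lock_b);
    unshare_all();
    pthread_mutex_unlock(&lock_b);
}

/* Fixed teardown, as in the patch: run the A-taking unshare loop
 * first, then take B only for the table teardown itself. */
static void teardown_fixed(void)
{
    unshare_all();
    pthread_mutex_lock(&lock_b);
    /* ... null the phys_table, free the p2m pages ... */
    pthread_mutex_unlock(&lock_b);
}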
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
