xen-devel

[Xen-devel] [PATCH 5 of 5] Nested p2m: rework locking around nested-p2m flushes and updates

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 5 of 5] Nested p2m: rework locking around nested-p2m flushes and updates
From: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Date: Mon, 27 Jun 2011 11:46:15 +0100
Cc: Christoph Egger <Christoph.Egger@xxxxxxx>
Delivery-date: Mon, 27 Jun 2011 03:55:13 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1309171570@xxxxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <patchbomb.1309171570@xxxxxxxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mercurial-patchbomb/1.8.3
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1308929085 -3600
# Node ID c82cebcfec2546260e4c3b75bb6a47cfdf8bc162
# Parent  0753351afbbe1c3fdde3a72dfb5a67105524f813
Nested p2m: rework locking around nested-p2m flushes and updates.

The nestedp2m_lock now only covers the mapping from nested-cr3 to
nested-p2m; the tables themselves may be updated or flushed using only
the relevant p2m lock.

This means that the nested-p2m lock is only taken on one path
(p2m_get_nestedp2m()), and always before any p2m locks.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
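
For readers not steeped in Xen's mm-locks, the ordering described above can
be pictured with ordinary mutexes.  The sketch below is not Xen code: the
toy_* structures and the pthread locks are stand-ins invented for
illustration; the real lock machinery lives in xen/arch/x86/mm/mm-locks.h
and the real paths in xen/arch/x86/mm/p2m.c.

    /*
     * Illustrative only: a stand-alone model of the lock ordering this
     * patch establishes, using pthread mutexes in place of Xen's mm-locks.
     * All names here are simplified stand-ins, not Xen's definitions.
     */
    #include <pthread.h>
    #include <stdint.h>

    #define MAX_NESTEDP2M 10
    #define CR3_EADDR     (~0ULL)

    struct toy_p2m {
        pthread_mutex_t lock;   /* per-p2m lock */
        uint64_t        cr3;    /* nested CR3 this table shadows */
    };

    struct toy_domain {
        pthread_mutex_t nestedp2m_lock;           /* per-domain lock */
        struct toy_p2m  nested_p2m[MAX_NESTEDP2M];
    };

    /* Flush/update paths: only the relevant per-p2m lock is taken. */
    static void toy_flush_table(struct toy_p2m *p2m)
    {
        pthread_mutex_lock(&p2m->lock);
        p2m->cr3 = CR3_EADDR;   /* clearing cr3 needs only the per-p2m lock */
        pthread_mutex_unlock(&p2m->lock);
    }

    /* Assignment path: the only place the nestedp2m lock is taken, and it
     * is always taken before any per-p2m lock, so there is a single fixed
     * ordering between the two lock classes. */
    static struct toy_p2m *toy_get_nestedp2m(struct toy_domain *d, uint64_t cr3)
    {
        struct toy_p2m *p2m = &d->nested_p2m[0];  /* LRU selection elided */

        pthread_mutex_lock(&d->nestedp2m_lock);
        pthread_mutex_lock(&p2m->lock);
        p2m->cr3 = cr3;         /* a real cr3 value is set under both locks */
        pthread_mutex_unlock(&p2m->lock);
        pthread_mutex_unlock(&d->nestedp2m_lock);
        return p2m;
    }

    int main(void)
    {
        struct toy_domain d;
        int i;

        pthread_mutex_init(&d.nestedp2m_lock, NULL);
        for (i = 0; i < MAX_NESTEDP2M; i++) {
            pthread_mutex_init(&d.nested_p2m[i].lock, NULL);
            d.nested_p2m[i].cr3 = CR3_EADDR;
        }
        toy_get_nestedp2m(&d, 0x1000);
        toy_flush_table(&d.nested_p2m[0]);
        return 0;
    }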

diff -r 0753351afbbe -r c82cebcfec25 xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c  Fri Jun 24 16:24:44 2011 +0100
+++ b/xen/arch/x86/mm/hap/nested_hap.c  Fri Jun 24 16:24:45 2011 +0100
@@ -96,17 +96,23 @@ nestedp2m_write_p2m_entry(struct p2m_dom
 /*          NESTED VIRT FUNCTIONS           */
 /********************************************/
 static void
-nestedhap_fix_p2m(struct p2m_domain *p2m, paddr_t L2_gpa, paddr_t L0_gpa,
-    p2m_type_t p2mt, p2m_access_t p2ma)
+nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m, 
+                  paddr_t L2_gpa, paddr_t L0_gpa,
+                  p2m_type_t p2mt, p2m_access_t p2ma)
 {
-    int rv;
+    int rv = 1;
     ASSERT(p2m);
     ASSERT(p2m->set_entry);
 
     p2m_lock(p2m);
-    rv = set_p2m_entry(p2m, L2_gpa >> PAGE_SHIFT,
-                         page_to_mfn(maddr_to_page(L0_gpa)),
-                         0 /*4K*/, p2mt, p2ma);
+
+    /* If this p2m table has been flushed or recycled under our feet, 
+     * leave it alone.  We'll pick up the right one as we try to 
+     * vmenter the guest. */
+    if ( p2m->cr3 == nhvm_vcpu_hostcr3(v) )
+         rv = set_p2m_entry(p2m, L2_gpa >> PAGE_SHIFT,
+                            page_to_mfn(maddr_to_page(L0_gpa)),
+                            0 /*4K*/, p2mt, p2ma);
     p2m_unlock(p2m);
 
     if (rv == 0) {
@@ -211,12 +217,10 @@ nestedhvm_hap_nested_page_fault(struct v
         break;
     }
 
-    nestedp2m_lock(d);
     /* fix p2m_get_pagetable(nested_p2m) */
-    nestedhap_fix_p2m(nested_p2m, L2_gpa, L0_gpa,
+    nestedhap_fix_p2m(v, nested_p2m, L2_gpa, L0_gpa,
         p2m_ram_rw,
         p2m_access_rwx /* FIXME: Should use same permission as l1 guest */);
-    nestedp2m_unlock(d);
 
     return NESTEDHVM_PAGEFAULT_DONE;
 }
diff -r 0753351afbbe -r c82cebcfec25 xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h        Fri Jun 24 16:24:44 2011 +0100
+++ b/xen/arch/x86/mm/mm-locks.h        Fri Jun 24 16:24:45 2011 +0100
@@ -96,8 +96,11 @@ declare_mm_lock(shr)
 
 /* Nested P2M lock (per-domain)
  *
- * A per-domain lock that protects some of the nested p2m datastructures.
- * TODO: find out exactly what needs to be covered by this lock */
+ * A per-domain lock that protects the mapping from nested-CR3 to 
+ * nested-p2m.  In particular it covers:
+ * - the array of nested-p2m tables, and all LRU activity therein; and
+ * - setting the "cr3" field of any p2m table to a non-CR3_EADDR value. 
+ *   (i.e. assigning a p2m table to be the shadow of that cr3). */
 
 declare_mm_lock(nestedp2m)
 #define nestedp2m_lock(d)   mm_lock(nestedp2m, &(d)->arch.nested_p2m_lock)
diff -r 0753351afbbe -r c82cebcfec25 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri Jun 24 16:24:44 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Fri Jun 24 16:24:45 2011 +0100
@@ -1052,7 +1052,7 @@ p2m_getlru_nestedp2m(struct domain *d, s
 
 /* Reset this p2m table to be empty */
 static void
-p2m_flush_locked(struct p2m_domain *p2m)
+p2m_flush_table(struct p2m_domain *p2m)
 {
     struct page_info *top, *pg;
     struct domain *d = p2m->domain;
@@ -1094,21 +1094,16 @@ p2m_flush(struct vcpu *v, struct p2m_dom
 
     ASSERT(v->domain == d);
     vcpu_nestedhvm(v).nv_p2m = NULL;
-    nestedp2m_lock(d);
-    p2m_flush_locked(p2m);
+    p2m_flush_table(p2m);
     hvm_asid_flush_vcpu(v);
-    nestedp2m_unlock(d);
 }
 
 void
 p2m_flush_nestedp2m(struct domain *d)
 {
     int i;
-
-    nestedp2m_lock(d);
     for ( i = 0; i < MAX_NESTEDP2M; i++ )
-        p2m_flush_locked(d->arch.nested_p2m[i]);
-    nestedp2m_unlock(d);
+        p2m_flush_table(d->arch.nested_p2m[i]);
 }
 
 struct p2m_domain *
@@ -1131,29 +1126,37 @@ p2m_get_nestedp2m(struct vcpu *v, uint64
     d = v->domain;
     nestedp2m_lock(d);
     p2m = nv->nv_p2m;
-    if ( p2m && (p2m->cr3 == cr3 || p2m->cr3 == CR3_EADDR) )
+    if ( p2m ) 
     {
-        nv->nv_flushp2m = 0;
-        p2m_getlru_nestedp2m(d, p2m);
-        nv->nv_p2m = p2m;
-        if (p2m->cr3 == CR3_EADDR)
-            hvm_asid_flush_vcpu(v);
-        p2m->cr3 = cr3;
-        cpu_set(v->processor, p2m->p2m_dirty_cpumask);
-        nestedp2m_unlock(d);
-        return p2m;
+        p2m_lock(p2m);
+        if ( p2m->cr3 == cr3 || p2m->cr3 == CR3_EADDR )
+        {
+            nv->nv_flushp2m = 0;
+            p2m_getlru_nestedp2m(d, p2m);
+            nv->nv_p2m = p2m;
+            if (p2m->cr3 == CR3_EADDR)
+                hvm_asid_flush_vcpu(v);
+            p2m->cr3 = cr3;
+            cpu_set(v->processor, p2m->p2m_dirty_cpumask);
+            p2m_unlock(p2m);
+            nestedp2m_unlock(d);
+            return p2m;
+        }
+        p2m_unlock(p2m);
     }
 
     /* All p2m's are or were in use. Take the least recent used one,
      * flush it and reuse. */
     p2m = p2m_getlru_nestedp2m(d, NULL);
-    p2m_flush_locked(p2m);
+    p2m_flush_table(p2m);
+    p2m_lock(p2m);
     nv->nv_p2m = p2m;
     p2m->cr3 = cr3;
     nv->nv_flushp2m = 0;
     hvm_asid_flush_vcpu(v);
     nestedhvm_vmcx_flushtlb(nv->nv_p2m);
     cpu_set(v->processor, p2m->p2m_dirty_cpumask);
+    p2m_unlock(p2m);
     nestedp2m_unlock(d);
 
     return p2m;
diff -r 0753351afbbe -r c82cebcfec25 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Fri Jun 24 16:24:44 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Fri Jun 24 16:24:45 2011 +0100
@@ -201,8 +201,13 @@ struct p2m_domain {
     cpumask_t          p2m_dirty_cpumask;
 
     struct domain     *domain;   /* back pointer to domain */
+
+    /* Nested p2ms only: nested-CR3 value that this p2m shadows. 
+     * This can be cleared to CR3_EADDR under the per-p2m lock but
+     * needs both the per-p2m lock and the per-domain nestedp2m lock
+     * to set it to any other value. */
 #define CR3_EADDR     (~0ULL)
-    uint64_t           cr3;      /* to identify this p2m for re-use */
+    uint64_t           cr3;
 
     /* Pages used to construct the p2m */
     struct page_list_head pages;
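
Because the page-fault path no longer holds the per-domain lock while it
writes the entry, the nested p2m it looked up may have been flushed or
recycled in the meantime; nestedhap_fix_p2m now re-checks p2m->cr3 against
the vcpu's current nested CR3 under the per-p2m lock and simply gives up if
they no longer match.  Below is a stand-alone sketch of that re-check with
invented names (toy_*, current_nested_cr3); the real code compares p2m->cr3
with nhvm_vcpu_hostcr3(v) while holding p2m_lock(p2m).

    /* Illustrative only: a simplified model of the re-check that
     * nestedhap_fix_p2m performs after this patch. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CR3_EADDR (~0ULL)

    struct toy_p2m  { uint64_t cr3; };
    struct toy_vcpu { uint64_t current_nested_cr3; };

    /* Caller is assumed to hold the per-p2m lock.  Returns true if the
     * entry was written, false if the table was flushed (cr3 == CR3_EADDR)
     * or recycled for another nested cr3 since the caller looked it up; in
     * that case the caller just returns and the right table is picked up
     * again on the next vmentry. */
    static bool toy_fix_p2m(struct toy_vcpu *v, struct toy_p2m *p2m)
    {
        if (p2m->cr3 != v->current_nested_cr3)
            return false;
        /* ... the real code calls set_p2m_entry() here ... */
        return true;
    }

    int main(void)
    {
        struct toy_vcpu v        = { .current_nested_cr3 = 0x1000 };
        struct toy_p2m  live     = { .cr3 = 0x1000 };
        struct toy_p2m  recycled = { .cr3 = CR3_EADDR };

        printf("live table:     %s\n", toy_fix_p2m(&v, &live)     ? "written" : "skipped");
        printf("recycled table: %s\n", toy_fix_p2m(&v, &recycled) ? "written" : "skipped");
        return 0;
    }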

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
