[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] x86/mm: Add debug code to detect illegal page_lock and put_page_type ordering



The fix for XSA-242 depends on the same cpu never calling
_put_page_type() while holding a page_lock() for that page.  Since
there is no locking discipline between pages, the current code also
implicitly assumes that a single cpu will never hold two page locks
at the same time.

Add checks to debug builds to verify that both of these assumptions hold.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/mm.c | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 5a1b472432..762e21fb40 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1813,10 +1813,25 @@ static int free_l4_table(struct page_info *page)
     return rc;
 }
 
+#ifndef NDEBUG
+/*
+ * Check to make sure that we never nest page_lock() calls on a single
+ * cpu (which may deadlock if two cpus attempt to lock the same pages
+ * in a different order), and that we never call _put_page_type() on a
+ * page while we hold its page_lock() (which would deadlock after
+ * XSA-242).
+ */
+static DEFINE_PER_CPU(struct page_info *, current_page_lock);
+#endif
+
 int page_lock(struct page_info *page)
 {
     unsigned long x, nx;
 
+#ifndef NDEBUG
+    ASSERT(this_cpu(current_page_lock) == NULL);
+#endif
+
     do {
         while ( (x = page->u.inuse.type_info) & PGT_locked )
             cpu_relax();
@@ -1827,6 +1842,10 @@ int page_lock(struct page_info *page)
             return 0;
     } while ( cmpxchg(&page->u.inuse.type_info, x, nx) != x );
 
+#ifndef NDEBUG
+    this_cpu(current_page_lock) = page;
+#endif
+
     return 1;
 }
 
@@ -1834,6 +1853,10 @@ void page_unlock(struct page_info *page)
 {
     unsigned long x, nx, y = page->u.inuse.type_info;
 
+#ifndef NDEBUG
+    ASSERT(this_cpu(current_page_lock) == page);
+#endif
+
     do {
         x = y;
         ASSERT((x & PGT_count_mask) && (x & PGT_locked));
@@ -1842,6 +1865,10 @@ void page_unlock(struct page_info *page)
         /* We must not drop the last reference here. */
         ASSERT(nx & PGT_count_mask);
     } while ( (y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x );
+
+#ifndef NDEBUG
+    this_cpu(current_page_lock) = NULL;
+#endif
 }
 
 /*
@@ -2420,6 +2447,10 @@ static int _put_page_type(struct page_info *page, bool preemptible,
 {
     unsigned long nx, x, y = page->u.inuse.type_info;
 
+#ifndef NDEBUG
+    ASSERT(this_cpu(current_page_lock) != page);
+#endif
+
     for ( ; ; )
     {
         x  = y;
-- 
2.15.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.