[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC PATCH 83/84] x86/pmap: rewrite logic for locking.



From: Hongyan Xia <hongyax@xxxxxxxxxx>

Due to the limited number of PMAP entries, another pCPU is allowed to
use PMAP only after the current pCPU has unmapped all of its mappings.

Signed-off-by: Hongyan Xia <hongyax@xxxxxxxxxx>
---
 xen/arch/x86/domain_page.c |  4 ----
 xen/arch/x86/pmap.c        | 48 ++++++++++++++++++++++++++++++++------
 xen/include/asm-x86/pmap.h |  2 --
 3 files changed, 41 insertions(+), 13 deletions(-)

diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 782dd0650c..189ca41a1d 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -171,9 +171,7 @@ out:
     return (void *)MAPCACHE_VIRT_START + pfn_to_paddr(idx);
 
 pmap:
-    pmap_lock();
     ret = pmap_map(mfn);
-    pmap_unlock();
     flush_tlb_one_local(ret);
     return ret;
 }
@@ -188,9 +186,7 @@ void unmap_domain_page(const void *ptr)
 
     if ( va >= FIXADDR_START && va < FIXADDR_TOP )
     {
-        pmap_lock();
         pmap_unmap((void *)ptr);
-        pmap_unlock();
         return;
     }
 
diff --git a/xen/arch/x86/pmap.c b/xen/arch/x86/pmap.c
index 93104d0b86..af7438cbe4 100644
--- a/xen/arch/x86/pmap.c
+++ b/xen/arch/x86/pmap.c
@@ -15,18 +15,44 @@
  * used anywhere else other than the stated purpose above.
  */
 
-static DEFINE_SPINLOCK(lock);
+static DEFINE_SPINLOCK(lock_cpu);
 /* Bitmap to track which slot is used */
 static unsigned long inuse;
 
-void pmap_lock(void)
+static unsigned int lock_cpu_id = ~0;
+static unsigned int lock_count;
+
+/*
+ * Only one pCPU is allowed to use PMAP entries. Another pCPU can use PMAP only
+ * when the current pCPU has unmapped all entries.
+ */
+static void pmap_cpu_up(void)
 {
-    spin_lock(&lock);
+    int ret = -1;
+    unsigned int cpu_id = smp_processor_id();
+
+    do
+    {
+        while ( cpu_id != lock_cpu_id && lock_count != 0 )
+            ;
+        spin_lock(&lock_cpu);
+        if ( cpu_id == lock_cpu_id || lock_count == 0 )
+        {
+            lock_cpu_id = cpu_id;
+            lock_count++;
+            ret = 0;
+        }
+        spin_unlock(&lock_cpu);
+    } while ( ret == -1 );
 }
 
-void pmap_unlock(void)
+static void pmap_cpu_down(void)
 {
-    spin_unlock(&lock);
+    spin_lock(&lock_cpu);
+    ASSERT(smp_processor_id() == lock_cpu_id);
+    ASSERT(lock_count);
+    lock_count--;
+    spin_unlock(&lock_cpu);
 }
 
 void *pmap_map(mfn_t mfn)
@@ -37,7 +63,13 @@ void *pmap_map(mfn_t mfn)
     l1_pgentry_t *pl1e;
 
     ASSERT(!in_irq());
-    ASSERT(spin_is_locked(&lock));
+
+    /*
+     * This semaphore-like locking means only one pCPU is allowed, which also
+     * suggests PMAP should only be used to bootstrap other structures. Any
+     * general purpose use of PMAP is a mistake.
+     */
+    pmap_cpu_up();
 
     idx = find_first_zero_bit(&inuse, NUM_FIX_PMAP);
     if ( idx == NUM_FIX_PMAP )
@@ -63,13 +95,15 @@ void pmap_unmap(void *p)
 
     ASSERT(!in_irq());
     ASSERT(slot >= FIX_PMAP_BEGIN && slot <= FIX_PMAP_END);
-    ASSERT(spin_is_locked(&lock));
+
 
     idx = slot - FIX_PMAP_BEGIN;
     __clear_bit(idx, &inuse);
 
     pl1e = &l1_fixmap[L1_PAGETABLE_ENTRIES - 1 - slot];
     l1e_write_atomic(pl1e, l1e_from_mfn(_mfn(0), 0));
+
+    pmap_cpu_down();
 }
 
 static void __maybe_unused build_assertions(void)
diff --git a/xen/include/asm-x86/pmap.h b/xen/include/asm-x86/pmap.h
index 34d4f2bb38..790cd71fb3 100644
--- a/xen/include/asm-x86/pmap.h
+++ b/xen/include/asm-x86/pmap.h
@@ -4,8 +4,6 @@
 /* Large enough for mapping 5 levels of page tables with some headroom */
 #define NUM_FIX_PMAP 8
 
-void pmap_lock(void);
-void pmap_unlock(void);
 void *pmap_map(mfn_t mfn);
 void pmap_unmap(void *p);
 
-- 
2.17.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.