
[Xen-devel] [PATCHv1 3/3] x86/ept: defer the invalidation until the p2m lock is released



Holding the p2m lock while calling ept_sync_domain() is very expensive
since it does an on_selected_cpus() call.  IPIs on many-socket machines
can be very slow and on_selected_cpus() is serialized.

Defer the invalidation until the p2m lock is released.  Since the processor
may cache partial translations, we also need to make sure any page table
pages to be freed are not freed until the invalidation is complete.  Such
pages are temporarily stored on a per-PCPU list.
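
For reference, here is a standalone sketch of the defer-until-unlock
pattern this patch uses (illustrative only: a pthread mutex and a plain
function pointer stand in for Xen's mm locks and the real EPT sync, and
all fake_* names are invented for the example):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_p2m {
        pthread_mutex_t lock;
        unsigned int defer_flush;   /* lock nesting count */
        bool need_flush;            /* a flush was requested while locked */
        void (*flush)(struct fake_p2m *p2m);
    };

    static void fake_p2m_lock(struct fake_p2m *p)
    {
        pthread_mutex_lock(&p->lock);
        p->defer_flush++;
    }

    /* Stands in for ept_sync_domain(): flush now, or record it as pending. */
    static void fake_sync(struct fake_p2m *p)
    {
        if ( p->defer_flush )
        {
            p->need_flush = true;
            return;
        }
        p->need_flush = false;
        printf("expensive invalidation (IPIs) happens here\n");
    }

    static void fake_p2m_unlock(struct fake_p2m *p)
    {
        bool need_flush = --p->defer_flush == 0 && p->need_flush;

        pthread_mutex_unlock(&p->lock);
        /* The costly invalidation runs only after the lock is dropped. */
        if ( need_flush && p->flush )
            p->flush(p);
    }

    /* Stands in for ept_flush(): sync, then free any deferred table pages. */
    static void fake_flush(struct fake_p2m *p)
    {
        fake_sync(p);
        /* p2m_free_deferred_ptp() would drain the per-PCPU list here. */
    }

    static struct fake_p2m p2m = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .flush = fake_flush,
    };

    int main(void)
    {
        fake_p2m_lock(&p2m);
        fake_sync(&p2m);        /* only sets need_flush */
        fake_p2m_unlock(&p2m);  /* the real flush happens here */
        return 0;
    }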

Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
---
 xen/arch/x86/mm/mm-locks.h | 23 +++++++++++++++--------
 xen/arch/x86/mm/p2m-ept.c  | 17 ++++++++++++++++-
 xen/arch/x86/mm/p2m.c      | 37 +++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/p2m.h  |  6 ++++++
 4 files changed, 72 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 76c7217..473aaab 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -263,14 +263,21 @@ declare_mm_lock(altp2mlist)
  */
 
 declare_mm_rwlock(altp2m);
-#define p2m_lock(p)                         \
-{                                           \
-    if ( p2m_is_altp2m(p) )                 \
-        mm_write_lock(altp2m, &(p)->lock);  \
-    else                                    \
-        mm_write_lock(p2m, &(p)->lock);     \
-}
-#define p2m_unlock(p)         mm_write_unlock(&(p)->lock);
+#define p2m_lock(p)                             \
+    do {                                        \
+        if ( p2m_is_altp2m(p) )                 \
+            mm_write_lock(altp2m, &(p)->lock);  \
+        else                                    \
+            mm_write_lock(p2m, &(p)->lock);     \
+        (p)->defer_flush++;                     \
+    } while (0)
+#define p2m_unlock(p)                                                   \
+    do {                                                                \
+        bool_t need_flush = --(p)->defer_flush == 0 && (p)->need_flush; \
+        mm_write_unlock(&(p)->lock);                                    \
+        if ( need_flush && (p)->flush )                                 \
+            (p)->flush(p);                                              \
+    } while (0)
 #define gfn_lock(p,g,o)       p2m_lock(p)
 #define gfn_unlock(p,g,o)     p2m_unlock(p)
 #define p2m_read_lock(p)      mm_read_lock(p2m, &(p)->lock)
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index a41d7d2..a573c14 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -263,7 +263,7 @@ static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int l
         unmap_domain_page(epte);
     }
     
-    p2m_free_ptp(p2m, mfn_to_page(ept_entry->mfn));
+    p2m_free_ptp_defer(p2m, mfn_to_page(ept_entry->mfn));
 }
 
 static bool_t ept_split_super_page(struct p2m_domain *p2m,
@@ -1103,6 +1103,14 @@ void ept_sync_domain(struct p2m_domain *p2m)
     if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
         return;
 
+    if ( p2m->defer_flush )
+    {
+        p2m->need_flush = 1;
+        return;
+    }
+    else
+        p2m->need_flush = 0;
+
     ASSERT(local_irq_is_enabled());
 
     if ( nestedhvm_enabled(d) && !p2m_is_nestedp2m(p2m) )
@@ -1121,6 +1129,12 @@ void ept_sync_domain(struct p2m_domain *p2m)
                      __ept_sync_domain, p2m, 1);
 }
 
+static void ept_flush(struct p2m_domain *p2m)
+{
+    ept_sync_domain(p2m);
+    p2m_free_deferred_ptp(p2m);
+}
+
 static void ept_enable_pml(struct p2m_domain *p2m)
 {
     /* Domain must have been paused */
@@ -1169,6 +1183,7 @@ int ept_p2m_init(struct p2m_domain *p2m)
     p2m->change_entry_type_range = ept_change_entry_type_range;
     p2m->memory_type_changed = ept_memory_type_changed;
     p2m->audit_p2m = NULL;
+    p2m->flush = ept_flush;
 
     /* Set the memory type used when accessing EPT paging structures. */
     ept->ept_mt = EPT_DEFAULT_MT;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e13672d..2ad1de4 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -504,6 +504,26 @@ void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg)
     return;
 }
 
+DEFINE_PER_CPU(struct page_list_head, p2m_deferred_free_pages);
+
+void p2m_free_ptp_defer(struct p2m_domain *p2m, struct page_info *pg)
+{
+    page_list_del(pg, &p2m->pages);
+    page_list_add(pg, &this_cpu(p2m_deferred_free_pages));
+}
+
+void p2m_free_deferred_ptp(struct p2m_domain *p2m)
+{
+    struct page_list_head *list = &this_cpu(p2m_deferred_free_pages);
+    struct page_info *pg, *tmp;
+
+    page_list_for_each_safe(pg, tmp, list)
+    {
+        page_list_del(pg, list);
+        p2m->domain->arch.paging.free_page(p2m->domain, pg);
+    }
+}
+
 /*
  * Allocate a new p2m table for a domain.
  *
@@ -2827,20 +2847,33 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
                  "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
                  gpfn, mfn, fgfn, tdom->domain_id, fdom->domain_id);
 
-    put_page(page);
-
     /*
      * This put_gfn for the above get_gfn for prev_mfn.  We must do this
      * after set_foreign_p2m_entry so another cpu doesn't populate the gpfn
      * before us.
      */
     put_gfn(tdom, gpfn);
+    if ( prev_page )
+        put_page(prev_page);
+    put_page(page);
 
 out:
     if ( fdom )
         rcu_unlock_domain(fdom);
     return rc;
 }
+
+int p2m_setup(void)
+{
+    unsigned int cpu;
+
+    for_each_present_cpu(cpu)
+        INIT_PAGE_LIST_HEAD(&per_cpu(p2m_deferred_free_pages, cpu));
+
+    return 0;
+}
+__initcall(p2m_setup);
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d748557..f572a1b 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -254,6 +254,10 @@ struct p2m_domain {
                                           unsigned long gfn, l1_pgentry_t *p,
                                          l1_pgentry_t new, unsigned int level);
     long               (*audit_p2m)(struct p2m_domain *p2m);
+    void               (*flush)(struct p2m_domain *p2m);
+
+    unsigned int defer_flush;
+    bool_t need_flush;
 
     /* Default P2M access type for each page in the the domain: new pages,
      * swapped in pages, cleared pages, and pages that are ambiguously
@@ -681,6 +685,8 @@ static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
 
 struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);
+void p2m_free_ptp_defer(struct p2m_domain *p2m, struct page_info *pg);
+void p2m_free_deferred_ptp(struct p2m_domain *p2m);
 
 /* Directly set a p2m entry: only for use by p2m code. Does not need
  * a call to put_gfn afterwards/ */
-- 
2.1.4

