[PATCH v3] tools/libs: Use superpages where possible on migrate/resume

Try to allocate larger order pages.
With a test program stressing the TLB (many small random memory
accesses) this yields a performance improvement of about 15%.
On the first memory iteration the sender currently sends memory in
4MB-aligned chunks, which allows the receiver to allocate most pages
as 2MB superpages instead of single 4kB pages.  This works even for
HVM guests, where the first 2MB contains some holes.  If a 2MB
allocation fails, the pfns stay in the batch and are populated as
4kB pages as before.
This change does not handle 1GB superpages, as that would require a
change in the protocol to preallocate space.

Signed-off-by: Frediano Ziglio <frediano.ziglio@xxxxxxxxx>
Release-Acked-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
---
Changes since v1:
- updated commit message and subject;
- changed the implementation to detect possible 2MB pages inside
  the packet being sent, allowing more 2MB superpages.

Changes since v2:
- simplified the detection and allocation of 2MB pages.
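
For reviewers, a minimal standalone sketch of the run detection added
to populate_pfns() (not part of the patch; try_superpage() and the
pfn stream are hypothetical stand-ins for populate_small_superpage()
and the incoming records).  With order 9, mask is 0x1ff, so
(pfn & mask) == mask marks the last 4kB frame of a 2MB-aligned
extent, and a run of mask + 1 = 512 consecutive populated pfns ending
there covers the whole extent:

#include <stdbool.h>
#include <inttypes.h>
#include <stdio.h>

typedef uint64_t xen_pfn_t;

#define SMALL_SUPERPAGE_ORDER 9   /* 2^9 4kB frames == one 2MB extent */

/* Hypothetical stand-in for populate_small_superpage(). */
static bool try_superpage(xen_pfn_t base)
{
    printf("2MB extent at pfn %#" PRIx64 "..%#" PRIx64 "\n",
           base, base + (1 << SMALL_SUPERPAGE_ORDER) - 1);
    return true;
}

int main(void)
{
    const xen_pfn_t mask = ~((~(xen_pfn_t)0) << SMALL_SUPERPAGE_ORDER);
    xen_pfn_t prev = 0;
    unsigned int num_contiguous = 0;

    /* Example stream: two full extents plus a trailing partial one. */
    for ( xen_pfn_t pfn = 0x400; pfn < 0x8f0; ++pfn )
    {
        /* Any gap restarts the run of consecutive pfns. */
        if ( pfn != prev + 1 )
            num_contiguous = 0;
        num_contiguous++;
        prev = pfn;

        /*
         * The run covers a whole aligned extent once it is at least
         * mask + 1 frames long and pfn is the extent's last frame.
         */
        if ( num_contiguous > mask && (pfn & mask) == mask &&
             try_superpage(pfn - mask) )
            num_contiguous = 0;
    }

    return 0;
}

In the patch itself a successful allocation then drops the extent's
512 entries from the pending batch (nr_pfns -= mask + 1); on failure
they simply fall through to the existing 4kB population path.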
---
 tools/libs/guest/xg_sr_restore.c | 45 +++++++++++++++++++++++++++++---
 1 file changed, 42 insertions(+), 3 deletions(-)

diff --git a/tools/libs/guest/xg_sr_restore.c b/tools/libs/guest/xg_sr_restore.c
index 06231ca826..ea5a137612 100644
--- a/tools/libs/guest/xg_sr_restore.c
+++ b/tools/libs/guest/xg_sr_restore.c
@@ -129,6 +129,30 @@ static int pfn_set_populated(struct xc_sr_context *ctx, xen_pfn_t pfn)
     return 0;
 }
 
+#if defined(__i386__) || defined(__x86_64__)
+/* Order of the smallest superpage */
+#define SMALL_SUPERPAGE_ORDER 9
+#else
+#error Define SMALL_SUPERPAGE_ORDER for this platform
+#endif
+
+static bool populate_small_superpage(struct xc_sr_context *ctx, xen_pfn_t pfn)
+{
+    xen_pfn_t mfn = pfn;
+
+    if ( xc_domain_populate_physmap_exact(
+         ctx->xch, ctx->domid, 1, SMALL_SUPERPAGE_ORDER, 0, &mfn) )
+        return false;
+
+    if ( mfn == INVALID_MFN )
+        return false;
+
+    for ( size_t i = 0; i < (1 << SMALL_SUPERPAGE_ORDER); ++i )
+        ctx->restore.ops.set_gfn(ctx, pfn + i, mfn + i);
+
+    return true;
+}
+
 /*
  * Given a set of pfns, obtain memory from Xen to fill the physmap for the
  * unpopulated subset.  If types is NULL, no page type checking is performed
@@ -142,6 +166,9 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned int count,
         *pfns = malloc(count * sizeof(*pfns));
     unsigned int i, nr_pfns = 0;
     int rc = -1;
+    xen_pfn_t prev = 0;
+    unsigned num_contiguous = 0;
+    xen_pfn_t mask = ~((~(xen_pfn_t)0) << SMALL_SUPERPAGE_ORDER);
 
     if ( !mfns || !pfns )
     {
@@ -152,14 +179,26 @@ int populate_pfns(struct xc_sr_context *ctx, unsigned int count,
 
     for ( i = 0; i < count; ++i )
     {
+        xen_pfn_t pfn = original_pfns[i];
+
         if ( (!types || page_type_to_populate(types[i])) &&
-             !pfn_is_populated(ctx, original_pfns[i]) )
+             !pfn_is_populated(ctx, pfn) )
         {
-            rc = pfn_set_populated(ctx, original_pfns[i]);
+            rc = pfn_set_populated(ctx, pfn);
             if ( rc )
                 goto err;
-            pfns[nr_pfns] = mfns[nr_pfns] = original_pfns[i];
+            pfns[nr_pfns] = mfns[nr_pfns] = pfn;
             ++nr_pfns;
+            if ( pfn != prev + 1 )
+                num_contiguous = 0;
+            num_contiguous++;
+            prev = pfn;
+            if ( num_contiguous > mask && (pfn & mask) == mask &&
+                 populate_small_superpage(ctx, pfn - mask) )
+            {
+                nr_pfns -= mask + 1;
+                num_contiguous = 0;
+            }
         }
     }
 
-- 
2.43.0