[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 3/7] move global variables in suspend into a global context



Move the file-scope global variables used by the domain suspend (save) path into a single struct suspend_ctx, accessed through a context pointer; no functional change intended.
---
 tools/libxc/xc_domain_save.c |  220 +++++++++++++++++++++---------------------
 1 files changed, 108 insertions(+), 112 deletions(-)

diff --git a/tools/libxc/xc_domain_save.c b/tools/libxc/xc_domain_save.c
index eb5d48d..97bd4ad 100644
--- a/tools/libxc/xc_domain_save.c
+++ b/tools/libxc/xc_domain_save.c
@@ -30,27 +30,23 @@
 #define DEF_MAX_ITERS   29   /* limit us to 30 times round loop   */
 #define DEF_MAX_FACTOR   3   /* never send more than 3x p2m_size  */
 
-/* max mfn of the whole machine */
-static unsigned long max_mfn;
-
-/* virtual starting address of the hypervisor */
-static unsigned long hvirt_start;
-
-/* #levels of page tables used by the current guest */
-static unsigned int pt_levels;
-
-/* number of pfns this guest has (i.e. number of entries in the P2M) */
-static unsigned long p2m_size;
-
-/* Live mapping of the table mapping each PFN to its current MFN. */
-static xen_pfn_t *live_p2m = NULL;
+struct suspend_ctx {
+    unsigned long max_mfn; /* max mfn of the whole machine */
+    unsigned int pt_levels; /* #levels of page tables used by the current guest */
+    unsigned long hvirt_start; /* virtual starting address of the hypervisor */
+    unsigned long p2m_size; /* number of pfns this guest has (i.e. number of entries in the P2M) */
+    unsigned int guest_width; /* Address size of the guest */
+    unsigned long m2p_mfn0;
+    xen_pfn_t *live_m2p; /* Live mapping of system MFN to PFN table. */
+    xen_pfn_t *live_p2m; /* Live mapping of the table mapping each PFN to its current MFN. */
+};
 
-/* Live mapping of system MFN to PFN table. */
-static xen_pfn_t *live_m2p = NULL;
-static unsigned long m2p_mfn0;
+static struct suspend_ctx _ctx = {
+    .live_p2m = NULL,
+    .live_m2p = NULL,
+};
 
-/* Address size of the guest */
-unsigned int guest_width;
+static struct suspend_ctx *ctx = &_ctx;
 
 /* buffer for output */
 struct outbuf {
@@ -63,13 +59,13 @@ struct outbuf {
 
 /* grep fodder: machine_to_phys */
 
-#define mfn_to_pfn(_mfn)  (live_m2p[(_mfn)])
+#define mfn_to_pfn(_mfn)  (ctx->live_m2p[(_mfn)])
 
 #define pfn_to_mfn(_pfn)                                            \
-  ((xen_pfn_t) ((guest_width==8)                                    \
-                ? (((uint64_t *)live_p2m)[(_pfn)])                  \
-                : ((((uint32_t *)live_p2m)[(_pfn)]) == 0xffffffffU  \
-                   ? (-1UL) : (((uint32_t *)live_p2m)[(_pfn)]))))
+  ((xen_pfn_t) ((ctx->guest_width==8)                                    \
+                ? (((uint64_t *)ctx->live_p2m)[(_pfn)])                  \
+                : ((((uint32_t *)ctx->live_p2m)[(_pfn)]) == 0xffffffffU  \
+                   ? (-1UL) : (((uint32_t *)ctx->live_p2m)[(_pfn)]))))
 
 /*
  * Returns TRUE if the given machine frame number has a unique mapping
@@ -77,7 +73,7 @@ struct outbuf {
  */
 #define MFN_IS_IN_PSEUDOPHYS_MAP(max_mfn, _mfn) \
     (((_mfn) < (max_mfn)) &&                    \
-     ((mfn_to_pfn(_mfn) < (p2m_size)) &&        \
+     ((mfn_to_pfn(_mfn) < (ctx->p2m_size)) &&        \
       (pfn_to_mfn(mfn_to_pfn(_mfn)) == (_mfn))))
 
 /*
@@ -87,7 +83,7 @@ struct outbuf {
 
 #define BITS_PER_LONG (sizeof(unsigned long) * 8)
 #define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
-#define BITMAP_SIZE   (BITS_TO_LONGS(p2m_size) * sizeof(unsigned long))
+#define BITMAP_SIZE   (BITS_TO_LONGS(ctx->p2m_size) * sizeof(unsigned long))
 
 #define BITMAP_ENTRY(_nr,_bmap) \
    ((volatile unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG]
@@ -415,7 +411,7 @@ static int analysis_phase(int xc_handle, uint32_t domid,
         int i;
 
         xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
-                          arr, p2m_size, NULL, 0, NULL);
+                          arr, ctx->p2m_size, NULL, 0, NULL);
         DPRINTF("#Flush\n");
         for ( i = 0; i < 40; i++ )
         {
@@ -462,12 +458,12 @@ static void *map_frame_list_list(int xc_handle, uint32_t dom,
 {
     int count = 100;
     void *p;
-    uint64_t fll = GET_FIELD(guest_width, shinfo, arch.pfn_to_mfn_frame_list_list);
+    uint64_t fll = GET_FIELD(ctx->guest_width, shinfo, arch.pfn_to_mfn_frame_list_list);
 
     while ( count-- && (fll == 0) )
     {
         usleep(10000);
-        fll = GET_FIELD(guest_width, shinfo, arch.pfn_to_mfn_frame_list_list);
+        fll = GET_FIELD(ctx->guest_width, shinfo, arch.pfn_to_mfn_frame_list_list);
     }
 
     if ( fll == 0 )
@@ -504,12 +500,12 @@ static int canonicalize_pagetable(unsigned long type, unsigned long pfn,
    ** reserved hypervisor mappings. This depends on the current
    ** page table type as well as the number of paging levels.
    */
-    xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2) ? 4 : 8);
+    xen_start = xen_end = pte_last = PAGE_SIZE / ((ctx->pt_levels == 2) ? 4 : 8);
 
-    if ( (pt_levels == 2) && (type == XEN_DOMCTL_PFINFO_L2TAB) )
-        xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT);
+    if ( (ctx->pt_levels == 2) && (type == XEN_DOMCTL_PFINFO_L2TAB) )
+        xen_start = (ctx->hvirt_start >> L2_PAGETABLE_SHIFT);
 
-    if ( (pt_levels == 3) && (type == XEN_DOMCTL_PFINFO_L3TAB) )
+    if ( (ctx->pt_levels == 3) && (type == XEN_DOMCTL_PFINFO_L3TAB) )
         xen_start = L3_PAGETABLE_ENTRIES_PAE;
 
     /*
@@ -517,30 +513,30 @@ static int canonicalize_pagetable(unsigned long type, unsigned long pfn,
    ** We can spot this by looking for the guest's mappingof the m2p.
    ** Guests must ensure that this check will fail for other L2s.
    */
-    if ( (pt_levels == 3) && (type == XEN_DOMCTL_PFINFO_L2TAB) )
+    if ( (ctx->pt_levels == 3) && (type == XEN_DOMCTL_PFINFO_L2TAB) )
     {
         int hstart;
         uint64_t he;
 
-        hstart = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
+        hstart = (ctx->hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
         he = ((const uint64_t *) spage)[hstart];
 
-        if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86(guest_width)) == m2p_mfn0 )
+        if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86(ctx->guest_width)) == ctx->m2p_mfn0 )
         {
             /* hvirt starts with xen stuff... */
             xen_start = hstart;
         }
-        else if ( hvirt_start != 0xf5800000 )
+        else if ( ctx->hvirt_start != 0xf5800000 )
         {
             /* old L2s from before hole was shrunk... */
             hstart = (0xf5800000 >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
             he = ((const uint64_t *) spage)[hstart];
-            if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86(guest_width)) == m2p_mfn0 )
+            if ( ((he >> PAGE_SHIFT) & MFN_MASK_X86(ctx->guest_width)) == ctx->m2p_mfn0 )
                 xen_start = hstart;
         }
     }
 
-    if ( (pt_levels == 4) && (type == XEN_DOMCTL_PFINFO_L4TAB) )
+    if ( (ctx->pt_levels == 4) && (type == XEN_DOMCTL_PFINFO_L4TAB) )
     {
         /*
         ** XXX SMH: should compute these from hvirt_start (which we have)
@@ -555,7 +551,7 @@ static int canonicalize_pagetable(unsigned long type, unsigned long pfn,
     {
         unsigned long pfn, mfn;
 
-        if ( pt_levels == 2 )
+        if ( ctx->pt_levels == 2 )
             pte = ((const uint32_t*)spage)[i];
         else
             pte = ((const uint64_t*)spage)[i];
@@ -565,8 +561,8 @@ static int canonicalize_pagetable(unsigned long type, unsigned long pfn,
 
         if ( pte & _PAGE_PRESENT )
         {
-            mfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86(guest_width);
-            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(max_mfn, mfn) )
+            mfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86(ctx->guest_width);
+            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctx->max_mfn, mfn) )
             {
                 /* This will happen if the type info is stale which
                    is quite feasible under live migration */
@@ -576,13 +572,13 @@ static int canonicalize_pagetable(unsigned long type, unsigned long pfn,
                  * compat m2p, so we quietly zap them.  This doesn't
                  * count as a race, so don't report it. */
                 if ( !(type == XEN_DOMCTL_PFINFO_L2TAB 
-                       && sizeof (unsigned long) > guest_width) )
+                       && sizeof (unsigned long) > ctx->guest_width) )
                      race = 1;  /* inform the caller; fatal if !live */ 
             }
             else
                 pfn = mfn_to_pfn(mfn);
 
-            pte &= ~MADDR_MASK_X86(guest_width);
+            pte &= ~MADDR_MASK_X86(ctx->guest_width);
             pte |= (uint64_t)pfn << PAGE_SHIFT;
 
             /*
@@ -590,13 +586,13 @@ static int canonicalize_pagetable(unsigned long type, unsigned long pfn,
              * a 64bit hypervisor. We zap these here to avoid any
              * surprise at restore time...
             */
-            if ( (pt_levels == 3) &&
+            if ( (ctx->pt_levels == 3) &&
                  (type == XEN_DOMCTL_PFINFO_L3TAB) &&
                  (pte & (_PAGE_USER|_PAGE_RW|_PAGE_ACCESSED)) )
                 pte &= ~(_PAGE_USER|_PAGE_RW|_PAGE_ACCESSED);
         }
 
-        if ( pt_levels == 2 )
+        if ( ctx->pt_levels == 2 )
             ((uint32_t*)dpage)[i] = pte;
         else
             ((uint64_t*)dpage)[i] = pte;
@@ -704,20 +700,20 @@ static xen_pfn_t *map_and_save_p2m_table(int xc_handle,
     memcpy(p2m_frame_list_list, live_p2m_frame_list_list, PAGE_SIZE);
 
     /* Canonicalize guest's unsigned long vs ours */
-    if ( guest_width > sizeof(unsigned long) )
+    if ( ctx->guest_width > sizeof(unsigned long) )
         for ( i = 0; i < PAGE_SIZE/sizeof(unsigned long); i++ )
-            if ( i < PAGE_SIZE/guest_width )
+            if ( i < PAGE_SIZE/ctx->guest_width )
                 p2m_frame_list_list[i] = ((uint64_t *)p2m_frame_list_list)[i];
             else
                 p2m_frame_list_list[i] = 0;
-    else if ( guest_width < sizeof(unsigned long) )
+    else if ( ctx->guest_width < sizeof(unsigned long) )
         for ( i = PAGE_SIZE/sizeof(unsigned long) - 1; i >= 0; i-- )
             p2m_frame_list_list[i] = ((uint32_t *)p2m_frame_list_list)[i];
 
     live_p2m_frame_list =
         xc_map_foreign_batch(xc_handle, dom, PROT_READ,
                              p2m_frame_list_list,
-                             P2M_FLL_ENTRIES(p2m_size, guest_width));
+                             P2M_FLL_ENTRIES(ctx->p2m_size, ctx->guest_width));
     if ( !live_p2m_frame_list )
     {
         ERROR("Couldn't map p2m_frame_list");
@@ -725,20 +721,20 @@ static xen_pfn_t *map_and_save_p2m_table(int xc_handle,
     }
 
     /* Get a local copy of the live_P2M_frame_list */
-    if ( !(p2m_frame_list = malloc(P2M_TOOLS_FL_SIZE(p2m_size, guest_width))) )
+    if ( !(p2m_frame_list = malloc(P2M_TOOLS_FL_SIZE(ctx->p2m_size, ctx->guest_width))) )
     {
         ERROR("Couldn't allocate p2m_frame_list array");
         goto out;
     }
-    memset(p2m_frame_list, 0, P2M_TOOLS_FL_SIZE(p2m_size, guest_width));
-    memcpy(p2m_frame_list, live_p2m_frame_list, P2M_GUEST_FL_SIZE(p2m_size, guest_width));
+    memset(p2m_frame_list, 0, P2M_TOOLS_FL_SIZE(ctx->p2m_size, ctx->guest_width));
+    memcpy(p2m_frame_list, live_p2m_frame_list, P2M_GUEST_FL_SIZE(ctx->p2m_size, ctx->guest_width));
 
     /* Canonicalize guest's unsigned long vs ours */
-    if ( guest_width > sizeof(unsigned long) )
-        for ( i = 0; i < P2M_FL_ENTRIES(p2m_size, guest_width); i++ )
+    if ( ctx->guest_width > sizeof(unsigned long) )
+        for ( i = 0; i < P2M_FL_ENTRIES(ctx->p2m_size, ctx->guest_width); i++ )
             p2m_frame_list[i] = ((uint64_t *)p2m_frame_list)[i];
-    else if ( guest_width < sizeof(unsigned long) )
-        for ( i = P2M_FL_ENTRIES(p2m_size, guest_width) - 1; i >= 0; i-- )
+    else if ( ctx->guest_width < sizeof(unsigned long) )
+        for ( i = P2M_FL_ENTRIES(ctx->p2m_size, ctx->guest_width) - 1; i >= 0; i-- )
             p2m_frame_list[i] = ((uint32_t *)p2m_frame_list)[i];
 
 
 
@@ -749,35 +745,35 @@ static xen_pfn_t *map_and_save_p2m_table(int xc_handle,
 
     p2m = xc_map_foreign_batch(xc_handle, dom, PROT_READ,
                                p2m_frame_list,
-                               P2M_FL_ENTRIES(p2m_size, guest_width));
+                               P2M_FL_ENTRIES(ctx->p2m_size, ctx->guest_width));
     if ( !p2m )
     {
         ERROR("Couldn't map p2m table");
         goto out;
     }
-    live_p2m = p2m; /* So that translation macros will work */
+    ctx->live_p2m = p2m; /* So that translation macros will work */
     
     /* Canonicalise the pfn-to-mfn table frame-number list. */
-    for ( i = 0; i < p2m_size; i += FPP(guest_width) )
+    for ( i = 0; i < ctx->p2m_size; i += FPP(ctx->guest_width) )
     {
-        if ( !MFN_IS_IN_PSEUDOPHYS_MAP(max_mfn, p2m_frame_list[i/FPP(guest_width)]) )
+        if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctx->max_mfn, p2m_frame_list[i/FPP(ctx->guest_width)]) )
         {
             ERROR("Frame# in pfn-to-mfn frame list is not in pseudophys");
             ERROR("entry %d: p2m_frame_list[%ld] is 0x%"PRIx64", max 0x%lx",
-                  i, i/FPP(guest_width), (uint64_t)p2m_frame_list[i/FPP(guest_width)], max_mfn);
-            if ( p2m_frame_list[i/FPP(guest_width)] < max_mfn ) 
+                  i, i/FPP(ctx->guest_width), (uint64_t)p2m_frame_list[i/FPP(ctx->guest_width)], ctx->max_mfn);
+            if ( p2m_frame_list[i/FPP(ctx->guest_width)] < ctx->max_mfn ) 
             {
                 ERROR("m2p[0x%"PRIx64"] = 0x%"PRIx64, 
-                      (uint64_t)p2m_frame_list[i/FPP(guest_width)],
-                      (uint64_t)live_m2p[p2m_frame_list[i/FPP(guest_width)]]);
+                      (uint64_t)p2m_frame_list[i/FPP(ctx->guest_width)],
+                      (uint64_t)ctx->live_m2p[p2m_frame_list[i/FPP(ctx->guest_width)]]);
                 ERROR("p2m[0x%"PRIx64"] = 0x%"PRIx64, 
-                      (uint64_t)live_m2p[p2m_frame_list[i/FPP(guest_width)]],
-                      (uint64_t)p2m[live_m2p[p2m_frame_list[i/FPP(guest_width)]]]);
+                      (uint64_t)ctx->live_m2p[p2m_frame_list[i/FPP(ctx->guest_width)]],
+                      (uint64_t)p2m[ctx->live_m2p[p2m_frame_list[i/FPP(ctx->guest_width)]]]);
 
             }
             goto out;
         }
-        p2m_frame_list[i/FPP(guest_width)] = mfn_to_pfn(p2m_frame_list[i/FPP(guest_width)]);
+        p2m_frame_list[i/FPP(ctx->guest_width)] = mfn_to_pfn(p2m_frame_list[i/FPP(ctx->guest_width)]);
     }
 
     if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
@@ -793,7 +789,7 @@ static xen_pfn_t *map_and_save_p2m_table(int xc_handle,
      */
     {
         unsigned long signature = ~0UL;
-        uint32_t chunk1_sz = ((guest_width==8) 
+        uint32_t chunk1_sz = ((ctx->guest_width==8) 
                               ? sizeof(ctxt.x64) 
                               : sizeof(ctxt.x32));
         uint32_t chunk2_sz = 0;
@@ -812,7 +808,7 @@ static xen_pfn_t *map_and_save_p2m_table(int xc_handle,
     }
 
     if ( write_exact(io_fd, p2m_frame_list, 
-                     P2M_FL_ENTRIES(p2m_size, guest_width) * sizeof(xen_pfn_t)) )
+                     P2M_FL_ENTRIES(ctx->p2m_size, ctx->guest_width) * sizeof(xen_pfn_t)) )
     {
         PERROR("write: p2m_frame_list");
         goto out;
     }
@@ -823,13 +819,13 @@ static xen_pfn_t *map_and_save_p2m_table(int xc_handle,
  out:
     
     if ( !success && p2m )
-        munmap(p2m, P2M_FLL_ENTRIES(p2m_size, guest_width) * PAGE_SIZE);
+        munmap(p2m, P2M_FLL_ENTRIES(ctx->p2m_size, ctx->guest_width) * PAGE_SIZE);
 
     if ( live_p2m_frame_list_list )
         munmap(live_p2m_frame_list_list, PAGE_SIZE);
 
     if ( live_p2m_frame_list )
-        munmap(live_p2m_frame_list, P2M_FLL_ENTRIES(p2m_size, guest_width) * PAGE_SIZE);
+        munmap(live_p2m_frame_list, P2M_FLL_ENTRIES(ctx->p2m_size, ctx->guest_width) * PAGE_SIZE);
 
     if ( p2m_frame_list_list ) 
         free(p2m_frame_list_list);
@@ -908,7 +904,7 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
     initialize_mbit_rate();
 
     if ( !get_platform_info(xc_handle, dom,
-                            &max_mfn, &hvirt_start, &pt_levels, &guest_width) )
+                            &ctx->max_mfn, &ctx->hvirt_start, &ctx->pt_levels, &ctx->guest_width) )
     {
         ERROR("Unable to get platform info.");
         return 1;
@@ -935,7 +931,7 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
     }
 
     /* Get the size of the P2M table */
-    p2m_size = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom) + 1;
+    ctx->p2m_size = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom) + 1;
 
     /* Domain is still running at this point */
     if ( live )
@@ -981,7 +977,7 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
     last_iter = !live;
 
     /* pretend we sent all the pages last iteration */
-    sent_last_iter = p2m_size;
+    sent_last_iter = ctx->p2m_size;
 
     /* Setup to_send / to_fix and to_skip bitmaps */
     to_send = xg_memalign(PAGE_SIZE, ROUNDUP(BITMAP_SIZE, PAGE_SHIFT)); 
@@ -1047,14 +1043,14 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
     }
 
     /* Setup the mfn_to_pfn table mapping */
-    if ( !(live_m2p = xc_map_m2p(xc_handle, max_mfn, PROT_READ, &m2p_mfn0)) )
+    if ( !(ctx->live_m2p = xc_map_m2p(xc_handle, ctx->max_mfn, PROT_READ, &ctx->m2p_mfn0)) )
     {
         ERROR("Failed to map live M2P table");
         goto out;
     }
 
     /* Start writing out the saved-domain record. */
-    if ( write_exact(io_fd, &p2m_size, sizeof(unsigned long)) )
+    if ( write_exact(io_fd, &ctx->p2m_size, sizeof(unsigned long)) )
     {
         PERROR("write: p2m_size");
         goto out;
@@ -1065,8 +1061,8 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
         int err = 0;
 
         /* Map the P2M table, and write the list of P2M frames */
-        live_p2m = map_and_save_p2m_table(xc_handle, io_fd, dom, live_shinfo);
-        if ( live_p2m == NULL )
+        ctx->live_p2m = map_and_save_p2m_table(xc_handle, io_fd, dom, live_shinfo);
+        if ( ctx->live_p2m == NULL )
         {
             ERROR("Failed to map/save the p2m frame list");
             goto out;
@@ -1076,7 +1072,7 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
          * Quick belt and braces sanity check.
          */
         
-        for ( i = 0; i < p2m_size; i++ )
+        for ( i = 0; i < ctx->p2m_size; i++ )
         {
             mfn = pfn_to_mfn(i);
             if( (mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i) )
@@ -1118,9 +1114,9 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
 
         DPRINTF("Saving memory pages: iter %d   0%%", iter);
 
-        while ( N < p2m_size )
+        while ( N < ctx->p2m_size )
         {
-            unsigned int this_pc = (N * 100) / p2m_size;
+            unsigned int this_pc = (N * 100) / ctx->p2m_size;
 
             if ( (this_pc - prev_pc) >= 5 )
             {
@@ -1134,8 +1130,8 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
                    but this is fast enough for the moment. */
                 frc = xc_shadow_control(
                     xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK, to_skip, 
-                    p2m_size, NULL, 0, NULL);
-                if ( frc != p2m_size )
+                    ctx->p2m_size, NULL, 0, NULL);
+                if ( frc != ctx->p2m_size )
                 {
                     ERROR("Error peeking shadow bitmap");
                     goto out;
@@ -1145,7 +1141,7 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
             /* load pfn_type[] with the mfn of all the pages we're doing in
                this batch. */
             for  ( batch = 0;
-                   (batch < MAX_BATCH_SIZE) && (N < p2m_size);
+                   (batch < MAX_BATCH_SIZE) && (N < ctx->p2m_size);
                    N++ )
             {
                 int n = N;
@@ -1407,7 +1403,7 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
             print_stats( xc_handle, dom, sent_this_iter, &stats, 1);
 
             DPRINTF("Total pages sent= %ld (%.2fx)\n",
-                    total_sent, ((float)total_sent)/p2m_size );
+                    total_sent, ((float)total_sent)/ctx->p2m_size );
             DPRINTF("(of which %ld were fixups)\n", needed_to_fix  );
         }
 
@@ -1436,7 +1432,7 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
             if ( ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) ||
                  (iter >= max_iters) ||
                  (sent_this_iter+skip_this_iter < 50) ||
-                 (total_sent > p2m_size*max_factor) )
+                 (total_sent > ctx->p2m_size*max_factor) )
             {
                 DPRINTF("Start last iteration\n");
                 last_iter = 1;
@@ -1460,7 +1456,7 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
 
             if ( xc_shadow_control(xc_handle, dom, 
                                    XEN_DOMCTL_SHADOW_OP_CLEAN, to_send, 
-                                   p2m_size, NULL, 0, &stats) != p2m_size )
+                                   ctx->p2m_size, NULL, 0, &stats) != ctx->p2m_size )
             {
                 ERROR("Error flushing shadow PT");
                 goto out;
@@ -1593,7 +1589,7 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
         unsigned int i,j;
         unsigned long pfntab[1024];
 
-        for ( i = 0, j = 0; i < p2m_size; i++ )
+        for ( i = 0, j = 0; i < ctx->p2m_size; i++ )
         {
             if ( !is_mapped(pfn_to_mfn(i)) )
                 j++;
@@ -1605,13 +1601,13 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
             goto out;
         }
 
-        for ( i = 0, j = 0; i < p2m_size; )
+        for ( i = 0, j = 0; i < ctx->p2m_size; )
         {
             if ( !is_mapped(pfn_to_mfn(i)) )
                 pfntab[j++] = i;
 
             i++;
-            if ( (j == 1024) || (i == p2m_size) )
+            if ( (j == 1024) || (i == ctx->p2m_size) )
             {
                 if ( write_exact(io_fd, &pfntab, sizeof(unsigned long)*j) )
                 {
@@ -1630,13 +1626,13 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
     }
 
     /* Canonicalise the suspend-record frame number. */
-    mfn = GET_FIELD(guest_width, &ctxt, user_regs.edx);
-    if ( !MFN_IS_IN_PSEUDOPHYS_MAP(max_mfn, mfn) )
+    mfn = GET_FIELD(ctx->guest_width, &ctxt, user_regs.edx);
+    if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctx->max_mfn, mfn) )
     {
         ERROR("Suspend record is not in range of pseudophys map");
         goto out;
     }
-    SET_FIELD(guest_width, &ctxt, user_regs.edx, mfn_to_pfn(mfn));
+    SET_FIELD(ctx->guest_width, &ctxt, user_regs.edx, mfn_to_pfn(mfn));
 
     for ( i = 0; i <= info.max_vcpu_id; i++ )
     {
@@ -1650,41 +1646,41 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
         }
 
         /* Canonicalise each GDT frame number. */
-        for ( j = 0; (512*j) < GET_FIELD(guest_width, &ctxt, gdt_ents); j++ )
+        for ( j = 0; (512*j) < GET_FIELD(ctx->guest_width, &ctxt, gdt_ents); j++ )
         {
-            mfn = GET_FIELD(guest_width, &ctxt, gdt_frames[j]);
-            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(max_mfn, mfn) )
+            mfn = GET_FIELD(ctx->guest_width, &ctxt, gdt_frames[j]);
+            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctx->max_mfn, mfn) )
             {
                 ERROR("GDT frame is not in range of pseudophys map");
                 goto out;
             }
-            SET_FIELD(guest_width, &ctxt, gdt_frames[j], mfn_to_pfn(mfn));
+            SET_FIELD(ctx->guest_width, &ctxt, gdt_frames[j], mfn_to_pfn(mfn));
         }
 
         /* Canonicalise the page table base pointer. */
-        if ( !MFN_IS_IN_PSEUDOPHYS_MAP(max_mfn, UNFOLD_CR3(guest_width,
-                                           GET_FIELD(guest_width, &ctxt, ctrlreg[3]))) )
+        if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctx->max_mfn, UNFOLD_CR3(ctx->guest_width,
+                                           GET_FIELD(ctx->guest_width, &ctxt, ctrlreg[3]))) )
         {
             ERROR("PT base is not in range of pseudophys map");
             goto out;
         }
-        SET_FIELD(guest_width, &ctxt, ctrlreg[3], 
-            FOLD_CR3(guest_width, mfn_to_pfn(UNFOLD_CR3(guest_width, GET_FIELD(guest_width, &ctxt, ctrlreg[3])))));
+        SET_FIELD(ctx->guest_width, &ctxt, ctrlreg[3], 
+            FOLD_CR3(ctx->guest_width, mfn_to_pfn(UNFOLD_CR3(ctx->guest_width, GET_FIELD(ctx->guest_width, &ctxt, ctrlreg[3])))));
 
         /* Guest pagetable (x86/64) stored in otherwise-unused CR1. */
-        if ( (pt_levels == 4) && ctxt.x64.ctrlreg[1] )
+        if ( (ctx->pt_levels == 4) && ctxt.x64.ctrlreg[1] )
         {
-            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(max_mfn, UNFOLD_CR3(guest_width, ctxt.x64.ctrlreg[1])) )
+            if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctx->max_mfn, UNFOLD_CR3(ctx->guest_width, ctxt.x64.ctrlreg[1])) )
             {
                 ERROR("PT base is not in range of pseudophys map");
                 goto out;
             }
             /* Least-significant bit means 'valid PFN'. */
             ctxt.x64.ctrlreg[1] = 1 |
-                FOLD_CR3(guest_width, mfn_to_pfn(UNFOLD_CR3(guest_width, ctxt.x64.ctrlreg[1])));
+                FOLD_CR3(ctx->guest_width, mfn_to_pfn(UNFOLD_CR3(ctx->guest_width, ctxt.x64.ctrlreg[1])));
         }
 
-        if ( write_exact(io_fd, &ctxt, ((guest_width==8) 
+        if ( write_exact(io_fd, &ctxt, ((ctx->guest_width==8) 
                                         ? sizeof(ctxt.x64) 
                                         : sizeof(ctxt.x32))) )
         {
@@ -1711,7 +1707,7 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
      * Reset the MFN to be a known-invalid value. See map_frame_list_list().
      */
     memcpy(page, live_shinfo, PAGE_SIZE);
-    SET_FIELD(guest_width, ((shared_info_any_t *)page), 
+    SET_FIELD(ctx->guest_width, ((shared_info_any_t *)page), 
               arch.pfn_to_mfn_frame_list_list, 0);
     if ( write_exact(io_fd, page, PAGE_SIZE) )
     {
@@ -1756,7 +1752,7 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
 
         if ( xc_shadow_control(xc_handle, dom,
                                XEN_DOMCTL_SHADOW_OP_CLEAN, to_send,
-                               p2m_size, NULL, 0, &stats) != p2m_size )
+                               ctx->p2m_size, NULL, 0, &stats) != ctx->p2m_size )
         {
             ERROR("Error flushing shadow PT");
         }
@@ -1780,11 +1776,11 @@ int xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
     if ( live_shinfo )
        munmap(live_shinfo, PAGE_SIZE);
 
-    if ( live_p2m )
-        munmap(live_p2m, P2M_FLL_ENTRIES(p2m_size, guest_width) * PAGE_SIZE);
+    if ( ctx->live_p2m )
+        munmap(ctx->live_p2m, P2M_FLL_ENTRIES(ctx->p2m_size, ctx->guest_width) * PAGE_SIZE);
 
-    if ( live_m2p )
-        munmap(live_m2p, M2P_SIZE(max_mfn));
+    if ( ctx->live_m2p )
+        munmap(ctx->live_m2p, M2P_SIZE(ctx->max_mfn));
 
     free(pfn_type);
     free(pfn_batch);
-- 
1.6.5.2


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.