[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 7/7] pass restore context as an argument instead of a global context



Pass the restore context as an explicit function argument instead of relying on a global context variable.

---
 tools/libxc/xc_domain_restore.c |   70 ++++++++++++++++++++------------------
 1 files changed, 37 insertions(+), 33 deletions(-)

diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
index 6430e91..70a50e9 100644
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -44,14 +44,6 @@ struct restore_ctx {
     unsigned no_superpage_mem; /* If have enough continuous memory for super page allocation */
 };
 
-struct restore_ctx _ctx = {
-    .live_p2m = NULL,
-    .p2m = NULL,
-    .no_superpage_mem = 0,
-};
-
-struct restore_ctx *ctx = &_ctx;
-
 /*
 **
 **
@@ -71,7 +63,7 @@ struct restore_ctx *ctx = &_ctx;
 #define SUPER_PAGE_TRACKING(pfn) ( (pfn) != INVALID_SUPER_PAGE )
 #define SUPER_PAGE_DONE(pfn)     ( SUPER_PAGE_START(pfn) )
 
-static int super_page_populated(unsigned long pfn)
+static int super_page_populated(struct restore_ctx *ctx, unsigned long pfn)
 {
     int i;
     pfn &= ~(SUPERPAGE_NR_PFNS - 1);
@@ -88,7 +80,7 @@ static int super_page_populated(unsigned long pfn)
  * some new allocated 4K pages
  */
 static int break_super_page(int xc_handle,
-                            uint32_t dom,
+                            uint32_t dom, struct restore_ctx *ctx,
                             xen_pfn_t next_pfn)
 {
     xen_pfn_t *page_array, start_pfn, mfn;
@@ -202,6 +194,7 @@ out:
  */
 static int allocate_mfn_list(int xc_handle,
                               uint32_t dom,
+                              struct restore_ctx *ctx,
                               unsigned long nr_extents,
                               xen_pfn_t *batch_buf,
                               xen_pfn_t *next_pfn,
@@ -228,7 +221,7 @@ static int allocate_mfn_list(int xc_handle,
              !SUPER_PAGE_DONE(sp_pfn))
         {
             /* break previously allocated super page*/
-            if ( break_super_page(xc_handle, dom, sp_pfn) != 0 )
+            if ( break_super_page(xc_handle, dom, ctx, sp_pfn) != 0 )
             {
                 ERROR("Break previous super page fail!\n");
                 return 1;
@@ -251,7 +244,7 @@ static int allocate_mfn_list(int xc_handle,
         goto normal_page;
 
     pfn = batch_buf[0] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
-    if  ( super_page_populated(pfn) )
+    if  ( super_page_populated(ctx, pfn) )
         goto normal_page;
 
     pfn &= ~(SUPERPAGE_NR_PFNS - 1);
@@ -301,7 +294,7 @@ normal_page:
     return 0;
 }
 
-static int allocate_physmem(int xc_handle, uint32_t dom,
+static int allocate_physmem(int xc_handle, uint32_t dom, struct restore_ctx *ctx,
                             unsigned long *region_pfn_type, int region_size,
                             unsigned int hvm, xen_pfn_t *region_mfn, int superpages)
 {
@@ -342,7 +335,7 @@ static int allocate_physmem(int xc_handle, uint32_t dom,
         if ( SUPER_PAGE_START(pfn) )
         {
             /* Start of a 2M extent, populate previsous buf */
-            if ( allocate_mfn_list(xc_handle, dom,
+            if ( allocate_mfn_list(xc_handle, dom, ctx,
                                    batch_buf_len, batch_buf,
                                    &required_pfn, superpages) != 0 )
             {
@@ -364,7 +357,7 @@ static int allocate_physmem(int xc_handle, uint32_t dom,
         else if ( SUPER_PAGE_TRACKING(required_pfn) )
         {
             /* break of a 2M extent, populate previous buf */
-            if ( allocate_mfn_list(xc_handle, dom,
+            if ( allocate_mfn_list(xc_handle, dom, ctx,
                                    batch_buf_len, batch_buf,
                                    &required_pfn, superpages) != 0 )
             {
@@ -405,7 +398,7 @@ static int allocate_physmem(int xc_handle, uint32_t dom,
 alloc_page:
     if ( batch_buf )
     {
-        if ( allocate_mfn_list(xc_handle, dom,
+        if ( allocate_mfn_list(xc_handle, dom, ctx,
                     batch_buf_len, batch_buf,
                     &required_pfn,
                     superpages) != 0 )
 @@ -498,7 +491,7 @@ static ssize_t read_exact_timed(int fd, void* buf, size_t size)
 ** This function inverts that operation, replacing the pfn values with
 ** the (now known) appropriate mfn values.
 */
-static int uncanonicalize_pagetable(int xc_handle, uint32_t dom, 
+static int uncanonicalize_pagetable(int xc_handle, uint32_t dom, struct restore_ctx *ctx,
                                     unsigned long type, void *page, int superpages)
 {
     int i, pte_last;
 @@ -524,7 +517,7 @@ static int uncanonicalize_pagetable(int xc_handle, uint32_t dom,
         if ( ctx->p2m[pfn] == INVALID_P2M_ENTRY )
         {
             unsigned long force_pfn = superpages ? FORCE_SP_MASK : pfn;
-            if (allocate_mfn_list(xc_handle, dom,
+            if (allocate_mfn_list(xc_handle, dom, ctx,
                         1, &pfn, &force_pfn, superpages) != 0)
                 return 0;
         }
 @@ -542,7 +535,7 @@ static int uncanonicalize_pagetable(int xc_handle, uint32_t dom,
 
 
 /* Load the p2m frame list, plus potential extended info chunk */
-static xen_pfn_t *load_p2m_frame_list(
+static xen_pfn_t *load_p2m_frame_list(struct restore_ctx *ctx,
     int io_fd, int *pae_extended_cr3, int *ext_vcpucontext)
 {
     xen_pfn_t *p2m_frame_list;
@@ -797,7 +790,8 @@ static int dump_qemu(uint32_t dom, struct tailbuf_hvm *buf)
     return 0;
 }
 
-static int buffer_tail_hvm(struct tailbuf_hvm *buf, int fd,
+static int buffer_tail_hvm(struct restore_ctx *ctx,
+                           struct tailbuf_hvm *buf, int fd,
                            unsigned int max_vcpu_id, uint64_t vcpumap,
                            int ext_vcpucontext)
 {
@@ -858,7 +852,8 @@ static int buffer_tail_hvm(struct tailbuf_hvm *buf, int fd,
     return -1;
 }
 
-static int buffer_tail_pv(struct tailbuf_pv *buf, int fd,
+static int buffer_tail_pv(struct restore_ctx *ctx,
+                          struct tailbuf_pv *buf, int fd,
                           unsigned int max_vcpu_id, uint64_t vcpumap,
                           int ext_vcpucontext)
 {
@@ -935,14 +930,15 @@ static int buffer_tail_pv(struct tailbuf_pv *buf, int fd,
     return -1;
 }
 
-static int buffer_tail(tailbuf_t *buf, int fd, unsigned int max_vcpu_id,
+static int buffer_tail(struct restore_ctx *ctx,
+                       tailbuf_t *buf, int fd, unsigned int max_vcpu_id,
                        uint64_t vcpumap, int ext_vcpucontext)
 {
     if ( buf->ishvm )
-        return buffer_tail_hvm(&buf->u.hvm, fd, max_vcpu_id, vcpumap,
+        return buffer_tail_hvm(ctx, &buf->u.hvm, fd, max_vcpu_id, vcpumap,
                                ext_vcpucontext);
     else
-        return buffer_tail_pv(&buf->u.pv, fd, max_vcpu_id, vcpumap,
+        return buffer_tail_pv(ctx, &buf->u.pv, fd, max_vcpu_id, vcpumap,
                               ext_vcpucontext);
 }
 
 @@ -1147,8 +1143,8 @@ static int pagebuf_get(pagebuf_t* buf, int fd, int xch, uint32_t dom)
     return rc;
 }
 
-static int apply_batch(int xc_handle, uint32_t dom, xen_pfn_t* region_mfn,
-                       unsigned long* pfn_type, int pae_extended_cr3,
+static int apply_batch(int xc_handle, uint32_t dom, struct restore_ctx *ctx,
+                       xen_pfn_t* region_mfn, unsigned long* pfn_type, int pae_extended_cr3,
                        unsigned int hvm, struct xc_mmu* mmu,
                        pagebuf_t* pagebuf, int curbatch, int superpages)
 {
 @@ -1167,7 +1163,7 @@ static int apply_batch(int xc_handle, uint32_t dom, xen_pfn_t* region_mfn,
     if (j > MAX_BATCH_SIZE)
         j = MAX_BATCH_SIZE;
 
-    if (allocate_physmem(xc_handle, dom, &pagebuf->pfn_types[curbatch],
+    if (allocate_physmem(xc_handle, dom, ctx, &pagebuf->pfn_types[curbatch],
                          j, hvm, region_mfn, superpages) != 0)
     {
         ERROR("allocate_physmem() failed\n");
 @@ -1228,7 +1224,7 @@ static int apply_batch(int xc_handle, uint32_t dom, xen_pfn_t* region_mfn,
                 pae_extended_cr3 ||
                 (pagetype != XEN_DOMCTL_PFINFO_L1TAB)) {
 
-                if (!uncanonicalize_pagetable(xc_handle, dom,
+                if (!uncanonicalize_pagetable(xc_handle, dom, ctx,
                                               pagetype, page, superpages)) {
                     /*
                     ** Failing to uncanonicalize a page table can be ok
 @@ -1335,6 +1331,14 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     tailbuf_t tailbuf, tmptail;
     void* vcpup;
 
+    /* restore context */
+    struct restore_ctx _ctx = {
+        .live_p2m = NULL,
+        .p2m = NULL,
+        .no_superpage_mem = 0,
+    };
+    struct restore_ctx *ctx = &_ctx;
+
     pagebuf_init(&pagebuf);
     memset(&tailbuf, 0, sizeof(tailbuf));
     tailbuf.ishvm = hvm;
 @@ -1369,7 +1373,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     if ( !hvm ) 
     {
         /* Load the p2m frame list, plus potential extended info chunk */
-        p2m_frame_list = load_p2m_frame_list(
+        p2m_frame_list = load_p2m_frame_list(ctx,
             io_fd, &pae_extended_cr3, &ext_vcpucontext);
         if ( !p2m_frame_list )
             goto out;
 @@ -1483,7 +1487,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
         while ( curbatch < j ) {
             int brc;
 
-            brc = apply_batch(xc_handle, dom, region_mfn, pfn_type,
+            brc = apply_batch(xc_handle, dom, ctx, region_mfn, pfn_type,
                               pae_extended_cr3, hvm, mmu, &pagebuf, curbatch, superpages);
             if ( brc < 0 )
                 goto out;
 @@ -1524,7 +1528,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     if ( !completed ) {
         int flags = 0;
 
-        if ( buffer_tail(&tailbuf, io_fd, max_vcpu_id, vcpumap,
+        if ( buffer_tail(ctx, &tailbuf, io_fd, max_vcpu_id, vcpumap,
                          ext_vcpucontext) < 0 ) {
             ERROR ("error buffering image tail");
             goto out;
 @@ -1544,7 +1548,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
     }
     memset(&tmptail, 0, sizeof(tmptail));
     tmptail.ishvm = hvm;
-    if ( buffer_tail(&tmptail, io_fd, max_vcpu_id, vcpumap,
+    if ( buffer_tail(ctx, &tmptail, io_fd, max_vcpu_id, vcpumap,
                      ext_vcpucontext) < 0 ) {
         ERROR ("error buffering image tail, finishing");
         goto finish;
 @@ -1647,7 +1651,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
                 for ( k = 0; k < j; k++ )
                 {
                     if ( !uncanonicalize_pagetable(
-                        xc_handle, dom, XEN_DOMCTL_PFINFO_L1TAB,
+                        xc_handle, dom, ctx, XEN_DOMCTL_PFINFO_L1TAB,
                         region_base + k*PAGE_SIZE, superpages) )
                     {
                         ERROR("failed uncanonicalize pt!");
-- 
1.6.5.2


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.