To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 13 of 24] libxc: convert shadow domctl interfaces and save/restore over to hypercall buffers
From: Ian Campbell <ian.campbell@xxxxxxxxxx>
Date: Mon, 06 Sep 2010 14:38:33 +0100
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Delivery-date: Mon, 06 Sep 2010 06:54:32 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1283780300@xxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1283779691 -3600
# Node ID 2a5e84fe718ae25e91785643388411b70d4c013b
# Parent  7f735088ac1d169d140d978441c772b62083fae0
libxc: convert shadow domctl interfaces and save/restore over to hypercall buffers

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
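
The conversions all follow the same caller-side pattern from the hypercall
buffer API introduced earlier in this series: declare the buffer, allocate
it, pass HYPERCALL_BUFFER(name) where a raw pointer used to go, and free it
on the exit path. A minimal sketch of that pattern (illustrative only, not
part of the diff below; the function name and the local page-rounding helper
are made up, while the macros and libxc calls are the ones the patch uses):

/* Sketch of the caller-side hypercall buffer pattern followed by the
 * save/restore conversions below.  EXAMPLE_NRPAGES stands in for the
 * NRPAGES helper this patch adds to xg_private.h and assumes 4KiB pages. */
#include <xenctrl.h>

#define EXAMPLE_PAGE_SIZE  4096UL
#define EXAMPLE_NRPAGES(x) (((x) + EXAMPLE_PAGE_SIZE - 1) / EXAMPLE_PAGE_SIZE)

static int example_clean_dirty_log(xc_interface *xch, uint32_t domid,
                                   unsigned long p2m_size)
{
    /* The bitmap handed to the hypervisor must live in a hypercall buffer. */
    DECLARE_HYPERCALL_BUFFER(unsigned long, to_send);
    unsigned long bitmap_size = (p2m_size + 7) / 8; /* bytes, rounded up */
    int rc = -1;

    to_send = xc_hypercall_buffer_alloc_pages(xch, to_send,
                                              EXAMPLE_NRPAGES(bitmap_size));
    if ( to_send == NULL )
        return -1;

    /* xc_shadow_control() now takes the buffer handle rather than a raw
     * pointer, so the caller no longer needs lock_pages()/unlock_pages(). */
    if ( xc_shadow_control(xch, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
                           HYPERCALL_BUFFER(to_send),
                           p2m_size, NULL, 0, NULL) == p2m_size )
        rc = 0;

    xc_hypercall_buffer_free_pages(xch, to_send, EXAMPLE_NRPAGES(bitmap_size));
    return rc;
}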

diff -r 7f735088ac1d -r 2a5e84fe718a tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_domain.c   Mon Sep 06 14:28:11 2010 +0100
@@ -404,7 +404,7 @@ int xc_shadow_control(xc_interface *xch,
 int xc_shadow_control(xc_interface *xch,
                       uint32_t domid,
                       unsigned int sop,
-                      unsigned long *dirty_bitmap,
+                      xc_hypercall_buffer_t *dirty_bitmap,
                       unsigned long pages,
                       unsigned long *mb,
                       uint32_t mode,
@@ -412,14 +412,17 @@ int xc_shadow_control(xc_interface *xch,
 {
     int rc;
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(dirty_bitmap);
+
     domctl.cmd = XEN_DOMCTL_shadow_op;
     domctl.domain = (domid_t)domid;
     domctl.u.shadow_op.op     = sop;
     domctl.u.shadow_op.pages  = pages;
     domctl.u.shadow_op.mb     = mb ? *mb : 0;
     domctl.u.shadow_op.mode   = mode;
-    set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap,
-                         (uint8_t *)dirty_bitmap);
+    if (dirty_bitmap != NULL)
+        xc_set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap,
+                                dirty_bitmap);
 
     rc = do_domctl(xch, &domctl);
 
diff -r 7f735088ac1d -r 2a5e84fe718a tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c   Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_domain_restore.c   Mon Sep 06 14:28:11 2010 +0100
@@ -1063,7 +1063,7 @@ int xc_domain_restore(xc_interface *xch,
     shared_info_any_t *new_shared_info;
 
     /* A copy of the CPU context of the guest. */
-    vcpu_guest_context_any_t ctxt;
+    DECLARE_HYPERCALL_BUFFER(vcpu_guest_context_any_t, ctxt);
 
     /* A table containing the type of each PFN (/not/ MFN!). */
     unsigned long *pfn_type = NULL;
@@ -1112,6 +1112,15 @@ int xc_domain_restore(xc_interface *xch,
 
     if ( superpages )
         return 1;
+
+    ctxt = xc_hypercall_buffer_alloc(xch, ctxt, sizeof(*ctxt));
+
+    if ( ctxt == NULL )
+    {
+        PERROR("Unable to lock ctxt");
+        return 1;
+    }
+
 
     if ( (orig_io_fd_flags = fcntl(io_fd, F_GETFL, 0)) < 0 ) {
         PERROR("unable to read IO FD flags");
@@ -1539,26 +1548,20 @@ int xc_domain_restore(xc_interface *xch,
         }
     }
 
-    if ( lock_pages(xch, &ctxt, sizeof(ctxt)) )
-    {
-        PERROR("Unable to lock ctxt");
-        return 1;
-    }
-
     vcpup = tailbuf.u.pv.vcpubuf;
     for ( i = 0; i <= max_vcpu_id; i++ )
     {
         if ( !(vcpumap & (1ULL << i)) )
             continue;
 
-        memcpy(&ctxt, vcpup, ((dinfo->guest_width == 8) ? sizeof(ctxt.x64)
-                              : sizeof(ctxt.x32)));
-        vcpup += (dinfo->guest_width == 8) ? sizeof(ctxt.x64) : sizeof(ctxt.x32);
+        memcpy(ctxt, vcpup, ((dinfo->guest_width == 8) ? sizeof(ctxt->x64)
+                              : sizeof(ctxt->x32)));
+        vcpup += (dinfo->guest_width == 8) ? sizeof(ctxt->x64) : sizeof(ctxt->x32);
 
         DPRINTF("read VCPU %d\n", i);
 
         if ( !new_ctxt_format )
-            SET_FIELD(&ctxt, flags, GET_FIELD(&ctxt, flags) | VGCF_online);
+            SET_FIELD(ctxt, flags, GET_FIELD(ctxt, flags) | VGCF_online);
 
         if ( i == 0 )
         {
@@ -1566,7 +1569,7 @@ int xc_domain_restore(xc_interface *xch,
              * Uncanonicalise the suspend-record frame number and poke
              * resume record.
              */
-            pfn = GET_FIELD(&ctxt, user_regs.edx);
+            pfn = GET_FIELD(ctxt, user_regs.edx);
             if ( (pfn >= dinfo->p2m_size) ||
                  (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
             {
@@ -1574,7 +1577,7 @@ int xc_domain_restore(xc_interface *xch,
                 goto out;
             }
             mfn = ctx->p2m[pfn];
-            SET_FIELD(&ctxt, user_regs.edx, mfn);
+            SET_FIELD(ctxt, user_regs.edx, mfn);
             start_info = xc_map_foreign_range(
                 xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE, mfn);
             SET_FIELD(start_info, nr_pages, dinfo->p2m_size);
@@ -1589,15 +1592,15 @@ int xc_domain_restore(xc_interface *xch,
             munmap(start_info, PAGE_SIZE);
         }
         /* Uncanonicalise each GDT frame number. */
-        if ( GET_FIELD(&ctxt, gdt_ents) > 8192 )
+        if ( GET_FIELD(ctxt, gdt_ents) > 8192 )
         {
             ERROR("GDT entry count out of range");
             goto out;
         }
 
-        for ( j = 0; (512*j) < GET_FIELD(&ctxt, gdt_ents); j++ )
+        for ( j = 0; (512*j) < GET_FIELD(ctxt, gdt_ents); j++ )
         {
-            pfn = GET_FIELD(&ctxt, gdt_frames[j]);
+            pfn = GET_FIELD(ctxt, gdt_frames[j]);
             if ( (pfn >= dinfo->p2m_size) ||
                  (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
             {
@@ -1605,10 +1608,10 @@ int xc_domain_restore(xc_interface *xch,
                       j, (unsigned long)pfn);
                 goto out;
             }
-            SET_FIELD(&ctxt, gdt_frames[j], ctx->p2m[pfn]);
+            SET_FIELD(ctxt, gdt_frames[j], ctx->p2m[pfn]);
         }
         /* Uncanonicalise the page table base pointer. */
-        pfn = UNFOLD_CR3(GET_FIELD(&ctxt, ctrlreg[3]));
+        pfn = UNFOLD_CR3(GET_FIELD(ctxt, ctrlreg[3]));
 
         if ( pfn >= dinfo->p2m_size )
         {
@@ -1625,12 +1628,12 @@ int xc_domain_restore(xc_interface *xch,
                   (unsigned long)ctx->pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
             goto out;
         }
-        SET_FIELD(&ctxt, ctrlreg[3], FOLD_CR3(ctx->p2m[pfn]));
+        SET_FIELD(ctxt, ctrlreg[3], FOLD_CR3(ctx->p2m[pfn]));
 
         /* Guest pagetable (x86/64) stored in otherwise-unused CR1. */
-        if ( (ctx->pt_levels == 4) && (ctxt.x64.ctrlreg[1] & 1) )
+        if ( (ctx->pt_levels == 4) && (ctxt->x64.ctrlreg[1] & 1) )
         {
-            pfn = UNFOLD_CR3(ctxt.x64.ctrlreg[1] & ~1);
+            pfn = UNFOLD_CR3(ctxt->x64.ctrlreg[1] & ~1);
             if ( pfn >= dinfo->p2m_size )
             {
                 ERROR("User PT base is bad: pfn=%lu p2m_size=%lu",
@@ -1645,12 +1648,12 @@ int xc_domain_restore(xc_interface *xch,
                      (unsigned long)ctx->pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
                 goto out;
             }
-            ctxt.x64.ctrlreg[1] = FOLD_CR3(ctx->p2m[pfn]);
+            ctxt->x64.ctrlreg[1] = FOLD_CR3(ctx->p2m[pfn]);
         }
         domctl.cmd = XEN_DOMCTL_setvcpucontext;
         domctl.domain = (domid_t)dom;
         domctl.u.vcpucontext.vcpu = i;
-        set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt.c);
+        xc_set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
         frc = xc_domctl(xch, &domctl);
         if ( frc != 0 )
         {
@@ -1791,6 +1794,7 @@ int xc_domain_restore(xc_interface *xch,
  out:
     if ( (rc != 0) && (dom != 0) )
         xc_domain_destroy(xch, dom);
+    xc_hypercall_buffer_free(xch, ctxt);
     free(mmu);
     free(ctx->p2m);
     free(pfn_type);
diff -r 7f735088ac1d -r 2a5e84fe718a tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c      Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_domain_save.c      Mon Sep 06 14:28:11 2010 +0100
@@ -411,7 +411,7 @@ static int print_stats(xc_interface *xch
 
 
 static int analysis_phase(xc_interface *xch, uint32_t domid, struct save_ctx *ctx,
-                          unsigned long *arr, int runs)
+                          xc_hypercall_buffer_t *arr, int runs)
 {
     long long start, now;
     xc_shadow_op_stats_t stats;
@@ -915,7 +915,9 @@ int xc_domain_save(xc_interface *xch, in
        - that should be sent this iteration (unless later marked as skip);
        - to skip this iteration because already dirty;
        - to fixup by sending at the end if not already resent; */
-    unsigned long *to_send = NULL, *to_skip = NULL, *to_fix = NULL;
+    DECLARE_HYPERCALL_BUFFER(unsigned long, to_skip);
+    DECLARE_HYPERCALL_BUFFER(unsigned long, to_send);
+    unsigned long *to_fix = NULL;
 
     xc_shadow_op_stats_t stats;
 
@@ -1034,9 +1036,9 @@ int xc_domain_save(xc_interface *xch, in
     sent_last_iter = dinfo->p2m_size;
 
     /* Setup to_send / to_fix and to_skip bitmaps */
-    to_send = xc_memalign(PAGE_SIZE, ROUNDUP(BITMAP_SIZE, PAGE_SHIFT)); 
+    to_send = xc_hypercall_buffer_alloc_pages(xch, to_send, NRPAGES(BITMAP_SIZE));
+    to_skip = xc_hypercall_buffer_alloc_pages(xch, to_skip, NRPAGES(BITMAP_SIZE));
     to_fix  = calloc(1, BITMAP_SIZE);
-    to_skip = xc_memalign(PAGE_SIZE, ROUNDUP(BITMAP_SIZE, PAGE_SHIFT)); 
 
     if ( !to_send || !to_fix || !to_skip )
     {
@@ -1046,20 +1048,7 @@ int xc_domain_save(xc_interface *xch, in
 
     memset(to_send, 0xff, BITMAP_SIZE);
 
-    if ( lock_pages(xch, to_send, BITMAP_SIZE) )
-    {
-        PERROR("Unable to lock to_send");
-        return 1;
-    }
-
-    /* (to fix is local only) */
-    if ( lock_pages(xch, to_skip, BITMAP_SIZE) )
-    {
-        PERROR("Unable to lock to_skip");
-        return 1;
-    }
-
-    if ( hvm ) 
+    if ( hvm )
     {
         /* Need another buffer for HVM context */
         hvm_buf_size = xc_domain_hvm_getcontext(xch, dom, 0, 0);
@@ -1076,7 +1065,7 @@ int xc_domain_save(xc_interface *xch, in
         }
     }
 
-    analysis_phase(xch, dom, ctx, to_skip, 0);
+    analysis_phase(xch, dom, ctx, HYPERCALL_BUFFER(to_skip), 0);
 
     pfn_type   = xc_memalign(PAGE_SIZE, ROUNDUP(
                               MAX_BATCH_SIZE * sizeof(*pfn_type), PAGE_SHIFT));
@@ -1188,7 +1177,7 @@ int xc_domain_save(xc_interface *xch, in
                 /* Slightly wasteful to peek the whole array evey time,
                    but this is fast enough for the moment. */
                 frc = xc_shadow_control(
-                    xch, dom, XEN_DOMCTL_SHADOW_OP_PEEK, to_skip, 
+                    xch, dom, XEN_DOMCTL_SHADOW_OP_PEEK, HYPERCALL_BUFFER(to_skip),
                     dinfo->p2m_size, NULL, 0, NULL);
                 if ( frc != dinfo->p2m_size )
                 {
@@ -1528,8 +1517,8 @@ int xc_domain_save(xc_interface *xch, in
 
             }
 
-            if ( xc_shadow_control(xch, dom, 
-                                   XEN_DOMCTL_SHADOW_OP_CLEAN, to_send, 
+            if ( xc_shadow_control(xch, dom,
+                                   XEN_DOMCTL_SHADOW_OP_CLEAN, HYPERCALL_BUFFER(to_send),
                                    dinfo->p2m_size, NULL, 0, &stats) != dinfo->p2m_size )
             {
                 PERROR("Error flushing shadow PT");
@@ -1857,7 +1846,7 @@ int xc_domain_save(xc_interface *xch, in
         print_stats(xch, dom, 0, &stats, 1);
 
         if ( xc_shadow_control(xch, dom,
-                               XEN_DOMCTL_SHADOW_OP_CLEAN, to_send,
+                               XEN_DOMCTL_SHADOW_OP_CLEAN, HYPERCALL_BUFFER(to_send),
                               dinfo->p2m_size, NULL, 0, &stats) != dinfo->p2m_size )
         {
             PERROR("Error flushing shadow PT");
@@ -1888,12 +1877,13 @@ int xc_domain_save(xc_interface *xch, in
     if ( ctx->live_m2p )
         munmap(ctx->live_m2p, M2P_SIZE(ctx->max_mfn));
 
+    xc_hypercall_buffer_free_pages(xch, to_send, NRPAGES(BITMAP_SIZE));
+    xc_hypercall_buffer_free_pages(xch, to_skip, NRPAGES(BITMAP_SIZE));
+
     free(pfn_type);
     free(pfn_batch);
     free(pfn_err);
-    free(to_send);
     free(to_fix);
-    free(to_skip);
 
     DPRINTF("Save exit rc=%d\n",rc);
 
diff -r 7f735088ac1d -r 2a5e84fe718a tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xenctrl.h     Mon Sep 06 14:28:11 2010 +0100
@@ -610,7 +610,7 @@ int xc_shadow_control(xc_interface *xch,
 int xc_shadow_control(xc_interface *xch,
                       uint32_t domid,
                       unsigned int sop,
-                      unsigned long *dirty_bitmap,
+                      xc_hypercall_buffer_t *dirty_bitmap,
                       unsigned long pages,
                       unsigned long *mb,
                       uint32_t mode,
diff -r 7f735088ac1d -r 2a5e84fe718a tools/libxc/xg_private.h
--- a/tools/libxc/xg_private.h  Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xg_private.h  Mon Sep 06 14:28:11 2010 +0100
@@ -157,6 +157,7 @@ typedef l4_pgentry_64_t l4_pgentry_t;
 #define PAGE_MASK_IA64          (~(PAGE_SIZE_IA64-1))
 
 #define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
+#define NRPAGES(x) (ROUNDUP(x, PAGE_SHIFT) >> PAGE_SHIFT)
 
 
 /* XXX SMH: following skanky macros rely on variable p2m_size being set */
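
(Aside, not part of the patch: on the receiving side, a libxc function that
now takes an xc_hypercall_buffer_t * argument re-declares it with
DECLARE_HYPERCALL_BUFFER_ARGUMENT() before handing it to
xc_set_xen_guest_handle(), as in the xc_shadow_control() hunk above.  A
minimal sketch with a hypothetical wrapper; DECLARE_DOMCTL and do_domctl()
are the libxc-internal helpers from xc_private.h.)

/* Sketch of the callee-side pattern, mirroring the xc_domain.c hunk above.
 * The function name and the fixed CLEAN op are illustrative only. */
static int example_shadow_clean(xc_interface *xch, uint32_t domid,
                                unsigned long pages,
                                xc_hypercall_buffer_t *dirty_bitmap)
{
    DECLARE_DOMCTL;
    /* Needed before xc_set_xen_guest_handle() can be used on a buffer that
     * arrived as a function argument. */
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(dirty_bitmap);

    domctl.cmd = XEN_DOMCTL_shadow_op;
    domctl.domain = (domid_t)domid;
    domctl.u.shadow_op.op    = XEN_DOMCTL_SHADOW_OP_CLEAN;
    domctl.u.shadow_op.pages = pages;
    domctl.u.shadow_op.mb    = 0;
    domctl.u.shadow_op.mode  = 0;
    if ( dirty_bitmap != NULL )
        xc_set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap, dirty_bitmap);

    return do_domctl(xch, &domctl);
}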

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
