
[Xen-devel] [PATCH RFC v2 6/8] Adapt libxc for migration of local disks
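
Introduce a migration_phase argument to xc_domain_save() and
xc_domain_restore() and store it in struct xc_sr_context.  Three phases
are defined in xc_sr_stream_format.h:

 * MIGRATION_PHASE_NON_LOCAL_DISK: the existing behaviour.
 * MIGRATION_PHASE_MIRROR_DISK: a first stream which sends only the
   emulated device and parameter frames and then calls the new
   local_disks save op, leaving qemu log-dirty mode untouched.
 * MIGRATION_PHASE_VIRTUAL_RAM: a second stream which sends only the
   guest RAM, clearing the device frames from the dirty bitmap.

On the restore side, stream_complete() is skipped for the mirror-disk
stream.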



---
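A rough caller-side sketch of how the two streams might be driven with
the new migration_phase argument; the function name, file descriptors
and the duplicated constants below are illustrative only, and the real
tooling changes live in the other patches of this series:

    #include <xenctrl.h>
    #include <xenguest.h>

    /* Duplicated for the sketch; the patch defines these in
     * xc_sr_stream_format.h (a libxc-internal header). */
    #define MIGRATION_PHASE_VIRTUAL_RAM 1
    #define MIGRATION_PHASE_MIRROR_DISK 2

    static int save_with_local_disk(xc_interface *xch, uint32_t domid,
                                    int mirror_fd, int ram_fd,
                                    struct save_callbacks *cbs)
    {
        int rc;

        /* Phase 1: send the device/parameter frames while QEMU mirrors
         * the local disk; qemu log-dirty mode is not touched. */
        rc = xc_domain_save(xch, mirror_fd, domid, 0 /* max_iters */,
                            0 /* max_factor */, 0 /* flags */, cbs,
                            1 /* hvm */, XC_MIG_STREAM_NONE,
                            -1 /* recv_fd */, MIGRATION_PHASE_MIRROR_DISK);
        if ( rc )
            return rc;

        /* Phase 2: live-migrate the guest RAM once the disk mirror has
         * converged.  cbs->switch_qemu_logdirty must be set here. */
        return xc_domain_save(xch, ram_fd, domid, 0, 0, 0, cbs,
                              1 /* hvm */, XC_MIG_STREAM_NONE, -1,
                              MIGRATION_PHASE_VIRTUAL_RAM);
    }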
 tools/libxc/include/xenguest.h    |   6 +-
 tools/libxc/xc_nomigrate.c        |   6 +-
 tools/libxc/xc_sr_common.h        |   3 +
 tools/libxc/xc_sr_restore.c       |  14 +++--
 tools/libxc/xc_sr_save.c          | 118 +++++++++++++++++++++++++++++++++++++-
 tools/libxc/xc_sr_save_x86_hvm.c  |   7 ++-
 tools/libxc/xc_sr_stream_format.h |   4 ++
 7 files changed, 144 insertions(+), 14 deletions(-)

diff --git a/tools/libxc/include/xenguest.h b/tools/libxc/include/xenguest.h
index 5cd8111..a6f52f1 100644
--- a/tools/libxc/include/xenguest.h
+++ b/tools/libxc/include/xenguest.h
@@ -103,7 +103,8 @@ typedef enum {
 int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t max_iters,
                    uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */,
                    struct save_callbacks* callbacks, int hvm,
-                   xc_migration_stream_t stream_type, int recv_fd);
+                   xc_migration_stream_t stream_type, int recv_fd,
+                   int migration_phase);
 
 /* callbacks provided by xc_domain_restore */
 struct restore_callbacks {
@@ -168,7 +169,8 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                       unsigned long *console_mfn, domid_t console_domid,
                       unsigned int hvm, unsigned int pae,
                       xc_migration_stream_t stream_type,
-                      struct restore_callbacks *callbacks, int send_back_fd);
+                      struct restore_callbacks *callbacks, int send_back_fd,
+                      int migration_phase);
 
 /**
  * This function will create a domain for a paravirtualized Linux
diff --git a/tools/libxc/xc_nomigrate.c b/tools/libxc/xc_nomigrate.c
index 317c8ce..c75411b 100644
--- a/tools/libxc/xc_nomigrate.c
+++ b/tools/libxc/xc_nomigrate.c
@@ -23,7 +23,8 @@
 int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t max_iters,
                    uint32_t max_factor, uint32_t flags,
                    struct save_callbacks* callbacks, int hvm,
-                   xc_migration_stream_t stream_type, int recv_fd)
+                   xc_migration_stream_t stream_type, int recv_fd,
+                   int migration_phase)
 {
     errno = ENOSYS;
     return -1;
@@ -35,7 +36,8 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                       unsigned long *console_mfn, domid_t console_domid,
                       unsigned int hvm, unsigned int pae,
                       xc_migration_stream_t stream_type,
-                      struct restore_callbacks *callbacks, int send_back_fd)
+                      struct restore_callbacks *callbacks, int send_back_fd,
+                      int migration_phase)
 {
     errno = ENOSYS;
     return -1;
diff --git a/tools/libxc/xc_sr_common.h b/tools/libxc/xc_sr_common.h
index a83f22a..903f18a 100644
--- a/tools/libxc/xc_sr_common.h
+++ b/tools/libxc/xc_sr_common.h
@@ -96,6 +96,8 @@ struct xc_sr_save_ops
      * after a successful save, or upon encountering an error.
      */
     int (*cleanup)(struct xc_sr_context *ctx);
+
+    int (*local_disks)(struct xc_sr_context *ctx);
 };
 
 
@@ -177,6 +179,7 @@ struct xc_sr_context
     xc_interface *xch;
     uint32_t domid;
     int fd;
+    int migration_phase;
 
     xc_dominfo_t dominfo;
 
diff --git a/tools/libxc/xc_sr_restore.c b/tools/libxc/xc_sr_restore.c
index a016678..13e6abc 100644
--- a/tools/libxc/xc_sr_restore.c
+++ b/tools/libxc/xc_sr_restore.c
@@ -799,11 +799,13 @@ static int restore(struct xc_sr_context *ctx)
      * With Remus, if we reach here, there must be some error on primary,
      * failover from the last checkpoint state.
      */
-    rc = ctx->restore.ops.stream_complete(ctx);
-    if ( rc )
-        goto err;
+    if ( ctx->migration_phase != MIGRATION_PHASE_MIRROR_DISK ) {
+        rc = ctx->restore.ops.stream_complete(ctx);
+        if ( rc )
+            goto err;
 
-    IPRINTF("Restore successful");
+        IPRINTF("Restore successful");
+    }
     goto done;
 
  err:
@@ -829,13 +831,15 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                       unsigned long *console_gfn, domid_t console_domid,
                       unsigned int hvm, unsigned int pae,
                       xc_migration_stream_t stream_type,
-                      struct restore_callbacks *callbacks, int send_back_fd)
+                      struct restore_callbacks *callbacks, int send_back_fd,
+                      int migration_phase)
 {
     xen_pfn_t nr_pfns;
     struct xc_sr_context ctx =
         {
             .xch = xch,
             .fd = io_fd,
+            .migration_phase = migration_phase
         };
 
     /* GCC 4.4 (of CentOS 6.x vintage) can' t initialise anonymous unions. */
diff --git a/tools/libxc/xc_sr_save.c b/tools/libxc/xc_sr_save.c
index ca6913b..181a0c8 100644
--- a/tools/libxc/xc_sr_save.c
+++ b/tools/libxc/xc_sr_save.c
@@ -412,6 +412,96 @@ static int send_all_pages(struct xc_sr_context *ctx)
     return send_dirty_pages(ctx, ctx->save.p2m_size);
 }
 
+static void clear_virtual_devices_memory(struct xc_sr_context *ctx)
+{
+    xen_pfn_t p;
+    DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
+                                    &ctx->save.dirty_bitmap_hbuf);
+
+    for ( p = 0x7800; p < 0xfeff2; p++ )
+    {
+        if ( test_bit(p, dirty_bitmap) )
+            clear_bit(p, dirty_bitmap);
+    }
+    return;
+}
+
+static int send_virtual_ram(struct xc_sr_context *ctx)
+{
+    DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
+                                    &ctx->save.dirty_bitmap_hbuf);
+
+    bitmap_set(dirty_bitmap, ctx->save.p2m_size);
+
+    /*
+     * On the second stream of a migration with a local disk, send only
+     * the guest RAM; the vfb/virtual device frames are not sent again.
+     */
+    clear_virtual_devices_memory(ctx);
+
+    return send_dirty_pages(ctx, ctx->save.p2m_size);
+}
+
+static int send_specific_pages(struct xc_sr_context *ctx, uint64_t value)
+{
+
+    int rc = 0;
+    DECLARE_HYPERCALL_BUFFER_SHADOW(unsigned long, dirty_bitmap,
+                                    &ctx->save.dirty_bitmap_hbuf);
+
+    bitmap_clear(dirty_bitmap, ctx->save.p2m_size);
+    set_bit(value, dirty_bitmap);
+
+    rc = send_dirty_pages(ctx, 1);
+    bitmap_clear(dirty_bitmap, ctx->save.p2m_size);
+    return rc;
+
+}
+
+static int send_virtual_devices_and_params(struct xc_sr_context *ctx)
+{
+    xc_interface *xch = ctx->xch;
+    uint64_t i = 0;
+    int rc = 0;
+
+    DPRINTF("Sending virtual device and parameter frames");
+    xc_set_progress_prefix(xch, "Frames");
+
+    /* RTL and VGA frames for a 128MB VM; ranges may vary with VM size. */
+    for ( i = 0x8000; i < 0x8050; i++ )
+    {
+        rc = send_specific_pages(ctx, i);
+        if ( rc )
+            goto out;
+    }
+    /* VGA */
+    for ( i = 0xf0000; i < 0xf0800; i++ )
+    {
+        rc = send_specific_pages(ctx, i);
+        if ( rc )
+            goto out;
+    }
+
+    /* Virtual devices */
+    for ( i = 0xfc000; i < 0xfc00b; i++ )
+    {
+        rc = send_specific_pages(ctx, i);
+        if ( rc )
+            goto out;
+    }
+
+    for ( i = 0xfeff2; i < 0xff000; i++ )
+    {
+        rc = send_specific_pages(ctx, i);
+        if ( rc )
+            goto out;
+    }
+
+    rc = ctx->save.ops.local_disks(ctx);
+ out:
+    return rc;
+}
+
 static int enable_logdirty(struct xc_sr_context *ctx)
 {
     xc_interface *xch = ctx->xch;
@@ -481,7 +571,11 @@ static int send_memory_live(struct xc_sr_context *ctx)
     if ( rc )
         goto out;
 
-    rc = send_all_pages(ctx);
+    if ( !ctx->migration_phase )
+        rc = send_all_pages(ctx);
+    else
+        rc = send_virtual_ram(ctx);
+
     if ( rc )
         goto out;
 
@@ -499,6 +593,9 @@ static int send_memory_live(struct xc_sr_context *ctx)
             goto out;
         }
 
+        if ( ctx->migration_phase )
+            clear_virtual_devices_memory(ctx);
+
         if ( stats.dirty_count == 0 )
             break;
 
@@ -620,6 +717,9 @@ static int suspend_and_send_dirty(struct xc_sr_context *ctx)
         }
     }
 
+    if ( ctx->migration_phase )
+        clear_virtual_devices_memory(ctx);
+
     rc = send_dirty_pages(ctx, stats.dirty_count + ctx->save.nr_deferred_pages);
     if ( rc )
         goto out;
@@ -805,6 +905,14 @@ static int save(struct xc_sr_context *ctx, uint16_t guest_type)
     if ( rc )
         goto err;
 
+    /* First pass of QEMU disk migration */
+    if ( ctx->migration_phase == MIGRATION_PHASE_MIRROR_DISK ) {
+        rc = send_virtual_devices_and_params(ctx);
+        if ( rc )
+            goto err;
+        goto end;
+    }
+
     rc = ctx->save.ops.start_of_stream(ctx);
     if ( rc )
         goto err;
@@ -889,6 +997,7 @@ static int save(struct xc_sr_context *ctx, uint16_t guest_type)
         }
     } while ( ctx->save.checkpointed != XC_MIG_STREAM_NONE );
 
+ end:
     xc_report_progress_single(xch, "End of stream");
 
     rc = write_end_record(ctx);
@@ -918,12 +1027,14 @@ static int save(struct xc_sr_context *ctx, uint16_t guest_type)
 int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
                    uint32_t max_iters, uint32_t max_factor, uint32_t flags,
                    struct save_callbacks* callbacks, int hvm,
-                   xc_migration_stream_t stream_type, int recv_fd)
+                   xc_migration_stream_t stream_type, int recv_fd,
+                   int migration_phase)
 {
     struct xc_sr_context ctx =
         {
             .xch = xch,
             .fd = io_fd,
+            .migration_phase = migration_phase
         };
 
     /* GCC 4.4 (of CentOS 6.x vintage) can' t initialise anonymous unions. */
@@ -948,7 +1059,8 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
     ctx.save.dirty_threshold = 50;
 
     /* Sanity checks for callbacks. */
-    if ( hvm )
+    /* The mirror-disk stream does not enable/disable qemu log-dirty mode. */
+    if ( hvm && ctx.migration_phase != MIGRATION_PHASE_MIRROR_DISK )
         assert(callbacks->switch_qemu_logdirty);
     if ( ctx.save.checkpointed )
         assert(callbacks->checkpoint && callbacks->postcopy);
diff --git a/tools/libxc/xc_sr_save_x86_hvm.c b/tools/libxc/xc_sr_save_x86_hvm.c
index e17bb59..e417da2 100644
--- a/tools/libxc/xc_sr_save_x86_hvm.c
+++ b/tools/libxc/xc_sr_save_x86_hvm.c
@@ -157,7 +157,8 @@ static int x86_hvm_setup(struct xc_sr_context *ctx)
 
     ctx->save.p2m_size = nr_pfns;
 
-    if ( ctx->save.callbacks->switch_qemu_logdirty(
+    if ( ctx->migration_phase != MIGRATION_PHASE_MIRROR_DISK &&
+         ctx->save.callbacks->switch_qemu_logdirty(
              ctx->domid, 1, ctx->save.callbacks->data) )
     {
         PERROR("Couldn't enable qemu log-dirty mode");
@@ -214,7 +215,8 @@ static int x86_hvm_cleanup(struct xc_sr_context *ctx)
     xc_interface *xch = ctx->xch;
 
     /* If qemu successfully enabled logdirty mode, attempt to disable. */
-    if ( ctx->x86_hvm.save.qemu_enabled_logdirty &&
+    if ( ctx->migration_phase != MIGRATION_PHASE_MIRROR_DISK &&
+         ctx->x86_hvm.save.qemu_enabled_logdirty &&
          ctx->save.callbacks->switch_qemu_logdirty(
              ctx->domid, 0, ctx->save.callbacks->data) )
     {
@@ -235,6 +237,7 @@ struct xc_sr_save_ops save_ops_x86_hvm =
     .end_of_checkpoint   = x86_hvm_end_of_checkpoint,
     .check_vm_state      = x86_hvm_check_vm_state,
     .cleanup             = x86_hvm_cleanup,
+    .local_disks         = x86_hvm_end_of_checkpoint,
 };
 
 /*
diff --git a/tools/libxc/xc_sr_stream_format.h b/tools/libxc/xc_sr_stream_format.h
index 15ff1c7..1f0b274 100644
--- a/tools/libxc/xc_sr_stream_format.h
+++ b/tools/libxc/xc_sr_stream_format.h
@@ -8,6 +8,10 @@
 
 #include <inttypes.h>
 
+#define MIGRATION_PHASE_NON_LOCAL_DISK 0
+#define MIGRATION_PHASE_VIRTUAL_RAM 1
+#define MIGRATION_PHASE_MIRROR_DISK 2
+
 /*
  * Image Header
  */
-- 
2.3.2 (Apple Git-55)

