
[Xen-devel] [PATCH v8 15/15] tools/libxc: Migration v2 compatibility for unmodified libxl



These changes make migration v2 behave sufficiently like legacy migration
that it functions for HVM guests under an unmodified xl/libxl.

The migration v2 work for libxl will fix the layering issues with the
toolstack and qemu records, at which point this patch will be unneeded.

It is, however, included here for people wishing to experiment with migration
v2 ahead of the libxl work.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Ian Campbell <Ian.Campbell@xxxxxxxxxx>
CC: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 tools/libxc/Makefile                |    2 +
 tools/libxc/xc_sr_restore_x86_hvm.c |  102 +++++++++++++++++++++++++++++++++++
 tools/libxc/xc_sr_save_x86_hvm.c    |   36 +++++++++++++
 3 files changed, 140 insertions(+)
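
Note for reviewers: the restore-side dump_qemu() below expects the legacy
libxl device model trailer at the tail of the stream, i.e. a 21-byte
"DeviceModelRecord0002" signature (no NUL terminator), a host-endian
uint32_t length, then the opaque qemu state.  As a rough illustration only
(not part of this patch; emit_all() is a made-up helper, not a libxc
function), a producer of that layout would look something like:

#include <stdint.h>
#include <unistd.h>

/* Write exactly len bytes to fd, retrying on short writes. */
static int emit_all(int fd, const void *buf, size_t len)
{
    const unsigned char *p = buf;

    while ( len )
    {
        ssize_t n = write(fd, p, len);

        if ( n <= 0 )
            return -1;
        p += n;
        len -= n;
    }

    return 0;
}

/* Emit the legacy device model trailer consumed by dump_qemu() below. */
static int emit_legacy_qemu_trailer(int fd, const void *blob, uint32_t qlen)
{
    /* 21-byte signature, deliberately without a NUL terminator. */
    static const char qemusig[21] = "DeviceModelRecord0002";

    if ( emit_all(fd, qemusig, sizeof(qemusig)) ||
         emit_all(fd, &qlen, sizeof(qlen)) ||
         emit_all(fd, blob, qlen) )
        return -1;

    return 0;
}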

diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index b8a7ef1..babefa4 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -63,6 +63,8 @@ GUEST_SRCS-$(CONFIG_X86) += xc_sr_save_x86_hvm.c
 GUEST_SRCS-y += xc_sr_restore.c
 GUEST_SRCS-y += xc_sr_save.c
 GUEST_SRCS-y += xc_offline_page.c xc_compression.c
+$(patsubst %.c,%.o,$(GUEST_SRCS-y)): CFLAGS += -DXG_LIBXL_HVM_COMPAT
+$(patsubst %.c,%.opic,$(GUEST_SRCS-y)): CFLAGS += -DXG_LIBXL_HVM_COMPAT
 else
 GUEST_SRCS-y += xc_nomigrate.c
 endif
diff --git a/tools/libxc/xc_sr_restore_x86_hvm.c b/tools/libxc/xc_sr_restore_x86_hvm.c
index cd0a004..0f80a4f 100644
--- a/tools/libxc/xc_sr_restore_x86_hvm.c
+++ b/tools/libxc/xc_sr_restore_x86_hvm.c
@@ -3,6 +3,24 @@
 
 #include "xc_sr_common_x86.h"
 
+#ifdef XG_LIBXL_HVM_COMPAT
+static int handle_toolstack(struct xc_sr_context *ctx, struct xc_sr_record *rec)
+{
+    xc_interface *xch = ctx->xch;
+    int rc;
+
+    if ( !ctx->restore.callbacks || !ctx->restore.callbacks->toolstack_restore )
+        return 0;
+
+    rc = ctx->restore.callbacks->toolstack_restore(
+        ctx->domid, rec->data, rec->length, ctx->restore.callbacks->data);
+
+    if ( rc < 0 )
+        PERROR("restoring toolstack");
+    return rc;
+}
+#endif
+
 /*
  * Process an HVM_CONTEXT record from the stream.
  */
@@ -75,6 +93,76 @@ static int handle_hvm_params(struct xc_sr_context *ctx,
     return 0;
 }
 
+#ifdef XG_LIBXL_HVM_COMPAT
+static int dump_qemu(struct xc_sr_context *ctx)
+{
+    xc_interface *xch = ctx->xch;
+    char qemusig[21], path[256];
+    uint32_t qlen;
+    void *qbuf = NULL;
+    int rc = -1;
+    FILE *fp = NULL;
+
+    if ( read_exact(ctx->fd, qemusig, sizeof(qemusig)) )
+    {
+        PERROR("Error reading QEMU signature");
+        goto out;
+    }
+
+    if ( !memcmp(qemusig, "DeviceModelRecord0002", sizeof(qemusig)) )
+    {
+        if ( read_exact(ctx->fd, &qlen, sizeof(qlen)) )
+        {
+            PERROR("Error reading QEMU record length");
+            goto out;
+        }
+
+        qbuf = malloc(qlen);
+        if ( !qbuf )
+        {
+            PERROR("no memory for device model state");
+            goto out;
+        }
+
+        if ( read_exact(ctx->fd, qbuf, qlen) )
+        {
+            PERROR("Error reading device model state");
+            goto out;
+        }
+    }
+    else
+    {
+        ERROR("Invalid device model state signature '%*.*s'",
+              (int)sizeof(qemusig), (int)sizeof(qemusig), qemusig);
+        goto out;
+    }
+
+    sprintf(path, XC_DEVICE_MODEL_RESTORE_FILE".%u", ctx->domid);
+    fp = fopen(path, "wb");
+    if ( !fp )
+    {
+        PERROR("Failed to open '%s' for writing", path);
+        goto out;
+    }
+
+    DPRINTF("Writing %u bytes of QEMU data", qlen);
+    if ( fwrite(qbuf, 1, qlen, fp) != qlen )
+    {
+        PERROR("Failed to write %u bytes of QEMU data", qlen);
+        goto out;
+    }
+
+    rc = 0;
+
+ out:
+    if ( fp )
+        fclose(fp);
+    free(qbuf);
+
+    return rc;
+}
+#endif
+
 /* restore_ops function. */
 static bool x86_hvm_pfn_is_valid(const struct xc_sr_context *ctx, xen_pfn_t pfn)
 {
@@ -150,6 +238,11 @@ static int x86_hvm_process_record(struct xc_sr_context *ctx,
     case REC_TYPE_HVM_PARAMS:
         return handle_hvm_params(ctx, rec);
 
+#ifdef XG_LIBXL_HVM_COMPAT
+    case REC_TYPE_TOOLSTACK:
+        return handle_toolstack(ctx, rec);
+#endif
+
     default:
         return RECORD_NOT_PROCESSED;
     }
@@ -191,6 +284,15 @@ static int x86_hvm_stream_complete(struct xc_sr_context *ctx)
         return rc;
     }
 
+#ifdef XG_LIBXL_HVM_COMPAT
+    rc = dump_qemu(ctx);
+    if ( rc )
+    {
+        ERROR("Failed to dump qemu");
+        return rc;
+    }
+#endif
+
     return rc;
 }
 
diff --git a/tools/libxc/xc_sr_save_x86_hvm.c b/tools/libxc/xc_sr_save_x86_hvm.c
index 5b4d336..2642bff 100644
--- a/tools/libxc/xc_sr_save_x86_hvm.c
+++ b/tools/libxc/xc_sr_save_x86_hvm.c
@@ -118,6 +118,36 @@ static int write_hvm_params(struct xc_sr_context *ctx)
     return rc;
 }
 
+#ifdef XG_LIBXL_HVM_COMPAT
+static int write_toolstack(struct xc_sr_context *ctx)
+{
+    xc_interface *xch = ctx->xch;
+    struct xc_sr_record rec = {
+        .type = REC_TYPE_TOOLSTACK,
+        .length = 0,
+    };
+    uint8_t *buf;
+    uint32_t len;
+    int rc;
+
+    if ( !ctx->save.callbacks || !ctx->save.callbacks->toolstack_save )
+        return 0;
+
+    if ( ctx->save.callbacks->toolstack_save(
+             ctx->domid, &buf, &len, ctx->save.callbacks->data) < 0 )
+    {
+        PERROR("Error calling toolstack_save");
+        return -1;
+    }
+
+    rc = write_split_record(ctx, &rec, buf, len);
+    if ( rc < 0 )
+        PERROR("Error writing TOOLSTACK record");
+    free(buf);
+    return rc;
+}
+#endif
+
 static xen_pfn_t x86_hvm_pfn_to_gfn(const struct xc_sr_context *ctx,
                                     xen_pfn_t pfn)
 {
@@ -170,6 +200,12 @@ static int x86_hvm_end_of_stream(struct xc_sr_context *ctx)
     if ( rc )
         return rc;
 
+#ifdef XG_LIBXL_HVM_COMPAT
+    rc = write_toolstack(ctx);
+    if ( rc )
+        return rc;
+#endif
+
     /* Write the HVM_CONTEXT record. */
     rc = write_hvm_context(ctx);
     if ( rc )
-- 
1.7.10.4

