
[Xen-devel] [PATCH V2] xl: HVM domain S3 bugfix



Currently HVM S3 has a bug stemming from the difference between
qemu-traditional and qemu-xen. For qemu-traditional, the way
to resume from HVM S3 is via the 'xl trigger' command. However,
for qemu-xen, the way to resume from HVM S3 is inherited from
standard qemu, i.e. via QMP, and it doesn't work under Xen.

The root cause is that, for qemu-xen, the 'xl trigger' command does
not reset devices, while QMP performs a qemu system reset but does
not unpause the HVM domain.

Two patches fix the HVM S3 bug: a qemu-xen patch and an xl patch.
This patch is the xl patch. It invokes the QMP system_wakeup command
so that the qemu-xen logic for HVM S3 can be triggered.
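
For illustration, a minimal caller sketch (not part of this patch) of
the libxl path the fix goes through; it assumes an initialized
libxl_ctx and the domid of a running HVM guest currently in S3:

    /* Hypothetical example: resume an HVM guest from S3 via libxl.
     * Assumes ctx was set up by the caller (e.g. with libxl_ctx_alloc)
     * and domid names a running HVM guest suspended in S3. */
    #include <libxl.h>

    int resume_hvm_from_s3(libxl_ctx *ctx, uint32_t domid)
    {
        /* vcpuid is ignored for the S3 resume trigger; pass 0. */
        return libxl_send_trigger(ctx, domid, LIBXL_TRIGGER_S3RESUME, 0);
    }

With the qemu-xen device model, this ends up issuing the QMP
"system_wakeup" command via libxl__qmp_system_wakeup() below.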

Signed-off-by: Liu Jinsong <jinsong.liu@xxxxxxxxx>
---
 tools/libxl/libxl.c          |   31 +++++++++++++++++++++++++++++--
 tools/libxl/libxl_internal.h |    2 ++
 tools/libxl/libxl_qmp.c      |    5 +++++
 3 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 81785df..267bc25 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -4641,10 +4641,37 @@ int libxl_domain_sched_params_get(libxl_ctx *ctx, uint32_t domid,
     return ret;
 }
 
+static int libxl__domain_s3_resume(libxl__gc *gc, int domid)
+{
+    int rc = 0;
+
+    switch (libxl__domain_type(gc, domid)) {
+    case LIBXL_DOMAIN_TYPE_HVM:
+        switch (libxl__device_model_version_running(gc, domid)) {
+        case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN_TRADITIONAL:
+            rc = xc_set_hvm_param(CTX->xch, domid, HVM_PARAM_ACPI_S_STATE, 0);
+            break;
+        case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN:
+            rc = libxl__qmp_system_wakeup(gc, domid);
+            break;
+        default:
+            rc = ERROR_INVAL;
+            break;
+        }
+        break;
+    default:
+        rc = ERROR_INVAL;
+        break;
+    }
+
+    return rc;
+}
+
 int libxl_send_trigger(libxl_ctx *ctx, uint32_t domid,
                        libxl_trigger trigger, uint32_t vcpuid)
 {
     int rc;
+    GC_INIT(ctx);
 
     switch (trigger) {
     case LIBXL_TRIGGER_POWER:
@@ -4668,8 +4695,7 @@ int libxl_send_trigger(libxl_ctx *ctx, uint32_t domid,
                                     XEN_DOMCTL_SENDTRIGGER_RESET, vcpuid);
         break;
     case LIBXL_TRIGGER_S3RESUME:
-        xc_set_hvm_param(ctx->xch, domid, HVM_PARAM_ACPI_S_STATE, 0);
-        rc = 0;
+        rc = libxl__domain_s3_resume(gc, domid);
         break;
     default:
         rc = -1;
@@ -4684,6 +4710,7 @@ int libxl_send_trigger(libxl_ctx *ctx, uint32_t domid,
         rc = ERROR_FAIL;
     }
 
+    GC_FREE;
     return rc;
 }
 
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index f051d91..0ef0a3a 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -1408,6 +1408,8 @@ _hidden int libxl__qmp_query_serial(libxl__qmp_handler *qmp);
 _hidden int libxl__qmp_pci_add(libxl__gc *gc, int d, libxl_device_pci *pcidev);
 _hidden int libxl__qmp_pci_del(libxl__gc *gc, int domid,
                                libxl_device_pci *pcidev);
+/* Resume hvm domain */
+_hidden int libxl__qmp_system_wakeup(libxl__gc *gc, int domid);
 /* Suspend QEMU. */
 _hidden int libxl__qmp_stop(libxl__gc *gc, int domid);
 /* Resume QEMU. */
diff --git a/tools/libxl/libxl_qmp.c b/tools/libxl/libxl_qmp.c
index f681f3a..3c3b301 100644
--- a/tools/libxl/libxl_qmp.c
+++ b/tools/libxl/libxl_qmp.c
@@ -878,6 +878,11 @@ int libxl__qmp_pci_del(libxl__gc *gc, int domid, libxl_device_pci *pcidev)
     return qmp_device_del(gc, domid, id);
 }
 
+int libxl__qmp_system_wakeup(libxl__gc *gc, int domid)
+{
+    return qmp_run_command(gc, domid, "system_wakeup", NULL, NULL, NULL);
+}
+
 int libxl__qmp_save(libxl__gc *gc, int domid, const char *filename)
 {
     libxl__json_object *args = NULL;
-- 
1.7.1
