
[Xen-devel] [PATCH 4/5] Xen/MCE: Abort live migration when vMCE occurs



Xen/MCE: Abort live migration when vMCE occurs

This patch monitors the critical area of live migration. From the vMCE point of
view, the copypages stage of migration is the critical area, while the other
stages are not.

If a vMCE occurs during this critical area, the migration is aborted so that it
can be retried later.

Signed-off-by: Liu, Jinsong <jinsong.liu@xxxxxxxxx>
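
Below is a minimal sketch of how the new libxc calls are intended to bracket the
copypages stage; it mirrors the xc_domain_save.c changes in this patch. The
wrapper function name, its error handling and the retry policy are illustrative
assumptions only, not part of the patch.

/* Illustrative only: bracket the memory copy phase with the new vMCE
 * monitor domctls and abort if a vMCE was delivered while guest memory
 * was being copied. */
#include <stdio.h>
#include <xenctrl.h>

static int copy_guest_memory(xc_interface *xch, uint32_t dom)
{
    signed char vmce_while_monitor = 0;

    if ( xc_domain_vmce_monitor_start(xch, dom) )
        return -1;                      /* could not arm the monitor */

    /* ... copypages: transfer all guest pages to the receiver ... */

    if ( xc_domain_vmce_monitor_end(xch, dom, &vmce_while_monitor) )
        return -1;                      /* could not read the monitor result */

    if ( vmce_while_monitor == -1 )
    {
        /* A vMCE was injected while memory was in flight; the copied
         * image may be inconsistent, so abort and let the caller retry. */
        fprintf(stderr, "vMCE occurred during copypages, aborting\n");
        return -1;
    }

    return 0;
}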

diff -r f843ac6f93c9 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Wed Sep 19 01:21:18 2012 +0800
+++ b/tools/libxc/xc_domain.c   Wed Sep 19 03:31:30 2012 +0800
@@ -283,6 +283,37 @@
     return ret;
 }
 
+/* Start vmce monitor */
+int xc_domain_vmce_monitor_start(xc_interface *xch,
+                                 uint32_t domid)
+{
+    int ret;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_vmce_monitor_start;
+    domctl.domain = (domid_t)domid;
+    ret = do_domctl(xch, &domctl);
+
+    return ret ? -1 : 0;
+}
+
+/* End vmce monitor */
+int xc_domain_vmce_monitor_end(xc_interface *xch,
+                               uint32_t domid,
+                               signed char *vmce_while_monitor)
+{
+    int ret;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_vmce_monitor_end;
+    domctl.domain = (domid_t)domid;
+    ret = do_domctl(xch, &domctl);
+    if ( !ret )
+        *vmce_while_monitor = domctl.u.vmce_monitor.vmce_while_monitor;
+
+    return ret ? -1 : 0;
+}
+
 /* get info from hvm guest for save */
 int xc_domain_hvm_getcontext(xc_interface *xch,
                              uint32_t domid,
diff -r f843ac6f93c9 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c      Wed Sep 19 01:21:18 2012 +0800
+++ b/tools/libxc/xc_domain_save.c      Wed Sep 19 03:31:30 2012 +0800
@@ -895,6 +895,8 @@
      */
     int compressing = 0;
 
+    signed char vmce_while_monitor = 0;
+
     int completed = 0;
 
     if ( hvm && !callbacks->switch_qemu_logdirty )
@@ -1109,6 +1111,12 @@
         goto out;
     }
 
+    if ( xc_domain_vmce_monitor_start(xch, dom) )
+    {
+        PERROR("Error when start vmce monitor\n");
+        goto out;
+    }
+
   copypages:
 #define wrexact(fd, buf, len) write_buffer(xch, last_iter, ob, (fd), (buf), (len))
 #define wruncached(fd, live, buf, len) write_uncached(xch, last_iter, ob, (fd), (buf), (len))
@@ -1571,6 +1579,17 @@
 
     DPRINTF("All memory is saved\n");
 
+    if ( xc_domain_vmce_monitor_end(xch, dom, &vmce_while_monitor) )
+    {
+        PERROR("Error when end vmce monitor\n");
+        goto out;
+    }
+    else if ( vmce_while_monitor == -1 )
+    {
+        fprintf(stderr, "vMCE occurred, aborting this migration attempt; try again later.\n");
+        goto out;
+    }
+
     /* After last_iter, buffer the rest of pagebuf & tailbuf data into a
      * separate output buffer and flush it after the compressed page chunks.
      */
diff -r f843ac6f93c9 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Wed Sep 19 01:21:18 2012 +0800
+++ b/tools/libxc/xenctrl.h     Wed Sep 19 03:31:30 2012 +0800
@@ -571,6 +571,26 @@
                           xc_domaininfo_t *info);
 
 /**
+ * This function starts monitoring for vMCE events.
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the id of the domain to be monitored
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_vmce_monitor_start(xc_interface *xch,
+                                 uint32_t domid);
+
+/**
+ * This function ends monitoring for vMCE events.
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the id of the domain being monitored
+ * @parm vmce_while_monitor a pointer used to return whether a vMCE occurred while monitoring
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_vmce_monitor_end(xc_interface *xch,
+                               uint32_t domid,
+                               signed char *vmce_while_monitor);
+
+/**
  * This function returns information about the context of a hvm domain
  * @parm xch a handle to an open hypervisor interface
  * @parm domid the domain to get information from
diff -r f843ac6f93c9 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Wed Sep 19 01:21:18 2012 +0800
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Wed Sep 19 03:31:30 2012 +0800
@@ -596,6 +596,12 @@
                     goto vmce_failed;
                 }
 
+                if ( unlikely(d->arch.vmce_monitor) )
+                {
+                    /* vMCE occurred during guest migration */
+                    d->arch.vmce_monitor = -1;
+                }
+
                 /* We will inject vMCE to DOMU*/
                 if ( inject_vmce(d) < 0 )
                 {
diff -r f843ac6f93c9 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Wed Sep 19 01:21:18 2012 +0800
+++ b/xen/arch/x86/domctl.c     Wed Sep 19 03:31:30 2012 +0800
@@ -1514,6 +1514,40 @@
     }
     break;
 
+    case XEN_DOMCTL_vmce_monitor_start:
+    {
+        struct domain *d;
+
+        d = rcu_lock_domain_by_id(domctl->domain);
+        if ( d != NULL )
+        {
+            d->arch.vmce_monitor = 1;
+            rcu_unlock_domain(d);
+        }
+        else
+            ret = -ESRCH;
+    }
+    break;
+
+    case XEN_DOMCTL_vmce_monitor_end:
+    {
+        struct domain *d;
+
+        d = rcu_lock_domain_by_id(domctl->domain);
+        if ( d != NULL )
+        {
+            domctl->u.vmce_monitor.vmce_while_monitor =
+                                      d->arch.vmce_monitor;
+            d->arch.vmce_monitor = 0;
+            rcu_unlock_domain(d);
+            if ( copy_to_guest(u_domctl, domctl, 1) )
+                ret = -EFAULT;
+        }
+        else
+            ret = -ESRCH;
+    }
+    break;
+
     default:
         ret = iommu_do_domctl(domctl, u_domctl);
         break;
diff -r f843ac6f93c9 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Wed Sep 19 01:21:18 2012 +0800
+++ b/xen/include/asm-x86/domain.h      Wed Sep 19 03:31:30 2012 +0800
@@ -279,6 +279,11 @@
     bool_t has_32bit_shinfo;
     /* Domain cannot handle spurious page faults? */
     bool_t suppress_spurious_page_faults;
+    /* Monitoring guest memory copy of migration
+     * = 0 - not monitoring
+     * > 0 - monitoring
+     * < 0 - vMCE occurred while monitoring */
+    s8 vmce_monitor;
 
     /* Continuable domain_relinquish_resources(). */
     enum {
diff -r f843ac6f93c9 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Wed Sep 19 01:21:18 2012 +0800
+++ b/xen/include/public/domctl.h       Wed Sep 19 03:31:30 2012 +0800
@@ -828,6 +828,12 @@
 typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
 
+struct xen_domctl_vmce_monitor {
+    signed char vmce_while_monitor;
+};
+typedef struct xen_domctl_vmce_monitor xen_domctl_vmce_monitor_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vmce_monitor_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -893,6 +899,8 @@
 #define XEN_DOMCTL_set_access_required           64
 #define XEN_DOMCTL_audit_p2m                     65
 #define XEN_DOMCTL_set_virq_handler              66
+#define XEN_DOMCTL_vmce_monitor_start            67
+#define XEN_DOMCTL_vmce_monitor_end              68
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -947,6 +955,7 @@
         struct xen_domctl_set_access_required access_required;
         struct xen_domctl_audit_p2m         audit_p2m;
         struct xen_domctl_set_virq_handler  set_virq_handler;
+        struct xen_domctl_vmce_monitor      vmce_monitor;
         struct xen_domctl_gdbsx_memio       gdbsx_guest_memio;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         struct xen_domctl_gdbsx_domstatus   gdbsx_domstatus;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel