
[Xen-devel] [PATCH v4 9/9] xen/arm: Implement toolstack for xl restore/save and migrate



From: Alexey Sokolov <sokolov.a@xxxxxxxxxxx>

Implement the xl save/restore operations (also used for migrate) in
xc_arm_migrate.c and make it compilable.
The overall save process is the following:
1) save guest parameters (e.g., memory map, console and store pfn)
2) save memory (if live, perform dirty-page tracing)
3) save hvm states (e.g., gic, timer)
4) save vcpu registers (e.g., pc, sp, lr)
The restore process follows the same order as save.
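
For reference, the stream written by the save side (and read back in the
same order on restore) is laid out roughly as follows; this is a summary of
the code added below, not a formal format definition:

  guest_params_t                        raw struct: console/store pfn, flags,
                                        memory map, max_vcpu_id
  xen_pfn_t end                         maximum gpfn of the guest
  { xen_pfn_t gpfn, page[PAGE_SIZE] }   repeated; terminated by
                                        gpfn == (xen_pfn_t)-1
  uint32_t rec_size, hvm_buf[rec_size]  HVM context blob (gic, timer, ...)
  { uint32_t vcpu_id, vcpu_guest_context_any_t ctxt }
                                        repeated for online vcpus; terminated
                                        by vcpu_id == max_vcpu_id + 1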

Signed-off-by: Alexey Sokolov <sokolov.a@xxxxxxxxxxx>
---
 config/arm32.mk              |   1 +
 tools/libxc/Makefile         |   5 +
 tools/libxc/xc_arm_migrate.c | 805 +++++++++++++++++++++++++++++++++++++++++++
 tools/misc/Makefile          |   4 +
 4 files changed, 815 insertions(+)
 create mode 100644 tools/libxc/xc_arm_migrate.c

diff --git a/config/arm32.mk b/config/arm32.mk
index aa79d22..01374c9 100644
--- a/config/arm32.mk
+++ b/config/arm32.mk
@@ -1,6 +1,7 @@
 CONFIG_ARM := y
 CONFIG_ARM_32 := y
 CONFIG_ARM_$(XEN_OS) := y
+CONFIG_MIGRATE := y
 
 CONFIG_XEN_INSTALL_SUFFIX :=
 
diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index 512a994..05dfef4 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -42,8 +42,13 @@ CTRL_SRCS-$(CONFIG_MiniOS) += xc_minios.c
 GUEST_SRCS-y :=
 GUEST_SRCS-y += xg_private.c xc_suspend.c
 ifeq ($(CONFIG_MIGRATE),y)
+ifeq ($(CONFIG_X86),y)
 GUEST_SRCS-y += xc_domain_restore.c xc_domain_save.c
 GUEST_SRCS-y += xc_offline_page.c xc_compression.c
+endif
+ifeq ($(CONFIG_ARM),y)
+GUEST_SRCS-y += xc_arm_migrate.c
+endif
 else
 GUEST_SRCS-y += xc_nomigrate.c
 endif
diff --git a/tools/libxc/xc_arm_migrate.c b/tools/libxc/xc_arm_migrate.c
new file mode 100644
index 0000000..52f06b5
--- /dev/null
+++ b/tools/libxc/xc_arm_migrate.c
@@ -0,0 +1,805 @@
+/******************************************************************************
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ * Copyright (c) 2013, Samsung Electronics
+ */
+
+#include <inttypes.h>
+#include <errno.h>
+#include <xenctrl.h>
+#include <xenguest.h>
+
+#include <unistd.h>
+#include <xc_private.h>
+#include <xc_dom.h>
+#include "xc_bitops.h"
+#include "xg_private.h"
+
+#define DEF_MAX_ITERS          29 /* limit us to 30 times round loop   */
+#define DEF_MAX_FACTOR         3  /* never send more than 3x p2m_size  */
+#define DEF_MIN_DIRTY_PER_ITER 50 /* dirty page count to define last iter */
+#define DEF_PROGRESS_RATE      50 /* progress bar update rate */
+
+/* Enable this macro for debug only: "static" migration instead of live */
+/*
+#define DISABLE_LIVE_MIGRATION
+*/
+
+/* Enable this macro for debug only: additional debug info */
+#define ARM_MIGRATE_VERBOSE
+
+/*
+ * Guest params to save: used HVM params, save flags, memory map
+ */
+typedef struct guest_params
+{
+    unsigned long console_pfn;
+    unsigned long store_pfn;
+    uint32_t flags;
+    struct dt_mem_info memmap; /* Memory map */
+    uint32_t max_vcpu_id;
+} guest_params_t;
+
+static int suspend_and_state(int (*suspend)(void*), void *data,
+                             xc_interface *xch, int dom)
+{
+    xc_dominfo_t info;
+    if ( !(*suspend)(data) )
+    {
+        ERROR("Suspend request failed");
+        return -1;
+    }
+
+    if ( (xc_domain_getinfo(xch, dom, 1, &info) != 1) ||
+         !info.shutdown || (info.shutdown_reason != SHUTDOWN_suspend) )
+    {
+        ERROR("Domain is not in suspended state after suspend attempt");
+        return -1;
+    }
+
+    return 0;
+}
+
+static int write_exact_handled(xc_interface *xch, int fd, const void *data,
+                               size_t size)
+{
+    if ( write_exact(fd, data, size) )
+    {
+        ERROR("Write failed, check space");
+        return -1;
+    }
+    return 0;
+}
+
+/* ============ Memory ============= */
+static int save_memory(xc_interface *xch, int io_fd, uint32_t dom,
+                       struct save_callbacks *callbacks,
+                       uint32_t max_iters, uint32_t max_factor,
+                       guest_params_t *params)
+{
+    int live =  !!(params->flags & XCFLAGS_LIVE);
+    int debug =  !!(params->flags & XCFLAGS_DEBUG);
+    xen_pfn_t i;
+    char reportbuf[80];
+    int iter = 0;
+    int last_iter = !live;
+    int total_dirty_pages_num = 0;
+    int dirty_pages_on_prev_iter_num = 0;
+    int count = 0;
+    char *page = 0;
+    xen_pfn_t *busy_pages = 0;
+    int busy_pages_count = 0;
+    int busy_pages_max = 256;
+
+    DECLARE_HYPERCALL_BUFFER(unsigned long, to_send);
+
+    /* We suppose that guest's memory base is the first region base */
+    xen_pfn_t start = (params->memmap.bank[0].start  >> PAGE_SHIFT);
+    const xen_pfn_t end = xc_domain_maximum_gpfn(xch, dom);
+    const xen_pfn_t mem_size = end - start;
+    if ( debug )
+    {
+        IPRINTF("(save mem) start=%llx end=%llx!\n", start, end);
+    }
+
+    if ( write_exact_handled(xch, io_fd, &end, sizeof(xen_pfn_t)) )
+        return -1;
+
+    if ( live )
+    {
+        if ( xc_shadow_control(xch, dom, XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
+                    NULL, 0, NULL, 0, NULL) < 0 )
+        {
+            ERROR("Couldn't enable log-dirty mode !\n");
+            return -1;
+        }
+
+        max_iters  = max_iters  ? : DEF_MAX_ITERS;
+        max_factor = max_factor ? : DEF_MAX_FACTOR;
+
+        if ( debug )
+            IPRINTF("Log-dirty mode enabled, max_iters=%d, max_factor=%d!\n",
+                    max_iters, max_factor);
+    }
+
+    to_send = xc_hypercall_buffer_alloc_pages(xch, to_send,
+                                              NRPAGES(bitmap_size(mem_size)));
+    if ( !to_send )
+    {
+        ERROR("Couldn't allocate to_send array!\n");
+        return -1;
+    }
+
+    /* send all pages on first iter */
+    memset(to_send, 0xff, bitmap_size(mem_size));
+
+    for ( ; ; )
+    {
+        int dirty_pages_on_current_iter_num = 0;
+        int frc;
+        iter++;
+
+        snprintf(reportbuf, sizeof(reportbuf),
+                 "Saving memory: iter %d (last sent %u)",
+                 iter, dirty_pages_on_prev_iter_num);
+
+        xc_report_progress_start(xch, reportbuf, mem_size);
+
+        if ( (iter > 1 &&
+              dirty_pages_on_prev_iter_num < DEF_MIN_DIRTY_PER_ITER) ||
+             (iter == max_iters) ||
+             (total_dirty_pages_num >= mem_size*max_factor) )
+        {
+            if ( debug )
+                IPRINTF("Last iteration");
+            last_iter = 1;
+        }
+
+        if ( last_iter )
+        {
+            if ( suspend_and_state(callbacks->suspend, callbacks->data,
+                                   xch, dom) )
+            {
+                ERROR("Domain appears not to have suspended");
+                return -1;
+            }
+        }
+        if ( live && iter > 1 )
+        {
+            frc = xc_shadow_control(xch, dom, XEN_DOMCTL_SHADOW_OP_CLEAN,
+                                    HYPERCALL_BUFFER(to_send), mem_size,
+                                                     NULL, 0, NULL);
+            if ( frc != mem_size )
+            {
+                ERROR("Error peeking shadow bitmap");
+                xc_hypercall_buffer_free_pages(xch, to_send,
+                                               NRPAGES(bitmap_size(mem_size)));
+                return -1;
+            }
+        }
+
+        busy_pages = malloc(sizeof(xen_pfn_t) * busy_pages_max);
+
+        for ( i = start; i < end; ++i )
+        {
+            if ( test_bit(i - start, to_send) )
+            {
+                page = xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_READ, i);
+                if ( !page )
+                {
+                    /* This page is mapped elsewhere, should be resent later */
+                    busy_pages[busy_pages_count] = i;
+                    busy_pages_count++;
+                    if ( busy_pages_count >= busy_pages_max )
+                    {
+                        busy_pages_max += 256;
+                        busy_pages = realloc(busy_pages, sizeof(xen_pfn_t) *
+                                                         busy_pages_max);
+                    }
+                    continue;
+                }
+
+                if ( write_exact_handled(xch, io_fd, &i, sizeof(i)) ||
+                     write_exact_handled(xch, io_fd, page, PAGE_SIZE) )
+                {
+                    munmap(page, PAGE_SIZE);
+                    free(busy_pages);
+                    return -1;
+                }
+                count++;
+                munmap(page, PAGE_SIZE);
+
+                if ( (i % DEF_PROGRESS_RATE) == 0 )
+                    xc_report_progress_step(xch, i - start, mem_size);
+                dirty_pages_on_current_iter_num++;
+            }
+        }
+
+        while ( busy_pages_count )
+        {
+            /* Send busy pages */
+            busy_pages_count--;
+            i = busy_pages[busy_pages_count];
+            if ( test_bit(i - start, to_send) )
+            {
+                page = xc_map_foreign_range(xch, dom, PAGE_SIZE,PROT_READ, i);
+                if ( !page )
+                {
+                    IPRINTF("WARNING: 2nd attempt to save page "
+                            "busy failed pfn=%llx", i);
+                    continue;
+                }
+
+                if ( debug )
+                {
+                    IPRINTF("save mem: resend busy page %llx\n", i);
+                }
+
+                if ( write_exact_handled(xch, io_fd, &i, sizeof(i)) ||
+                     write_exact_handled(xch, io_fd, page, PAGE_SIZE) )
+                {
+                    munmap(page, PAGE_SIZE);
+                    free(busy_pages);
+                    return -1;
+                }
+                count++;
+                munmap(page, PAGE_SIZE);
+                dirty_pages_on_current_iter_num++;
+            }
+        }
+        free(busy_pages);
+
+        if ( debug )
+            IPRINTF("Dirty pages=%d", dirty_pages_on_current_iter_num);
+
+        xc_report_progress_step(xch, mem_size, mem_size);
+
+        dirty_pages_on_prev_iter_num = dirty_pages_on_current_iter_num;
+        total_dirty_pages_num += dirty_pages_on_current_iter_num;
+
+        if ( last_iter )
+        {
+            xc_hypercall_buffer_free_pages(xch, to_send,
+                                           NRPAGES(bitmap_size(mem_size)));
+            if ( live )
+            {
+                if ( xc_shadow_control(xch, dom, XEN_DOMCTL_SHADOW_OP_OFF,
+                                       NULL, 0, NULL, 0, NULL) < 0 )
+                    ERROR("Couldn't disable log-dirty mode");
+            }
+            break;
+        }
+    }
+    if ( debug )
+    {
+        IPRINTF("save mem: pages count = %d\n", count);
+    }
+
+    i = (xen_pfn_t) -1; /* end page marker */
+    return write_exact_handled(xch, io_fd, &i, sizeof(i));
+}
+
+static int restore_memory(xc_interface *xch, int io_fd, uint32_t dom,
+                          guest_params_t *params)
+{
+    xen_pfn_t end;
+    xen_pfn_t gpfn;
+    int debug =  !!(params->flags & XCFLAGS_DEBUG);
+    int count = 0;
+    char *page;
+
+    /* We suppose that guest's memory base is the first region base */
+    xen_pfn_t start = (params->memmap.bank[0].start >> PAGE_SHIFT);
+
+    if ( read_exact(io_fd, &end, sizeof(xen_pfn_t)) )
+    {
+        PERROR("First read of incoming memory failed");
+        return -1;
+    }
+
+    /* TODO allocate several pages per call */
+    for ( gpfn = start; gpfn < end; ++gpfn )
+    {
+        if ( xc_domain_populate_physmap_exact(xch, dom, 1, 0, 0, &gpfn) )
+        {
+            PERROR("Memory allocation for a new domain failed");
+            return -1;
+        }
+    }
+    while ( 1 )
+    {
+
+        if ( read_exact(io_fd, &gpfn, sizeof(gpfn)) )
+        {
+            PERROR("GPFN read failed during memory transfer, count=%d", count);
+            return -1;
+        }
+        if ( gpfn == (xen_pfn_t) -1 ) break; /* end page marker */
+
+        if ( gpfn < start || gpfn >= end )
+        {
+            ERROR("GPFN %llx doesn't belong to RAM address space, count=%d",
+                    gpfn, count);
+            return -1;
+        }
+        page = xc_map_foreign_range(xch, dom, PAGE_SIZE,
+                                    PROT_READ | PROT_WRITE, gpfn);
+        if ( !page )
+        {
+            PERROR("xc_map_foreign_range failed, pfn=%llx", gpfn);
+            return -1;
+        }
+        if ( read_exact(io_fd, page, PAGE_SIZE) )
+        {
+            PERROR("Page data read failed during memory transfer, pfn=%llx",
+                    gpfn);
+            return -1;
+        }
+        munmap(page, PAGE_SIZE);
+        count++;
+    }
+
+    if ( debug )
+    {
+        IPRINTF("Memory restored, pages count=%d", count);
+    }
+    return 0;
+}
+
+/* ============ HVM context =========== */
+static int save_armhvm(xc_interface *xch, int io_fd, uint32_t dom, int debug)
+{
+    /* HVM: a buffer for holding HVM context */
+    uint32_t hvm_buf_size = 0;
+    uint8_t *hvm_buf = NULL;
+    uint32_t rec_size;
+    int retval = -1;
+
+    /* Need another buffer for HVM context */
+    hvm_buf_size = xc_domain_hvm_getcontext(xch, dom, 0, 0);
+    if ( hvm_buf_size == -1 )
+    {
+        ERROR("Couldn't get HVM context size from Xen");
+        goto out;
+    }
+    hvm_buf = malloc(hvm_buf_size);
+
+    if ( !hvm_buf )
+    {
+        ERROR("Couldn't allocate memory for hvm buffer");
+        goto out;
+    }
+
+    /* Get HVM context from Xen and save it too */
+    if ( (rec_size = xc_domain_hvm_getcontext(xch, dom, hvm_buf,
+                    hvm_buf_size)) == -1 )
+    {
+        ERROR("HVM:Could not get hvm buffer");
+        goto out;
+    }
+
+    if ( debug )
+        IPRINTF("HVM save size %d %d", hvm_buf_size, rec_size);
+
+    if ( write_exact_handled(xch, io_fd, &rec_size, sizeof(uint32_t)) )
+        goto out;
+
+    if ( write_exact_handled(xch, io_fd, hvm_buf, rec_size) )
+    {
+        goto out;
+    }
+    retval = 0;
+
+out:
+    if ( hvm_buf )
+        free (hvm_buf);
+    return retval;
+}
+
+static int restore_armhvm(xc_interface *xch, int io_fd,
+                          uint32_t dom, int debug)
+{
+    uint32_t rec_size;
+    uint32_t hvm_buf_size = 0;
+    uint8_t *hvm_buf = NULL;
+    int frc = 0;
+    int retval = -1;
+
+    if ( read_exact(io_fd, &rec_size, sizeof(uint32_t)) )
+    {
+        PERROR("Could not read HVM size");
+        goto out;
+    }
+
+    if ( !rec_size )
+    {
+        ERROR("Zero HVM size");
+        goto out;
+    }
+
+    hvm_buf_size = xc_domain_hvm_getcontext(xch, dom, 0, 0);
+    if ( hvm_buf_size != rec_size )
+    {
+        ERROR("HVM size for this domain is not the same as stored");
+    }
+
+    hvm_buf = malloc(hvm_buf_size);
+    if ( !hvm_buf )
+    {
+        ERROR("Couldn't allocate memory");
+        goto out;
+    }
+
+    if ( read_exact(io_fd, hvm_buf, hvm_buf_size) )
+    {
+        PERROR("Could not read HVM context");
+        goto out;
+    }
+
+    frc = xc_domain_hvm_setcontext(xch, dom, hvm_buf, hvm_buf_size);
+    if ( frc )
+    {
+        ERROR("error setting the HVM context");
+        goto out;
+    }
+    retval = 0;
+
+    if ( debug )
+    {
+            IPRINTF("HVM restore size %d %d", hvm_buf_size, rec_size);
+    }
+out:
+    if ( hvm_buf )
+        free (hvm_buf);
+    return retval;
+}
+
+/* ================= Console & Xenstore & Memory map =========== */
+static int save_guest_params(xc_interface *xch, int io_fd,
+                             uint32_t dom, uint32_t flags,
+                             guest_params_t *params)
+{
+    size_t sz = sizeof(guest_params_t);
+    xc_dominfo_t dom_info;
+
+    if ( xc_domain_get_memory_map(xch, dom, &params->memmap) )
+    {
+        ERROR("Can't get memory map");
+        return -1;
+    }
+
+    if ( flags & XCFLAGS_DEBUG )
+    {
+        IPRINTF("Guest param save size: %d ", sz);
+        IPRINTF("Guest memory map save %d entries", params->memmap.nr_banks);
+    }
+
+    if ( xc_get_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
+            &params->console_pfn) )
+    {
+        ERROR("Can't get console gpfn");
+        return -1;
+    }
+
+    if ( xc_get_hvm_param(xch, dom, HVM_PARAM_STORE_PFN, &params->store_pfn) )
+    {
+        ERROR("Can't get store gpfn");
+        return -1;
+    }
+
+    if ( xc_domain_getinfo(xch, dom, 1, &dom_info ) < 0)
+    {
+        ERROR("Can't get domain info for dom %d", dom);
+        return -1;
+    }
+    params->max_vcpu_id = dom_info.max_vcpu_id;
+
+    params->flags = flags;
+
+    if ( write_exact_handled(xch, io_fd, params, sz) )
+    {
+        return -1;
+    }
+
+    return 0;
+}
+
+static int restore_guest_params(xc_interface *xch, int io_fd,
+                                uint32_t dom, guest_params_t *params)
+{
+    size_t sz = sizeof(guest_params_t);
+
+    if ( read_exact(io_fd, params, sizeof(guest_params_t)) )
+    {
+        PERROR("Can't read guest params");
+        return -1;
+    }
+
+    if ( params->flags & XCFLAGS_DEBUG )
+    {
+        IPRINTF("Guest param restore size: %d ", sz);
+        IPRINTF("Guest memory map restore %d entries",
+                params->memmap.nr_banks);
+    }
+
+    if ( xc_domain_set_memory_map(xch, dom, &params->memmap) )
+    {
+        ERROR("Can't set memory map");
+        return -1;
+    }
+    /* Set max. number of vcpus as max_vcpu_id + 1 */
+    if ( xc_domain_max_vcpus(xch, dom, params->max_vcpu_id + 1) )
+    {
+        ERROR("Can't set max vcpu number for domain");
+        return -1;
+    }
+
+    return 0;
+}
+
+static int set_guest_params(xc_interface *xch, int io_fd, uint32_t dom,
+                            guest_params_t *params, unsigned int console_evtchn,
+                            domid_t console_domid, unsigned int store_evtchn,
+                            domid_t store_domid)
+{
+    int rc = 0;
+
+    if ( (rc = xc_clear_domain_page(xch, dom, params->console_pfn)) )
+    {
+        ERROR("Can't clear console page");
+        return rc;
+    }
+
+    if ( (rc = xc_clear_domain_page(xch, dom, params->store_pfn)) )
+    {
+        ERROR("Can't clear xenstore page");
+        return rc;
+    }
+
+    if ( (rc = xc_dom_gnttab_hvm_seed(xch, dom, params->console_pfn,
+                                      params->store_pfn, console_domid,
+                                      store_domid)) )
+    {
+        ERROR("Can't grant console and xenstore pages");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
+                                params->console_pfn)) )
+    {
+        ERROR("Can't set console gpfn");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_PFN,
+                                params->store_pfn)) )
+    {
+        ERROR("Can't set xenstore gpfn");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_EVTCHN,
+                                console_evtchn)) )
+    {
+        ERROR("Can't set console event channel");
+        return rc;
+    }
+
+    if ( (rc = xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_EVTCHN,
+                                store_evtchn)) )
+    {
+        ERROR("Can't set xenstore event channel");
+        return rc;
+    }
+    return 0;
+}
+
+/* ====================== VCPU ============== */
+static int save_vcpus(xc_interface *xch, int io_fd, uint32_t dom,
+        guest_params_t *params)
+{
+    vcpu_guest_context_any_t ctxt;
+    int rc, nr_online = 0;
+    uint32_t i;
+    xc_vcpuinfo_t info;
+    int debug = !!(params->flags & XCFLAGS_DEBUG);
+
+    for ( i = 0; i<= params->max_vcpu_id; i++ )
+    {
+        rc = xc_vcpu_getinfo(xch, dom, i, &info);
+        /* Store contexts for online vcpus only;
+         * vcpu id is also stored before */
+        if ( rc || info.online == 0 ) continue;
+        if ( xc_vcpu_getcontext(xch, dom, i, &ctxt) ) continue;
+        rc = write_exact_handled(xch, io_fd, &i, sizeof(i));
+        if ( rc ) return rc;
+        rc = write_exact_handled(xch, io_fd, &ctxt, sizeof(ctxt));
+        if ( rc ) return rc;
+        nr_online++;
+    }
+    /* end marker */
+    i = params->max_vcpu_id + 1;
+    rc = write_exact_handled(xch, io_fd, &i, sizeof(i));
+    if ( rc ) return rc;
+    if ( debug )
+        IPRINTF("save: max_vcpu=%d, nr_online=%d\n", i, nr_online);
+    return 0;
+}
+
+static int restore_vcpus(xc_interface *xch, int io_fd, uint32_t dom,
+        guest_params_t *params)
+{
+    int rc = 0;
+    uint32_t i;
+    int nr_online = 0;
+    int debug = !!(params->flags & XCFLAGS_DEBUG);
+
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER(vcpu_guest_context_any_t, ctxt);
+
+    ctxt = xc_hypercall_buffer_alloc(xch, ctxt, sizeof(*ctxt));
+
+    while ( 1 )
+    {
+        memset(ctxt, 0, sizeof(*ctxt));
+
+        if ( read_exact(io_fd, &i, sizeof(i)) )
+        {
+            PERROR("VCPU context read failed");
+            rc = -1;
+            break;
+        }
+        if ( i > params->max_vcpu_id ) break;
+        if ( read_exact(io_fd, ctxt, sizeof(*ctxt)) )
+        {
+            PERROR("VCPU context read failed");
+            rc = -1;
+            break;
+        }
+        nr_online++;
+        memset(&domctl, 0, sizeof(domctl));
+        domctl.cmd = XEN_DOMCTL_setvcpucontext;
+        domctl.domain = dom;
+        domctl.u.vcpucontext.vcpu = i;
+        set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
+        rc = do_domctl(xch, &domctl);
+        if ( rc )
+        {
+            ERROR("VCPU context set failed (error %d)", rc);
+            break;
+        }
+    }
+    if ( debug )
+        IPRINTF("restore: max_vcpu=%d, nr_online=%d\n",
+                params->max_vcpu_id + 1, nr_online);
+
+    xc_hypercall_buffer_free(xch, ctxt);
+    return rc;
+}
+
+/* ================== Main ============== */
+int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
+                   uint32_t max_iters, uint32_t max_factor, uint32_t flags,
+                   struct save_callbacks *callbacks, int hvm,
+                   unsigned long vm_generationid_addr)
+{
+    int debug;
+    guest_params_t params;
+
+#ifdef ARM_MIGRATE_VERBOSE
+    flags |= XCFLAGS_DEBUG;
+#endif
+
+#ifdef DISABLE_LIVE_MIGRATION
+    flags &= ~(XCFLAGS_LIVE);
+#endif
+
+    debug = !!(flags & XCFLAGS_DEBUG);
+    if ( save_guest_params(xch, io_fd, dom, flags, &params) )
+    {
+       ERROR("Can't save guest params");
+       return -1;
+    }
+
+    if ( save_memory(xch, io_fd, dom, callbacks, max_iters,
+            max_factor, &params) )
+    {
+        ERROR("Memory not saved");
+        return -1;
+    }
+
+    if ( save_armhvm(xch, io_fd, dom, debug) )
+    {
+        ERROR("HVM not saved");
+        return -1;
+    }
+
+    if ( save_vcpus(xch, io_fd, dom, &params) )
+    {
+        ERROR("VCPU(s) not saved");
+        return -1;
+    }
+    if ( debug )
+    {
+        IPRINTF("Domain %d saved", dom);
+    }
+    return 0;
+}
+
+int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
+                      unsigned int store_evtchn, unsigned long *store_gpfn,
+                      domid_t store_domid, unsigned int console_evtchn,
+                      unsigned long *console_gpfn, domid_t console_domid,
+                      unsigned int hvm, unsigned int pae, int superpages,
+                      int no_incr_generationid,
+                      unsigned long *vm_generationid_addr,
+                      struct restore_callbacks *callbacks)
+{
+    guest_params_t params;
+    int debug = 1;
+
+    if ( restore_guest_params(xch, io_fd, dom, &params) )
+    {
+        ERROR("Can't restore guest params");
+        return -1;
+    }
+    debug = !!(params.flags & XCFLAGS_DEBUG);
+
+    if ( restore_memory(xch, io_fd, dom, &params) )
+    {
+        ERROR("Can't restore memory");
+        return -1;
+    }
+    if ( set_guest_params(xch, io_fd, dom, &params,
+                console_evtchn, console_domid,
+                store_evtchn, store_domid) )
+    {
+        ERROR("Can't setup guest params");
+        return -1;
+    }
+
+    /* Setup console and store PFNs to caller */
+    *console_gpfn = params.console_pfn;
+    *store_gpfn = params.store_pfn;
+
+    if ( restore_armhvm(xch, io_fd, dom, debug) )
+    {
+        ERROR("HVM not restored");
+        return -1;
+    }
+
+    if ( restore_vcpus(xch, io_fd, dom, &params) )
+    {
+        ERROR("Can't restore VCPU(s)");
+        return -1;
+    }
+
+    if ( debug )
+    {
+         IPRINTF("Domain %d restored", dom);
+    }
+
+    return 0;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/misc/Makefile b/tools/misc/Makefile
index 17aeda5..29a1f66 100644
--- a/tools/misc/Makefile
+++ b/tools/misc/Makefile
@@ -11,7 +11,9 @@ HDRS     = $(wildcard *.h)
 
 TARGETS-y := xenperf xenpm xen-tmem-list-parse gtraceview gtracestat xenlockprof xenwatchdogd xencov
 TARGETS-$(CONFIG_X86) += xen-detect xen-hvmctx xen-hvmcrash xen-lowmemd xen-mfndump
+ifeq ($(CONFIG_X86),y)
 TARGETS-$(CONFIG_MIGRATE) += xen-hptool
+endif
 TARGETS := $(TARGETS-y)
 
 SUBDIRS := $(SUBDIRS-y)
@@ -23,7 +25,9 @@ INSTALL_BIN := $(INSTALL_BIN-y)
 INSTALL_SBIN-y := xen-bugtool xen-python-path xenperf xenpm xen-tmem-list-parse gtraceview \
        gtracestat xenlockprof xenwatchdogd xen-ringwatch xencov
 INSTALL_SBIN-$(CONFIG_X86) += xen-hvmctx xen-hvmcrash xen-lowmemd xen-mfndump
+ifeq ($(CONFIG_X86),y)
 INSTALL_SBIN-$(CONFIG_MIGRATE) += xen-hptool
+endif
 INSTALL_SBIN := $(INSTALL_SBIN-y)
 
 INSTALL_PRIVBIN-y := xenpvnetboot
-- 
1.8.1.2

